# LASSO

## Main results
This file formalizes the LASSO problem and proves the convergence rate of the proximal gradient method applied to it.
theorem dot_mul_eq_transpose_mul_dot {n m : ℕ+} {A : Matrix (Fin ↑m) (Fin ↑n) ℝ}
    (u : EuclideanSpace ℝ (Fin ↑m)) (v : EuclideanSpace ℝ (Fin ↑n)) :
    Matrix.dotProduct u (A.mulVec v) = Matrix.dotProduct (A.transpose.mulVec u) v
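In standard notation this is the adjoint identity for the real dot product:

$$\langle u, A v \rangle = \langle A^{\mathsf T} u, v \rangle
\qquad\text{for } A \in \mathbb{R}^{m \times n},\ u \in \mathbb{R}^{m},\ v \in \mathbb{R}^{n}.$$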
theorem real_inner_eq_dot {m : ℕ+} (x y : EuclideanSpace ℝ (Fin ↑m)) :
    ⟪x, y⟫_ℝ = Matrix.dotProduct x y
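This records that the `EuclideanSpace` inner product coincides with `Matrix.dotProduct`:

$$\langle x, y \rangle_{\mathbb R} = \sum_{i=1}^{m} x_i\, y_i.$$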
theorem quadratic_gradient {n m : ℕ+} {A : Matrix (Fin ↑m) (Fin ↑n) ℝ}
    (x : EuclideanSpace ℝ (Fin ↑n)) :
    HasGradientAt (fun (x : EuclideanSpace ℝ (Fin ↑n)) => Matrix.dotProduct (A.mulVec x) (A.mulVec x))
      (2 • A.transpose.mulVec (A.mulVec x)) x
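Written out, this computes the gradient of the quadratic $x \mapsto \|Ax\|^2$; together with the two lemmas above it yields the gradient of the smooth part of the LASSO objective (the shifted form with $b$ is a direct consequence, not stated in this excerpt):

$$\nabla_x \,(Ax)^{\mathsf T}(Ax) = 2\,A^{\mathsf T} A x,
\qquad
\nabla_x \tfrac{1}{2}\|Ax - b\|^2 = A^{\mathsf T}(Ax - b).$$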
theorem norm_one_proximal {n : ℕ+} {t μ : ℝ} {h : EuclideanSpace ℝ (Fin ↑n) → ℝ}
    (lasso : h = fun (y : EuclideanSpace ℝ (Fin ↑n)) => μ • ∑ i : Fin ↑n, ‖y i‖)
    (x xm : EuclideanSpace ℝ (Fin ↑n)) (tpos : 0 < t) (μpos : 0 < μ)
    (minpoint : ∀ (i : Fin ↑n), xm i = Real.sign (x i) * max (|x i| - t * μ) 0) :
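In standard notation, the hypothesis `minpoint` is the componentwise soft-thresholding formula, which is the well-known closed form of the proximal operator of the scaled ℓ¹ norm (presumably what the theorem's conclusion asserts about `xm`):

$$\bigl(\operatorname{prox}_{t\mu\|\cdot\|_1}(x)\bigr)_i
= \operatorname{sign}(x_i)\,\max\bigl(|x_i| - t\mu,\ 0\bigr).$$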
class LASSO {n m : ℕ+} (A : Matrix (Fin ↑m) (Fin ↑n) ℝ) (b : Fin ↑m → ℝ) (μ : ℝ)
    (μpos : 0 < μ) (Ane0 : A ≠ 0) (x₀ : EuclideanSpace ℝ (Fin ↑n)) :
- f : EuclideanSpace ℝ (Fin ↑n) → ℝ
- h : EuclideanSpace ℝ (Fin ↑n) → ℝ
- f' : EuclideanSpace ℝ (Fin ↑n) → EuclideanSpace ℝ (Fin ↑n)
- L : NNReal
- t : ℝ
- xm : EuclideanSpace ℝ (Fin ↑n)
- x : ℕ → EuclideanSpace ℝ (Fin ↑n)
- y : ℕ → EuclideanSpace ℝ (Fin ↑n)
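Reading these fields against the standard formulation of the problem (the precise definitions of `f`, `h`, `f'`, `L`, and `t` are not shown in this excerpt, so the identifications below are a sketch):

$$\min_{x \in \mathbb{R}^n}\ \tfrac{1}{2}\|Ax - b\|_2^2 + \mu \|x\|_1,$$

with `f` the smooth least-squares term, `h` the ℓ¹ penalty, `f'` the gradient $A^{\mathsf T}(Ax - b)$, `L` a Lipschitz constant of `f'`, `t` the step size, `xm` a minimizer, and `x`, `y` the iterate sequences produced by the method started at `x₀`.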
Instances
instance instProximal_gradient_methodEuclideanSpaceRealFinValFHF' {n m : ℕ+}
    {A : Matrix (Fin ↑m) (Fin ↑n) ℝ} {b : Fin ↑m → ℝ} {μ : ℝ} {μpos : 0 < μ} {Ane0 : A ≠ 0}
    {x₀ : EuclideanSpace ℝ (Fin ↑n)} [p : LASSO A b μ μpos Ane0 x₀] :
    proximal_gradient_method (LASSO.f b μpos Ane0 x₀) (LASSO.h b μpos Ane0 x₀) (LASSO.f' b μpos Ane0 x₀) x₀
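This instance plugs the LASSO data into the general `proximal_gradient_method` framework. In standard form the packaged iteration is (a sketch; the exact bookkeeping of the `x`/`y` sequences in the library is not shown here):

$$x_{k+1} = \operatorname{prox}_{t\mu\|\cdot\|_1}\bigl(x_k - t\,A^{\mathsf T}(A x_k - b)\bigr),
\qquad 0 < t \le 1/L,$$

for which the standard analysis gives an $O(1/k)$ rate in objective value, the convergence result this file refers to.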