
Convex.Algorithm.SubgradientMethod

SubgradientMethod #

Main results #

This file concerns the subgradient method for unconstrained nonsmooth convex optimization problems.

We prove convergence rates for several step size rules: a fixed step size, a fixed step length, and diminishing step sizes.
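
Concretely, the method is the standard subgradient iteration: given a subgradient $g^k \in \partial f(x^k)$ and a step size $a_k > 0$, the update (the `update` field of the class below) is
$$x^{k+1} = x^k - a_k g^k.$$
The subgradient method is not a descent method, so the statements below bound the best value seen so far, $f^k_{\mathrm{best}} = \min_{0 \le i \le k} f(x^i)$; the `sInf` expressions in the theorems are exactly this quantity.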

Convergence of the subgradient method #

theorem bounded_subgradient_to_Lipschitz {E : Type u_1} [NormedAddCommGroup E] [InnerProductSpace ℝ E] [CompleteSpace E] {f : E → ℝ} {G : NNReal} (hf : ConvexOn ℝ Set.univ f) (hc : ContinuousOn f Set.univ) (h : ∀ ⦃x g : E⦄, g ∈ SubderivAt f x → ‖g‖ ≤ ↑G) :
LipschitzWith G f
theorem Lipschitz_to_bounded_subgradient {E : Type u_1} [NormedAddCommGroup E] [InnerProductSpace ℝ E] {f : E → ℝ} {G : NNReal} (h : LipschitzWith G f) ⦃x : E⦄ ⦃g : E⦄ :
g ∈ SubderivAt f x → ‖g‖ ≤ ↑G
theorem bounded_subgradient_iff_Lipschitz {E : Type u_1} [NormedAddCommGroup E] [InnerProductSpace ℝ E] [CompleteSpace E] {f : E → ℝ} {G : NNReal} (hf : ConvexOn ℝ Set.univ f) (hc : ContinuousOn f Set.univ) :
(∀ ⦃x g : E⦄, g ∈ SubderivAt f x → ‖g‖ ≤ ↑G) ↔ LipschitzWith G f
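
In standard notation, the three statements above say: for a continuous convex $f$ on a real Hilbert space, every subgradient at every point satisfies $\|g\| \le G$ if and only if $f$ is $G$-Lipschitz. This equivalence is what allows the convergence proofs below to bound $\|g^i\|$ by the single constant $G$.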
class subgradient_method {E : Type u_1} [NormedAddCommGroup E] [InnerProductSpace ℝ E] (f : E → ℝ) (x₀ : E) :
Type u_1
    theorem subgradient_method.ha {E : Type u_1} [NormedAddCommGroup E] [InnerProductSpace ℝ E] {f : E → ℝ} {x₀ : E} [self : subgradient_method f x₀] (n : ℕ) :
    subgradient_method.a f x₀ n > 0
    theorem subgradient_method.initial {E : Type u_1} [NormedAddCommGroup E] [InnerProductSpace ℝ E] {f : E → ℝ} {x₀ : E} [self : subgradient_method f x₀] :
    subgradient_method.x f x₀ 0 = x₀
    theorem subgradient_method.update {E : Type u_1} [NormedAddCommGroup E] [InnerProductSpace ℝ E] {f : E → ℝ} {x₀ : E} [self : subgradient_method f x₀] (k : ℕ) :
    subgradient_method.x f x₀ (k + 1) = subgradient_method.x f x₀ k - subgradient_method.a f x₀ k • subgradient_method.g f x₀ k
    theorem subgradient_method.hg {E : Type u_1} [NormedAddCommGroup E] [InnerProductSpace ℝ E] {f : E → ℝ} {x₀ : E} [self : subgradient_method f x₀] (n : ℕ) :
    subgradient_method.g f x₀ n ∈ SubderivAt f (subgradient_method.x f x₀ n)
theorem subgradient_method_converge {E : Type u_1} [NormedAddCommGroup E] [InnerProductSpace ℝ E] {f : E → ℝ} (xm : E) (x₀ : E) {alg : subgradient_method f x₀} (k : ℕ) :
2 * (Finset.range (k + 1)).sum (subgradient_method.a f x₀) * (sInf {x : ℝ | ∃ i ∈ Finset.range (k + 1), f (subgradient_method.x f x₀ i) = x} - f xm) ≤ ‖x₀ - xm‖ ^ 2 + ↑(subgradient_method.G f x₀) ^ 2 * ∑ i ∈ Finset.range (k + 1), subgradient_method.a f x₀ i ^ 2
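
This is the basic inequality for the subgradient method. In standard notation, with $f^k_{\mathrm{best}} = \min_{0 \le i \le k} f(x^i)$ (the `sInf` above), $G$ the Lipschitz constant of $f$, and $x_m$ an arbitrary reference point, the usual argument expands one step of the update and applies the subgradient inequality $f(x_m) \ge f(x^i) + \langle g^i, x_m - x^i \rangle$:
$$\|x^{i+1} - x_m\|^2 = \|x^i - x_m\|^2 - 2 a_i \langle g^i, x^i - x_m \rangle + a_i^2 \|g^i\|^2 \le \|x^i - x_m\|^2 - 2 a_i \bigl(f(x^i) - f(x_m)\bigr) + a_i^2 \|g^i\|^2.$$
Summing over $i = 0, \dots, k$, telescoping, bounding each $f(x^i)$ below by $f^k_{\mathrm{best}}$, and using $\|g^i\| \le G$ gives
$$2 \Bigl(\sum_{i=0}^{k} a_i\Bigr) \bigl(f^k_{\mathrm{best}} - f(x_m)\bigr) \le \|x^0 - x_m\|^2 + G^2 \sum_{i=0}^{k} a_i^2.$$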
theorem subgradient_method_fix_step_size {E : Type u_1} [NormedAddCommGroup E] [InnerProductSpace ℝ E] {f : E → ℝ} (xm : E) (x₀ : E) {alg : subgradient_method f x₀} {t : ℝ} (ha' : ∀ (n : ℕ), subgradient_method.a f x₀ n = t) (k : ℕ) :
sInf {x : ℝ | ∃ i ∈ Finset.range (k + 1), f (subgradient_method.x f x₀ i) = x} - f xm ≤ ‖x₀ - xm‖ ^ 2 / (2 * (k + 1) * t) + ↑(subgradient_method.G f x₀) ^ 2 * t / 2

Convergence with fixed step size.
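
With the constant step $a_i = t$, the basic inequality specializes to
$$f^k_{\mathrm{best}} - f(x_m) \le \frac{\|x^0 - x_m\|^2}{2(k+1)t} + \frac{G^2 t}{2}.$$
The first term vanishes as $k \to \infty$, but the bound never drops below $G^2 t / 2$: a fixed step size only guarantees convergence to within $G^2 t / 2$ of the optimal value. Minimizing the right-hand side over $t$ for a given $k$ suggests $t = \|x^0 - x_m\| / (G \sqrt{k+1})$, which yields the familiar $O(1/\sqrt{k})$ rate.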

theorem subgradient_method_fixed_distance {E : Type u_1} [NormedAddCommGroup E] [InnerProductSpace ℝ E] {f : E → ℝ} (xm : E) (x₀ : E) (hm : IsMinOn f Set.univ xm) {alg : subgradient_method f x₀} {s : ℝ} (ha' : ∀ (n : ℕ), subgradient_method.a f x₀ n * ‖subgradient_method.g f x₀ n‖ = s) (hs : s > 0) (k : ℕ) :
sInf {x : ℝ | ∃ i ∈ Finset.range (k + 1), f (subgradient_method.x f x₀ i) = x} - f xm ≤ ↑(subgradient_method.G f x₀) * ‖x₀ - xm‖ ^ 2 / (2 * (k + 1) * s) + ↑(subgradient_method.G f x₀) * s / 2

Convergence with fixed step length $‖x^{i+1}-x^{i}‖$.
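
Here the hypothesis `ha'` fixes the step length: $\|x^{i+1} - x^i\| = a_i \|g^i\| = s$. The bound follows from the pre-telescoped inequality above, keeping $\sum_{i=0}^{k} a_i^2 \|g^i\|^2 = (k+1) s^2$ exact instead of bounding it by $G^2 \sum a_i^2$, and using $a_i = s / \|g^i\| \ge s / G$ to estimate $\sum_{i=0}^{k} a_i \ge (k+1) s / G$:
$$f^k_{\mathrm{best}} - f(x_m) \le \frac{G \|x^0 - x_m\|^2}{2(k+1)s} + \frac{G s}{2},$$
so the method again converges to within $G s / 2$ of the optimum.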

theorem subgradient_method_diminishing_step_size {E : Type u_1} [NormedAddCommGroup E] [InnerProductSpace ℝ E] {f : E → ℝ} (xm : E) (x₀ : E) (hm : IsMinOn f Set.univ xm) {alg : subgradient_method f x₀} (ha' : Filter.Tendsto (subgradient_method.a f x₀) Filter.atTop (nhds 0)) (ha'' : Filter.Tendsto (fun (k : ℕ) => (Finset.range (k + 1)).sum (subgradient_method.a f x₀)) Filter.atTop Filter.atTop) :
Filter.Tendsto (fun (k : ℕ) => sInf {x : ℝ | ∃ i ∈ Finset.range (k + 1), f (subgradient_method.x f x₀ i) = x}) Filter.atTop (nhds (f xm))

Convergence with diminishing step size.
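
This follows from the basic inequality after dividing by $2 \sum_{i=0}^{k} a_i$:
$$0 \le f^k_{\mathrm{best}} - f(x_m) \le \frac{\|x^0 - x_m\|^2 + G^2 \sum_{i=0}^{k} a_i^2}{2 \sum_{i=0}^{k} a_i},$$
where the lower bound uses that $x_m$ is a global minimizer (`hm`). When $a_k \to 0$ (`ha'`) and $\sum_i a_i \to \infty$ (`ha''`), both contributions on the right tend to $0$: the first because its numerator is constant, the second by an averaging (Stolz–Cesàro style) argument, since the increment ratio $a_k^2 / a_k = a_k \to 0$. A typical admissible choice is $a_k = c / \sqrt{k+1}$.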