NPL
Neurological Programs and Libraries
linesearch.h
Go to the documentation of this file.
/******************************************************************************
 * Copyright 2014 Micah C Chambers (micahc.vt@gmail.com)
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @file linesearch.h Definitions for classes which of several line search
 * algorithms.
 *
 *****************************************************************************/
20 
21 #ifndef LINESEARCH_H
22 #define LINESEARCH_H
23 
24 #include "opt.h"
25 
26 namespace npl
27 {
28 
36 class Armijo
37 {
38 public:
39  Armijo(const ValFunc& valFunc);
40 
44  double opt_s;
45 
49  double opt_minstep;
50 
55  double opt_beta;
56 
60  double opt_sigma;
61 
65  int opt_maxIt;
66 
78  double search(double init_val, const VectorXd& init_x, const VectorXd& init_g,
79  const VectorXd& direction);
80 
81 private:
82  ValFunc compVal;
83 };
84 
88 class Wolfe
89 {
90 public:
91 
100  Wolfe(const ValFunc& valFunc, const GradFunc& gradFunc);
101 
105  double opt_minstep;
106 
110  double opt_s;
111 
116  double opt_beta;
117 
121  double opt_c1;
122 
126  double opt_c2;
127 
132 
144  double search(double init_val, const VectorXd& init_x, const VectorXd& init_g,
145  const VectorXd& direction);
146 
147 private:
148  ValFunc compVal;
149  GradFunc compGrad;
150 };
151 
152 
//class StrongWolfe
//{
//public:
//    StrongWolfe(const ComputeValFunc& valFunc const ComputeGradFunc& gradFunc)
//    {
//        compVal = valFunc;
//        compGrad = gradFunc;
//    }
//
//    /**
//     * @brief Maximum step, if this is <= 0, then a quadratic fit will be used
//     * to estimate a guess.
//     */
//    double opt_s;
//
//    /**
//     * @brief Power function base, values closer to 0 will decrease step size
//     * faster than ones close to 1.
//     */
//    double opt_beta;
//
//    /**
//     * @brief Threshold for stopping
//     */
//    double opt_sigma;
//
//    /**
//     * @brief Maximum number of iterations
//     */
//    int opt_maxIt;
//
//    double search(double v_init, const VectorXd& x_init, const VectorXd& dir)
//    {
//        if(opt_alpha_step <= 0)
//            throw std::invalid_argument("opt_alpha_step must be > 0");
//
//        const double ALPHA_START = 1;
//        const double ALPHA_MAX = 1;
//
//        double dPhi_dAlpha_init = g_init.dot(dir);
//        double alpha_prev = 0;
//        double alpha = ALPHA_START;
//        double alpha_max = ALPHA_MAX;
//
//        VectorXd g; // gradient
//
//        double v_prev;
//        double v = v_init; // value
//        for(int iter = 0; iter < opt_maxIt; iter++) {
//            x = x_init + alpha*direction;
//            v_prev = v;
//            if(comp(x, v, g) != 0)
//                throw std::domain_error("Update function returned error");
//
//#ifdef DEBUG
//            fprintf(stderr, "Alpha: %f, Init Val: %f, Val: %f, C1: %f, phi'(0): %f\n",
//                    alpha, v_init, v, opt_c1, dPhi_dAlpha_init);
//#endif
//            if(v > v_init + opt_c1*alpha*dPhi_dAlpha_init ||
//                    (iter>0 && v >= v_prev ))
//                return zoom(v_prev, v);
//
//            double dPhi_dAlpha = g.dot(dir);
//#ifdef DEBUG
//            fprintf(stderr, "phi'(alpha): %f, C: %f, phi'(0): %f\n",
//                    dPhi_dAlpha, opt_c2, dPhi_dAlpha_init);
//#endif
//            if(fabs(dPhi_dAlpha) <= -opt_c2*dPhi_dAlpha_init)
//                return alpha;
//
//            if(dPhi_dAlpha >= 0)
//                return zoom(alpha, alpha_prev);
//
//            alpha_prev = alpha;
//            alpha *= opt_alpha_step;
//
//            if(alpha == alpha_prev)
//                throw std::runtime_error("Repeated alpha, will form inf loop");
//        }
//
//        return 0;
//    }
//
//    double zoom(double alpha_low, double alpha_hi)
//    {
//#ifdef DEBUG
//        fprintf(stderr, "Zoom(%f, %f)\n", alpha_low, alpha_hi);
//#endif
//
//
//    }
//
//private:
//    ComputeValFunc compVal;
//};
248 
252 }
253 
254 #endif
255 
Implementation of the Wolfe approximate line search algorithm.
Definition: linesearch.h:88
Implementation of Armijo approximate line search algorithm.
Definition: linesearch.h:36
double search(double init_val, const VectorXd &init_x, const VectorXd &init_g, const VectorXd &direction)
Performs a line search to find the alpha (step size) that satisfies the Armijo rule.
Definition: accessors.h:29
function< int(const VectorXd &x, VectorXd &g)> GradFunc
Gradient Only Computation Function.
Definition: opt.h:46
double opt_c2
Threshold for stopping based on curvature.
Definition: linesearch.h:126
function< int(const VectorXd &x, double &v)> ValFunc
Value Only Computation Function.
Definition: opt.h:51
int opt_maxIt
Maximum number of iterations.
Definition: linesearch.h:131
Wolfe(const ValFunc &valFunc, const GradFunc &gradFunc)
Constructs a line search that satisfies the wolfe conditions (Armijo and curvature condition)...
double opt_c1
Threshold for stopping based on function values.
Definition: linesearch.h:121
int opt_maxIt
Maximum number of iterations.
Definition: linesearch.h:65
double opt_s
Maximum step.
Definition: linesearch.h:110
double opt_minstep
Minimum non-zero step, once alpha drops below this, returns 0.
Definition: linesearch.h:49
double opt_beta
Power function base, values closer to 0 will decrease step size faster than ones close to 1...
Definition: linesearch.h:55
double opt_sigma
Threshold for stopping.
Definition: linesearch.h:60
double opt_beta
Power function base, values closer to 0 will decrease step size faster than ones close to 1...
Definition: linesearch.h:116
Armijo(const ValFunc &valFunc)
double opt_minstep
Minimum non-zero step, once alpha drops below this, returns 0.
Definition: linesearch.h:105
double opt_s
Maximum step.
Definition: linesearch.h:44
double search(double init_val, const VectorXd &init_x, const VectorXd &init_g, const VectorXd &direction)
Performs a line search to find the alpha (step size) that satisfies the Armijo rule.