NPL
Neurological Programs and Libraries
lbfgs.h
Go to the documentation of this file.
1 /******************************************************************************
2  * Copyright 2014 Micah C Chambers (micahc.vt@gmail.com)
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  * http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  *
16  * @file lbfgs.cpp Declaration of the LBFGSOpt class which implements
17  * a LBFGS optimization (energy minimization) algorithm.
18  *
19  *****************************************************************************/
20 
21 #ifndef LBFGS_H
22 #define LBFGS_H
23 
24 #include <list>
25 #include <tuple>
26 #include <iostream>
27 #include <cmath>
28 #include <Eigen/Dense>
29 
30 #include "opt.h"
31 #include "linesearch.h"
32 
33 namespace npl {
34 
45 class LBFGSOpt : virtual public Optimizer
46 {
47 public:
48 
49 
50  LBFGSOpt(size_t dim, const ValFunc& valfunc,
51  const GradFunc& gradfunc,
52  const CallBackFunc& callback = noopCallback);
53 
54  LBFGSOpt(size_t dim, const ValFunc& valfunc,
55  const GradFunc& gradfunc,
56  const ValGradFunc& gradAndValFunc,
57  const CallBackFunc& callback = noopCallback);
58 
65 
69  void reset_history() { m_hist.clear(); };
70 
75  int opt_histsize;
76 
80  double opt_ls_s;
81 
86  double opt_ls_beta;
87 
91  double opt_ls_sigma;
92 
96  VectorXd opt_H0inv;
97 
98 private:
99 
104 // Wolfe m_lsearch;
105  Armijo m_lsearch;
106 
112  std::list<std::tuple<double,VectorXd,VectorXd>> m_hist;
113 
124  VectorXd hessFunc(double gamma, const VectorXd& d,
125  std::list<std::tuple<double,VectorXd,VectorXd>>::const_iterator it);
126 
127  VectorXd hessFuncTwoLoop(double gamma, const VectorXd& g);
128 };
129 
131 }
132 
133 #endif // LBFGS_H
Implementation of Armijo approximate line search algorithm.
Definition: linesearch.h:36
Definition: accessors.h:29
function< int(const VectorXd &x, double &v, VectorXd &g)> ValGradFunc
Value and Gradient Computation Function.
Definition: opt.h:41
VectorXd opt_H0inv
Default (initial) value for inverse hessian matrix.
Definition: lbfgs.h:96
StopReason optimize()
Perform LBFGS optimization.
int noopCallback(const VectorXd &x, double value, const VectorXd &grad, size_t iter)
Callback that does nothing.
Definition: opt.h:132
function< int(const VectorXd &x, VectorXd &g)> GradFunc
Gradient Only Computation Function.
Definition: opt.h:46
double opt_ls_sigma
Threshold for stopping linesearch.
Definition: lbfgs.h:91
function< int(const VectorXd &x, double &v)> ValFunc
Value Only Computation Function.
Definition: opt.h:51
LBFGSOpt(size_t dim, const ValFunc &valfunc, const GradFunc &gradfunc, const CallBackFunc &callback=noopCallback)
double opt_ls_beta
How quickly to reduce linesearch distance. Power function base, values closer to 0 will decrease step...
Definition: lbfgs.h:86
double opt_ls_s
Maximum step during line search.
Definition: lbfgs.h:80
StopReason
Definition: opt.h:141
Limited-Memory Broyden–Fletcher–Goldfarb–Shanno Algorithm based on "A Limited Memory Algorithm for...
Definition: lbfgs.h:45
void reset_history()
Reset the history.
Definition: lbfgs.h:69
int opt_histsize
Number of updates to store for the purposes of estimating the hessian matrix.
Definition: lbfgs.h:75
function< int(const VectorXd &x, double v, const VectorXd &g, size_t iter)> CallBackFunc
Callback function.
Definition: opt.h:56