ROL
ROL_FletcherStep.hpp
Go to the documentation of this file.
1 // @HEADER
2 // ************************************************************************
3 //
4 // Rapid Optimization Library (ROL) Package
5 // Copyright (2014) Sandia Corporation
6 //
7 // Under terms of Contract DE-AC04-94AL85000, there is a non-exclusive
8 // license for use of this work by or on behalf of the U.S. Government.
9 //
10 // Redistribution and use in source and binary forms, with or without
11 // modification, are permitted provided that the following conditions are
12 // met:
13 //
14 // 1. Redistributions of source code must retain the above copyright
15 // notice, this list of conditions and the following disclaimer.
16 //
17 // 2. Redistributions in binary form must reproduce the above copyright
18 // notice, this list of conditions and the following disclaimer in the
19 // documentation and/or other materials provided with the distribution.
20 //
21 // 3. Neither the name of the Corporation nor the names of the
22 // contributors may be used to endorse or promote products derived from
23 // this software without specific prior written permission.
24 //
25 // THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
26 // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27 // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
29 // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
30 // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
31 // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
32 // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
33 // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
34 // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
35 // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 //
37 // Questions? Contact lead developers:
38 // Drew Kouri (dpkouri@sandia.gov) and
39 // Denis Ridzal (dridzal@sandia.gov)
40 //
41 // ************************************************************************
42 // @HEADER
43 
44 #ifndef ROL_FLETCHERSTEP_H
45 #define ROL_FLETCHERSTEP_H
46 
47 #include "ROL_FletcherBase.hpp"
48 #include "ROL_Step.hpp"
49 #include "ROL_TrustRegionStep.hpp"
50 #include "ROL_LineSearchStep.hpp"
51 #include "ROL_Types.hpp"
52 #include "ROL_ParameterList.hpp"
59 namespace ROL {
60 
61 template <class Real>
62 class FletcherStep : public Step<Real> {
63 private:
64  ROL::Ptr<Step<Real> > step_;
65  ROL::Ptr<BoundConstraint<Real> > bnd_;
66 
67  ROL::ParameterList parlist_;
68 
69  ROL::Ptr<Vector<Real> > x_;
70 
71  // Lagrange multiplier update
76  // Subproblem information
77  bool print_;
78  std::string subStep_;
79 
80  Real delta_;
81  Real deltaMin_;
84 
86 
87  ROL::Ptr<Vector<Real> > g_;
88 
90 
91  // For printing output
92  mutable bool isDeltaChanged_;
93  mutable bool isPenaltyChanged_;
94 
96 
97  mutable int stepHeaderLength_; // For formatting
98 
100  BoundConstraint<Real> &bnd) {
101  Real gnorm = 0.;
102  // Compute norm of projected gradient
103  if (bnd.isActivated()) {
104  x_->set(x);
105  x_->axpy(-1.,g.dual());
106  bnd.project(*x_);
107  x_->axpy(-1.,x);
108  gnorm = x_->norm();
109  }
110  else {
111  gnorm = g.norm();
112  }
113  return gnorm;
114  }
115 
116 public:
117 
119  using Step<Real>::compute;
120  using Step<Real>::update;
121 
123 
124  FletcherStep(ROL::ParameterList &parlist)
125  : Step<Real>(), bnd_activated_(false), numSuccessSteps_(0),
127  Real zero(0), one(1), two(2), oe8(1.e8), oe1(1.e-1), oem6(1e-6), oem8(1.e-8);
128 
129  ROL::ParameterList& sublist = parlist.sublist("Step").sublist("Fletcher");
130  Step<Real>::getState()->searchSize = sublist.get("Penalty Parameter",one);
131  delta_ = sublist.get("Regularization Parameter",zero);
132  deltaMin_ = sublist.get("Min Regularization Parameter",oem8);
133  deltaUpdate_ = sublist.get("Regularization Parameter Decrease Factor", oe1);
134  // penalty parameters
135  penaltyUpdate_ = sublist.get("Penalty Parameter Growth Factor", two);
136  modifyPenalty_ = sublist.get("Modify Penalty Parameter", false);
137  maxPenaltyParam_ = sublist.get("Maximum Penalty Parameter", oe8);
138  minPenaltyParam_ = sublist.get("Minimum Penalty Parameter", oem6);
139 
140  subStep_ = sublist.get("Subproblem Solver", "Trust Region");
141 
142  parlist_ = parlist;
143  }
144 
149  AlgorithmState<Real> &algo_state ) {
150  bnd_ = ROL::makePtr<BoundConstraint<Real>>();
151  bnd_->deactivate();
152  initialize(x,g,l,c,obj,con,*bnd_,algo_state);
153  }
154 
159  AlgorithmState<Real> &algo_state ) {
160  // Determine what kind of step
161  bnd_activated_ = bnd.isActivated();
162 
163  ROL::ParameterList trlist(parlist_);
164  bool inexactFletcher = trlist.sublist("Step").sublist("Fletcher").get("Inexact Solves", false);
165  if( inexactFletcher ) {
166  trlist.sublist("General").set("Inexact Objective Value", true);
167  trlist.sublist("General").set("Inexact Gradient", true);
168  }
169  if( bnd_activated_ ) {
170  trlist.sublist("Step").sublist("Trust Region").set("Subproblem Model", "Coleman-Li");
171  }
172 
173  if ( subStep_ == "Line Search" ) {
174  step_ = makePtr<LineSearchStep<Real>>(trlist);
175  }
176  else {
177  step_ = makePtr<TrustRegionStep<Real>>(trlist);
178  }
179  etr_ = StringToETrustRegion(parlist_.sublist("Step").sublist("Trust Region").get("Subproblem Solver", "Truncated CG"));
180 
181  // Initialize class members
182  g_ = g.clone();
183  x_ = x.clone();
184 
185  // Rest of initialize
186  FletcherBase<Real>& fletcher = dynamic_cast<FletcherBase<Real>&>(obj);
187 
188  tr_algo_state_.iterateVec = x.clone();
189  tr_algo_state_.minIterVec = x.clone();
190  tr_algo_state_.lagmultVec = l.clone();
191 
192  step_->initialize(x, g, obj, bnd, tr_algo_state_);
193 
194  // Initialize step state
195  ROL::Ptr<StepState<Real> > state = Step<Real>::getState();
196  state->descentVec = x.clone();
197  state->gradientVec = g.clone();
198  state->constraintVec = c.clone();
199  // Initialize the algorithm state
200  algo_state.nfval = 0;
201  algo_state.ncval = 0;
202  algo_state.ngrad = 0;
203 
204  algo_state.value = fletcher.getObjectiveValue(x);
205  algo_state.gnorm = computeProjGradientNorm(*(fletcher.getLagrangianGradient(x)),
206  x, bnd);
207  algo_state.aggregateGradientNorm = tr_algo_state_.gnorm;
208 
209  state->constraintVec->set(*(fletcher.getConstraintVec(x)));
210  algo_state.cnorm = (state->constraintVec)->norm();
211  // Update evaluation counters
212  algo_state.ncval = fletcher.getNumberConstraintEvaluations();
213  algo_state.nfval = fletcher.getNumberFunctionEvaluations();
214  algo_state.ngrad = fletcher.getNumberGradientEvaluations();
215  }
216 
219  void compute( Vector<Real> &s, const Vector<Real> &x, const Vector<Real> &l,
220  Objective<Real> &obj, Constraint<Real> &con,
221  AlgorithmState<Real> &algo_state ) {
222  compute(s,x,l,obj,con,*bnd_, algo_state);
223  }
224 
227  void compute( Vector<Real> &s, const Vector<Real> &x, const Vector<Real> &l,
228  Objective<Real> &obj, Constraint<Real> &con,
229  BoundConstraint<Real> &bnd, AlgorithmState<Real> &algo_state ) {
230  step_->compute( s, x, obj, bnd, tr_algo_state_ );
231  }
232 
237  AlgorithmState<Real> &algo_state ) {
238  update(x,l,s,obj,con,*bnd_, algo_state);
239  }
240 
246  AlgorithmState<Real> &algo_state ) {
247 
248  // This should be in print, but this will not work there
249  isDeltaChanged_ = false;
250  isPenaltyChanged_ = false;
251  bool modified = false;
252 
253  FletcherBase<Real> &fletcher = dynamic_cast<FletcherBase<Real>&>(obj);
254  ROL::Ptr<StepState<Real> > fletcherState = Step<Real>::getState();
255  const ROL::Ptr<const StepState<Real> > state = step_->getStepState();
256 
257  step_->update(x,s,obj,bnd,tr_algo_state_);
258  numSuccessSteps_ += (state->flag == 0);
259 
260  Real gPhiNorm = tr_algo_state_.gnorm;
261  Real cnorm = (fletcherState->constraintVec)->norm();
262  bool too_infeasible = cnorm > static_cast<Real>(100.)*gPhiNorm;
263  bool too_feasible = cnorm < static_cast<Real>(1e-2)*gPhiNorm;
264 
265  if( too_infeasible && !modified && modifyPenalty_ && numSuccessSteps_ > 1 ) {
266  Real penaltyParam = Step<Real>::getStepState()->searchSize;
267  if( penaltyParam >= maxPenaltyParam_ ) {
268  // Penalty parameter too large, exit
269  algo_state.flag = true;
270  }
271  penaltyParam *= penaltyUpdate_;
272  penaltyParam = std::min(penaltyParam, maxPenaltyParam_);
273  fletcher.setPenaltyParameter(penaltyParam);
274  Step<Real>::getState()->searchSize = penaltyParam;
275  isPenaltyChanged_ = true;
276  modified = true;
277  }
278 
279  if( too_feasible && !modified && modifyPenalty_ && numSuccessSteps_ > 1 ) {
280  Real penaltyParam = Step<Real>::getStepState()->searchSize;
281  if( penaltyParam <= minPenaltyParam_ ) {
282  // Penalty parameter too small, exit (this is unlikely)
283  algo_state.flag = true;
284  }
285  penaltyParam /= penaltyUpdate_;
286  penaltyParam = std::max(penaltyParam, minPenaltyParam_);
287  fletcher.setPenaltyParameter(penaltyParam);
288  Step<Real>::getState()->searchSize = penaltyParam;
289  isPenaltyChanged_ = true;
290  modified = true;
291  }
292 
293  if( delta_ > deltaMin_ && !modified ) {
294  Real deltaNext = delta_ * deltaUpdate_;
295  if( gPhiNorm < deltaNext ) {
296  delta_ = deltaNext;
297  fletcher.setDelta(deltaNext);
298  isDeltaChanged_ = true;
299  modified = true;
300  }
301  }
302 
303  if( modified ) {
304  // Penalty function has been changed somehow, need to recompute
305  Real tol = static_cast<Real>(1e-12);
306  tr_algo_state_.value = fletcher.value(x, tol);
307  fletcher.gradient(*g_, x, tol);
308 
309  tr_algo_state_.nfval++;
310  tr_algo_state_.ngrad++;
311  tr_algo_state_.ncval++;
312  tr_algo_state_.minIter = tr_algo_state_.iter;
313  tr_algo_state_.minValue = tr_algo_state_.value;
314  tr_algo_state_.gnorm = computeProjGradientNorm(*g_, x, bnd);
315  }
316 
317  // Update the step and store in state
318  algo_state.iterateVec->set(x);
319  algo_state.iter++;
320 
321  fletcherState->descentVec->set(s);
322  fletcherState->gradientVec->set(*(fletcher.getLagrangianGradient(x)));
323  fletcherState->constraintVec->set(*(fletcher.getConstraintVec(x)));
324 
325  // Update objective function value
326  algo_state.value = fletcher.getObjectiveValue(x);
327  // Update constraint value
328  algo_state.cnorm = (fletcherState->constraintVec)->norm();
329  // Update the step size
330  algo_state.snorm = tr_algo_state_.snorm;
331  // Compute gradient of the Lagrangian
332  algo_state.gnorm = computeProjGradientNorm(*(fletcherState->gradientVec),
333  x, bnd);
334  // Compute gradient of penalty function
335  algo_state.aggregateGradientNorm = tr_algo_state_.gnorm;
336  // Update evaluation countersgetConstraintVec
337  algo_state.nfval = fletcher.getNumberFunctionEvaluations();
338  algo_state.ngrad = fletcher.getNumberGradientEvaluations();
339  algo_state.ncval = fletcher.getNumberConstraintEvaluations();
340  // Update objective function and constraints
341  // fletcher.update(x,true,algo_state.iter);
342  // bnd.update(x,true,algo_state.iter);
343  // Update multipliers
344  algo_state.lagmultVec->set(*(fletcher.getMultiplierVec(x)));
345  }
346 
349  std::string printHeader( void ) const {
350  std::stringstream hist;
351  if( subStep_ == "Trust Region" ) {
352  hist << " ";
353  hist << std::setw(6) << std::left << "iter";
354  hist << std::setw(15) << std::left << "merit";
355  hist << std::setw(15) << std::left << "fval";
356  hist << std::setw(15) << std::left << "gpnorm";
357  hist << std::setw(15) << std::left << "gLnorm";
358  hist << std::setw(15) << std::left << "cnorm";
359  hist << std::setw(15) << std::left << "snorm";
360  hist << std::setw(15) << std::left << "tr_radius";
361  hist << std::setw(10) << std::left << "tr_flag";
362  if ( etr_ == TRUSTREGION_TRUNCATEDCG && subStep_ == "Trust Region") {
363  hist << std::setw(10) << std::left << "iterCG";
364  hist << std::setw(10) << std::left << "flagCG";
365  }
366  hist << std::setw(15) << std::left << "penalty";
367  hist << std::setw(15) << std::left << "delta";
368  hist << std::setw(10) << std::left << "#fval";
369  hist << std::setw(10) << std::left << "#grad";
370  hist << std::setw(10) << std::left << "#cval";
371  hist << "\n";
372  }
373  else {
374  std::string stepHeader = step_->printHeader();
375  stepHeaderLength_ = stepHeader.length();
376  hist << stepHeader.substr(0, stepHeaderLength_-1);
377  hist << std::setw(15) << std::left << "fval";
378  hist << std::setw(15) << std::left << "gLnorm";
379  hist << std::setw(15) << std::left << "cnorm";
380  hist << std::setw(15) << std::left << "penalty";
381  hist << std::setw(15) << std::left << "delta";
382  hist << std::setw(10) << std::left << "#cval";
383  hist << "\n";
384  }
385  return hist.str();
386  }
387 
390  std::string printName( void ) const {
391  std::stringstream hist;
392  hist << "\n" << " Fletcher solver : " << subStep_;
393  hist << "\n";
394  return hist.str();
395  }
396 
399  std::string print( AlgorithmState<Real> &algo_state, bool pHeader = false ) const {
400  std::string stepHist = step_->print( tr_algo_state_, false );
401  stepHist.erase(std::remove(stepHist.end()-3, stepHist.end(),'\n'), stepHist.end());
402  std::string name = step_->printName();
403  size_t pos = stepHist.find(name);
404  if ( pos != std::string::npos ) {
405  stepHist.erase(pos, name.length());
406  }
407 
408  std::stringstream hist;
409  hist << std::scientific << std::setprecision(6);
410  if ( algo_state.iter == 0 ) {
411  hist << printName();
412  }
413  if ( pHeader ) {
414  hist << printHeader();
415  }
416 
417  std::string penaltyString = getValueString( Step<Real>::getStepState()->searchSize, isPenaltyChanged_ );
418  std::string deltaString = getValueString( delta_, isDeltaChanged_ );
419 
420  if( subStep_ == "Trust Region" ) {
421  hist << " ";
422  hist << std::setw(6) << std::left << algo_state.iter;
423  hist << std::setw(15) << std::left << tr_algo_state_.value;
424  hist << std::setw(15) << std::left << algo_state.value;
425  hist << std::setw(15) << std::left << tr_algo_state_.gnorm;
426  hist << std::setw(15) << std::left << algo_state.gnorm;
427  hist << std::setw(15) << std::left << algo_state.cnorm;
428  hist << std::setw(15) << std::left << stepHist.substr(38,15); // snorm
429  hist << std::setw(15) << std::left << stepHist.substr(53,15); // tr_radius
430  hist << std::setw(10) << std::left << (algo_state.iter == 0 ? "" : stepHist.substr(88,10)); // tr_flag
431  if ( etr_ == TRUSTREGION_TRUNCATEDCG && subStep_ == "Trust Region") {
432  hist << std::setw(10) << std::left << (algo_state.iter == 0 ? "" : stepHist.substr(93,10)); // iterCG
433  hist << std::setw(10) << std::left << (algo_state.iter == 0 ? "" : stepHist.substr(103,10)); // flagCG
434  }
435  hist << std::setw(15) << std::left << penaltyString;
436  hist << std::setw(15) << std::left << deltaString;
437  hist << std::setw(10) << std::left << (algo_state.iter == 0 ? "" : stepHist.substr(68,10)); // #fval
438  hist << std::setw(10) << std::left << (algo_state.iter == 0 ? "" : stepHist.substr(78,10)); // #gval
439  hist << std::setw(10) << std::left << algo_state.ncval;
440  hist << "\n";
441  } else {
442  hist << std::setw(stepHeaderLength_-1) << std::left << stepHist;
443  hist << std::setw(15) << std::left << algo_state.value;
444  hist << std::setw(15) << std::left << algo_state.gnorm;
445  hist << std::setw(15) << std::left << algo_state.cnorm;
446  hist << std::setw(15) << std::left << penaltyString;
447  hist << std::setw(15) << std::left << deltaString;
448  hist << std::setw(10) << std::left << algo_state.ncval;
449  hist << "\n";
450  }
451 
452  return hist.str();
453  }
454 
455  std::string getValueString( const Real value, const bool print ) const {
456  std::stringstream valString;
457  valString << std::scientific << std::setprecision(6);
458  if( print ) {
459  valString << std::setw(15) << std::left << value;
460  } else {
461  valString << std::setw(15) << "";
462  }
463  return valString.str();
464  }
465 
471  AlgorithmState<Real> &algo_state ) {}
472 
478  AlgorithmState<Real> &algo_state ) {}
479 
480 }; // class FletcherStep
481 
482 } // namespace ROL
483 
484 #endif
Objective_SerialSimOpt(const Ptr< Obj > &obj, const V &ui) z0_ zero()
Contains definitions of custom data types in ROL.
Provides the interface to apply upper and lower bound constraints.
bool isActivated(void) const
Check if bounds are on.
virtual void project(Vector< Real > &x)
Project optimization variables onto the bounds.
Defines the general constraint operator interface.
const Ptr< Vector< Real > > getMultiplierVec(const Vector< Real > &x)
int getNumberConstraintEvaluations() const
int getNumberGradientEvaluations() const
void setDelta(Real delta)
Real getObjectiveValue(const Vector< Real > &x)
const Ptr< Vector< Real > > getLagrangianGradient(const Vector< Real > &x)
const Ptr< Vector< Real > > getConstraintVec(const Vector< Real > &x)
int getNumberFunctionEvaluations() const
void setPenaltyParameter(Real sigma)
Provides the interface to compute Fletcher steps.
void update(Vector< Real > &x, Vector< Real > &l, const Vector< Real > &s, Objective< Real > &obj, Constraint< Real > &con, BoundConstraint< Real > &bnd, AlgorithmState< Real > &algo_state)
Update step, if successful (equality and bound constraints).
std::string printHeader(void) const
Print iterate header.
ROL::Ptr< BoundConstraint< Real > > bnd_
ROL::ParameterList parlist_
void update(Vector< Real > &x, const Vector< Real > &s, Objective< Real > &obj, BoundConstraint< Real > &con, AlgorithmState< Real > &algo_state)
Update step, for bound constraints; here only to satisfy the interface requirements,...
FletcherStep(ROL::ParameterList &parlist)
void compute(Vector< Real > &s, const Vector< Real > &x, const Vector< Real > &l, Objective< Real > &obj, Constraint< Real > &con, AlgorithmState< Real > &algo_state)
Compute step (equality constraint).
void compute(Vector< Real > &s, const Vector< Real > &x, Objective< Real > &obj, BoundConstraint< Real > &con, AlgorithmState< Real > &algo_state)
Compute step for bound constraints; here only to satisfy the interface requirements,...
ROL::Ptr< Step< Real > > step_
void initialize(Vector< Real > &x, const Vector< Real > &g, Vector< Real > &l, const Vector< Real > &c, Objective< Real > &obj, Constraint< Real > &con, AlgorithmState< Real > &algo_state)
Initialize step with equality constraint.
void initialize(Vector< Real > &x, const Vector< Real > &g, Vector< Real > &l, const Vector< Real > &c, Objective< Real > &obj, Constraint< Real > &con, BoundConstraint< Real > &bnd, AlgorithmState< Real > &algo_state)
Initialize step with equality and bound constraints.
std::string getValueString(const Real value, const bool print) const
std::string printName(void) const
Print step name.
Real computeProjGradientNorm(const Vector< Real > &g, const Vector< Real > &x, BoundConstraint< Real > &bnd)
std::string print(AlgorithmState< Real > &algo_state, bool pHeader=false) const
Print iterate status.
ROL::Ptr< Vector< Real > > g_
AlgorithmState< Real > tr_algo_state_
void compute(Vector< Real > &s, const Vector< Real > &x, const Vector< Real > &l, Objective< Real > &obj, Constraint< Real > &con, BoundConstraint< Real > &bnd, AlgorithmState< Real > &algo_state)
Compute step (equality and bound constraints).
void update(Vector< Real > &x, Vector< Real > &l, const Vector< Real > &s, Objective< Real > &obj, Constraint< Real > &con, AlgorithmState< Real > &algo_state)
Update step, if successful (equality constraint).
ROL::Ptr< Vector< Real > > x_
Provides the interface to evaluate objective functions.
virtual void gradient(Vector< Real > &g, const Vector< Real > &x, Real &tol)
Compute gradient.
virtual Real value(const Vector< Real > &x, Real &tol)=0
Compute value.
Provides the interface to compute optimization steps.
Definition: ROL_Step.hpp:68
ROL::Ptr< StepState< Real > > getState(void)
Definition: ROL_Step.hpp:73
const ROL::Ptr< const StepState< Real > > getStepState(void) const
Get state for step object.
Definition: ROL_Step.hpp:211
Defines the linear algebra or vector space interface.
Definition: ROL_Vector.hpp:84
virtual Real norm() const =0
Returns \f$ \|y\| \f$ where \f$ y = \mathtt{*this} \f$.
virtual ROL::Ptr< Vector > clone() const =0
Clone to make a new (uninitialized) vector.
virtual const Vector & dual() const
Return dual representation of \f$ \mathtt{*this} \f$, for example, the result of applying a Riesz map, or change of basis, or change of memory layout.
Definition: ROL_Vector.hpp:226
ROL::Objective_SerialSimOpt Objective_SimOpt value(const V &u, const V &z, Real &tol) override
ETrustRegion StringToETrustRegion(std::string s)
@ TRUSTREGION_TRUNCATEDCG
State for algorithm class. Will be used for restarts.
Definition: ROL_Types.hpp:143
ROL::Ptr< Vector< Real > > lagmultVec
Definition: ROL_Types.hpp:158
ROL::Ptr< Vector< Real > > iterateVec
Definition: ROL_Types.hpp:157