ROL
ROL_TrustRegionStep.hpp
Go to the documentation of this file.
1 // @HEADER
2 // ************************************************************************
3 //
4 // Rapid Optimization Library (ROL) Package
5 // Copyright (2014) Sandia Corporation
6 //
7 // Under terms of Contract DE-AC04-94AL85000, there is a non-exclusive
8 // license for use of this work by or on behalf of the U.S. Government.
9 //
10 // Redistribution and use in source and binary forms, with or without
11 // modification, are permitted provided that the following conditions are
12 // met:
13 //
14 // 1. Redistributions of source code must retain the above copyright
15 // notice, this list of conditions and the following disclaimer.
16 //
17 // 2. Redistributions in binary form must reproduce the above copyright
18 // notice, this list of conditions and the following disclaimer in the
19 // documentation and/or other materials provided with the distribution.
20 //
21 // 3. Neither the name of the Corporation nor the names of the
22 // contributors may be used to endorse or promote products derived from
23 // this software without specific prior written permission.
24 //
25 // THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
26 // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27 // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
29 // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
30 // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
31 // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
32 // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
33 // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
34 // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
35 // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 //
37 // Questions? Contact lead developers:
38 // Drew Kouri (dpkouri@sandia.gov) and
39 // Denis Ridzal (dridzal@sandia.gov)
40 //
41 // ************************************************************************
42 // @HEADER
43 
44 #ifndef ROL_TRUSTREGIONSTEP_H
45 #define ROL_TRUSTREGIONSTEP_H
46 
47 #include "ROL_Step.hpp"
48 #include "ROL_Types.hpp"
49 #include "ROL_Secant.hpp"
50 #include "ROL_TrustRegion.hpp"
51 #include <sstream>
52 #include <iomanip>
53 
126 namespace ROL {
127 
128 template <class Real>
129 class TrustRegionStep : public Step<Real> {
130 private:
131 
// Secant (quasi-Newton) object, used as preconditioner and/or Hessian
// approximation depending on useSecantPrecond_/useSecantHessVec_.
132  Teuchos::RCP<Secant<Real> > secant_;
// Trust-region subproblem solver object.
133  Teuchos::RCP<TrustRegion<Real> > trustRegion_;
134 
// Work vectors: trial iterate, previous iterate, and previous gradient.
135  Teuchos::RCP<Vector<Real> > xnew_;
136  Teuchos::RCP<Vector<Real> > xold_;
137  Teuchos::RCP<Vector<Real> > gp_;
138 
// NOTE(review): the declarations for etr_, esec_, useSecantHessVec_,
// useSecantPrecond_ and useProjectedGrad_ (source lines 139-145) are elided
// from this listing; see the member index at the end of the file.
141 
144 
146 
// Flags for inexact (0) objective function, (1) gradient, (2) Hessian-vector.
147  std::vector<bool> useInexact_;
// Trust-region exit flag and per-iteration evaluation counters.
148  int TRflag_ ;
149  int TR_nfval_;
150  int TR_ngrad_;
// Truncated-CG termination flag and iteration count.
151  int CGflag_;
152  int CGiter_;
153 
// Maximum trust-region radius.
154  Real delMax_;
155 
// Initial step size and function-evaluation limit for the projected
// (smoothing) line search used with bound constraints.
156  Real alpha_init_;
157  int max_fval_;
158 
// Scaling factors for the inexact-gradient tolerance loop.
159  Real scale0_;
160  Real scale1_;
161 
// Allow the objective to change between iterations ("soft" updates), and
// scale used for epsilon-active-set identification.
162  bool softUp_;
163  Real scaleEps_;
164 
// updateGradient: refresh the gradient stored in the step state and
// recompute the criticality measure (algo_state.gnorm).  Per the member
// index the full signature is
//   void updateGradient(Vector<Real> &x, Objective<Real> &obj,
//                       BoundConstraint<Real> &con, AlgorithmState<Real> &algo_state);
// the leading parameter lines are elided from this listing.
180  AlgorithmState<Real> &algo_state ) {
181  Teuchos::RCP<StepState<Real> > state = Step<Real>::getState();
// Inexact-gradient mode: repeatedly tighten the gradient tolerance until the
// relative-accuracy condition (controlled by scale0_ and scale1_) is met.
182  if ( useInexact_[1] ) {
183  Real c = scale0_*std::max(1.e-2,std::min(1.0,1.e4*algo_state.gnorm));
184  Real gtol1 = c*(state->searchSize);
// gtol0 is seeded strictly above the loop bound so the loop runs at least once.
185  Real gtol0 = scale1_*gtol1 + 1.0;
186  while ( gtol0 > gtol1*scale1_ ) {
187  obj.gradient(*(state->gradientVec),x,gtol1);
188  algo_state.gnorm = computeCriticalityMeasure(*(state->gradientVec),x,con);
189  gtol0 = gtol1;
// Next tolerance is proportional to min(criticality measure, radius).
190  c = scale0_*std::max(1.e-2,std::min(1.0,1.e4*algo_state.gnorm));
191  gtol1 = c*std::min(algo_state.gnorm,state->searchSize);
192  }
193  algo_state.ngrad++;
194  }
195  else {
// Exact mode: one gradient evaluation at (near) machine precision.
196  Real gtol = std::sqrt(ROL_EPSILON);
197  obj.gradient(*(state->gradientVec),x,gtol);
198  algo_state.ngrad++;
199  algo_state.gnorm = computeCriticalityMeasure(*(state->gradientVec),x,con);
200  }
201  }
202 
// computeCriticalityMeasure: stationarity measure used for the convergence
// test.  Per the member index the full signature is
//   Real computeCriticalityMeasure(const Vector<Real> &g, const Vector<Real> &x,
//                                  BoundConstraint<Real> &con);
// the signature lines are elided from this listing.
212  if ( con.isActivated() ) {
// Bound-constrained case, option 1: norm of the projected gradient.
213  if ( useProjectedGrad_ ) {
214  gp_->set(g);
215  con.computeProjectedGradient( *gp_, x );
216  return gp_->norm();
217  }
// Bound-constrained case, option 2: norm of the projected-gradient step
// P(x - g.dual()) - x, where P is projection onto the feasible set.
218  else {
219  xnew_->set(x);
220  xnew_->axpy(-1.0,g.dual());
221  con.project(*xnew_);
222  xnew_->axpy(-1.0,x);
223  return xnew_->norm();
224  }
225  }
// Unconstrained case: plain gradient norm.
226  else {
227  return g.norm();
228  }
229  }
230 
231 public:
232 
// Virtual destructor: all members are RCP-managed, so no manual cleanup.
233  virtual ~TrustRegionStep() {}
234 
242  TrustRegionStep( Teuchos::ParameterList & parlist )
243  : Step<Real>(),
244  secant_(Teuchos::null), trustRegion_(Teuchos::null),
245  xnew_(Teuchos::null), xold_(Teuchos::null), gp_(Teuchos::null),
246  etr_(TRUSTREGION_DOGLEG), esec_(SECANT_LBFGS),
247  useSecantHessVec_(false), useSecantPrecond_(false),
248  useProjectedGrad_(false),
249  TRflag_(0), TR_nfval_(0), TR_ngrad_(0),
250  CGflag_(0), CGiter_(0),
251  delMax_(1.e4),
252  alpha_init_(1.), max_fval_(20),
253  scale0_(1.), scale1_(1.),
254  softUp_(false), scaleEps_(1.) {
255  Teuchos::RCP<StepState<Real> > step_state = Step<Real>::getState();
256  // Trust-Region Parameters
257  step_state->searchSize = parlist.sublist("Step").sublist("Trust Region").get("Initial Radius", -1.0);
258  delMax_ = parlist.sublist("Step").sublist("Trust Region").get("Maximum Radius", 1000.0);
259  // Inexactness Information
260  useInexact_.clear();
261  useInexact_.push_back(parlist.sublist("General").get("Inexact Objective Function", false));
262  useInexact_.push_back(parlist.sublist("General").get("Inexact Gradient", false));
263  useInexact_.push_back(parlist.sublist("General").get("Inexact Hessian-Times-A-Vector", false));
264  // Trust-Region Inexactness Parameters
265  scale0_ = parlist.sublist("Step").sublist("Trust Region").sublist("Inexact").sublist("Gradient").get("Tolerance Scaling",1.e-1);
266  scale1_ = parlist.sublist("Step").sublist("Trust Region").sublist("Inexact").sublist("Gradient").get("Relative Tolerance",2.0);
267  // Initialize Trust Region Subproblem Solver Object
268  etr_ = StringToETrustRegion(parlist.sublist("Step").sublist("Trust Region").get("Subproblem Solver","Dogleg"));
269  useProjectedGrad_ = parlist.sublist("General").get("Projected Gradient Criticality Measure", false);
270  max_fval_ = parlist.sublist("Step").sublist("Line Search").get("Function Evaluation Limit", 20);
271  alpha_init_ = parlist.sublist("Step").sublist("Line Search").get("Initial Step Size", 1.0);
272  trustRegion_ = TrustRegionFactory<Real>(parlist);
273  // Secant Object
274  esec_ = StringToESecant(parlist.sublist("General").sublist("Secant").get("Type","Limited-Memory BFGS"));
275  useSecantPrecond_ = parlist.sublist("General").sublist("Secant").get("Use as Preconditioner", false);
276  useSecantHessVec_ = parlist.sublist("General").sublist("Secant").get("Use as Hessian", false);
277  secant_ = SecantFactory<Real>(parlist);
278  // Changing Objective Functions
279  softUp_ = parlist.sublist("General").get("Variable Objective Function",false);
280  // Scale for epsilon active sets
281  scaleEps_ = parlist.sublist("General").get("Scale for Epsilon Active Sets",1.0);
282  }
283 
// Constructor taking a user-supplied secant object.  Unlike the
// parameter-list-only constructor, this version stores the provided secant
// directly: it does not read the secant "Type" option and does not call
// SecantFactory.
// NOTE(review): one initializer-list line (source line 297, presumably the
// etr_/esec_ default initializers) is elided from this listing.
293  TrustRegionStep( Teuchos::RCP<Secant<Real> > &secant, Teuchos::ParameterList &parlist )
294  : Step<Real>(),
295  secant_(secant), trustRegion_(Teuchos::null),
296  xnew_(Teuchos::null), xold_(Teuchos::null), gp_(Teuchos::null),
298  useSecantHessVec_(false), useSecantPrecond_(false),
299  useProjectedGrad_(false),
300  TRflag_(0), TR_nfval_(0), TR_ngrad_(0),
301  CGflag_(0), CGiter_(0),
302  delMax_(1.e4),
303  alpha_init_(1.), max_fval_(20),
304  scale0_(1.), scale1_(1.),
305  softUp_(false), scaleEps_(1.) {
306  Teuchos::RCP<StepState<Real> > step_state = Step<Real>::getState();
307  // Trust-Region Parameters
308  step_state->searchSize = parlist.sublist("Step").sublist("Trust Region").get("Initial Radius", -1.0);
309  delMax_ = parlist.sublist("Step").sublist("Trust Region").get("Maximum Radius", 1000.0);
310  // Inexactness Information
311  useInexact_.clear();
312  useInexact_.push_back(parlist.sublist("General").get("Inexact Objective Function", false));
313  useInexact_.push_back(parlist.sublist("General").get("Inexact Gradient", false));
314  useInexact_.push_back(parlist.sublist("General").get("Inexact Hessian-Times-A-Vector", false));
315  // Trust-Region Inexactness Parameters
316  scale0_ = parlist.sublist("Step").sublist("Trust Region").sublist("Inexact").sublist("Gradient").get("Tolerance Scaling",1.e-1);
317  scale1_ = parlist.sublist("Step").sublist("Trust Region").sublist("Inexact").sublist("Gradient").get("Relative Tolerance",2.0);
318  // Initialize Trust Region Subproblem Solver Object
319  etr_ = StringToETrustRegion(parlist.sublist("Step").sublist("Trust Region").get("Subproblem Solver","Dogleg"));
320  useProjectedGrad_ = parlist.sublist("General").get("Projected Gradient Criticality Measure", false);
321  max_fval_ = parlist.sublist("Step").sublist("Line Search").get("Function Evaluation Limit", 20);
322  alpha_init_ = parlist.sublist("Step").sublist("Line Search").get("Initial Step Size", 1.0);
323  trustRegion_ = TrustRegionFactory<Real>(parlist);
324  // Secant Object
// Only the usage flags are read; the secant itself comes from the caller.
325  useSecantPrecond_ = parlist.sublist("General").sublist("Secant").get("Use as Preconditioner", false);
326  useSecantHessVec_ = parlist.sublist("General").sublist("Secant").get("Use as Hessian", false);
327  // Changing Objective Functions
328  softUp_ = parlist.sublist("General").get("Variable Objective Function",false);
329  // Scale for epsilon active sets
330  scaleEps_ = parlist.sublist("General").get("Scale for Epsilon Active Sets",1.0);
331  }
332 
// initialize: set up step state, project the initial iterate onto the bounds,
// evaluate the initial objective/gradient, and -- if no positive initial
// radius was supplied -- choose one by cubic interpolation of the objective
// along the Cauchy (steepest-descent) direction.  Per the member index the
// full signature is
//   void initialize(Vector<Real> &x, const Vector<Real> &s, const Vector<Real> &g,
//                   Objective<Real> &obj, BoundConstraint<Real> &con,
//                   AlgorithmState<Real> &algo_state);
// the obj/con parameter line is elided from this listing.
341  void initialize( Vector<Real> &x, const Vector<Real> &s, const Vector<Real> &g,
343  AlgorithmState<Real> &algo_state ) {
344  Teuchos::RCP<StepState<Real> > step_state = Step<Real>::getState();
345 
346  trustRegion_->initialize(x,s,g);
347 
// htol: Hessian-vector tolerance; ftol: effectively "no accuracy request".
348  Real htol = std::sqrt(ROL_EPSILON);
349  Real ftol = 0.1*ROL_OVERFLOW;
350 
351  step_state->descentVec = s.clone();
352  step_state->gradientVec = g.clone();
353 
// Work vectors are only allocated when actually needed.
354  if ( con.isActivated() ) {
355  con.project(x);
356  xnew_ = x.clone();
357  xold_ = x.clone();
358  }
359 
360  if ( con.isActivated() || secant_ != Teuchos::null ) {
361  gp_ = g.clone();
362  }
363 
364  // Update approximate gradient and approximate objective function.
365  obj.update(x,true,algo_state.iter);
366  updateGradient(x,obj,con,algo_state);
367  algo_state.snorm = 1.e10;
368  algo_state.value = obj.value(x,ftol);
369  algo_state.nfval++;
370 
371  // Evaluate Objective Function at Cauchy Point
// Non-positive initial radius => derive one from the Cauchy point.
372  if ( step_state->searchSize <= 0.0 ) {
373  Teuchos::RCP<Vector<Real> > Bg = g.clone();
374  if ( useSecantHessVec_ ) {
375  secant_->applyB(*Bg,(step_state->gradientVec)->dual(),x);
376  }
377  else {
378  obj.hessVec(*Bg,(step_state->gradientVec)->dual(),x,htol);
379  }
380  Real gBg = Bg->dot(*(step_state->gradientVec));
381  Real alpha = 1.0;
// Exact minimizer of the quadratic model along -g when curvature is positive.
382  if ( gBg > ROL_EPSILON ) {
383  alpha = algo_state.gnorm*algo_state.gnorm/gBg;
384  }
385  // Evaluate the objective function at the Cauchy point
386  Teuchos::RCP<Vector<Real> > cp = s.clone();
387  cp->set((step_state->gradientVec)->dual());
388  cp->scale(-alpha);
389  Teuchos::RCP<Vector<Real> > xcp = x.clone();
390  xcp->set(x);
391  xcp->plus(*cp);
392  if ( con.isActivated() ) {
393  con.project(*xcp);
394  }
395  obj.update(*xcp);
396  Real fnew = obj.value(*xcp,ftol); // MUST DO SOMETHING HERE WITH FTOL
397  algo_state.nfval++;
398  // Perform cubic interpolation to determine initial trust region radius
399  Real gs = cp->dot((step_state->gradientVec)->dual());
// a is the cubic coefficient of the interpolant; a ~ 0 means the objective
// looks quadratic along the Cauchy direction.
400  Real a = fnew - algo_state.value - gs - 0.5*alpha*alpha*gBg;
401  if ( std::abs(a) < ROL_EPSILON ) {
402  // a = 0 implies the objective is quadratic in the negative gradient direction
403  step_state->searchSize = std::min(alpha*algo_state.gnorm,delMax_);
404  }
405  else {
406  Real b = 0.5*alpha*alpha*gBg;
407  Real c = gs;
// Discriminant test for critical points of the cubic interpolant.
408  if ( b*b-3.0*a*c > ROL_EPSILON ) {
409  // There is at least one critical point
410  Real t1 = (-b-std::sqrt(b*b-3.0*a*c))/(3.0*a);
411  Real t2 = (-b+std::sqrt(b*b-3.0*a*c))/(3.0*a);
// Second-derivative sign selects which critical point is the minimizer.
412  if ( 6.0*a*t1 + 2.0*b > 0.0 ) {
413  // t1 is the minimizer
414  step_state->searchSize = std::min(t1*alpha*algo_state.gnorm,delMax_);
415  }
416  else {
417  // t2 is the minimizer
418  step_state->searchSize = std::min(t2*alpha*algo_state.gnorm,delMax_);
419  }
420  }
421  else {
422  step_state->searchSize = std::min(alpha*algo_state.gnorm,delMax_);
423  }
424  }
425  }
426  }
427 
// compute: solve the trust-region subproblem for the trial step s.  Per the
// member index the full signature is
//   void compute(Vector<Real> &s, const Vector<Real> &x, Objective<Real> &obj,
//                BoundConstraint<Real> &con, AlgorithmState<Real> &algo_state);
// the leading parameter lines are elided from this listing.
439  AlgorithmState<Real> &algo_state ) {
440  Teuchos::RCP<StepState<Real> > step_state = Step<Real>::getState();
441 
// Epsilon-active-set parameter scales with the current criticality measure.
442  Real eps = 0.0;
443  if ( con.isActivated() ) {
444  eps = scaleEps_*algo_state.gnorm;
445  }
446  ProjectedObjective<Real> pObj(obj,con,secant_,useSecantPrecond_,useSecantHessVec_,eps);
447 
// Run the subproblem solver; it fills s, snorm, and the CG diagnostics.
448  CGflag_ = 0;
449  CGiter_ = 0;
450  trustRegion_->run(s,algo_state.snorm,step_state->searchSize,CGflag_,CGiter_,
451  x,*(step_state->gradientVec),algo_state.gnorm,pObj);
452 
// With bounds active, replace s by the projected step P(x+s) - x so that
// x + s is feasible.
453  if ( con.isActivated() ) {
454  xnew_->set(x);
455  xnew_->plus(s);
456  con.project(*xnew_);
457  s.set(*xnew_);
458  s.axpy(-1.0,x);
459  }
460  }
461 
// update: accept or reject the trial step s, adjust the trust-region radius,
// update the iterate, stored objective value/gradient, and (optionally) the
// secant approximation.  Per the member index the full signature is
//   void update(Vector<Real> &x, const Vector<Real> &s, Objective<Real> &obj,
//               BoundConstraint<Real> &con, AlgorithmState<Real> &algo_state);
// the leading parameter lines are elided from this listing.
474  AlgorithmState<Real> &algo_state ) {
475  Teuchos::RCP<StepState<Real> > state = Step<Real>::getState();
476 
477  Real tol = std::sqrt(ROL_EPSILON);
478 
// Epsilon-active-set parameter for the projected objective.
479  Real eps = 0.0;
480  if ( con.isActivated() ) {
481  eps = algo_state.gnorm;
482  }
483  ProjectedObjective<Real> pObj(obj,con,secant_,useSecantPrecond_,useSecantHessVec_,eps);
484 
485  // Store previous step for constraint computations
486  if ( con.isActivated() ) {
487  xold_->set(x);
488  }
489 
490  // Update trust-region information;
491  // Performs a hard update on the objective function
492  TRflag_ = 0;
493  TR_nfval_ = 0;
494  TR_ngrad_ = 0;
495  Real fold = algo_state.value;
496  Real fnew = 0.0;
497  algo_state.iter++;
498  trustRegion_->update(x,fnew,state->searchSize,TR_nfval_,TR_ngrad_,TRflag_,
499  s,algo_state.snorm,fold,*(state->gradientVec),algo_state.iter,pObj);
500  algo_state.nfval += TR_nfval_;
501  algo_state.ngrad += TR_ngrad_;
502  algo_state.value = fnew;
503 
504  // If step is accepted ...
505  // Compute new gradient and update secant storage
// NOTE(review): TRflag_ values 0 and 1 are treated as "accepted" here --
// confirm against the TrustRegion flag convention.
506  if ( TRflag_ == 0 || TRflag_ == 1 ) {
507  // Perform line search (smoothing) to ensure decrease
508  if ( con.isActivated() ) {
509  // Compute new gradient
510  obj.gradient(*gp_,x,tol); // MUST DO SOMETHING HERE WITH TOL
511  algo_state.ngrad++;
512  // Compute smoothed step
513  Real alpha = 1.0;
514  xnew_->set(x);
515  xnew_->axpy(-alpha*alpha_init_,gp_->dual());
516  con.project(*xnew_);
517  // Compute new objective value
518  if ( softUp_ ) {
519  obj.update(*xnew_);
520  }
521  else {
522  obj.update(*xnew_,true,algo_state.iter);
523  }
524  Real ftmp = obj.value(*xnew_,tol); // MUST DO SOMETHING HERE WITH TOL
525  algo_state.nfval++;
526  // Perform smoothing
527  int cnt = 0;
528  alpha = 1.0/alpha_init_;
// Backtracking loop: halve alpha until the smoothed point achieves
// sufficient decrease, or the evaluation limit max_fval_ is hit.
529  while ( (fnew-ftmp) <= 1.e-4*(fnew-fold) ) {
530  xnew_->set(x);
531  xnew_->axpy(-alpha*alpha_init_,gp_->dual());
532  con.project(*xnew_);
533  if ( softUp_ ) {
534  obj.update(*xnew_,false,algo_state.iter);
535  }
536  else {
537  obj.update(*xnew_,true,algo_state.iter);
538  }
539  ftmp = obj.value(*xnew_,tol); // MUST DO SOMETHING HERE WITH TOL
540  algo_state.nfval++;
541  if ( cnt >= max_fval_ ) {
542  break;
543  }
544  alpha *= 0.5;
545  cnt++;
546  }
547  // Store objective function and iteration information
548  fnew = ftmp;
549  x.set(*xnew_);
550  }
551  else {
552  if (softUp_) {
553  pObj.update(x,true,algo_state.iter);
554  }
555  }
556 
557  // Store previous gradient for secant update
558  if ( secant_ != Teuchos::null ) {
559  gp_->set(*(state->gradientVec));
560  }
561 
562  // Update objective function and approximate model
563  updateGradient(x,obj,con,algo_state);
564 
565  // Update secant information
566  if ( secant_ != Teuchos::null ) {
567  if ( con.isActivated() ) { // Compute new constrained step
// The realized step is x - xold (after projection/smoothing), not s itself.
568  xnew_->set(x);
569  xnew_->axpy(-1.0,*xold_);
570  secant_->update(*(state->gradientVec),*gp_,*xnew_,algo_state.snorm,algo_state.iter+1);
571  }
572  else {
573  secant_->update(*(state->gradientVec),*gp_,s,algo_state.snorm,algo_state.iter+1);
574  }
575  }
576 
577  // Update algorithm state
578  (algo_state.iterateVec)->set(x);
579  }
580  else { // Step was rejected
// Re-evaluate the (possibly changed) objective at the unchanged iterate.
581  if ( softUp_ ) {
582  obj.update(x,true,algo_state.iter);
583  fnew = pObj.value(x,tol);
584  algo_state.nfval++;
585  algo_state.value = fnew;
586  }
587  }
588 
589  }
590 
595  std::string printHeader( void ) const {
596  std::stringstream hist;
597  hist << " ";
598  hist << std::setw(6) << std::left << "iter";
599  hist << std::setw(15) << std::left << "value";
600  hist << std::setw(15) << std::left << "gnorm";
601  hist << std::setw(15) << std::left << "snorm";
602  hist << std::setw(15) << std::left << "delta";
603  hist << std::setw(10) << std::left << "#fval";
604  hist << std::setw(10) << std::left << "#grad";
605  hist << std::setw(10) << std::left << "tr_flag";
606  if ( etr_ == TRUSTREGION_TRUNCATEDCG ) {
607  hist << std::setw(10) << std::left << "iterCG";
608  hist << std::setw(10) << std::left << "flagCG";
609  }
610  hist << "\n";
611  return hist.str();
612  }
613 
618  std::string printName( void ) const {
619  std::stringstream hist;
620  hist << "\n" << ETrustRegionToString(etr_) << " Trust-Region solver";
621  if ( useSecantPrecond_ || useSecantHessVec_ ) {
622  if ( useSecantPrecond_ && !useSecantHessVec_ ) {
623  hist << " with " << ESecantToString(esec_) << " preconditioning\n";
624  }
625  else if ( !useSecantPrecond_ && useSecantHessVec_ ) {
626  hist << " with " << ESecantToString(esec_) << " Hessian approximation\n";
627  }
628  else {
629  hist << " with " << ESecantToString(esec_) << " preconditioning and Hessian approximation\n";
630  }
631  }
632  else {
633  hist << "\n";
634  }
635  return hist.str();
636  }
637 
645  std::string print( AlgorithmState<Real> & algo_state, bool print_header = false ) const {
646  const Teuchos::RCP<const StepState<Real> >& step_state = Step<Real>::getStepState();
647 
648  std::stringstream hist;
649  hist << std::scientific << std::setprecision(6);
650  if ( algo_state.iter == 0 ) {
651  hist << printName();
652  }
653  if ( print_header ) {
654  hist << printHeader();
655  }
656  if ( algo_state.iter == 0 ) {
657  hist << " ";
658  hist << std::setw(6) << std::left << algo_state.iter;
659  hist << std::setw(15) << std::left << algo_state.value;
660  hist << std::setw(15) << std::left << algo_state.gnorm;
661  hist << std::setw(15) << std::left << " ";
662  hist << std::setw(15) << std::left << step_state->searchSize;
663  hist << "\n";
664  }
665  else {
666  hist << " ";
667  hist << std::setw(6) << std::left << algo_state.iter;
668  hist << std::setw(15) << std::left << algo_state.value;
669  hist << std::setw(15) << std::left << algo_state.gnorm;
670  hist << std::setw(15) << std::left << algo_state.snorm;
671  hist << std::setw(15) << std::left << step_state->searchSize;
672  hist << std::setw(10) << std::left << algo_state.nfval;
673  hist << std::setw(10) << std::left << algo_state.ngrad;
674  hist << std::setw(10) << std::left << TRflag_;
675  if ( etr_ == TRUSTREGION_TRUNCATEDCG ) {
676  hist << std::setw(10) << std::left << CGiter_;
677  hist << std::setw(10) << std::left << CGflag_;
678  }
679  hist << "\n";
680  }
681  return hist.str();
682  }
683 
684 }; // class Step
685 
686 } // namespace ROL
687 
688 #endif
Provides the interface to evaluate objective functions.
ESecant esec_
Secant type.
virtual const Vector & dual() const
Return dual representation of \f$x\f$ (i.e., of this vector): for example, the result of applying a Riesz map, or change of basis...
Definition: ROL_Vector.hpp:213
Real value(const Vector< Real > &x, Real &tol)
Compute value.
virtual void axpy(const Real alpha, const Vector &x)
Compute \f$y \leftarrow \alpha x + y\f$, where \f$y = \mathtt{*this}\f$.
Definition: ROL_Vector.hpp:143
bool useSecantPrecond_
Flag whether to use a secant preconditioner.
virtual Real value(const Vector< Real > &x, Real &tol)=0
Compute value.
Provides the interface to compute optimization steps.
Definition: ROL_Step.hpp:67
virtual void hessVec(Vector< Real > &hv, const Vector< Real > &v, const Vector< Real > &x, Real &tol)
Apply Hessian approximation to vector.
Teuchos::RCP< StepState< Real > > getState(void)
Definition: ROL_Step.hpp:72
Contains definitions of custom data types in ROL.
Real alpha_init_
Initial line-search parameter for projected methods.
virtual Teuchos::RCP< Vector > clone() const =0
Clone to make a new (uninitialized) vector.
TrustRegionStep(Teuchos::RCP< Secant< Real > > &secant, Teuchos::ParameterList &parlist)
Constructor.
ESecant StringToESecant(std::string s)
Definition: ROL_Types.hpp:438
Defines the linear algebra or vector space interface.
Definition: ROL_Vector.hpp:74
Teuchos::RCP< Vector< Real > > gp_
Container for previous gradient vector.
Teuchos::RCP< Vector< Real > > xold_
Container for previous iteration vector.
Teuchos::RCP< Secant< Real > > secant_
Container for secant approximation.
void compute(Vector< Real > &s, const Vector< Real > &x, Objective< Real > &obj, BoundConstraint< Real > &con, AlgorithmState< Real > &algo_state)
Compute step.
std::string printName(void) const
Print step name.
State for algorithm class. Will be used for restarts.
Definition: ROL_Types.hpp:77
virtual void gradient(Vector< Real > &g, const Vector< Real > &x, Real &tol)
Compute gradient.
bool isActivated(void)
Check if bounds are on.
ETrustRegion etr_
Trust-region subproblem solver type.
std::string printHeader(void) const
Print iterate header.
Teuchos::RCP< Vector< Real > > xnew_
Container for updated iteration vector.
ESecant
Enumeration of secant update algorithms.
Definition: ROL_Types.hpp:381
int CGflag_
Truncated CG termination flag.
virtual Teuchos::RCP< const StepState< Real > > getStepState(void) const
Get state for step object.
Definition: ROL_Step.hpp:192
void update(Vector< Real > &x, const Vector< Real > &s, Objective< Real > &obj, BoundConstraint< Real > &con, AlgorithmState< Real > &algo_state)
Update step, if successful.
Real computeCriticalityMeasure(const Vector< Real > &g, const Vector< Real > &x, BoundConstraint< Real > &con)
Compute the criticality measure.
bool useSecantHessVec_
Flag whether to use a secant Hessian.
std::vector< bool > useInexact_
Contains flags for inexact (0) objective function, (1) gradient, (2) Hessian.
Provides interface for and implements limited-memory secant operators.
Definition: ROL_Secant.hpp:68
Real scale0_
Scale for inexact gradient computation.
void initialize(Vector< Real > &x, const Vector< Real > &s, const Vector< Real > &g, Objective< Real > &obj, BoundConstraint< Real > &con, AlgorithmState< Real > &algo_state)
Initialize step.
Real delMax_
Maximum trust-region radius.
TrustRegionStep(Teuchos::ParameterList &parlist)
Constructor.
std::string print(AlgorithmState< Real > &algo_state, bool print_header=false) const
Print iterate status.
Provides the interface to apply upper and lower bound constraints.
int TR_ngrad_
Trust-region gradient evaluation counter.
void update(const Vector< Real > &x, bool flag=true, int iter=-1)
Update objective function.
void computeProjectedGradient(Vector< Real > &g, const Vector< Real > &x)
Compute projected gradient.
ETrustRegion StringToETrustRegion(std::string s)
Definition: ROL_Types.hpp:831
void updateGradient(Vector< Real > &x, Objective< Real > &obj, BoundConstraint< Real > &con, AlgorithmState< Real > &algo_state)
Update gradient to iteratively satisfy inexactness condition.
int TR_nfval_
Trust-region function evaluation counter.
Teuchos::RCP< Vector< Real > > iterateVec
Definition: ROL_Types.hpp:91
virtual void set(const Vector &x)
Set \f$y \leftarrow x\f$, where \f$y = \mathtt{*this}\f$.
Definition: ROL_Vector.hpp:196
virtual Real norm() const =0
Returns \f$\|y\|\f$, where \f$y = \mathtt{*this}\f$.
int max_fval_
Maximum function evaluations in line-search for projected methods.
virtual void update(const Vector< Real > &x, bool flag=true, int iter=-1)
Update objective function.
int TRflag_
Trust-region exit flag.
ETrustRegion
Enumeration of trust-region solver types.
Definition: ROL_Types.hpp:777
std::string ETrustRegionToString(ETrustRegion tr)
Definition: ROL_Types.hpp:785
int CGiter_
Truncated CG iteration count.
std::string ESecantToString(ESecant tr)
Definition: ROL_Types.hpp:390
static const double ROL_OVERFLOW
Platform-dependent maximum double.
Definition: ROL_Types.hpp:126
bool useProjectedGrad_
Flag whether to use the projected gradient criticality measure.
Teuchos::RCP< TrustRegion< Real > > trustRegion_
Container for trust-region object.
Real scale1_
Scale for inexact gradient computation.
virtual void project(Vector< Real > &x)
Project optimization variables onto the bounds.
Provides the interface to compute optimization steps with trust regions.
static const double ROL_EPSILON
Platform-dependent machine epsilon.
Definition: ROL_Types.hpp:118