---------------------------------------------------------------------------
-- |
-- Module      : Numeric.Optimization.Algorithms.HagerZhang05
-- Copyright   : (c) 2009 Felipe Lessa
-- License     : GPL
--
-- Maintainer  : felipe.lessa@gmail.com
-- Stability   : experimental
-- Portability : portable
--
-- This module implements the algorithms described by Hager and
-- Zhang [1].  We use bindings to @CG_DESCENT@ library by the same
-- authors, version 3.0 from 18\/05\/2008 [2].  The library code is
-- also licensed under the terms of the GPL.
--
-- * [1] Hager, W. W. and Zhang, H.  /A new conjugate gradient/
--   /method with guaranteed descent and an efficient line/
--   /search./  Society of Industrial and Applied Mathematics
--   Journal on Optimization, 16 (2005), 170-192.
--
-- * [2] <http://www.math.ufl.edu/~hager/papers/CG/CG_DESCENT-C-3.0.tar.gz>
--
---------------------------------------------------------------------------

module Numeric.Optimization.Algorithms.HagerZhang05
    ( -- * Main function
      -- $mainFunction
      optimize
      -- ** User-defined function types
    , Function(..)
    , Gradient(..)
    , Combined(..)
    , PointMVector
    , GradientMVector
      -- ** Kinds of function types
    , Simple
    , Mutable
      -- * Result and statistics
    , Result(..)
    , Statistics(..)
      -- * Options
    , defaultParameters
    , Parameters(..)
    , Verbose(..)
    , LineSearch(..)
    , StopRules(..)
    , EstimateError(..)
      -- * Technical parameters
    , TechParameters(..)
    ) where

import qualified Data.Vector.Generic as G
import qualified Data.Vector.Generic.Mutable as GM
import qualified Data.Vector.Storable as S
import qualified Data.Vector.Storable.Mutable as SM
import Control.Exception (bracket)
import Control.Monad.Primitive (PrimMonad(..))
import Foreign
import Foreign.C

-- $mainFunction
-- Please pay close attention to the types of @Vector@s and
-- @MVector@s being used below.  They may come from
-- "Data.Vector.Generic"/"Data.Vector.Generic.Mutable" or from
-- "Data.Vector.Storable"/"Data.Vector.Storable.Mutable".  The
-- rule of thumb is that input pure vectors are @Generic@ and
-- everything else is @Storable@.

-- | Run the @CG_DESCENT@ optimizer and try to minimize the
-- function.
optimize :: (G.Vector v Double)
         => Parameters          -- ^ How should we optimize.
         -> Double              -- ^ @grad_tol@, see 'stopRules'.
         -> v Double            -- ^ Initial guess.
         -> Function t1         -- ^ Function to be minimized.
         -> Gradient t2         -- ^ Gradient of the function.
         -> Maybe (Combined t3) -- ^ (Optional) Combined function computing
                                --   both the function and its gradient.
         -> IO (S.Vector Double, Result, Statistics)
optimize params grad_tol initial f g c = do
  -- Mutable vector used for the initial guess and, on return,
  -- the final solution (cg_descent optimizes in place).
  let n = G.length initial
  x <- GM.unstream $ G.stream initial

  -- Convert user-provided functions to their mutable forms and
  -- then to C callbacks.
  let mf = mutableF f
      mg = mutableG g
      mc = maybe (combine mf mg) mutableC c
      cf = prepareF mf
      cg = prepareG mg
      cc = prepareC mc

  -- Allocate everything.  'bracket' guarantees the Haskell
  -- function pointers are freed even if cg_descent throws.
  (ret, stats) <-
    SM.unsafeWith x $ \x_ptr ->
    alloca          $ \stats_ptr ->
    alloca          $ \param_ptr ->
    bracket (mkCFunction cf) freeHaskellFunPtr $ \cf_ptr ->
    bracket (mkCGradient cg) freeHaskellFunPtr $ \cg_ptr ->
    bracket (mkCCombined cc) freeHaskellFunPtr $ \cc_ptr ->
    allocateWorkSpace n $ \work_ptr -> do
      -- Go to C land.
      poke param_ptr params
      ret <- cg_descent x_ptr (fromIntegral n) stats_ptr param_ptr
                        grad_tol cf_ptr cg_ptr cc_ptr work_ptr
      stats <- peek stats_ptr
      return (intToResult ret, stats)

  -- Retrieve the solution and return.
  x' <- G.unsafeFreeze x
  return $ ret `seq` (x', ret, stats)
-- | Allocates enough work space for CG_DESCENT.  If the number
-- of dimensions is "small enough" then we allocate on the stack,
-- otherwise we allocate via malloc.
allocateWorkSpace :: Int -> (Ptr Double -> IO a) -> IO a
allocateWorkSpace n
    | size < threshold = allocaBytes size
    | otherwise        = bracket (mallocBytes size) free
    where
      -- cg_descent needs a scratch area of 4*n doubles.
      size      = 4 * n * sizeOf (undefined :: Double)
      threshold = 4096 -- gives room to 128 dimensions

-- Shapes of the C callbacks expected by cg_descent:
-- value(x, n), gradient(g, x, n) and valgrad(g, x, n).
type CFunction = Ptr Double -> CInt -> IO Double
type CGradient = Ptr Double -> Ptr Double -> CInt -> IO ()
type CCombined = Ptr Double -> Ptr Double -> CInt -> IO Double

-- "safe" because cg_descent calls back into Haskell.
foreign import ccall safe "cg_user.h"
    cg_descent :: Ptr Double -> CInt -> Ptr Statistics
               -> Ptr Parameters -> Double
               -> FunPtr CFunction -> FunPtr CGradient
               -> FunPtr CCombined -> Ptr Double -> IO CInt

foreign import ccall "wrapper"
    mkCFunction :: CFunction -> IO (FunPtr CFunction)
foreign import ccall "wrapper"
    mkCGradient :: CGradient -> IO (FunPtr CGradient)
foreign import ccall "wrapper"
    mkCCombined :: CCombined -> IO (FunPtr CCombined)

-- | Phantom type for simple pure functions.
data Simple

-- | Phantom type for functions using mutable data.
data Mutable
-- | Mutable vector representing the point where the
-- function\/gradient is being evaluated.  This vector /should/
-- /not/ be modified.
type PointMVector m = SM.MVector (PrimState m) Double

-- | Mutable vector representing where the gradient should be
-- /written/.
type GradientMVector m = SM.MVector (PrimState m) Double

-- | Function calculating the value of the objective function @f@
-- at a point @x@.
data Function t where
    VFunction :: G.Vector v Double
              => (v Double -> Double)
              -> Function Simple
    MFunction :: (forall m. PrimMonad m
                  => PointMVector m
                  -> m Double)
              -> Function Mutable

-- | Convert any kind of 'Function' to its mutable form by
-- copying the mutable input into an immutable vector.
mutableF :: Function t -> Function Mutable
mutableF (VFunction f) = MFunction f'
    where
      f' mx = do
        -- Copy the input to an immutable vector.
        -- (valid indices are 0..s-1, hence the >= guard)
        let s = GM.length mx
        mz <- GM.new s
        let go i | i >= s    = return ()
                 | otherwise = GM.unsafeRead mx i >>=
                               GM.unsafeWrite mz i >> go (i+1)
        go 0
        z <- G.unsafeFreeze mz
        -- Run the user function.
        return (f z)
mutableF (MFunction f) = MFunction f

-- | Wrap a mutable function as a C callback.  The C array is
-- wrapped in place (no copy) as a storable mutable vector.
prepareF :: Function Mutable -> CFunction
prepareF (MFunction f) =
    \x_ptr n -> do
      let n' = fromIntegral n
      x_fptr <- newForeignPtr_ x_ptr
      f (SM.unsafeFromForeignPtr x_fptr 0 n')
prepareF _ = error "HagerZhang05.prepareF: never here"

-- | Function calculating the value of the gradient of the
-- objective function @f@ at a point @x@.
--
-- The 'MGradient' constructor uses a function receiving as
-- parameters the point @x@ being evaluated (should not be
-- modified) and the vector where the gradient should be written.
data Gradient t where
    VGradient :: G.Vector v Double
              => (v Double -> v Double)
              -> Gradient Simple
    MGradient :: (forall m. PrimMonad m
                  => PointMVector m
                  -> GradientMVector m
                  -> m ())
              -> Gradient Mutable

-- | Convert any kind of 'Gradient' to its mutable form.
mutableG :: Gradient t -> Gradient Mutable
mutableG (VGradient f) = MGradient f'
    where
      f' mx mret = do
        -- Copy the input to an immutable vector.
        let s = GM.length mx
        mz <- GM.new s
        let go i | i >= s    = return ()
                 | otherwise = GM.unsafeRead mx i >>=
                               GM.unsafeWrite mz i >> go (i+1)
        go 0
        z <- G.unsafeFreeze mz
        -- Run the user function.
        let !r = f z
        -- Copy the output to the mutable result vector.
        let s' = min s (G.length r)
            go' i | i >= s'   = return ()
                  | otherwise = let !x = G.unsafeIndex r i
                                in GM.unsafeWrite mret i x >> go' (i+1)
        go' 0
mutableG (MGradient f) = MGradient f

-- | Wrap a mutable gradient as a C callback.  Note that
-- @CG_DESCENT@ passes the /output/ (gradient) array first.
prepareG :: Gradient Mutable -> CGradient
prepareG (MGradient f) =
    \ret_ptr x_ptr n -> do
      let n' = fromIntegral n
      x_fptr   <- newForeignPtr_ x_ptr
      ret_fptr <- newForeignPtr_ ret_ptr
      f (SM.unsafeFromForeignPtr x_fptr   0 n')
        (SM.unsafeFromForeignPtr ret_fptr 0 n')
prepareG _ = error "HagerZhang05.prepareG: never here"

-- | Function calculating both the value of the objective
-- function @f@ and its gradient at a point @x@.
data Combined t where
    VCombined :: G.Vector v Double
              => (v Double -> (Double, v Double))
              -> Combined Simple
    MCombined :: (forall m. PrimMonad m
                  => PointMVector m
                  -> GradientMVector m
                  -> m Double)
              -> Combined Mutable

-- | Convert any kind of 'Combined' to its mutable form.
mutableC :: Combined t -> Combined Mutable
mutableC (VCombined f) = MCombined f'
    where
      f' mx mret = do
        -- Copy the input to an immutable vector.
        let s = GM.length mx
        mz <- GM.new s
        let go i | i >= s    = return ()
                 | otherwise = GM.unsafeRead mx i >>=
                               GM.unsafeWrite mz i >> go (i+1)
        go 0
        z <- G.unsafeFreeze mz
        -- Run the user function.
        let !(v,r) = f z
        -- Copy the gradient to the mutable result vector.
        let s' = min s (G.length r)
            go' i | i >= s'   = return ()
                  | otherwise = let !x = G.unsafeIndex r i
                                in GM.unsafeWrite mret i x >> go' (i+1)
        go' 0
        -- Return the function value.
        return v
mutableC (MCombined f) = MCombined f

-- | Wrap a mutable combined function as a C callback.
prepareC :: Combined Mutable -> CCombined
prepareC (MCombined f) =
    \ret_ptr x_ptr n -> do
      let n' = fromIntegral n
      x_fptr   <- newForeignPtr_ x_ptr
      ret_fptr <- newForeignPtr_ ret_ptr
      f (SM.unsafeFromForeignPtr x_fptr   0 n')
        (SM.unsafeFromForeignPtr ret_fptr 0 n')
prepareC _ = error "HagerZhang05.prepareC: never here"

-- | Combine two separated functions into a single, combined one.
-- This is always a win for us since we save one jump from C to
-- Haskell land.
combine :: Function Mutable -> Gradient Mutable -> Combined Mutable
combine (MFunction f) (MGradient g) =
    MCombined $ \mx mret -> g mx mret >> f mx
combine _ _ = error "HagerZhang05.combine: never here"
-- | Reasons why the optimizer stopped, as reported by
-- @cg_descent@'s return code (see 'intToResult').
data Result =
    ToleranceStatisfied
      -- ^ Convergence tolerance was satisfied.
  | FunctionChange
      -- ^ Change in function value was less than @funcEpsilon *
      -- |f|@.
  | MaxTotalIter
      -- ^ Total iterations exceeded @maxItersFac * n@.
  | NegativeSlope
      -- ^ Slope was always negative in line search.
  | MaxSecantIter
      -- ^ Number of secant iterations exceed nsecant.
  | NotDescent
      -- ^ Search direction not a descent direction.
  | LineSearchFailsInitial
      -- ^ Line search fails in initial interval.
  | LineSearchFailsBisection
      -- ^ Line search fails during bisection.
  | LineSearchFailsUpdate
      -- ^ Line search fails during interval update.
  | DebugTol
      -- ^ Debug tolerance was on and the test failed (see 'debugTol').
  | FunctionValueNaN
      -- ^ Function value became @NaN@.
  | StartFunctionValueNaN
      -- ^ Initial function value was @NaN@.
    deriving (Eq, Ord, Show, Read, Enum)

-- | Translate a @cg_descent@ return code into a 'Result'.
-- Code 10 is "out of memory", which cannot happen since we
-- allocate the work space ourselves in 'allocateWorkSpace'.
intToResult :: CInt -> Result
intToResult (-2) = FunctionValueNaN
intToResult (-1) = StartFunctionValueNaN
intToResult 0    = ToleranceStatisfied
intToResult 1    = FunctionChange
intToResult 2    = MaxTotalIter
intToResult 3    = NegativeSlope
intToResult 4    = MaxSecantIter
intToResult 5    = NotDescent
intToResult 6    = LineSearchFailsInitial
intToResult 7    = LineSearchFailsBisection
intToResult 8    = LineSearchFailsUpdate
intToResult 9    = DebugTol
intToResult 10   = error $ "HagerZhang05.intToResult: out of memory?! how?!"
intToResult x    = error $ "HagerZhang05.intToResult: unknown value " ++ show x

-- | Statistics given after the process finishes.
data Statistics = Statistics
    { finalValue :: Double
      -- ^ Value of the function at the solution.
    , gradNorm :: Double
      -- ^ Maximum absolute component of the gradient at the
      -- solution.
    , totalIters :: CInt
      -- ^ Total number of iterations.
    , funcEvals :: CInt
      -- ^ Total number of function evaluations.
    , gradEvals :: CInt
      -- ^ Total number of gradient evaluations.
    } deriving (Eq, Ord, Show, Read)

-- Marshalling for the C struct cg_stats; the byte offsets below
-- were generated by hsc2hs from cg_user.h and must match the C
-- struct layout exactly.
instance Storable Statistics where
    sizeOf _    = 28
    alignment _ = alignment (undefined :: Double)

    peek ptr = do
      v_finalValue <- peekByteOff ptr 0
      v_gradNorm   <- peekByteOff ptr 8
      v_totalIters <- peekByteOff ptr 16
      v_funcEvals  <- peekByteOff ptr 20
      v_gradEvals  <- peekByteOff ptr 24
      return Statistics { finalValue = v_finalValue
                        , gradNorm   = v_gradNorm
                        , totalIters = v_totalIters
                        , funcEvals  = v_funcEvals
                        , gradEvals  = v_gradEvals }

    poke ptr s = do
      pokeByteOff ptr 0  (finalValue s)
      pokeByteOff ptr 8  (gradNorm s)
      pokeByteOff ptr 16 (totalIters s)
      pokeByteOff ptr 20 (funcEvals s)
      pokeByteOff ptr 24 (gradEvals s)
-- | Default parameters.  See the documentation for 'Parameters'
-- and 'TechParameters' to see what are the defaults.
defaultParameters :: Parameters
defaultParameters =
    unsafePerformIO $ do
      alloca $ \ptr -> do
        cg_default ptr
        peek ptr
{-# NOINLINE defaultParameters #-}

foreign import ccall unsafe "cg_user.h"
    cg_default :: Ptr Parameters -> IO ()

-- | Parameters given to the optimizer.
data Parameters = Parameters
    { printFinal :: Bool
      -- ^ Print final statistics to @stdout@.  Defaults to @True@.
    , printParams :: Bool
      -- ^ Print parameters to @stdout@ before starting.  Defaults to @False@
    , verbose :: Verbose
      -- ^ How verbose we should be while computing.  Everything is
      -- printed to @stdout@.  Defaults to 'Quiet'.
    , lineSearch :: LineSearch
      -- ^ What kind of line search should be used.  Defaults to
      -- @AutoSwitch 1e-3@.
    , qdecay :: Double
      -- ^ Factor in @[0, 1]@ used to compute average cost
      -- magnitude @C_k@ as follows:
      --
      -- > Q_k = 1 + (qdecay)Q_{k-1},   Q_0 = 0
      -- > C_k = C_{k-1} + (|f_k| - C_{k-1})/Q_k
      --
      -- Defaults to @0.7@.
    , stopRules :: StopRules
      -- ^ Stop rules that define when the iterations should end.
      -- Defaults to @DefaultStopRule 0@.
    , estimateError :: EstimateError
      -- ^ How to calculate the estimated error in the function
      -- value.  Defaults to @RelativeEpsilon 1e-6@.
    , quadraticStep :: Maybe Double
      -- ^ When to attempt quadratic interpolation in line search.
      -- If @Nothing@ then never try a quadratic interpolation
      -- step.  If @Just cutoff@, then attempt quadratic
      -- interpolation in line search when @|f_{k+1} - f_k| / f_k
      -- <= cutoff@.  Defaults to @Just 1e-12@.
    , debugTol :: Maybe Double
      -- ^ If @Just tol@, then always check that @f_{k+1} - f_k <=
      -- tol * C_k@.  Otherwise, if @Nothing@ then no checking of
      -- function values is done.  Defaults to @Nothing@.
    , initialStep :: Maybe Double
      -- ^ If @Just step@, then use @step@ as the initial step of
      -- the line search.  Otherwise, if @Nothing@ then the initial
      -- step is programmatically calculated.  Defaults to
      -- @Nothing@.
    , maxItersFac :: Double
      -- ^ Defines the maximum number of iterations.  The process
      -- is aborted when @maxItersFac * n@ iterations are done, where
      -- @n@ is the number of dimensions.  Defaults to infinity.
    , nexpand :: CInt
      -- ^ Maximum number of times the bracketing interval grows or
      -- shrinks in the line search.  Defaults to @50@.
    , nsecant :: CInt
      -- ^ Maximum number of secant iterations in line search.
      -- Defaults to @50@.
    , restartFac :: Double
      -- ^ Restart the conjugate gradient method after @restartFac
      -- * n@ iterations.  Defaults to @1@.
    , funcEpsilon :: Double
      -- ^ Stop when @-alpha * dphi0@, the estimated change in
      -- function value, is less than @funcEpsilon * |f|@.
      -- Defaults to @0@.
    , nanRho :: Double
      -- ^ After encountering @NaN@ while calculating the step
      -- length, growth factor when searching for a bracketing
      -- interval.  Defaults to @1.3@.
    , techParameters :: TechParameters
      -- ^ Technical parameters which you probably should not
      -- touch.
    } deriving (Eq, Ord, Show, Read)

-- Marshalling for the C struct cg_parameter; the byte offsets
-- below were generated by hsc2hs from cg_user.h and must match
-- the C struct layout exactly.
instance Storable Parameters where
    sizeOf _    = 192
    alignment _ = alignment (undefined :: Double)

    peek ptr = do
      v_printFinal    <- peekByteOff ptr 0
      v_printParams   <- peekByteOff ptr 8
      v_verbose       <- peekByteOff ptr 4
      v_awolfe        <- peekByteOff ptr 12
      v_awolfefac     <- peekByteOff ptr 16
      v_qdecay        <- peekByteOff ptr 24
      v_stopRule      <- peekByteOff ptr 32
      v_stopRuleFac   <- peekByteOff ptr 36
      v_estimateError <- peekByteOff ptr 44
      v_estimateEps   <- peekByteOff ptr 48
      v_quadraticStep <- peekByteOff ptr 56
      v_quadraticCut  <- peekByteOff ptr 60
      v_debug         <- peekByteOff ptr 68
      v_debugTol      <- peekByteOff ptr 72
      v_initialStep   <- peekByteOff ptr 80
      v_maxItersFac   <- peekByteOff ptr 88
      v_nexpand       <- peekByteOff ptr 96
      v_nsecant       <- peekByteOff ptr 100
      v_restartFac    <- peekByteOff ptr 104
      v_funcEpsilon   <- peekByteOff ptr 112
      v_nanRho        <- peekByteOff ptr 120
      v_delta         <- peekByteOff ptr 128
      v_sigma         <- peekByteOff ptr 136
      v_gamma         <- peekByteOff ptr 144
      v_rho           <- peekByteOff ptr 152
      v_eta           <- peekByteOff ptr 160
      v_psi0          <- peekByteOff ptr 168
      v_psi1          <- peekByteOff ptr 176
      v_psi2          <- peekByteOff ptr 184
      let tech = TechParameters { techDelta = v_delta
                                , techSigma = v_sigma
                                , techGamma = v_gamma
                                , techRho   = v_rho
                                , techEta   = v_eta
                                , techPsi0  = v_psi0
                                , techPsi1  = v_psi1
                                , techPsi2  = v_psi2 }
      -- C "booleans" are non-zero ints.
      let b :: CInt -> Bool; b = (/= 0)
      return Parameters
        { printFinal     = b v_printFinal
        , printParams    = b v_printParams
        , verbose        = case v_verbose :: CInt of
                             0 -> Quiet
                             1 -> Verbose
                             _ -> VeryVerbose
        , lineSearch     = if b v_awolfe
                           then ApproximateWolfe
                           else AutoSwitch v_awolfefac
        , qdecay         = v_qdecay
        , stopRules      = if b v_stopRule
                           then DefaultStopRule v_stopRuleFac
                           else AlternativeStopRule
        , estimateError  = if b v_estimateError
                           then RelativeEpsilon v_estimateEps
                           else AbsoluteEpsilon v_estimateEps
        , quadraticStep  = if b v_quadraticStep
                           then Just v_quadraticCut
                           else Nothing
        , debugTol       = if b v_debug then Just v_debugTol else Nothing
        , initialStep    = case v_initialStep of
                             0 -> Nothing
                             x -> Just x
        , maxItersFac    = v_maxItersFac
        , nexpand        = v_nexpand
        , nsecant        = v_nsecant
        , restartFac     = v_restartFac
        , funcEpsilon    = v_funcEpsilon
        , nanRho         = v_nanRho
        , techParameters = tech }

    poke ptr p = do
      -- i: encode a Bool field, m: encode the presence of a Maybe field.
      let i b = if b p then 1 else (0 :: CInt)
          m b = maybe (0 :: CInt) (const 1) (b p)
      pokeByteOff ptr 0 (i printFinal)
      pokeByteOff ptr 8 (i printParams)
      -- NOTE(review): VeryVerbose is written as level 3, skipping
      -- level 2 — presumably cg_descent's highest PrintLevel; confirm
      -- against cg_user.h.
      pokeByteOff ptr 4 (case verbose p of
                           Quiet       -> 0 :: CInt
                           Verbose     -> 1
                           VeryVerbose -> 3)
      let (awolfe, awolfefac) =
            case lineSearch p of
              ApproximateWolfe -> (1, 0)
              AutoSwitch x     -> (0, x)
      pokeByteOff ptr 12 (awolfe :: CInt)
      pokeByteOff ptr 16 awolfefac
      pokeByteOff ptr 24 (qdecay p)
      let (stopRule, stopRuleFac) =
            case stopRules p of
              DefaultStopRule x   -> (1, x)
              AlternativeStopRule -> (0, 0)
      pokeByteOff ptr 32 (stopRule :: CInt)
      pokeByteOff ptr 36 stopRuleFac
      let (pertRule, eps) =
            case estimateError p of
              RelativeEpsilon x -> (1, x)
              AbsoluteEpsilon x -> (0, x)
      pokeByteOff ptr 44 (pertRule :: CInt)
      pokeByteOff ptr 48 eps
      pokeByteOff ptr 56 (m quadraticStep)
      pokeByteOff ptr 60 (maybe 0 id $ quadraticStep p)
      pokeByteOff ptr 68 (m debugTol)
      pokeByteOff ptr 72 (maybe 0 id $ debugTol p)
      pokeByteOff ptr 80 (maybe 0 id $ initialStep p)
      pokeByteOff ptr 88 (maxItersFac p)
      pokeByteOff ptr 96 (nexpand p)
      pokeByteOff ptr 100 (nsecant p)
      pokeByteOff ptr 104 (restartFac p)
      pokeByteOff ptr 112 (funcEpsilon p)
      pokeByteOff ptr 120 (nanRho p)
      pokeByteOff ptr 128 (techDelta $ techParameters p)
      pokeByteOff ptr 136 (techSigma $ techParameters p)
      pokeByteOff ptr 144 (techGamma $ techParameters p)
      pokeByteOff ptr 152 (techRho   $ techParameters p)
      pokeByteOff ptr 160 (techEta   $ techParameters p)
      pokeByteOff ptr 168 (techPsi0  $ techParameters p)
      pokeByteOff ptr 176 (techPsi1  $ techParameters p)
      pokeByteOff ptr 184 (techPsi2  $ techParameters p)

-- | Technical parameters which you probably should not touch.
-- You should read the papers of @CG_DESCENT@ to understand how
-- you can tune these parameters.
data TechParameters = TechParameters
    { techDelta :: Double
      -- ^ Wolfe line search parameter.  Defaults to @0.1@.
    , techSigma :: Double
      -- ^ Wolfe line search parameter.  Defaults to @0.9@.
    , techGamma :: Double
      -- ^ Decay factor for bracket interval width.  Defaults to
      -- @0.66@.
    , techRho :: Double
      -- ^ Growth factor when searching for initial bracketing
      -- interval.  Defaults to @5@.
    , techEta :: Double
      -- ^ Lower bound for the conjugate gradient update parameter
      -- @beta_k@ is @techEta * ||d||_2@.  Defaults to @0.01@.
    , techPsi0 :: Double
      -- ^ Factor used in starting guess for iteration 1.  Defaults
      -- to @0.01@.
    , techPsi1 :: Double
      -- ^ In performing a QuadStep, we evaluate the function at
      -- @psi1 * previous step@.  Defaults to @0.1@.
    , techPsi2 :: Double
      -- ^ When starting a new CG iteration, our initial guess for
      -- the line search stepsize is @psi2 * previous step@.
      -- Defaults to @2@.
    } deriving (Eq, Ord, Show, Read)

-- | How verbose we should be.
data Verbose =
    Quiet
      -- ^ Do not output anything to @stdout@, which most of the
      -- time is good.
  | Verbose
      -- ^ Print what work is being done on each iteration.
  | VeryVerbose
      -- ^ Print information about every step, may be useful for
      -- troubleshooting.
    deriving (Eq, Ord, Show, Read, Enum)

-- | Line search methods that may be used.
data LineSearch =
    ApproximateWolfe
      -- ^ Use approximate Wolfe line search.
  | AutoSwitch Double
      -- ^ Use ordinary Wolfe line search, switch to approximate
      -- Wolfe when
      --
      -- > |f_{k+1} - f_k| < AWolfeFac * C_k
      --
      -- where @C_k@ is the average size of cost and
      -- @AWolfeFac@ is the parameter to this constructor.
    deriving (Eq, Ord, Show, Read)

-- | Stop rules used to decide when to stop iterating.
data StopRules =
    DefaultStopRule Double
      -- ^ @DefaultStopRule stop_fac@ stops when
      --
      -- > |g_k|_infty <= max(grad_tol, |g_0|_infty * stop_fac)
      --
      -- where @|g_i|_infty@ is the maximum absolute component of
      -- the gradient at the @i@-th step.
  | AlternativeStopRule
      -- ^ @AlternativeStopRule@ stops when
      --
      -- > |g_k|_infty <= grad_tol * (1 + |f_k|)
    deriving (Eq, Ord, Show, Read)

-- | How to calculate the estimated error in the function value.
data EstimateError =
    AbsoluteEpsilon Double
      -- ^ @AbsoluteEpsilon eps@ estimates the error as @eps@.
  | RelativeEpsilon Double
      -- ^ @RelativeEpsilon eps@ estimates the error as @eps * C_k@.
    deriving (Eq, Ord, Show, Read)