-- |
-- Module      : Criterion.Main
-- Copyright   : (c) 2009, 2010, 2011 Bryan O'Sullivan
--
-- License     : BSD-style
-- Maintainer  : bos@serpentine.com
-- Stability   : experimental
-- Portability : GHC
--
-- Wrappers for compiling and running benchmarks quickly and easily.
-- See 'defaultMain' below for an example.

module Criterion.Main
    (
    -- * How to write benchmarks
    -- $bench

    -- ** Benchmarking IO actions
    -- $io

    -- ** Benchmarking pure code
    -- $pure

    -- ** Fully evaluating a result
    -- $rnf

    -- * Types
      Benchmarkable(..)
    , Benchmark
    , Pure
    -- * Constructing benchmarks
    , bench
    , bgroup
    , nf
    , whnf
    , nfIO
    , whnfIO
    -- * Running benchmarks
    , defaultMain
    , defaultMainWith
    -- * Other useful code
    , defaultOptions
    , parseArgs
    ) where

import Control.Monad.Trans (liftIO)
import Criterion (runAndAnalyse)
import Criterion.Config
import Criterion.Environment (measureEnvironment)
import Criterion.IO (note, printError)
import Criterion.Monad (Criterion, withConfig)
import Criterion.Types (Benchmarkable(..), Benchmark(..), Pure, bench,
                        benchNames, bgroup, nf, nfIO, whnf, whnfIO)
import Data.List (isPrefixOf, sort)
import Data.Monoid (Monoid(..), Last(..))
import System.Console.GetOpt
import System.Environment (getArgs, getProgName)
import System.Exit (ExitCode(..), exitWith)

-- | Parse a confidence interval.
--
-- Accepts either a bare fraction in (0,1) or a percentage suffixed
-- with @%@; a leading @.@ is permitted (e.g. @\".95\"@ reads as 0.95).
ci :: String -> IO Config
ci s = case reads s' of
         [(d, "%")] -> check (d / 100)
         [(d, "")]  -> check d
         _          -> parseError "invalid confidence interval provided"
  where
    -- Prepend a zero so that inputs like ".95" parse via 'reads'.
    s' = case s of
           ('.':_) -> '0' : s
           _       -> s
    check d
      | d <= 0    = parseError "confidence interval is negative"
      | d >= 1    = parseError "confidence interval is greater than 1"
      | otherwise = return mempty { cfgConfInterval = ljust d }

-- | Parse a positive number.
--
-- @q@ names the quantity (used in error messages) and @f@ injects the
-- parsed value into a 'Config'.
pos :: (Num a, Ord a, Read a) =>
       String -> (Last a -> Config) -> String -> IO Config
pos q f s = case reads s of
              [(n, "")]
                | n > 0     -> return . f $ ljust n
                | otherwise -> parseError $ q ++ " must be positive"
              _             -> parseError $ "invalid " ++ q ++ " provided"

-- | Build a no-argument option descriptor from a constant 'Config'.
noArg :: Config -> ArgDescr (IO Config)
noArg = NoArg . return

-- | The standard options accepted on the command line.
defaultOptions :: [OptDescr (IO Config)]
defaultOptions =
  [ Option ['h','?'] ["help"] (noArg mempty { cfgPrintExit = Help })
           "print help, then exit"
  , Option ['G'] ["no-gc"] (noArg mempty { cfgPerformGC = ljust False })
           "do not collect garbage between iterations"
  , Option ['g'] ["gc"] (noArg mempty { cfgPerformGC = ljust True })
           "collect garbage between iterations"
  , Option ['I'] ["ci"] (ReqArg ci "CI")
           "bootstrap confidence interval"
  , Option ['l'] ["list"] (noArg mempty { cfgPrintExit = List })
           "print only a list of benchmark names"
  , Option ['o'] ["output"]
           (ReqArg (\t -> return $ mempty { cfgReport = ljust t }) "FILENAME")
           "report file to write to"
  , Option ['q'] ["quiet"] (noArg mempty { cfgVerbosity = ljust Quiet })
           "print less output"
  , Option [] ["resamples"]
           (ReqArg (pos "resample count" $ \n -> mempty { cfgResamples = n }) "N")
           "number of bootstrap resamples to perform"
  , Option ['s'] ["samples"]
           (ReqArg (pos "sample count" $ \n -> mempty { cfgSamples = n }) "N")
           "number of samples to collect"
  , Option ['t'] ["template"]
           (ReqArg (\t -> return $ mempty { cfgTemplate = ljust t }) "FILENAME")
           "template file to use"
  , Option ['u'] ["summary"]
           (ReqArg (\s -> return $ mempty { cfgSummaryFile = ljust s }) "FILENAME")
           "produce a summary CSV file of all results"
  , Option ['V'] ["version"] (noArg mempty { cfgPrintExit = Version })
           "display version, then exit"
  , Option ['v'] ["verbose"] (noArg mempty { cfgVerbosity = ljust Verbose })
           "print more output"
  ]

-- | Print the configured banner, or a placeholder if none was set.
printBanner :: Config -> IO ()
printBanner cfg =
  withConfig cfg $
    case cfgBanner cfg of
      Last (Just b) -> note "%s\n" b
      _             -> note "Hey, nobody told me what version I am!\n"

-- | Print usage information for the given options, then exit with the
-- given code.
printUsage :: [OptDescr (IO Config)] -> ExitCode -> IO a
printUsage options exitCode = do
  p <- getProgName
  putStr (usageInfo ("Usage: " ++ p ++ " [OPTIONS] [BENCHMARKS]") options)
  putStrLn "If no benchmark names are given, all are run\n\
           \Otherwise, benchmarks are run by prefix match"
  exitWith exitCode

-- | Parse command line options.
parseArgs :: Config -> [OptDescr (IO Config)] -> [String]
          -> IO (Config, [String])
parseArgs defCfg options args =
  case getOpt Permute options args of
    (_, _, (err:_)) -> parseError err
    (opts, rest, _) -> do
      -- Fold the per-option deltas into the default configuration;
      -- later options win via the 'Last' monoid inside 'Config'.
      cfg <- (mappend defCfg . mconcat) `fmap` sequence opts
      case cfgPrintExit cfg of
        Help    -> printBanner cfg >> printUsage options ExitSuccess
        Version -> printBanner cfg >> exitWith ExitSuccess
        _       -> return (cfg, rest)

-- | An entry point that can be used as a @main@ function.
--
-- > import Criterion.Main
-- >
-- > fib :: Int -> Int
-- > fib 0 = 0
-- > fib 1 = 1
-- > fib n = fib (n-1) + fib (n-2)
-- >
-- > main = defaultMain [
-- >          bgroup "fib" [ bench "10" $ whnf fib 10
-- >                       , bench "35" $ whnf fib 35
-- >                       , bench "37" $ whnf fib 37
-- >                       ]
-- >        ]
defaultMain :: [Benchmark] -> IO ()
defaultMain = defaultMainWith defaultConfig (return ())

-- | An entry point that can be used as a @main@ function, with
-- configurable defaults.
--
-- Example:
--
-- > import Criterion.Config
-- > import qualified Criterion.MultiMap as M
-- > import Criterion.Main
-- >
-- > myConfig = defaultConfig {
-- >              -- Always GC between runs.
-- >              cfgPerformGC = ljust True
-- >            }
-- >
-- > main = defaultMainWith myConfig (return ()) [
-- >          bench "fib 30" $ whnf fib 30
-- >        ]
--
-- If you save the above example as @\"Fib.hs\"@, you should be able
-- to compile it as follows:
--
-- > ghc -O --make Fib
--
-- Run @\"Fib --help\"@ on the command line to get a list of command
-- line options.
defaultMainWith :: Config
                -> Criterion ()
                -- ^ Prepare data prior to executing the first benchmark.
                -> [Benchmark]
                -> IO ()
defaultMainWith defCfg prep bs = do
  (cfg, args) <- parseArgs defCfg defaultOptions =<< getArgs
  withConfig cfg $
    if cfgPrintExit cfg == List
    then do
      _ <- note "Benchmarks:\n"
      mapM_ (note "  %s\n") (sort $ concatMap benchNames bs)
    else do
      -- Write the CSV header up front if a summary file was requested.
      case getLast $ cfgSummaryFile cfg of
        Just fn -> liftIO $
                   writeFile fn "Name,Mean,MeanLB,MeanUB,Stddev,StddevLB,StddevUB\n"
        Nothing -> return ()
      env <- measureEnvironment
      -- With no names on the command line, run everything; otherwise
      -- run benchmarks whose names match a given prefix.
      let shouldRun b = null args || any (`isPrefixOf` b) args
      prep
      runAndAnalyse shouldRun env $ BenchGroup "" bs

-- | Display an error message from a command line parsing failure, and
-- exit.
parseError :: String -> IO a
parseError msg = do
  _ <- printError "Error: %s" msg
  _ <- printError "Run \"%s --help\" for usage information\n" =<< getProgName
  exitWith (ExitFailure 64)

-- $bench
--
-- The 'Benchmarkable' typeclass represents the class of all code that
-- can be benchmarked.  Every instance must run a benchmark a given
-- number of times.  We are most interested in benchmarking two things:
--
-- * 'IO' actions.  Any 'IO' action can be benchmarked directly.
--
-- * Pure functions.  GHC optimises aggressively when compiling with
--   @-O@, so it is easy to write innocent-looking benchmark code that
--   doesn't measure the performance of a pure function at all.  We
--   work around this by benchmarking both a function and its final
--   argument together.

-- $io
--
-- Any 'IO' action can be benchmarked easily if its type resembles
-- this:
--
-- @
-- 'IO' a
-- @

-- $pure
--
-- Because GHC optimises aggressively when compiling with @-O@, it is
-- potentially easy to write innocent-looking benchmark code that will
-- only be evaluated once, for which all but the first iteration of
-- the timing loop will be timing the cost of doing nothing.
--
-- To work around this, we provide a special type, 'Pure', for
-- benchmarking pure code.  Values of this type are constructed using
-- one of two functions.
--
-- The first is a function which will cause results to be evaluated to
-- head normal form (NF):
--
-- @
-- 'nf' :: 'NFData' b => (a -> b) -> a -> 'Pure'
-- @
--
-- The second will cause results to be evaluated to weak head normal
-- form (the Haskell default):
--
-- @
-- 'whnf' :: (a -> b) -> a -> 'Pure'
-- @
--
-- As both of these types suggest, when you want to benchmark a
-- function, you must supply two values:
--
-- * The first element is the function, saturated with all but its
--   last argument.
--
-- * The second element is the last argument to the function.
--
-- Here is an example that makes the use of these functions clearer.
-- Suppose we want to benchmark the following function:
--
-- @
-- firstN :: Int -> [Int]
-- firstN k = take k [(0::Int)..]
-- @
--
-- So in the easy case, we construct a benchmark as follows:
--
-- @
-- 'nf' firstN 1000
-- @
--
-- The compiler will correctly infer that the number 1000 must have
-- the type 'Int', and the type of the expression is 'Pure'.

-- $rnf
--
-- The 'whnf' harness for evaluating a pure function only evaluates
-- the result to weak head normal form (WHNF).  If you need the result
-- evaluated all the way to normal form, use the 'nf' function to
-- force its complete evaluation.
--
-- Using the @firstN@ example from earlier, to naive eyes it might
-- /appear/ that the following code ought to benchmark the production
-- of the first 1000 list elements:
--
-- @
-- 'whnf' firstN 1000
-- @
--
-- Because in this case the result will only be forced until it
-- reaches WHNF, what this would /actually/ benchmark is merely the
-- production of the first list element!