-- Hoogle documentation, generated by Haddock
-- See Hoogle, http://www.haskell.org/hoogle/


-- | Robust, reliable performance measurement and analysis
--   
--   This library provides a powerful but simple way to measure software
--   performance. It provides both a framework for executing and analysing
--   benchmarks and a set of driver functions that makes it easy to build
--   and run benchmarks, and to analyse their results.
--   
--   The fastest way to get started is to read the <a>online tutorial</a>,
--   followed by the documentation and examples in the
--   <a>Criterion.Main</a> module.
--   
--   For examples of the kinds of reports that criterion generates, see
--   <a>the home page</a>.
@package criterion
@version 1.1.1.0


-- | Types for benchmarking.
--   
--   The core type is <a>Benchmarkable</a>, which admits both pure
--   functions and <a>IO</a> actions.
--   
--   For a pure function of type <tt>a -&gt; b</tt>, the benchmarking
--   harness calls this function repeatedly, each time with a different
--   <a>Int64</a> argument (the number of times to run the function in a
--   loop), and reduces the result the function returns to weak head normal
--   form.
--   
--   For an action of type <tt>IO a</tt>, the benchmarking harness calls
--   the action repeatedly, but does not reduce the result.
module Criterion.Types

-- | Top-level benchmarking configuration.
data Config
Config :: Double -> Bool -> Double -> Int -> [([String], String)] -> Maybe FilePath -> Maybe FilePath -> Maybe FilePath -> Maybe FilePath -> Verbosity -> FilePath -> Config

-- | Confidence interval for bootstrap estimation (greater than 0, less
--   than 1).
[confInterval] :: Config -> Double

-- | <i>Obsolete, unused</i>. This option used to force garbage collection
--   between every benchmark run, but it no longer has an effect (we now
--   unconditionally force garbage collection). This option remains solely
--   for backwards API compatibility.
[forceGC] :: Config -> Bool

-- | Number of seconds to run a single benchmark. (In practice, execution
--   time will very slightly exceed this limit.)
[timeLimit] :: Config -> Double

-- | Number of resamples to perform when bootstrapping.
[resamples] :: Config -> Int

-- | Regressions to perform.
[regressions] :: Config -> [([String], String)]

-- | File to write binary measurement and analysis data to. If not
--   specified, this will be a temporary file.
[rawDataFile] :: Config -> Maybe FilePath

-- | File to write report output to, with template expanded.
[reportFile] :: Config -> Maybe FilePath

-- | File to write CSV summary to.
[csvFile] :: Config -> Maybe FilePath

-- | File to write JUnit-compatible XML results to.
[junitFile] :: Config -> Maybe FilePath

-- | Verbosity level to use when running and analysing benchmarks.
[verbosity] :: Config -> Verbosity

-- | Template file to use if writing a report.
[template] :: Config -> FilePath

-- | Control the amount of information displayed.
data Verbosity
Quiet :: Verbosity
Normal :: Verbosity
Verbose :: Verbosity

-- | A pure function or impure action that can be benchmarked. The
--   <a>Int64</a> parameter indicates the number of times to run the given
--   function or action.
newtype Benchmarkable
Benchmarkable :: (Int64 -> IO ()) -> Benchmarkable
[runRepeatedly] :: Benchmarkable -> Int64 -> IO ()

-- | Specification of a collection of benchmarks and environments. A
--   benchmark may consist of:
--   
--   <ul>
--   <li>An environment that creates input data for benchmarks, created
--   with <a>env</a>.</li>
--   <li>A single <a>Benchmarkable</a> item with a name, created with
--   <a>bench</a>.</li>
--   <li>A (possibly nested) group of <a>Benchmark</a>s, created with
--   <a>bgroup</a>.</li>
--   </ul>
data Benchmark
[Environment] :: NFData env => IO env -> (env -> Benchmark) -> Benchmark
[Benchmark] :: String -> Benchmarkable -> Benchmark
[BenchGroup] :: String -> [Benchmark] -> Benchmark

-- | A collection of measurements made while benchmarking.
--   
--   Measurements related to garbage collection are tagged with <b>GC</b>.
--   They will only be available if a benchmark is run with <tt>"+RTS
--   -T"</tt>.
--   
--   <b>Packed storage.</b> When GC statistics cannot be collected, GC
--   values will be set to huge negative values. If a field is labeled with
--   "<b>GC</b>" below, use <a>fromInt</a> and <a>fromDouble</a> to safely
--   convert to "real" values.
data Measured
Measured :: !Double -> !Double -> !Int64 -> !Int64 -> !Int64 -> !Int64 -> !Int64 -> !Double -> !Double -> !Double -> !Double -> Measured

-- | Total wall-clock time elapsed, in seconds.
[measTime] :: Measured -> !Double

-- | Total CPU time elapsed, in seconds. Includes both user and kernel
--   (system) time.
[measCpuTime] :: Measured -> !Double

-- | Cycles, in unspecified units that may be CPU cycles. (On i386 and
--   x86_64, this is measured using the <tt>rdtsc</tt> instruction.)
[measCycles] :: Measured -> !Int64

-- | Number of loop iterations measured.
[measIters] :: Measured -> !Int64

-- | <b>(GC)</b> Number of bytes allocated. Access using <a>fromInt</a>.
[measAllocated] :: Measured -> !Int64

-- | <b>(GC)</b> Number of garbage collections performed. Access using
--   <a>fromInt</a>.
[measNumGcs] :: Measured -> !Int64

-- | <b>(GC)</b> Number of bytes copied during garbage collection. Access
--   using <a>fromInt</a>.
[measBytesCopied] :: Measured -> !Int64

-- | <b>(GC)</b> Wall-clock time spent doing real work ("mutation"), as
--   distinct from garbage collection. Access using <a>fromDouble</a>.
[measMutatorWallSeconds] :: Measured -> !Double

-- | <b>(GC)</b> CPU time spent doing real work ("mutation"), as distinct
--   from garbage collection. Access using <a>fromDouble</a>.
[measMutatorCpuSeconds] :: Measured -> !Double

-- | <b>(GC)</b> Wall-clock time spent doing garbage collection. Access
--   using <a>fromDouble</a>.
[measGcWallSeconds] :: Measured -> !Double

-- | <b>(GC)</b> CPU time spent doing garbage collection. Access using
--   <a>fromDouble</a>.
[measGcCpuSeconds] :: Measured -> !Double

-- | Convert a (possibly unavailable) GC measurement to a true value. If
--   the measurement is a huge negative number that corresponds to "no
--   data", this will return <a>Nothing</a>.
fromInt :: Int64 -> Maybe Int64

-- | Convert from a true value back to the packed representation used for
--   GC measurements.
toInt :: Maybe Int64 -> Int64

-- | Convert a (possibly unavailable) GC measurement to a true value. If
--   the measurement is a huge negative number that corresponds to "no
--   data", this will return <a>Nothing</a>.
fromDouble :: Double -> Maybe Double

-- | Convert from a true value back to the packed representation used for
--   GC measurements.
toDouble :: Maybe Double -> Double

-- | Field names and accessors for a <a>Measured</a> record.
measureAccessors :: Map String (Measured -> Maybe Double, String)

-- | Field names in a <a>Measured</a> record, in the order in which they
--   appear.
measureKeys :: [String]
measure :: (Unbox a) => (Measured -> a) -> Vector Measured -> Vector a

-- | Normalise every measurement as if <a>measIters</a> was 1.
--   
--   (<a>measIters</a> itself is left unaffected.)
rescale :: Measured -> Measured

-- | Run a benchmark (or collection of benchmarks) in the given
--   environment. The purpose of an environment is to lazily create input
--   data to pass to the functions that will be benchmarked.
--   
--   A common example of environment data is input that is read from a
--   file. Another is a large data structure constructed in-place.
--   
--   <b>Motivation.</b> In earlier versions of criterion, all benchmark
--   inputs were always created when a program started running. By
--   deferring the creation of an environment until its associated
--   benchmarks need it, we avoid two problems that this strategy
--   caused:
--   
--   <ul>
--   <li>Memory pressure distorted the results of unrelated benchmarks. If
--   one benchmark needed e.g. a gigabyte-sized input, it would force the
--   garbage collector to do extra work when running some other benchmark
--   that had no use for that input. Since the data created by an
--   environment is only available when it is in scope, it should be
--   garbage collected before other benchmarks are run.</li>
--   <li>The time cost of generating all needed inputs could be significant
--   in cases where no inputs (or just a few) were really needed. This
--   occurred often, for instance when just one out of a large suite of
--   benchmarks was run, or when a user would list the collection of
--   benchmarks without running any.</li>
--   </ul>
--   
--   <b>Creation.</b> An environment is created right before its related
--   benchmarks are run. The <a>IO</a> action that creates the environment
--   is run, then the newly created environment is evaluated to normal form
--   (hence the <a>NFData</a> constraint) before being passed to the
--   function that receives the environment.
--   
--   <b>Complex environments.</b> If you need to create an environment that
--   contains multiple values, simply pack the values into a tuple.
--   
--   <b>Lazy pattern matching.</b> In situations where a "real" environment
--   is not needed, e.g. if a list of benchmark names is being generated,
--   <tt>undefined</tt> will be passed to the function that receives the
--   environment. This avoids the overhead of generating an environment
--   that will not actually be used.
--   
--   The function that receives the environment must use lazy pattern
--   matching to deconstruct the tuple, as use of strict pattern matching
--   will cause a crash if <tt>undefined</tt> is passed in.
--   
--   <b>Example.</b> This program runs benchmarks in an environment that
--   contains two values. The first value is the contents of a text file;
--   the second is a string. Pay attention to the use of a lazy pattern to
--   deconstruct the tuple in the function that returns the benchmarks to
--   be run.
--   
--   <pre>
--   setupEnv = do
--     let small = replicate 1000 (1 :: Int)
--     big &lt;- map length . words &lt;$&gt; readFile "/usr/dict/words"
--     return (small, big)
--   
--   main = defaultMain [
--      -- notice the lazy pattern match here!
--      env setupEnv $ \ ~(small,big) -&gt; bgroup "main" [
--      bgroup "small" [
--        bench "length" $ whnf length small
--      , bench "length . filter" $ whnf (length . filter (==1)) small
--      ]
--    ,  bgroup "big" [
--        bench "length" $ whnf length big
--      , bench "length . filter" $ whnf (length . filter (==1)) big
--      ]
--    ] ]
--   </pre>
--   
--   <b>Discussion.</b> The environment created in the example above is
--   intentionally <i>not</i> ideal. As Haskell's scoping rules suggest,
--   the variable <tt>big</tt> is in scope for the benchmarks that use only
--   <tt>small</tt>. It would be better to create a separate environment
--   for <tt>big</tt>, so that it will not be kept alive while the
--   unrelated benchmarks are being run.
env :: NFData env => IO env -> (env -> Benchmark) -> Benchmark

-- | Create a single benchmark.
bench :: String -> Benchmarkable -> Benchmark

-- | Group several benchmarks together under a common name.
bgroup :: String -> [Benchmark] -> Benchmark

-- | Add the given prefix to a name. If the prefix is empty, the name is
--   returned unmodified. Otherwise, the prefix and name are separated by a
--   <tt>'/'</tt> character.
addPrefix :: String -> String -> String

-- | Retrieve the names of all benchmarks. Grouped benchmarks are prefixed
--   with the name of the group they're in.
benchNames :: Benchmark -> [String]

-- | Apply an argument to a function, and evaluate the result to weak head
--   normal form (WHNF).
whnf :: (a -> b) -> a -> Benchmarkable

-- | Apply an argument to a function, and evaluate the result to normal
--   form (NF).
nf :: NFData b => (a -> b) -> a -> Benchmarkable

-- | Perform an action, then evaluate its result to normal form. This
--   is particularly useful for forcing a lazy <a>IO</a> action to be
--   completely performed.
nfIO :: NFData a => IO a -> Benchmarkable

-- | Perform an action, then evaluate its result to weak head normal form
--   (WHNF). This is useful for forcing an <a>IO</a> action whose result is
--   an expression to be evaluated down to a more useful value.
whnfIO :: IO a -> Benchmarkable

-- | Outliers from sample data, calculated using the boxplot technique.
data Outliers
Outliers :: !Int64 -> !Int64 -> !Int64 -> !Int64 -> !Int64 -> Outliers
[samplesSeen] :: Outliers -> !Int64

-- | More than 3 times the interquartile range (IQR) below the first
--   quartile.
[lowSevere] :: Outliers -> !Int64

-- | Between 1.5 and 3 times the IQR below the first quartile.
[lowMild] :: Outliers -> !Int64

-- | Between 1.5 and 3 times the IQR above the third quartile.
[highMild] :: Outliers -> !Int64

-- | More than 3 times the IQR above the third quartile.
[highSevere] :: Outliers -> !Int64

-- | A description of the extent to which outliers in the sample data
--   affect the sample mean and standard deviation.
data OutlierEffect

-- | Less than 1% effect.
Unaffected :: OutlierEffect

-- | Between 1% and 10%.
Slight :: OutlierEffect

-- | Between 10% and 50%.
Moderate :: OutlierEffect

-- | Above 50% (i.e. measurements are useless).
Severe :: OutlierEffect

-- | Analysis of the extent to which outliers in a sample affect its
--   standard deviation (and to some extent, its mean).
data OutlierVariance
OutlierVariance :: OutlierEffect -> String -> Double -> OutlierVariance

-- | Qualitative description of effect.
[ovEffect] :: OutlierVariance -> OutlierEffect

-- | Brief textual description of effect.
[ovDesc] :: OutlierVariance -> String

-- | Quantitative description of effect (a fraction between 0 and 1).
[ovFraction] :: OutlierVariance -> Double

-- | Results of a linear regression.
data Regression
Regression :: String -> Map String Estimate -> Estimate -> Regression

-- | Name of the responding variable.
[regResponder] :: Regression -> String

-- | Map from name to value of predictor coefficients.
[regCoeffs] :: Regression -> Map String Estimate

-- | R² goodness-of-fit estimate.
[regRSquare] :: Regression -> Estimate

-- | Data for a KDE chart of performance.
data KDE
KDE :: String -> Vector Double -> Vector Double -> KDE
[kdeType] :: KDE -> String
[kdeValues] :: KDE -> Vector Double
[kdePDF] :: KDE -> Vector Double

-- | Report of a sample analysis.
data Report
Report :: Int -> String -> [String] -> Vector Measured -> SampleAnalysis -> Outliers -> [KDE] -> Report

-- | A simple index indicating that this is the <i>n</i>th report.
[reportNumber] :: Report -> Int

-- | The name of this report.
[reportName] :: Report -> String

-- | See <a>measureKeys</a>.
[reportKeys] :: Report -> [String]

-- | Raw measurements. These are <i>not</i> corrected for the estimated
--   measurement overhead that can be found via the <a>anOverhead</a> field
--   of <a>reportAnalysis</a>.
[reportMeasured] :: Report -> Vector Measured

-- | Report analysis.
[reportAnalysis] :: Report -> SampleAnalysis

-- | Analysis of outliers.
[reportOutliers] :: Report -> Outliers

-- | Data for a KDE of times.
[reportKDEs] :: Report -> [KDE]

-- | Result of a bootstrap analysis of a non-parametric sample.
data SampleAnalysis
SampleAnalysis :: [Regression] -> Double -> Estimate -> Estimate -> OutlierVariance -> SampleAnalysis

-- | Estimates calculated via linear regression.
[anRegress] :: SampleAnalysis -> [Regression]

-- | Estimated measurement overhead, in seconds. Estimation is performed
--   via linear regression.
[anOverhead] :: SampleAnalysis -> Double

-- | Estimated mean.
[anMean] :: SampleAnalysis -> Estimate

-- | Estimated standard deviation.
[anStdDev] :: SampleAnalysis -> Estimate

-- | Description of the effects of outliers on the estimated variance.
[anOutlierVar] :: SampleAnalysis -> OutlierVariance
data DataRecord
Measurement :: Int -> String -> (Vector Measured) -> DataRecord
Analysed :: Report -> DataRecord
instance GHC.Generics.Generic Criterion.Types.DataRecord
instance Data.Data.Data Criterion.Types.DataRecord
instance GHC.Show.Show Criterion.Types.DataRecord
instance GHC.Read.Read Criterion.Types.DataRecord
instance GHC.Classes.Eq Criterion.Types.DataRecord
instance GHC.Generics.Generic Criterion.Types.Report
instance Data.Data.Data Criterion.Types.Report
instance GHC.Show.Show Criterion.Types.Report
instance GHC.Read.Read Criterion.Types.Report
instance GHC.Classes.Eq Criterion.Types.Report
instance GHC.Generics.Generic Criterion.Types.KDE
instance Data.Data.Data Criterion.Types.KDE
instance GHC.Show.Show Criterion.Types.KDE
instance GHC.Read.Read Criterion.Types.KDE
instance GHC.Classes.Eq Criterion.Types.KDE
instance GHC.Generics.Generic Criterion.Types.SampleAnalysis
instance Data.Data.Data Criterion.Types.SampleAnalysis
instance GHC.Show.Show Criterion.Types.SampleAnalysis
instance GHC.Read.Read Criterion.Types.SampleAnalysis
instance GHC.Classes.Eq Criterion.Types.SampleAnalysis
instance GHC.Generics.Generic Criterion.Types.Regression
instance Data.Data.Data Criterion.Types.Regression
instance GHC.Show.Show Criterion.Types.Regression
instance GHC.Read.Read Criterion.Types.Regression
instance GHC.Classes.Eq Criterion.Types.Regression
instance GHC.Generics.Generic Criterion.Types.OutlierVariance
instance Data.Data.Data Criterion.Types.OutlierVariance
instance GHC.Show.Show Criterion.Types.OutlierVariance
instance GHC.Read.Read Criterion.Types.OutlierVariance
instance GHC.Classes.Eq Criterion.Types.OutlierVariance
instance GHC.Generics.Generic Criterion.Types.OutlierEffect
instance Data.Data.Data Criterion.Types.OutlierEffect
instance GHC.Show.Show Criterion.Types.OutlierEffect
instance GHC.Read.Read Criterion.Types.OutlierEffect
instance GHC.Classes.Ord Criterion.Types.OutlierEffect
instance GHC.Classes.Eq Criterion.Types.OutlierEffect
instance GHC.Generics.Generic Criterion.Types.Outliers
instance Data.Data.Data Criterion.Types.Outliers
instance GHC.Show.Show Criterion.Types.Outliers
instance GHC.Read.Read Criterion.Types.Outliers
instance GHC.Classes.Eq Criterion.Types.Outliers
instance GHC.Generics.Generic Criterion.Types.Measured
instance Data.Data.Data Criterion.Types.Measured
instance GHC.Show.Show Criterion.Types.Measured
instance GHC.Read.Read Criterion.Types.Measured
instance GHC.Classes.Eq Criterion.Types.Measured
instance GHC.Generics.Generic Criterion.Types.Config
instance Data.Data.Data Criterion.Types.Config
instance GHC.Show.Show Criterion.Types.Config
instance GHC.Read.Read Criterion.Types.Config
instance GHC.Classes.Eq Criterion.Types.Config
instance GHC.Generics.Generic Criterion.Types.Verbosity
instance Data.Data.Data Criterion.Types.Verbosity
instance GHC.Show.Show Criterion.Types.Verbosity
instance GHC.Read.Read Criterion.Types.Verbosity
instance GHC.Enum.Enum Criterion.Types.Verbosity
instance GHC.Enum.Bounded Criterion.Types.Verbosity
instance GHC.Classes.Ord Criterion.Types.Verbosity
instance GHC.Classes.Eq Criterion.Types.Verbosity
instance Data.Aeson.Types.Class.FromJSON Criterion.Types.Measured
instance Data.Aeson.Types.Class.ToJSON Criterion.Types.Measured
instance Control.DeepSeq.NFData Criterion.Types.Measured
instance Data.Binary.Class.Binary Criterion.Types.Measured
instance GHC.Show.Show Criterion.Types.Benchmark
instance Data.Aeson.Types.Class.FromJSON Criterion.Types.Outliers
instance Data.Aeson.Types.Class.ToJSON Criterion.Types.Outliers
instance Data.Binary.Class.Binary Criterion.Types.Outliers
instance Control.DeepSeq.NFData Criterion.Types.Outliers
instance Data.Aeson.Types.Class.FromJSON Criterion.Types.OutlierEffect
instance Data.Aeson.Types.Class.ToJSON Criterion.Types.OutlierEffect
instance Data.Binary.Class.Binary Criterion.Types.OutlierEffect
instance Control.DeepSeq.NFData Criterion.Types.OutlierEffect
instance GHC.Base.Monoid Criterion.Types.Outliers
instance Data.Aeson.Types.Class.FromJSON Criterion.Types.OutlierVariance
instance Data.Aeson.Types.Class.ToJSON Criterion.Types.OutlierVariance
instance Data.Binary.Class.Binary Criterion.Types.OutlierVariance
instance Control.DeepSeq.NFData Criterion.Types.OutlierVariance
instance Data.Aeson.Types.Class.FromJSON Criterion.Types.Regression
instance Data.Aeson.Types.Class.ToJSON Criterion.Types.Regression
instance Data.Binary.Class.Binary Criterion.Types.Regression
instance Control.DeepSeq.NFData Criterion.Types.Regression
instance Data.Aeson.Types.Class.FromJSON Criterion.Types.SampleAnalysis
instance Data.Aeson.Types.Class.ToJSON Criterion.Types.SampleAnalysis
instance Data.Binary.Class.Binary Criterion.Types.SampleAnalysis
instance Control.DeepSeq.NFData Criterion.Types.SampleAnalysis
instance Data.Aeson.Types.Class.FromJSON Criterion.Types.KDE
instance Data.Aeson.Types.Class.ToJSON Criterion.Types.KDE
instance Data.Binary.Class.Binary Criterion.Types.KDE
instance Control.DeepSeq.NFData Criterion.Types.KDE
instance Data.Aeson.Types.Class.FromJSON Criterion.Types.Report
instance Data.Aeson.Types.Class.ToJSON Criterion.Types.Report
instance Data.Binary.Class.Binary Criterion.Types.Report
instance Control.DeepSeq.NFData Criterion.Types.Report
instance Data.Binary.Class.Binary Criterion.Types.DataRecord
instance Control.DeepSeq.NFData Criterion.Types.DataRecord
instance Data.Aeson.Types.Class.FromJSON Criterion.Types.DataRecord
instance Data.Aeson.Types.Class.ToJSON Criterion.Types.DataRecord


-- | Benchmark measurement code.
module Criterion.Measurement

-- | Set up time measurement.
initializeTime :: IO ()

-- | Return the current wallclock time, in seconds since some arbitrary
--   time.
--   
--   You <i>must</i> call <a>initializeTime</a> once before calling this
--   function!
getTime :: IO Double

-- | Return the amount of elapsed CPU time, combining user and kernel
--   (system) time into a single measure.
getCPUTime :: IO Double

-- | Read the CPU cycle counter.
getCycles :: IO Word64

-- | Try to get GC statistics, bearing in mind that the GHC runtime will
--   throw an exception if statistics collection was not enabled using
--   "<tt>+RTS -T</tt>".
getGCStats :: IO (Maybe GCStats)

-- | Convert a number of seconds to a string. The string will consist of
--   four decimal places, followed by a short description of the time
--   units.
secs :: Double -> String

-- | Measure the execution of a benchmark a given number of times.
measure :: Benchmarkable -> Int64 -> IO (Measured, Double)

-- | Run a single benchmark, and return measurements collected while
--   executing it, along with the amount of time the measurement process
--   took.
runBenchmark :: Benchmarkable -> Double -> IO (Vector Measured, Double)

-- | An empty structure.
measured :: Measured

-- | Apply the difference between two sets of GC statistics to a
--   measurement.
applyGCStats :: Maybe GCStats -> Maybe GCStats -> Measured -> Measured

-- | The amount of time a benchmark must run for in order for us to have
--   some trust in the raw measurement.
--   
--   We set this threshold so that we can generate enough data to later
--   perform meaningful statistical analyses.
--   
--   The threshold is 30 milliseconds. One use of <a>runBenchmark</a> must
--   accumulate more than 300 milliseconds of total measurements above this
--   threshold before it will finish.
threshold :: Double


-- | The environment in which most criterion code executes.
module Criterion.Monad

-- | The monad in which most criterion code executes.
data Criterion a

-- | Run a <a>Criterion</a> action with the given <a>Config</a>.
withConfig :: Config -> Criterion a -> IO a

-- | Return a random number generator, creating one if necessary.
--   
--   This is not currently thread-safe, but in a harmless way (we might
--   call <a>createSystemRandom</a> more than once if multiple threads
--   race).
getGen :: Criterion GenIO

-- | Return an estimate of the measurement overhead.
getOverhead :: Criterion Double


-- | Reporting functions.
module Criterion.Report

-- | Format a series of <a>Report</a> values using the given Hastache
--   template.
formatReport :: [Report] -> Text -> IO Text

-- | Write out a series of <a>Report</a> values to a single file, if
--   configured to do so.
report :: [Report] -> Criterion ()

-- | Trim long flat tails from a KDE plot.
tidyTails :: KDE -> KDE

-- | A problem arose with a template.
data TemplateException

-- | The template could not be found.
TemplateNotFound :: FilePath -> TemplateException

-- | Load a Hastache template file.
--   
--   If the name is an absolute or relative path, the search path is
--   <i>not</i> used, and the name is treated as a literal path.
--   
--   This function throws a <a>TemplateException</a> if the template could
--   not be found, or an <a>IOException</a> if no template could be loaded.
loadTemplate :: [FilePath] -> FilePath -> IO Text

-- | Attempt to include the contents of a file based on a search path.
--   Returns <a>empty</a> if the search fails or the file could not be
--   read.
--   
--   Intended for use with Hastache's <a>MuLambdaM</a>, for example:
--   
--   <pre>
--   context "include" = <a>MuLambdaM</a> $ <a>includeFile</a> [<tt>templateDir</tt>]
--   </pre>
--   
--   Hastache template expansion is <i>not</i> performed within the
--   included file. No attempt is made to ensure that the included file
--   path is safe, i.e. that it does not refer to an unexpected file such
--   as "<tt>/etc/passwd</tt>".
includeFile :: (MonadIO m) => [FilePath] -> Text -> m Text

-- | Return the path to the template and other files used for generating
--   reports.
getTemplateDir :: IO FilePath

-- | Render the elements of a vector.
--   
--   For example, given this piece of Haskell:
--   
--   <pre>
--   <a>mkStrContext</a> $ \name -&gt;
--    case name of
--      "foo" -&gt; <a>vector</a> "x" foo
--   </pre>
--   
--   It will substitute each value in the vector for <tt>x</tt> in the
--   following Hastache template:
--   
--   <pre>
--   {{#foo}}
--    {{x}}
--   {{/foo}}
--   </pre>
vector :: (Monad m, Vector v a, MuVar a) => String -> v a -> MuType m

-- | Render the elements of two vectors.
vector2 :: (Monad m, Vector v a, Vector v b, MuVar a, MuVar b) => String -> String -> v a -> v b -> MuType m
instance GHC.Generics.Generic Criterion.Report.TemplateException
instance Data.Data.Data Criterion.Report.TemplateException
instance GHC.Show.Show Criterion.Report.TemplateException
instance GHC.Read.Read Criterion.Report.TemplateException
instance GHC.Classes.Eq Criterion.Report.TemplateException
instance GHC.Exception.Exception Criterion.Report.TemplateException


-- | Input and output actions.
module Criterion.IO.Printf

-- | An internal class that acts like Printf/HPrintf.
--   
--   The implementation is visible to the rest of the program, but the
--   details of the class are not.
class CritHPrintfType a

-- | Print a "normal" note.
note :: (CritHPrintfType r) => String -> r

-- | Print an error message.
printError :: (CritHPrintfType r) => String -> r

-- | Print verbose output.
prolix :: (CritHPrintfType r) => String -> r

-- | Write a record to a CSV file.
writeCsv :: ToRecord a => a -> Criterion ()
instance Criterion.IO.Printf.CritHPrintfType (Criterion.Monad.Internal.Criterion a)
instance Criterion.IO.Printf.CritHPrintfType (GHC.Types.IO a)
instance (Criterion.IO.Printf.CritHPrintfType r, Text.Printf.PrintfArg a) => Criterion.IO.Printf.CritHPrintfType (a -> r)


-- | Analysis code for benchmarks.
module Criterion.Analysis

-- | Outliers from sample data, calculated using the boxplot technique.
data Outliers
Outliers :: !Int64 -> !Int64 -> !Int64 -> !Int64 -> !Int64 -> Outliers
[samplesSeen] :: Outliers -> !Int64

-- | More than 3 times the interquartile range (IQR) below the first
--   quartile.
[lowSevere] :: Outliers -> !Int64

-- | Between 1.5 and 3 times the IQR below the first quartile.
[lowMild] :: Outliers -> !Int64

-- | Between 1.5 and 3 times the IQR above the third quartile.
[highMild] :: Outliers -> !Int64

-- | More than 3 times the IQR above the third quartile.
[highSevere] :: Outliers -> !Int64

-- | A description of the extent to which outliers in the sample data
--   affect the sample mean and standard deviation.
data OutlierEffect

-- | Less than 1% effect.
Unaffected :: OutlierEffect

-- | Between 1% and 10%.
Slight :: OutlierEffect

-- | Between 10% and 50%.
Moderate :: OutlierEffect

-- | Above 50% (i.e. measurements are useless).
Severe :: OutlierEffect

-- | Analysis of the extent to which outliers in a sample affect its
--   standard deviation (and to some extent, its mean).
data OutlierVariance
OutlierVariance :: OutlierEffect -> String -> Double -> OutlierVariance

-- | Qualitative description of effect.
[ovEffect] :: OutlierVariance -> OutlierEffect

-- | Brief textual description of effect.
[ovDesc] :: OutlierVariance -> String

-- | Quantitative description of effect (a fraction between 0 and 1).
[ovFraction] :: OutlierVariance -> Double

-- | Result of a bootstrap analysis of a non-parametric sample.
data SampleAnalysis
SampleAnalysis :: [Regression] -> Double -> Estimate -> Estimate -> OutlierVariance -> SampleAnalysis

-- | Estimates calculated via linear regression.
[anRegress] :: SampleAnalysis -> [Regression]

-- | Estimated measurement overhead, in seconds. Estimation is performed
--   via linear regression.
[anOverhead] :: SampleAnalysis -> Double

-- | Estimated mean.
[anMean] :: SampleAnalysis -> Estimate

-- | Estimated standard deviation.
[anStdDev] :: SampleAnalysis -> Estimate

-- | Description of the effects of outliers on the estimated variance.
[anOutlierVar] :: SampleAnalysis -> OutlierVariance

-- | Perform an analysis of a measurement.
analyseSample :: Int -> String -> Vector Measured -> ExceptT String Criterion Report

-- | Multiply the <tt>Estimate</tt>s in an analysis by the given value,
--   using <a>scale</a>.
scale :: Double -> SampleAnalysis -> SampleAnalysis

-- | Display the mean of a <a>Sample</a>, and characterise the outliers
--   present in the sample.
analyseMean :: Sample -> Int -> Criterion Double

-- | Count the total number of outliers in a sample.
countOutliers :: Outliers -> Int64

-- | Classify outliers in a data set, using the boxplot technique.
classifyOutliers :: Sample -> Outliers

-- | Display a report of the <a>Outliers</a> present in a <a>Sample</a>.
noteOutliers :: Outliers -> Criterion ()

-- | Compute the extent to which outliers in the sample data affect the
--   sample mean and standard deviation.
outlierVariance :: Estimate -> Estimate -> Double -> OutlierVariance

-- | Given a list of accessor names (see <a>measureKeys</a>), return either
--   a mapping from accessor name to function or an error message if any
--   names are wrong.
resolveAccessors :: [String] -> Either String [(String, Measured -> Maybe Double)]

-- | Given predictor and responder names, do some basic validation, then
--   hand back the relevant accessors.
validateAccessors :: [String] -> String -> Either String [(String, Measured -> Maybe Double)]

-- | Regress the given predictors against the responder.
--   
--   Errors may be returned under various circumstances, such as invalid
--   names or lack of needed data.
--   
--   See <a>olsRegress</a> for details of the regression performed.
regress :: GenIO -> [String] -> String -> Vector Measured -> ExceptT String Criterion Regression


-- | Benchmarking command-line configuration.
module Criterion.Main.Options

-- | Execution mode for a benchmark program.
data Mode

-- | List all benchmarks.
List :: Mode

-- | Print the version.
Version :: Mode

-- | Run the given benchmarks, without collecting or analysing performance
--   numbers.
RunIters :: Int64 -> MatchType -> [String] -> Mode

-- | Run and analyse the given benchmarks.
Run :: Config -> MatchType -> [String] -> Mode

-- | How to match a benchmark name.
data MatchType

-- | Match by prefix. For example, a prefix of <tt>"foo"</tt> will match
--   <tt>"foobar"</tt>.
Prefix :: MatchType

-- | Match by Unix-style glob pattern.
Glob :: MatchType

-- | Default benchmarking configuration.
defaultConfig :: Config

-- | Parse a command line.
parseWith :: Config -> Parser Mode

-- | Flesh out a command line parser.
describe :: Config -> ParserInfo Mode

-- | A string describing the version of this benchmark (really, the version
--   of criterion that was used to build it).
versionInfo :: String
instance GHC.Generics.Generic Criterion.Main.Options.Mode
instance Data.Data.Data Criterion.Main.Options.Mode
instance GHC.Show.Show Criterion.Main.Options.Mode
instance GHC.Read.Read Criterion.Main.Options.Mode
instance GHC.Classes.Eq Criterion.Main.Options.Mode
instance GHC.Generics.Generic Criterion.Main.Options.MatchType
instance Data.Data.Data Criterion.Main.Options.MatchType
instance GHC.Show.Show Criterion.Main.Options.MatchType
instance GHC.Read.Read Criterion.Main.Options.MatchType
instance GHC.Enum.Enum Criterion.Main.Options.MatchType
instance GHC.Enum.Bounded Criterion.Main.Options.MatchType
instance GHC.Classes.Ord Criterion.Main.Options.MatchType
instance GHC.Classes.Eq Criterion.Main.Options.MatchType


-- | Input and output actions.
module Criterion.IO

-- | The header identifies a criterion data file. This contains version
--   information; there is no expectation of cross-version compatibility.
header :: ByteString

-- | Read all records from the given <a>Handle</a>.
hGetRecords :: Binary a => Handle -> IO (Either String [a])

-- | Write records to the given <a>Handle</a>.
hPutRecords :: Binary a => Handle -> [a] -> IO ()

-- | Read all records from the given file.
readRecords :: Binary a => FilePath -> IO (Either String [a])

-- | Write records to the given file.
writeRecords :: Binary a => FilePath -> [a] -> IO ()


-- | Core benchmarking code.
module Criterion.Internal

-- | Run, and analyse, one or more benchmarks.
runAndAnalyse :: (String -> Bool) -> Benchmark -> Criterion ()

-- | Run a single benchmark and analyse its performance.
runAndAnalyseOne :: Int -> String -> Benchmarkable -> Criterion DataRecord

-- | Run a single benchmark.
runOne :: Int -> String -> Benchmarkable -> Criterion DataRecord

-- | Run a benchmark without analysing its performance.
runFixedIters :: Int64 -> (String -> Bool) -> Benchmark -> Criterion ()


-- | Wrappers for compiling and running benchmarks quickly and easily. See
--   <a>defaultMain</a> below for an example.
module Criterion.Main

-- | A pure function or impure action that can be benchmarked. The
--   <a>Int64</a> parameter indicates the number of times to run the given
--   function or action.
data Benchmarkable

-- | Specification of a collection of benchmarks and environments. A
--   benchmark may consist of:
--   
--   <ul>
--   <li>An environment that creates input data for benchmarks, created
--   with <a>env</a>.</li>
--   <li>A single <a>Benchmarkable</a> item with a name, created with
--   <a>bench</a>.</li>
--   <li>A (possibly nested) group of <a>Benchmark</a>s, created with
--   <a>bgroup</a>.</li>
--   </ul>
data Benchmark

-- | Run a benchmark (or collection of benchmarks) in the given
--   environment. The purpose of an environment is to lazily create input
--   data to pass to the functions that will be benchmarked.
--   
--   A common example of environment data is input that is read from a
--   file. Another is a large data structure constructed in-place.
--   
--   <b>Motivation.</b> In earlier versions of criterion, all benchmark
--   inputs were always created when a program started running. By
--   deferring the creation of an environment until its associated
--   benchmarks need it, we avoid two problems that this strategy
--   caused:
--   
--   <ul>
--   <li>Memory pressure distorted the results of unrelated benchmarks. If
--   one benchmark needed e.g. a gigabyte-sized input, it would force the
--   garbage collector to do extra work when running some other benchmark
--   that had no use for that input. Since the data created by an
--   environment is only available when it is in scope, it should be
--   garbage collected before other benchmarks are run.</li>
--   <li>The time cost of generating all needed inputs could be significant
--   in cases where no inputs (or just a few) were really needed. This
--   occurred often, for instance when just one out of a large suite of
--   benchmarks was run, or when a user would list the collection of
--   benchmarks without running any.</li>
--   </ul>
--   
--   <b>Creation.</b> An environment is created right before its related
--   benchmarks are run. The <a>IO</a> action that creates the environment
--   is run, then the newly created environment is evaluated to normal form
--   (hence the <a>NFData</a> constraint) before being passed to the
--   function that receives the environment.
--   
--   <b>Complex environments.</b> If you need to create an environment that
--   contains multiple values, simply pack the values into a tuple.
--   
--   <b>Lazy pattern matching.</b> In situations where a "real" environment
--   is not needed, e.g. if a list of benchmark names is being generated,
--   <tt>undefined</tt> will be passed to the function that receives the
--   environment. This avoids the overhead of generating an environment
--   that will not actually be used.
--   
--   The function that receives the environment must use lazy pattern
--   matching to deconstruct the tuple, as use of strict pattern matching
--   will cause a crash if <tt>undefined</tt> is passed in.
--   
--   <b>Example.</b> This program runs benchmarks in an environment that
--   contains two values. The first value is the contents of a text file;
--   the second is a string. Pay attention to the use of a lazy pattern to
--   deconstruct the tuple in the function that returns the benchmarks to
--   be run.
--   
--   <pre>
--   setupEnv = do
--     let small = replicate 1000 (1 :: Int)
--     big &lt;- map length . words &lt;$&gt; readFile "/usr/dict/words"
--     return (small, big)
--   
--   main = defaultMain [
--      -- notice the lazy pattern match here!
--      env setupEnv $ \ ~(small,big) -&gt; bgroup "main" [
--      bgroup "small" [
--        bench "length" $ whnf length small
--      , bench "length . filter" $ whnf (length . filter (==1)) small
--      ]
--    ,  bgroup "big" [
--        bench "length" $ whnf length big
--      , bench "length . filter" $ whnf (length . filter (==1)) big
--      ]
--    ] ]
--   </pre>
--   
--   <b>Discussion.</b> The environment created in the example above is
--   intentionally <i>not</i> ideal. As Haskell's scoping rules suggest,
--   the variable <tt>big</tt> is in scope for the benchmarks that use only
--   <tt>small</tt>. It would be better to create a separate environment
--   for <tt>big</tt>, so that it will not be kept alive while the
--   unrelated benchmarks are being run.
env :: NFData env => IO env -> (env -> Benchmark) -> Benchmark

-- | Create a single benchmark.
bench :: String -> Benchmarkable -> Benchmark

-- | Group several benchmarks together under a common name.
bgroup :: String -> [Benchmark] -> Benchmark

-- | Apply an argument to a function, and evaluate the result to normal
--   form (NF).
nf :: NFData b => (a -> b) -> a -> Benchmarkable

-- | Apply an argument to a function, and evaluate the result to weak head
--   normal form (WHNF).
whnf :: (a -> b) -> a -> Benchmarkable

-- | Perform an action, then evaluate its result to normal form. This
--   is particularly useful for forcing a lazy <a>IO</a> action to be
--   completely performed.
nfIO :: NFData a => IO a -> Benchmarkable

-- | Perform an action, then evaluate its result to weak head normal form
--   (WHNF). This is useful for forcing an <a>IO</a> action whose result is
--   an expression to be evaluated down to a more useful value.
whnfIO :: IO a -> Benchmarkable

-- | An entry point that can be used as a <tt>main</tt> function.
--   
--   <pre>
--   import Criterion.Main
--   
--   fib :: Int -&gt; Int
--   fib 0 = 0
--   fib 1 = 1
--   fib n = fib (n-1) + fib (n-2)
--   
--   main = defaultMain [
--          bgroup "fib" [ bench "10" $ whnf fib 10
--                       , bench "35" $ whnf fib 35
--                       , bench "37" $ whnf fib 37
--                       ]
--                      ]
--   </pre>
defaultMain :: [Benchmark] -> IO ()

-- | An entry point that can be used as a <tt>main</tt> function, with
--   configurable defaults.
--   
--   Example:
--   
--   <pre>
--   import Criterion.Main.Options
--   import Criterion.Main
--   
--   myConfig = defaultConfig {
--                -- Note: 'forceGC' is obsolete and no longer has any
--                -- effect; garbage collection is now always forced.
--                forceGC = False
--              }
--   
--   main = defaultMainWith myConfig [
--            bench "fib 30" $ whnf fib 30
--          ]
--   </pre>
--   
--   If you save the above example as <tt>"Fib.hs"</tt>, you should be able
--   to compile it as follows:
--   
--   <pre>
--   ghc -O --make Fib
--   </pre>
--   
--   Run <tt>"Fib --help"</tt> on the command line to get a list of command
--   line options.
defaultMainWith :: Config -> [Benchmark] -> IO ()

-- | Default benchmarking configuration.
defaultConfig :: Config

-- | Create a function that can tell if a name given on the command line
--   matches a benchmark.
makeMatcher :: MatchType -> [String] -> Either String (String -> Bool)

-- | Run a set of <a>Benchmark</a>s with the given <a>Mode</a>.
--   
--   This can be useful if you have a <a>Mode</a> from some other source
--   (e.g. from one in your benchmark driver's command-line parser).
runMode :: Mode -> [Benchmark] -> IO ()


-- | Core benchmarking code.
module Criterion

-- | A pure function or impure action that can be benchmarked. The
--   <a>Int64</a> parameter indicates the number of times to run the given
--   function or action.
data Benchmarkable

-- | Specification of a collection of benchmarks and environments. A
--   benchmark may consist of:
--   
--   <ul>
--   <li>An environment that creates input data for benchmarks, created
--   with <a>env</a>.</li>
--   <li>A single <a>Benchmarkable</a> item with a name, created with
--   <a>bench</a>.</li>
--   <li>A (possibly nested) group of <a>Benchmark</a>s, created with
--   <a>bgroup</a>.</li>
--   </ul>
data Benchmark

-- | Run a benchmark (or collection of benchmarks) in the given
--   environment. The purpose of an environment is to lazily create input
--   data to pass to the functions that will be benchmarked.
--   
--   A common example of environment data is input that is read from a
--   file. Another is a large data structure constructed in-place.
--   
--   <b>Motivation.</b> In earlier versions of criterion, all benchmark
--   inputs were always created when a program started running. By
--   deferring the creation of an environment until its associated
--   benchmarks need it, we avoid two problems that this strategy
--   caused:
--   
--   <ul>
--   <li>Memory pressure distorted the results of unrelated benchmarks. If
--   one benchmark needed e.g. a gigabyte-sized input, it would force the
--   garbage collector to do extra work when running some other benchmark
--   that had no use for that input. Since the data created by an
--   environment is only available when it is in scope, it should be
--   garbage collected before other benchmarks are run.</li>
--   <li>The time cost of generating all needed inputs could be significant
--   in cases where no inputs (or just a few) were really needed. This
--   occurred often, for instance when just one out of a large suite of
--   benchmarks was run, or when a user would list the collection of
--   benchmarks without running any.</li>
--   </ul>
--   
--   <b>Creation.</b> An environment is created right before its related
--   benchmarks are run. The <a>IO</a> action that creates the environment
--   is run, then the newly created environment is evaluated to normal form
--   (hence the <a>NFData</a> constraint) before being passed to the
--   function that receives the environment.
--   
--   <b>Complex environments.</b> If you need to create an environment that
--   contains multiple values, simply pack the values into a tuple.
--   
--   <b>Lazy pattern matching.</b> In situations where a "real" environment
--   is not needed, e.g. if a list of benchmark names is being generated,
--   <tt>undefined</tt> will be passed to the function that receives the
--   environment. This avoids the overhead of generating an environment
--   that will not actually be used.
--   
--   The function that receives the environment must use lazy pattern
--   matching to deconstruct the tuple, as use of strict pattern matching
--   will cause a crash if <tt>undefined</tt> is passed in.
--   
--   <b>Example.</b> This program runs benchmarks in an environment that
--   contains two values. The first value is the contents of a text file;
--   the second is a string. Pay attention to the use of a lazy pattern to
--   deconstruct the tuple in the function that returns the benchmarks to
--   be run.
--   
--   <pre>
--   setupEnv = do
--     let small = replicate 1000 (1 :: Int)
--     big &lt;- map length . words &lt;$&gt; readFile "/usr/dict/words"
--     return (small, big)
--   
--   main = defaultMain [
--      -- notice the lazy pattern match here!
--      env setupEnv $ \ ~(small,big) -&gt; bgroup "main" [
--      bgroup "small" [
--        bench "length" $ whnf length small
--      , bench "length . filter" $ whnf (length . filter (==1)) small
--      ]
--    ,  bgroup "big" [
--        bench "length" $ whnf length big
--      , bench "length . filter" $ whnf (length . filter (==1)) big
--      ]
--    ] ]
--   </pre>
--   
--   <b>Discussion.</b> The environment created in the example above is
--   intentionally <i>not</i> ideal. As Haskell's scoping rules suggest,
--   the variable <tt>big</tt> is in scope for the benchmarks that use only
--   <tt>small</tt>. It would be better to create a separate environment
--   for <tt>big</tt>, so that it will not be kept alive while the
--   unrelated benchmarks are being run.
env :: NFData env => IO env -> (env -> Benchmark) -> Benchmark

-- | Create a single benchmark.
bench :: String -> Benchmarkable -> Benchmark

-- | Group several benchmarks together under a common name.
bgroup :: String -> [Benchmark] -> Benchmark

-- | Apply an argument to a function, and evaluate the result to normal
--   form (NF).
nf :: NFData b => (a -> b) -> a -> Benchmarkable

-- | Apply an argument to a function, and evaluate the result to weak head
--   normal form (WHNF).
whnf :: (a -> b) -> a -> Benchmarkable

-- | Perform an action, then evaluate its result to normal form. This
--   is particularly useful for forcing a lazy <a>IO</a> action to be
--   completely performed.
nfIO :: NFData a => IO a -> Benchmarkable

-- | Perform an action, then evaluate its result to weak head normal form
--   (WHNF). This is useful for forcing an <a>IO</a> action whose result is
--   an expression to be evaluated down to a more useful value.
whnfIO :: IO a -> Benchmarkable

-- | Run a benchmark interactively, and analyse its performance.
benchmark :: Benchmarkable -> IO ()

-- | Run a benchmark interactively, and analyse its performance.
benchmarkWith :: Config -> Benchmarkable -> IO ()

-- | Run a benchmark interactively, analyse its performance, and return the
--   analysis.
benchmark' :: Benchmarkable -> IO Report

-- | Run a benchmark interactively, analyse its performance, and return the
--   analysis.
benchmarkWith' :: Config -> Benchmarkable -> IO Report
