diff gtc/simple-statistics.js @ 89:18f8c214169f

add gtc
author paulo
date Sun, 19 Feb 2017 19:45:31 -0800
parents
children
line diff
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/gtc/simple-statistics.js	Sun Feb 19 19:45:31 2017 -0800
     1.3 @@ -0,0 +1,3514 @@
     1.4 +(function(f){if(typeof exports==="object"&&typeof module!=="undefined"){module.exports=f()}else if(typeof define==="function"&&define.amd){define([],f)}else{var g;if(typeof window!=="undefined"){g=window}else if(typeof global!=="undefined"){g=global}else if(typeof self!=="undefined"){g=self}else{g=this}g.ss = f()}})(function(){var define,module,exports;return (function e(t,n,r){function s(o,u){if(!n[o]){if(!t[o]){var a=typeof require=="function"&&require;if(!u&&a)return a(o,!0);if(i)return i(o,!0);var f=new Error("Cannot find module '"+o+"'");throw f.code="MODULE_NOT_FOUND",f}var l=n[o]={exports:{}};t[o][0].call(l.exports,function(e){var n=t[o][1][e];return s(n?n:e)},l,l.exports,e,t,n,r)}return n[o].exports}var i=typeof require=="function"&&require;for(var o=0;o<r.length;o++)s(r[o]);return s})({1:[function(require,module,exports){
     1.5 +/* @flow */
     1.6 +'use strict';
     1.7 +
     1.8 +// # simple-statistics
     1.9 +//
    1.10 +// A simple, literate statistics system.
    1.11 +
    1.12 +var ss = module.exports = {};
    1.13 +
    1.14 +// Linear Regression
    1.15 +ss.linearRegression = require(21);
    1.16 +ss.linearRegressionLine = require(22);
    1.17 +ss.standardDeviation = require(54);
    1.18 +ss.rSquared = require(43);
    1.19 +ss.mode = require(32);
    1.20 +ss.modeSorted = require(33);
    1.21 +ss.min = require(29);
    1.22 +ss.max = require(23);
    1.23 +ss.minSorted = require(30);
    1.24 +ss.maxSorted = require(24);
    1.25 +ss.sum = require(56);
    1.26 +ss.sumSimple = require(58);
    1.27 +ss.product = require(39);
    1.28 +ss.quantile = require(40);
    1.29 +ss.quantileSorted = require(41);
    1.30 +ss.iqr = ss.interquartileRange = require(19);
    1.31 +ss.medianAbsoluteDeviation = ss.mad = require(27);
    1.32 +ss.chunk = require(8);
    1.33 +ss.shuffle = require(51);
    1.34 +ss.shuffleInPlace = require(52);
    1.35 +ss.sample = require(45);
    1.36 +ss.ckmeans = require(9);
    1.37 +ss.uniqueCountSorted = require(61);
    1.38 +ss.sumNthPowerDeviations = require(57);
    1.39 +ss.equalIntervalBreaks = require(14);
    1.40 +
    1.41 +// sample statistics
    1.42 +ss.sampleCovariance = require(47);
    1.43 +ss.sampleCorrelation = require(46);
    1.44 +ss.sampleVariance = require(50);
    1.45 +ss.sampleStandardDeviation = require(49);
    1.46 +ss.sampleSkewness = require(48);
    1.47 +
    1.48 +// combinatorics
    1.49 +ss.permutationsHeap = require(36);
    1.50 +ss.combinations = require(10);
    1.51 +ss.combinationsReplacement = require(11);
    1.52 +
    1.53 +// measures of centrality
    1.54 +ss.geometricMean = require(17);
    1.55 +ss.harmonicMean = require(18);
    1.56 +ss.mean = ss.average = require(25);
    1.57 +ss.median = require(26);
    1.58 +ss.medianSorted = require(28);
    1.59 +
    1.60 +ss.rootMeanSquare = ss.rms = require(44);
    1.61 +ss.variance = require(62);
    1.62 +ss.tTest = require(59);
    1.63 +ss.tTestTwoSample = require(60);
    1.64 +// ss.jenks = require('./src/jenks');
    1.65 +
    1.66 +// Classifiers
    1.67 +ss.bayesian = require(2);
    1.68 +ss.perceptron = require(35);
    1.69 +
    1.70 +// Distribution-related methods
    1.71 +ss.epsilon = require(13); // We make ε available to the test suite.
    1.72 +ss.factorial = require(16);
    1.73 +ss.bernoulliDistribution = require(3);
    1.74 +ss.binomialDistribution = require(4);
    1.75 +ss.poissonDistribution = require(37);
    1.76 +ss.chiSquaredGoodnessOfFit = require(7);
    1.77 +
    1.78 +// Normal distribution
    1.79 +ss.zScore = require(63);
    1.80 +ss.cumulativeStdNormalProbability = require(12);
    1.81 +ss.standardNormalTable = require(55);
    1.82 +ss.errorFunction = ss.erf = require(15);
    1.83 +ss.inverseErrorFunction = require(20);
    1.84 +ss.probit = require(38);
    1.85 +ss.mixin = require(31);
    1.86 +
    1.87 +// Root-finding methods
    1.88 +ss.bisect = require(5);
    1.89 +
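A quick usage sketch of the assembled namespace (hypothetical, assuming the bundle has been loaded so the methods wired above are available on the global `ss`):

ss.mean([1, 2, 3]);               // => 2
ss.iqr === ss.interquartileRange; // => true, both aliases point at the same function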
    1.90 +},{"10":10,"11":11,"12":12,"13":13,"14":14,"15":15,"16":16,"17":17,"18":18,"19":19,"2":2,"20":20,"21":21,"22":22,"23":23,"24":24,"25":25,"26":26,"27":27,"28":28,"29":29,"3":3,"30":30,"31":31,"32":32,"33":33,"35":35,"36":36,"37":37,"38":38,"39":39,"4":4,"40":40,"41":41,"43":43,"44":44,"45":45,"46":46,"47":47,"48":48,"49":49,"5":5,"50":50,"51":51,"52":52,"54":54,"55":55,"56":56,"57":57,"58":58,"59":59,"60":60,"61":61,"62":62,"63":63,"7":7,"8":8,"9":9}],2:[function(require,module,exports){
    1.91 +'use strict';
    1.92 +/* @flow */
    1.93 +
    1.94 +/**
    1.95 + * [Bayesian Classifier](http://en.wikipedia.org/wiki/Naive_Bayes_classifier)
    1.96 + *
     1.97 + * This is a naïve Bayesian classifier that takes
    1.98 + * singly-nested objects.
    1.99 + *
   1.100 + * @class
   1.101 + * @example
   1.102 + * var bayes = new BayesianClassifier();
   1.103 + * bayes.train({
   1.104 + *   species: 'Cat'
   1.105 + * }, 'animal');
   1.106 + * var result = bayes.score({
   1.107 + *   species: 'Cat'
   1.108 + * })
   1.109 + * // result
   1.110 + * // {
   1.111 + * //   animal: 1
   1.112 + * // }
   1.113 + */
   1.114 +function BayesianClassifier() {
   1.115 +    // The number of items that are currently
   1.116 +    // classified in the model
   1.117 +    this.totalCount = 0;
   1.118 +    // Every item classified in the model
   1.119 +    this.data = {};
   1.120 +}
   1.121 +
   1.122 +/**
   1.123 + * Train the classifier with a new item, which has a single
    1.124 + * dimension of JavaScript literal keys and values.
   1.125 + *
   1.126 + * @param {Object} item an object with singly-deep properties
   1.127 + * @param {string} category the category this item belongs to
   1.128 + * @return {undefined} adds the item to the classifier
   1.129 + */
   1.130 +BayesianClassifier.prototype.train = function(item, category) {
   1.131 +    // If the data object doesn't have any values
   1.132 +    // for this category, create a new object for it.
   1.133 +    if (!this.data[category]) {
   1.134 +        this.data[category] = {};
   1.135 +    }
   1.136 +
   1.137 +    // Iterate through each key in the item.
   1.138 +    for (var k in item) {
   1.139 +        var v = item[k];
   1.140 +        // Initialize the nested object `data[category][k][item[k]]`
   1.141 +        // with an object of keys that equal 0.
   1.142 +        if (this.data[category][k] === undefined) {
   1.143 +            this.data[category][k] = {};
   1.144 +        }
   1.145 +        if (this.data[category][k][v] === undefined) {
   1.146 +            this.data[category][k][v] = 0;
   1.147 +        }
   1.148 +
   1.149 +        // And increment the key for this key/value combination.
   1.150 +        this.data[category][k][v]++;
   1.151 +    }
   1.152 +
   1.153 +    // Increment the number of items classified
   1.154 +    this.totalCount++;
   1.155 +};
   1.156 +
   1.157 +/**
   1.158 + * Generate a score of how well this item matches all
   1.159 + * possible categories based on its attributes
   1.160 + *
   1.161 + * @param {Object} item an item in the same format as with train
    1.162 + * @returns {Object} a map from each trained category to the probability
    1.163 + * that this item belongs to it.
   1.164 + */
   1.165 +BayesianClassifier.prototype.score = function(item) {
   1.166 +    // Initialize an empty array of odds per category.
   1.167 +    var odds = {}, category;
   1.168 +    // Iterate through each key in the item,
   1.169 +    // then iterate through each category that has been used
   1.170 +    // in previous calls to `.train()`
   1.171 +    for (var k in item) {
   1.172 +        var v = item[k];
   1.173 +        for (category in this.data) {
    1.174 +            // Create an empty object for storing key/value combinations
   1.175 +            // for this category.
   1.176 +            odds[category] = {};
   1.177 +
   1.178 +            // If this item doesn't even have a property, it counts for nothing,
   1.179 +            // but if it does have the property that we're looking for from
   1.180 +            // the item to categorize, it counts based on how popular it is
   1.181 +            // versus the whole population.
   1.182 +            if (this.data[category][k]) {
   1.183 +                odds[category][k + '_' + v] = (this.data[category][k][v] || 0) / this.totalCount;
   1.184 +            } else {
   1.185 +                odds[category][k + '_' + v] = 0;
   1.186 +            }
   1.187 +        }
   1.188 +    }
   1.189 +
   1.190 +    // Set up a new object that will contain sums of these odds by category
   1.191 +    var oddsSums = {};
   1.192 +
   1.193 +    for (category in odds) {
   1.194 +        // Tally all of the odds for each category-combination pair -
   1.195 +        // the non-existence of a category does not add anything to the
   1.196 +        // score.
   1.197 +        oddsSums[category] = 0;
   1.198 +        for (var combination in odds[category]) {
   1.199 +            oddsSums[category] += odds[category][combination];
   1.200 +        }
   1.201 +    }
   1.202 +
   1.203 +    return oddsSums;
   1.204 +};
   1.205 +
   1.206 +module.exports = BayesianClassifier;
   1.207 +
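A short usage sketch of the classifier described above (hypothetical values, assuming the bundle exposes this constructor as `ss.bayesian`):

var classifier = new ss.bayesian();
// Train on singly-nested objects, one category label per call.
classifier.train({ species: 'Cat' }, 'animal');
classifier.train({ species: 'Oak' }, 'plant');
// Score an unseen item against every category seen so far.
classifier.score({ species: 'Cat' });
// => { animal: 0.5, plant: 0 }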
   1.208 +},{}],3:[function(require,module,exports){
   1.209 +'use strict';
   1.210 +/* @flow */
   1.211 +
   1.212 +var binomialDistribution = require(4);
   1.213 +
   1.214 +/**
   1.215 + * The [Bernoulli distribution](http://en.wikipedia.org/wiki/Bernoulli_distribution)
    1.216 + * is the discrete probability
    1.217 + * distribution of a random variable which takes value 1 with success
   1.218 + * probability `p` and value 0 with failure
   1.219 + * probability `q` = 1 - `p`. It can be used, for example, to represent the
   1.220 + * toss of a coin, where "1" is defined to mean "heads" and "0" is defined
   1.221 + * to mean "tails" (or vice versa). It is
   1.222 + * a special case of a Binomial Distribution
   1.223 + * where `n` = 1.
   1.224 + *
   1.225 + * @param {number} p input value, between 0 and 1 inclusive
    1.226 + * @returns {Object} the Bernoulli distribution, mapping outcomes 0 and 1 to their probabilities (NaN if `p` is not a valid probability)
   1.227 + * @example
   1.228 + * bernoulliDistribution(0.5); // => { '0': 0.5, '1': 0.5 }
   1.229 + */
   1.230 +function bernoulliDistribution(p/*: number */) {
   1.231 +    // Check that `p` is a valid probability (0 ≤ p ≤ 1)
   1.232 +    if (p < 0 || p > 1 ) { return NaN; }
   1.233 +
   1.234 +    return binomialDistribution(1, p);
   1.235 +}
   1.236 +
   1.237 +module.exports = bernoulliDistribution;
   1.238 +
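A minimal sketch of the return shape, assuming the bundle exposes this as `ss.bernoulliDistribution`:

ss.bernoulliDistribution(0.3); // => { '0': 0.7, '1': 0.3 }
ss.bernoulliDistribution(1.5); // => NaN, since 1.5 is not a valid probability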
   1.239 +},{"4":4}],4:[function(require,module,exports){
   1.240 +'use strict';
   1.241 +/* @flow */
   1.242 +
   1.243 +var epsilon = require(13);
   1.244 +var factorial = require(16);
   1.245 +
   1.246 +/**
   1.247 + * The [Binomial Distribution](http://en.wikipedia.org/wiki/Binomial_distribution) is the discrete probability
   1.248 + * distribution of the number of successes in a sequence of n independent yes/no experiments, each of which yields
   1.249 + * success with probability `probability`. Such a success/failure experiment is also called a Bernoulli experiment or
   1.250 + * Bernoulli trial; when trials = 1, the Binomial Distribution is a Bernoulli Distribution.
   1.251 + *
   1.252 + * @param {number} trials number of trials to simulate
    1.253 + * @param {number} probability success probability of each trial, between 0 and 1 inclusive
    1.254 + * @returns {Object} output: an object mapping each number of successes to its probability
   1.255 + */
   1.256 +function binomialDistribution(
   1.257 +    trials/*: number */,
   1.258 +    probability/*: number */)/*: ?Object */ {
   1.259 +    // Check that `p` is a valid probability (0 ≤ p ≤ 1),
   1.260 +    // that `n` is an integer, strictly positive.
   1.261 +    if (probability < 0 || probability > 1 ||
   1.262 +        trials <= 0 || trials % 1 !== 0) {
   1.263 +        return undefined;
   1.264 +    }
   1.265 +
    1.266 +    // We initialize `x`, the random variable, and `cumulativeProbability`, an
    1.267 +    // accumulator for the cumulative distribution function, to 0.
    1.268 +    // `cells` is the object we'll return: it maps each value of `x` to the
    1.269 +    // probability of seeing exactly that many successes in `trials` trials.
    1.270 +    // We iterate until the `cumulativeProbability` is
    1.271 +    // within `epsilon` of 1.0.
   1.272 +    var x = 0,
   1.273 +        cumulativeProbability = 0,
   1.274 +        cells = {};
   1.275 +
   1.276 +    // This algorithm iterates through each potential outcome,
   1.277 +    // until the `cumulativeProbability` is very close to 1, at
   1.278 +    // which point we've defined the vast majority of outcomes
   1.279 +    do {
   1.280 +        // a [probability mass function](https://en.wikipedia.org/wiki/Probability_mass_function)
   1.281 +        cells[x] = factorial(trials) /
   1.282 +            (factorial(x) * factorial(trials - x)) *
   1.283 +            (Math.pow(probability, x) * Math.pow(1 - probability, trials - x));
   1.284 +        cumulativeProbability += cells[x];
   1.285 +        x++;
   1.286 +    // when the cumulativeProbability is nearly 1, we've calculated
   1.287 +    // the useful range of this distribution
   1.288 +    } while (cumulativeProbability < 1 - epsilon);
   1.289 +
   1.290 +    return cells;
   1.291 +}
   1.292 +
   1.293 +module.exports = binomialDistribution;
   1.294 +
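A sketch of what the iteration above produces, assuming the bundle exposes this as `ss.binomialDistribution`: keys are success counts, values are their probabilities, and the entries sum to within `epsilon` of 1.

var dist = ss.binomialDistribution(4, 0.5);
// dist => { '0': 0.0625, '1': 0.25, '2': 0.375, '3': 0.25, '4': 0.0625 }
ss.binomialDistribution(0, 0.5); // => undefined, since trials must be a positive integer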
   1.295 +},{"13":13,"16":16}],5:[function(require,module,exports){
   1.296 +'use strict';
   1.297 +/* @flow */
   1.298 +
   1.299 +var sign = require(53);
   1.300 +/**
   1.301 + * [Bisection method](https://en.wikipedia.org/wiki/Bisection_method) is a root-finding 
   1.302 + * method that repeatedly bisects an interval to find the root.
   1.303 + * 
   1.304 + * This function returns a numerical approximation to the exact value.
   1.305 + * 
   1.306 + * @param {Function} func input function
    1.307 + * @param {number} start - start of interval
    1.308 + * @param {number} end - end of interval
    1.309 + * @param {number} maxIterations - the maximum number of iterations
    1.310 + * @param {number} errorTolerance - the error tolerance
    1.311 + * @returns {number} estimated root value
   1.312 + * @throws {TypeError} Argument func must be a function
   1.313 + * 
   1.314 + * @example
   1.315 + * bisect(Math.cos,0,4,100,0.003); // => 1.572265625
   1.316 + */
   1.317 +function bisect(
   1.318 +    func/*: (x: any) => number */,
   1.319 +    start/*: number */,
   1.320 +    end/*: number */,
   1.321 +    maxIterations/*: number */,
   1.322 +    errorTolerance/*: number */)/*:number*/ {
   1.323 +
   1.324 +    if (typeof func !== 'function') throw new TypeError('func must be a function');
   1.325 +    
   1.326 +    for (var i = 0; i < maxIterations; i++) {
   1.327 +        var output = (start + end) / 2;
   1.328 +
   1.329 +        if (func(output) === 0 || Math.abs((end - start) / 2) < errorTolerance) {
   1.330 +            return output;
   1.331 +        }
   1.332 +
   1.333 +        if (sign(func(output)) === sign(func(start))) {
   1.334 +            start = output;
   1.335 +        } else {
   1.336 +            end = output;
   1.337 +        }
   1.338 +    }
   1.339 +    
   1.340 +    throw new Error('maximum number of iterations exceeded');
   1.341 +}
   1.342 +
   1.343 +module.exports = bisect;
   1.344 +
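A usage sketch for the root finder, assuming it is exposed as `ss.bisect`; Math.cos has a root near π/2 ≈ 1.5708 inside [0, 4]:

ss.bisect(Math.cos, 0, 4, 100, 0.003); // => 1.572265625
ss.bisect(Math.cos, 0, 4, 2, 1e-9);    // throws Error: maximum number of iterations exceeded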
   1.345 +},{"53":53}],6:[function(require,module,exports){
   1.346 +'use strict';
   1.347 +/* @flow */
   1.348 +
   1.349 +/**
   1.350 + * **Percentage Points of the χ2 (Chi-Squared) Distribution**
   1.351 + *
   1.352 + * The [χ2 (Chi-Squared) Distribution](http://en.wikipedia.org/wiki/Chi-squared_distribution) is used in the common
   1.353 + * chi-squared tests for goodness of fit of an observed distribution to a theoretical one, the independence of two
   1.354 + * criteria of classification of qualitative data, and in confidence interval estimation for a population standard
   1.355 + * deviation of a normal distribution from a sample standard deviation.
   1.356 + *
   1.357 + * Values from Appendix 1, Table III of William W. Hines & Douglas C. Montgomery, "Probability and Statistics in
   1.358 + * Engineering and Management Science", Wiley (1980).
   1.359 + */
   1.360 +var chiSquaredDistributionTable = { '1':
   1.361 +   { '0.995': 0,
   1.362 +     '0.99': 0,
   1.363 +     '0.975': 0,
   1.364 +     '0.95': 0,
   1.365 +     '0.9': 0.02,
   1.366 +     '0.5': 0.45,
   1.367 +     '0.1': 2.71,
   1.368 +     '0.05': 3.84,
   1.369 +     '0.025': 5.02,
   1.370 +     '0.01': 6.63,
   1.371 +     '0.005': 7.88 },
   1.372 +  '2':
   1.373 +   { '0.995': 0.01,
   1.374 +     '0.99': 0.02,
   1.375 +     '0.975': 0.05,
   1.376 +     '0.95': 0.1,
   1.377 +     '0.9': 0.21,
   1.378 +     '0.5': 1.39,
   1.379 +     '0.1': 4.61,
   1.380 +     '0.05': 5.99,
   1.381 +     '0.025': 7.38,
   1.382 +     '0.01': 9.21,
   1.383 +     '0.005': 10.6 },
   1.384 +  '3':
   1.385 +   { '0.995': 0.07,
   1.386 +     '0.99': 0.11,
   1.387 +     '0.975': 0.22,
   1.388 +     '0.95': 0.35,
   1.389 +     '0.9': 0.58,
   1.390 +     '0.5': 2.37,
   1.391 +     '0.1': 6.25,
   1.392 +     '0.05': 7.81,
   1.393 +     '0.025': 9.35,
   1.394 +     '0.01': 11.34,
   1.395 +     '0.005': 12.84 },
   1.396 +  '4':
   1.397 +   { '0.995': 0.21,
   1.398 +     '0.99': 0.3,
   1.399 +     '0.975': 0.48,
   1.400 +     '0.95': 0.71,
   1.401 +     '0.9': 1.06,
   1.402 +     '0.5': 3.36,
   1.403 +     '0.1': 7.78,
   1.404 +     '0.05': 9.49,
   1.405 +     '0.025': 11.14,
   1.406 +     '0.01': 13.28,
   1.407 +     '0.005': 14.86 },
   1.408 +  '5':
   1.409 +   { '0.995': 0.41,
   1.410 +     '0.99': 0.55,
   1.411 +     '0.975': 0.83,
   1.412 +     '0.95': 1.15,
   1.413 +     '0.9': 1.61,
   1.414 +     '0.5': 4.35,
   1.415 +     '0.1': 9.24,
   1.416 +     '0.05': 11.07,
   1.417 +     '0.025': 12.83,
   1.418 +     '0.01': 15.09,
   1.419 +     '0.005': 16.75 },
   1.420 +  '6':
   1.421 +   { '0.995': 0.68,
   1.422 +     '0.99': 0.87,
   1.423 +     '0.975': 1.24,
   1.424 +     '0.95': 1.64,
   1.425 +     '0.9': 2.2,
   1.426 +     '0.5': 5.35,
   1.427 +     '0.1': 10.65,
   1.428 +     '0.05': 12.59,
   1.429 +     '0.025': 14.45,
   1.430 +     '0.01': 16.81,
   1.431 +     '0.005': 18.55 },
   1.432 +  '7':
   1.433 +   { '0.995': 0.99,
   1.434 +     '0.99': 1.25,
   1.435 +     '0.975': 1.69,
   1.436 +     '0.95': 2.17,
   1.437 +     '0.9': 2.83,
   1.438 +     '0.5': 6.35,
   1.439 +     '0.1': 12.02,
   1.440 +     '0.05': 14.07,
   1.441 +     '0.025': 16.01,
   1.442 +     '0.01': 18.48,
   1.443 +     '0.005': 20.28 },
   1.444 +  '8':
   1.445 +   { '0.995': 1.34,
   1.446 +     '0.99': 1.65,
   1.447 +     '0.975': 2.18,
   1.448 +     '0.95': 2.73,
   1.449 +     '0.9': 3.49,
   1.450 +     '0.5': 7.34,
   1.451 +     '0.1': 13.36,
   1.452 +     '0.05': 15.51,
   1.453 +     '0.025': 17.53,
   1.454 +     '0.01': 20.09,
   1.455 +     '0.005': 21.96 },
   1.456 +  '9':
   1.457 +   { '0.995': 1.73,
   1.458 +     '0.99': 2.09,
   1.459 +     '0.975': 2.7,
   1.460 +     '0.95': 3.33,
   1.461 +     '0.9': 4.17,
   1.462 +     '0.5': 8.34,
   1.463 +     '0.1': 14.68,
   1.464 +     '0.05': 16.92,
   1.465 +     '0.025': 19.02,
   1.466 +     '0.01': 21.67,
   1.467 +     '0.005': 23.59 },
   1.468 +  '10':
   1.469 +   { '0.995': 2.16,
   1.470 +     '0.99': 2.56,
   1.471 +     '0.975': 3.25,
   1.472 +     '0.95': 3.94,
   1.473 +     '0.9': 4.87,
   1.474 +     '0.5': 9.34,
   1.475 +     '0.1': 15.99,
   1.476 +     '0.05': 18.31,
   1.477 +     '0.025': 20.48,
   1.478 +     '0.01': 23.21,
   1.479 +     '0.005': 25.19 },
   1.480 +  '11':
   1.481 +   { '0.995': 2.6,
   1.482 +     '0.99': 3.05,
   1.483 +     '0.975': 3.82,
   1.484 +     '0.95': 4.57,
   1.485 +     '0.9': 5.58,
   1.486 +     '0.5': 10.34,
   1.487 +     '0.1': 17.28,
   1.488 +     '0.05': 19.68,
   1.489 +     '0.025': 21.92,
   1.490 +     '0.01': 24.72,
   1.491 +     '0.005': 26.76 },
   1.492 +  '12':
   1.493 +   { '0.995': 3.07,
   1.494 +     '0.99': 3.57,
   1.495 +     '0.975': 4.4,
   1.496 +     '0.95': 5.23,
   1.497 +     '0.9': 6.3,
   1.498 +     '0.5': 11.34,
   1.499 +     '0.1': 18.55,
   1.500 +     '0.05': 21.03,
   1.501 +     '0.025': 23.34,
   1.502 +     '0.01': 26.22,
   1.503 +     '0.005': 28.3 },
   1.504 +  '13':
   1.505 +   { '0.995': 3.57,
   1.506 +     '0.99': 4.11,
   1.507 +     '0.975': 5.01,
   1.508 +     '0.95': 5.89,
   1.509 +     '0.9': 7.04,
   1.510 +     '0.5': 12.34,
   1.511 +     '0.1': 19.81,
   1.512 +     '0.05': 22.36,
   1.513 +     '0.025': 24.74,
   1.514 +     '0.01': 27.69,
   1.515 +     '0.005': 29.82 },
   1.516 +  '14':
   1.517 +   { '0.995': 4.07,
   1.518 +     '0.99': 4.66,
   1.519 +     '0.975': 5.63,
   1.520 +     '0.95': 6.57,
   1.521 +     '0.9': 7.79,
   1.522 +     '0.5': 13.34,
   1.523 +     '0.1': 21.06,
   1.524 +     '0.05': 23.68,
   1.525 +     '0.025': 26.12,
   1.526 +     '0.01': 29.14,
   1.527 +     '0.005': 31.32 },
   1.528 +  '15':
   1.529 +   { '0.995': 4.6,
   1.530 +     '0.99': 5.23,
   1.531 +     '0.975': 6.27,
   1.532 +     '0.95': 7.26,
   1.533 +     '0.9': 8.55,
   1.534 +     '0.5': 14.34,
   1.535 +     '0.1': 22.31,
   1.536 +     '0.05': 25,
   1.537 +     '0.025': 27.49,
   1.538 +     '0.01': 30.58,
   1.539 +     '0.005': 32.8 },
   1.540 +  '16':
   1.541 +   { '0.995': 5.14,
   1.542 +     '0.99': 5.81,
   1.543 +     '0.975': 6.91,
   1.544 +     '0.95': 7.96,
   1.545 +     '0.9': 9.31,
   1.546 +     '0.5': 15.34,
   1.547 +     '0.1': 23.54,
   1.548 +     '0.05': 26.3,
   1.549 +     '0.025': 28.85,
   1.550 +     '0.01': 32,
   1.551 +     '0.005': 34.27 },
   1.552 +  '17':
   1.553 +   { '0.995': 5.7,
   1.554 +     '0.99': 6.41,
   1.555 +     '0.975': 7.56,
   1.556 +     '0.95': 8.67,
   1.557 +     '0.9': 10.09,
   1.558 +     '0.5': 16.34,
   1.559 +     '0.1': 24.77,
   1.560 +     '0.05': 27.59,
   1.561 +     '0.025': 30.19,
   1.562 +     '0.01': 33.41,
   1.563 +     '0.005': 35.72 },
   1.564 +  '18':
   1.565 +   { '0.995': 6.26,
   1.566 +     '0.99': 7.01,
   1.567 +     '0.975': 8.23,
   1.568 +     '0.95': 9.39,
   1.569 +     '0.9': 10.87,
   1.570 +     '0.5': 17.34,
   1.571 +     '0.1': 25.99,
   1.572 +     '0.05': 28.87,
   1.573 +     '0.025': 31.53,
   1.574 +     '0.01': 34.81,
   1.575 +     '0.005': 37.16 },
   1.576 +  '19':
   1.577 +   { '0.995': 6.84,
   1.578 +     '0.99': 7.63,
   1.579 +     '0.975': 8.91,
   1.580 +     '0.95': 10.12,
   1.581 +     '0.9': 11.65,
   1.582 +     '0.5': 18.34,
   1.583 +     '0.1': 27.2,
   1.584 +     '0.05': 30.14,
   1.585 +     '0.025': 32.85,
   1.586 +     '0.01': 36.19,
   1.587 +     '0.005': 38.58 },
   1.588 +  '20':
   1.589 +   { '0.995': 7.43,
   1.590 +     '0.99': 8.26,
   1.591 +     '0.975': 9.59,
   1.592 +     '0.95': 10.85,
   1.593 +     '0.9': 12.44,
   1.594 +     '0.5': 19.34,
   1.595 +     '0.1': 28.41,
   1.596 +     '0.05': 31.41,
   1.597 +     '0.025': 34.17,
   1.598 +     '0.01': 37.57,
   1.599 +     '0.005': 40 },
   1.600 +  '21':
   1.601 +   { '0.995': 8.03,
   1.602 +     '0.99': 8.9,
   1.603 +     '0.975': 10.28,
   1.604 +     '0.95': 11.59,
   1.605 +     '0.9': 13.24,
   1.606 +     '0.5': 20.34,
   1.607 +     '0.1': 29.62,
   1.608 +     '0.05': 32.67,
   1.609 +     '0.025': 35.48,
   1.610 +     '0.01': 38.93,
   1.611 +     '0.005': 41.4 },
   1.612 +  '22':
   1.613 +   { '0.995': 8.64,
   1.614 +     '0.99': 9.54,
   1.615 +     '0.975': 10.98,
   1.616 +     '0.95': 12.34,
   1.617 +     '0.9': 14.04,
   1.618 +     '0.5': 21.34,
   1.619 +     '0.1': 30.81,
   1.620 +     '0.05': 33.92,
   1.621 +     '0.025': 36.78,
   1.622 +     '0.01': 40.29,
   1.623 +     '0.005': 42.8 },
   1.624 +  '23':
   1.625 +   { '0.995': 9.26,
   1.626 +     '0.99': 10.2,
   1.627 +     '0.975': 11.69,
   1.628 +     '0.95': 13.09,
   1.629 +     '0.9': 14.85,
   1.630 +     '0.5': 22.34,
   1.631 +     '0.1': 32.01,
   1.632 +     '0.05': 35.17,
   1.633 +     '0.025': 38.08,
   1.634 +     '0.01': 41.64,
   1.635 +     '0.005': 44.18 },
   1.636 +  '24':
   1.637 +   { '0.995': 9.89,
   1.638 +     '0.99': 10.86,
   1.639 +     '0.975': 12.4,
   1.640 +     '0.95': 13.85,
   1.641 +     '0.9': 15.66,
   1.642 +     '0.5': 23.34,
   1.643 +     '0.1': 33.2,
   1.644 +     '0.05': 36.42,
   1.645 +     '0.025': 39.36,
   1.646 +     '0.01': 42.98,
   1.647 +     '0.005': 45.56 },
   1.648 +  '25':
   1.649 +   { '0.995': 10.52,
   1.650 +     '0.99': 11.52,
   1.651 +     '0.975': 13.12,
   1.652 +     '0.95': 14.61,
   1.653 +     '0.9': 16.47,
   1.654 +     '0.5': 24.34,
   1.655 +     '0.1': 34.28,
   1.656 +     '0.05': 37.65,
   1.657 +     '0.025': 40.65,
   1.658 +     '0.01': 44.31,
   1.659 +     '0.005': 46.93 },
   1.660 +  '26':
   1.661 +   { '0.995': 11.16,
   1.662 +     '0.99': 12.2,
   1.663 +     '0.975': 13.84,
   1.664 +     '0.95': 15.38,
   1.665 +     '0.9': 17.29,
   1.666 +     '0.5': 25.34,
   1.667 +     '0.1': 35.56,
   1.668 +     '0.05': 38.89,
   1.669 +     '0.025': 41.92,
   1.670 +     '0.01': 45.64,
   1.671 +     '0.005': 48.29 },
   1.672 +  '27':
   1.673 +   { '0.995': 11.81,
   1.674 +     '0.99': 12.88,
   1.675 +     '0.975': 14.57,
   1.676 +     '0.95': 16.15,
   1.677 +     '0.9': 18.11,
   1.678 +     '0.5': 26.34,
   1.679 +     '0.1': 36.74,
   1.680 +     '0.05': 40.11,
   1.681 +     '0.025': 43.19,
   1.682 +     '0.01': 46.96,
   1.683 +     '0.005': 49.65 },
   1.684 +  '28':
   1.685 +   { '0.995': 12.46,
   1.686 +     '0.99': 13.57,
   1.687 +     '0.975': 15.31,
   1.688 +     '0.95': 16.93,
   1.689 +     '0.9': 18.94,
   1.690 +     '0.5': 27.34,
   1.691 +     '0.1': 37.92,
   1.692 +     '0.05': 41.34,
   1.693 +     '0.025': 44.46,
   1.694 +     '0.01': 48.28,
   1.695 +     '0.005': 50.99 },
   1.696 +  '29':
   1.697 +   { '0.995': 13.12,
   1.698 +     '0.99': 14.26,
   1.699 +     '0.975': 16.05,
   1.700 +     '0.95': 17.71,
   1.701 +     '0.9': 19.77,
   1.702 +     '0.5': 28.34,
   1.703 +     '0.1': 39.09,
   1.704 +     '0.05': 42.56,
   1.705 +     '0.025': 45.72,
   1.706 +     '0.01': 49.59,
   1.707 +     '0.005': 52.34 },
   1.708 +  '30':
   1.709 +   { '0.995': 13.79,
   1.710 +     '0.99': 14.95,
   1.711 +     '0.975': 16.79,
   1.712 +     '0.95': 18.49,
   1.713 +     '0.9': 20.6,
   1.714 +     '0.5': 29.34,
   1.715 +     '0.1': 40.26,
   1.716 +     '0.05': 43.77,
   1.717 +     '0.025': 46.98,
   1.718 +     '0.01': 50.89,
   1.719 +     '0.005': 53.67 },
   1.720 +  '40':
   1.721 +   { '0.995': 20.71,
   1.722 +     '0.99': 22.16,
   1.723 +     '0.975': 24.43,
   1.724 +     '0.95': 26.51,
   1.725 +     '0.9': 29.05,
   1.726 +     '0.5': 39.34,
   1.727 +     '0.1': 51.81,
   1.728 +     '0.05': 55.76,
   1.729 +     '0.025': 59.34,
   1.730 +     '0.01': 63.69,
   1.731 +     '0.005': 66.77 },
   1.732 +  '50':
   1.733 +   { '0.995': 27.99,
   1.734 +     '0.99': 29.71,
   1.735 +     '0.975': 32.36,
   1.736 +     '0.95': 34.76,
   1.737 +     '0.9': 37.69,
   1.738 +     '0.5': 49.33,
   1.739 +     '0.1': 63.17,
   1.740 +     '0.05': 67.5,
   1.741 +     '0.025': 71.42,
   1.742 +     '0.01': 76.15,
   1.743 +     '0.005': 79.49 },
   1.744 +  '60':
   1.745 +   { '0.995': 35.53,
   1.746 +     '0.99': 37.48,
   1.747 +     '0.975': 40.48,
   1.748 +     '0.95': 43.19,
   1.749 +     '0.9': 46.46,
   1.750 +     '0.5': 59.33,
   1.751 +     '0.1': 74.4,
   1.752 +     '0.05': 79.08,
   1.753 +     '0.025': 83.3,
   1.754 +     '0.01': 88.38,
   1.755 +     '0.005': 91.95 },
   1.756 +  '70':
   1.757 +   { '0.995': 43.28,
   1.758 +     '0.99': 45.44,
   1.759 +     '0.975': 48.76,
   1.760 +     '0.95': 51.74,
   1.761 +     '0.9': 55.33,
   1.762 +     '0.5': 69.33,
   1.763 +     '0.1': 85.53,
   1.764 +     '0.05': 90.53,
   1.765 +     '0.025': 95.02,
   1.766 +     '0.01': 100.42,
   1.767 +     '0.005': 104.22 },
   1.768 +  '80':
   1.769 +   { '0.995': 51.17,
   1.770 +     '0.99': 53.54,
   1.771 +     '0.975': 57.15,
   1.772 +     '0.95': 60.39,
   1.773 +     '0.9': 64.28,
   1.774 +     '0.5': 79.33,
   1.775 +     '0.1': 96.58,
   1.776 +     '0.05': 101.88,
   1.777 +     '0.025': 106.63,
   1.778 +     '0.01': 112.33,
   1.779 +     '0.005': 116.32 },
   1.780 +  '90':
   1.781 +   { '0.995': 59.2,
   1.782 +     '0.99': 61.75,
   1.783 +     '0.975': 65.65,
   1.784 +     '0.95': 69.13,
   1.785 +     '0.9': 73.29,
   1.786 +     '0.5': 89.33,
   1.787 +     '0.1': 107.57,
   1.788 +     '0.05': 113.14,
   1.789 +     '0.025': 118.14,
   1.790 +     '0.01': 124.12,
   1.791 +     '0.005': 128.3 },
   1.792 +  '100':
   1.793 +   { '0.995': 67.33,
   1.794 +     '0.99': 70.06,
   1.795 +     '0.975': 74.22,
   1.796 +     '0.95': 77.93,
   1.797 +     '0.9': 82.36,
   1.798 +     '0.5': 99.33,
   1.799 +     '0.1': 118.5,
   1.800 +     '0.05': 124.34,
   1.801 +     '0.025': 129.56,
   1.802 +     '0.01': 135.81,
   1.803 +     '0.005': 140.17 } };
   1.804 +
   1.805 +module.exports = chiSquaredDistributionTable;
   1.806 +
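Reading the table is a plain nested lookup: degrees of freedom first, then significance level. The table is module-private (not re-exported on `ss`), so this sketch uses the local variable:

chiSquaredDistributionTable['2']['0.05'];  // => 5.99
chiSquaredDistributionTable['10']['0.01']; // => 23.21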
   1.807 +},{}],7:[function(require,module,exports){
   1.808 +'use strict';
   1.809 +/* @flow */
   1.810 +
   1.811 +var mean = require(25);
   1.812 +var chiSquaredDistributionTable = require(6);
   1.813 +
   1.814 +/**
   1.815 + * The [χ2 (Chi-Squared) Goodness-of-Fit Test](http://en.wikipedia.org/wiki/Goodness_of_fit#Pearson.27s_chi-squared_test)
   1.816 + * uses a measure of goodness of fit which is the sum of differences between observed and expected outcome frequencies
   1.817 + * (that is, counts of observations), each squared and divided by the number of observations expected given the
   1.818 + * hypothesized distribution. The resulting χ2 statistic, `chiSquared`, can be compared to the chi-squared distribution
   1.819 + * to determine the goodness of fit. In order to determine the degrees of freedom of the chi-squared distribution, one
   1.820 + * takes the total number of observed frequencies and subtracts the number of estimated parameters. The test statistic
   1.821 + * follows, approximately, a chi-square distribution with (k − c) degrees of freedom where `k` is the number of non-empty
   1.822 + * cells and `c` is the number of estimated parameters for the distribution.
   1.823 + *
   1.824 + * @param {Array<number>} data
   1.825 + * @param {Function} distributionType a function that returns a point in a distribution:
   1.826 + * for instance, binomial, bernoulli, or poisson
    1.827 + * @param {number} significance significance level, e.g. 0.05
    1.828 + * @returns {boolean} true if the χ2 statistic exceeds the critical value for the given significance level, i.e. the hypothesized distribution is rejected
   1.829 + * @example
   1.830 + * // Data from Poisson goodness-of-fit example 10-19 in William W. Hines & Douglas C. Montgomery,
   1.831 + * // "Probability and Statistics in Engineering and Management Science", Wiley (1980).
   1.832 + * var data1019 = [
   1.833 + *     0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
   1.834 + *     0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
   1.835 + *     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
   1.836 + *     2, 2, 2, 2, 2, 2, 2, 2, 2,
   1.837 + *     3, 3, 3, 3
   1.838 + * ];
    1.839 + * ss.chiSquaredGoodnessOfFit(data1019, ss.poissonDistribution, 0.05); //= false
   1.840 + */
   1.841 +function chiSquaredGoodnessOfFit(
   1.842 +    data/*: Array<number> */,
   1.843 +    distributionType/*: Function */,
   1.844 +    significance/*: number */)/*: boolean */ {
    1.845 +    // Estimate the mean from the sample data.
   1.846 +    var inputMean = mean(data),
   1.847 +        // Calculated value of the χ2 statistic.
   1.848 +        chiSquared = 0,
   1.849 +        // Degrees of freedom, calculated as (number of class intervals -
   1.850 +        // number of hypothesized distribution parameters estimated - 1)
   1.851 +        degreesOfFreedom,
   1.852 +        // Number of hypothesized distribution parameters estimated, expected to be supplied in the distribution test.
   1.853 +        // Lose one degree of freedom for estimating `lambda` from the sample data.
   1.854 +        c = 1,
    1.855 +        // The hypothesized distribution, generated from the
    1.856 +        // sample mean estimated above.
   1.857 +        hypothesizedDistribution = distributionType(inputMean),
   1.858 +        observedFrequencies = [],
   1.859 +        expectedFrequencies = [],
   1.860 +        k;
   1.861 +
   1.862 +    // Create an array holding a histogram from the sample data, of
    1.863 +    // the form `{ value: numberOfOccurrences }`
   1.864 +    for (var i = 0; i < data.length; i++) {
   1.865 +        if (observedFrequencies[data[i]] === undefined) {
   1.866 +            observedFrequencies[data[i]] = 0;
   1.867 +        }
   1.868 +        observedFrequencies[data[i]]++;
   1.869 +    }
   1.870 +
   1.871 +    // The histogram we created might be sparse - there might be gaps
   1.872 +    // between values. So we iterate through the histogram, making
   1.873 +    // sure that instead of undefined, gaps have 0 values.
   1.874 +    for (i = 0; i < observedFrequencies.length; i++) {
   1.875 +        if (observedFrequencies[i] === undefined) {
   1.876 +            observedFrequencies[i] = 0;
   1.877 +        }
   1.878 +    }
   1.879 +
   1.880 +    // Create an array holding a histogram of expected data given the
   1.881 +    // sample size and hypothesized distribution.
   1.882 +    for (k in hypothesizedDistribution) {
   1.883 +        if (k in observedFrequencies) {
   1.884 +            expectedFrequencies[+k] = hypothesizedDistribution[k] * data.length;
   1.885 +        }
   1.886 +    }
   1.887 +
   1.888 +    // Working backward through the expected frequencies, collapse classes
   1.889 +    // if less than three observations are expected for a class.
   1.890 +    // This transformation is applied to the observed frequencies as well.
   1.891 +    for (k = expectedFrequencies.length - 1; k >= 0; k--) {
   1.892 +        if (expectedFrequencies[k] < 3) {
   1.893 +            expectedFrequencies[k - 1] += expectedFrequencies[k];
   1.894 +            expectedFrequencies.pop();
   1.895 +
   1.896 +            observedFrequencies[k - 1] += observedFrequencies[k];
   1.897 +            observedFrequencies.pop();
   1.898 +        }
   1.899 +    }
   1.900 +
   1.901 +    // Iterate through the squared differences between observed & expected
   1.902 +    // frequencies, accumulating the `chiSquared` statistic.
   1.903 +    for (k = 0; k < observedFrequencies.length; k++) {
   1.904 +        chiSquared += Math.pow(
   1.905 +            observedFrequencies[k] - expectedFrequencies[k], 2) /
   1.906 +            expectedFrequencies[k];
   1.907 +    }
   1.908 +
   1.909 +    // Calculate degrees of freedom for this test and look it up in the
   1.910 +    // `chiSquaredDistributionTable` in order to
   1.911 +    // accept or reject the goodness-of-fit of the hypothesized distribution.
   1.912 +    degreesOfFreedom = observedFrequencies.length - c - 1;
   1.913 +    return chiSquaredDistributionTable[degreesOfFreedom][significance] < chiSquared;
   1.914 +}
   1.915 +
   1.916 +module.exports = chiSquaredGoodnessOfFit;
   1.917 +
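A sketch of the test in use, reusing the `data1019` sample from the JSDoc example above and assuming the bundle exposes `ss.poissonDistribution`:

ss.chiSquaredGoodnessOfFit(data1019, ss.poissonDistribution, 0.05);
// => false: the χ2 statistic does not exceed the tabled critical value,
// so the Poisson fit is not rejected at the 0.05 level.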
   1.918 +},{"25":25,"6":6}],8:[function(require,module,exports){
   1.919 +'use strict';
   1.920 +/* @flow */
   1.921 +
   1.922 +/**
   1.923 + * Split an array into chunks of a specified size. This function
   1.924 + * has the same behavior as [PHP's array_chunk](http://php.net/manual/en/function.array-chunk.php)
   1.925 + * function, and thus will insert smaller-sized chunks at the end if
   1.926 + * the input size is not divisible by the chunk size.
   1.927 + *
   1.928 + * `sample` is expected to be an array, and `chunkSize` a number.
   1.929 + * The `sample` array can contain any kind of data.
   1.930 + *
   1.931 + * @param {Array} sample any array of values
   1.932 + * @param {number} chunkSize size of each output array
   1.933 + * @returns {Array<Array>} a chunked array
   1.934 + * @example
   1.935 + * chunk([1, 2, 3, 4, 5, 6], 2);
   1.936 + * // => [[1, 2], [3, 4], [5, 6]]
   1.937 + */
   1.938 +function chunk(sample/*:Array<any>*/, chunkSize/*:number*/)/*:?Array<Array<any>>*/ {
   1.939 +
   1.940 +    // a list of result chunks, as arrays in an array
   1.941 +    var output = [];
   1.942 +
   1.943 +    // `chunkSize` must be zero or higher - otherwise the loop below,
   1.944 +    // in which we call `start += chunkSize`, will loop infinitely.
   1.945 +    // So, we'll detect and throw in that case to indicate
   1.946 +    // invalid input.
   1.947 +    if (chunkSize <= 0) {
   1.948 +        throw new Error('chunk size must be a positive integer');
   1.949 +    }
   1.950 +
   1.951 +    // `start` is the index at which `.slice` will start selecting
   1.952 +    // new array elements
   1.953 +    for (var start = 0; start < sample.length; start += chunkSize) {
   1.954 +
   1.955 +        // for each chunk, slice that part of the array and add it
   1.956 +        // to the output. The `.slice` function does not change
   1.957 +        // the original array.
   1.958 +        output.push(sample.slice(start, start + chunkSize));
   1.959 +    }
   1.960 +    return output;
   1.961 +}
   1.962 +
   1.963 +module.exports = chunk;
   1.964 +
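A sketch of the remainder behavior described above: when the input length is not divisible by the chunk size, the final chunk is smaller.

ss.chunk([1, 2, 3, 4, 5], 2); // => [[1, 2], [3, 4], [5]]
ss.chunk([1, 2, 3], 0);       // throws Error: chunk size must be a positive integer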
   1.965 +},{}],9:[function(require,module,exports){
   1.966 +'use strict';
   1.967 +/* @flow */
   1.968 +
   1.969 +var uniqueCountSorted = require(61),
   1.970 +    numericSort = require(34);
   1.971 +
   1.972 +/**
   1.973 + * Create a new column x row matrix.
   1.974 + *
   1.975 + * @private
   1.976 + * @param {number} columns
   1.977 + * @param {number} rows
   1.978 + * @return {Array<Array<number>>} matrix
   1.979 + * @example
   1.980 + * makeMatrix(10, 10);
   1.981 + */
   1.982 +function makeMatrix(columns, rows) {
   1.983 +    var matrix = [];
   1.984 +    for (var i = 0; i < columns; i++) {
   1.985 +        var column = [];
   1.986 +        for (var j = 0; j < rows; j++) {
   1.987 +            column.push(0);
   1.988 +        }
   1.989 +        matrix.push(column);
   1.990 +    }
   1.991 +    return matrix;
   1.992 +}
   1.993 +
   1.994 +/**
   1.995 + * Generates incrementally computed values based on the sums and sums of
   1.996 + * squares for the data array
   1.997 + *
   1.998 + * @private
   1.999 + * @param {number} j
  1.1000 + * @param {number} i
  1.1001 + * @param {Array<number>} sums
  1.1002 + * @param {Array<number>} sumsOfSquares
  1.1003 + * @return {number}
  1.1004 + * @example
  1.1005 + * ssq(0, 1, [-1, 0, 2], [1, 1, 5]);
  1.1006 + */
  1.1007 +function ssq(j, i, sums, sumsOfSquares) {
  1.1008 +    var sji; // s(j, i)
  1.1009 +    if (j > 0) {
  1.1010 +        var muji = (sums[i] - sums[j - 1]) / (i - j + 1); // mu(j, i)
  1.1011 +        sji = sumsOfSquares[i] - sumsOfSquares[j - 1] - (i - j + 1) * muji * muji;
  1.1012 +    } else {
  1.1013 +        sji = sumsOfSquares[i] - sums[i] * sums[i] / (i + 1);
  1.1014 +    }
  1.1015 +    if (sji < 0) {
  1.1016 +        return 0;
  1.1017 +    }
  1.1018 +    return sji;
  1.1019 +}
  1.1020 +
  1.1021 +/**
  1.1022 + * Function that recursively divides and conquers computations
  1.1023 + * for cluster j
  1.1024 + *
  1.1025 + * @private
  1.1026 + * @param {number} iMin Minimum index in cluster to be computed
  1.1027 + * @param {number} iMax Maximum index in cluster to be computed
  1.1028 + * @param {number} cluster Index of the cluster currently being computed
  1.1029 + * @param {Array<Array<number>>} matrix
  1.1030 + * @param {Array<Array<number>>} backtrackMatrix
  1.1031 + * @param {Array<number>} sums
  1.1032 + * @param {Array<number>} sumsOfSquares
  1.1033 + */
  1.1034 +function fillMatrixColumn(iMin, iMax, cluster, matrix, backtrackMatrix, sums, sumsOfSquares) {
  1.1035 +    if (iMin > iMax) {
  1.1036 +        return;
  1.1037 +    }
  1.1038 +
  1.1039 +    // Start at midpoint between iMin and iMax
  1.1040 +    var i = Math.floor((iMin + iMax) / 2);
  1.1041 +
  1.1042 +    matrix[cluster][i] = matrix[cluster - 1][i - 1];
  1.1043 +    backtrackMatrix[cluster][i] = i;
  1.1044 +
  1.1045 +    var jlow = cluster; // the lower end for j
  1.1046 +
  1.1047 +    if (iMin > cluster) {
  1.1048 +        jlow = Math.max(jlow, backtrackMatrix[cluster][iMin - 1] || 0);
  1.1049 +    }
  1.1050 +    jlow = Math.max(jlow, backtrackMatrix[cluster - 1][i] || 0);
  1.1051 +
  1.1052 +    var jhigh = i - 1; // the upper end for j
  1.1053 +    if (iMax < matrix.length - 1) {
  1.1054 +        jhigh = Math.min(jhigh, backtrackMatrix[cluster][iMax + 1] || 0);
  1.1055 +    }
  1.1056 +
  1.1057 +    var sji;
  1.1058 +    var sjlowi;
  1.1059 +    var ssqjlow;
  1.1060 +    var ssqj;
  1.1061 +    for (var j = jhigh; j >= jlow; --j) {
  1.1062 +        sji = ssq(j, i, sums, sumsOfSquares);
  1.1063 +
  1.1064 +        if (sji + matrix[cluster - 1][jlow - 1] >= matrix[cluster][i]) {
  1.1065 +            break;
  1.1066 +        }
  1.1067 +
  1.1068 +        // Examine the lower bound of the cluster border
  1.1069 +        sjlowi = ssq(jlow, i, sums, sumsOfSquares);
  1.1070 +
  1.1071 +        ssqjlow = sjlowi + matrix[cluster - 1][jlow - 1];
  1.1072 +
  1.1073 +        if (ssqjlow < matrix[cluster][i]) {
  1.1074 +            // Shrink the lower bound
  1.1075 +            matrix[cluster][i] = ssqjlow;
  1.1076 +            backtrackMatrix[cluster][i] = jlow;
  1.1077 +        }
  1.1078 +        jlow++;
  1.1079 +
  1.1080 +        ssqj = sji + matrix[cluster - 1][j - 1];
  1.1081 +        if (ssqj < matrix[cluster][i]) {
  1.1082 +            matrix[cluster][i] = ssqj;
  1.1083 +            backtrackMatrix[cluster][i] = j;
  1.1084 +        }
  1.1085 +    }
  1.1086 +
  1.1087 +    fillMatrixColumn(iMin, i - 1, cluster, matrix, backtrackMatrix, sums, sumsOfSquares);
  1.1088 +    fillMatrixColumn(i + 1, iMax, cluster, matrix, backtrackMatrix, sums, sumsOfSquares);
  1.1089 +}
  1.1090 +
  1.1091 +/**
  1.1092 + * Initializes the main matrices used in Ckmeans and kicks
  1.1093 + * off the divide and conquer cluster computation strategy
  1.1094 + *
  1.1095 + * @private
  1.1096 + * @param {Array<number>} data sorted array of values
  1.1097 + * @param {Array<Array<number>>} matrix
  1.1098 + * @param {Array<Array<number>>} backtrackMatrix
  1.1099 + */
  1.1100 +function fillMatrices(data, matrix, backtrackMatrix) {
  1.1101 +    var nValues = matrix[0].length;
  1.1102 +
  1.1103 +    // Shift values by the median to improve numeric stability
  1.1104 +    var shift = data[Math.floor(nValues / 2)];
  1.1105 +
  1.1106 +    // Cumulative sum and cumulative sum of squares for all values in data array
  1.1107 +    var sums = [];
  1.1108 +    var sumsOfSquares = [];
  1.1109 +
  1.1110 +    // Initialize first column in matrix & backtrackMatrix
  1.1111 +    for (var i = 0, shiftedValue; i < nValues; ++i) {
  1.1112 +        shiftedValue = data[i] - shift;
  1.1113 +        if (i === 0) {
  1.1114 +            sums.push(shiftedValue);
  1.1115 +            sumsOfSquares.push(shiftedValue * shiftedValue);
  1.1116 +        } else {
  1.1117 +            sums.push(sums[i - 1] + shiftedValue);
  1.1118 +            sumsOfSquares.push(sumsOfSquares[i - 1] + shiftedValue * shiftedValue);
  1.1119 +        }
  1.1120 +
  1.1121 +        // Initialize for cluster = 0
  1.1122 +        matrix[0][i] = ssq(0, i, sums, sumsOfSquares);
  1.1123 +        backtrackMatrix[0][i] = 0;
  1.1124 +    }
  1.1125 +
  1.1126 +    // Initialize the rest of the columns
  1.1127 +    var iMin;
  1.1128 +    for (var cluster = 1; cluster < matrix.length; ++cluster) {
  1.1129 +        if (cluster < matrix.length - 1) {
  1.1130 +            iMin = cluster;
  1.1131 +        } else {
  1.1132 +            // No need to compute matrix[K-1][0] ... matrix[K-1][N-2]
  1.1133 +            iMin = nValues - 1;
  1.1134 +        }
  1.1135 +
  1.1136 +        fillMatrixColumn(iMin, nValues - 1, cluster, matrix, backtrackMatrix, sums, sumsOfSquares);
  1.1137 +    }
  1.1138 +}
  1.1139 +
  1.1140 +/**
  1.1141 + * Ckmeans clustering is an improvement on heuristic-based clustering
  1.1142 + * approaches like Jenks. The algorithm was developed in
  1.1143 + * [Haizhou Wang and Mingzhou Song](http://journal.r-project.org/archive/2011-2/RJournal_2011-2_Wang+Song.pdf)
  1.1144 + * as a [dynamic programming](https://en.wikipedia.org/wiki/Dynamic_programming) approach
  1.1145 + * to the problem of clustering numeric data into groups with the least
  1.1146 + * within-group sum-of-squared-deviations.
  1.1147 + *
  1.1148 + * Minimizing the difference within groups - what Wang & Song refer to as
  1.1149 + * `withinss`, or within sum-of-squares, means that groups are optimally
   1.1150 + * homogeneous within and the data is split into representative groups.
  1.1151 + * This is very useful for visualization, where you may want to represent
  1.1152 + * a continuous variable in discrete color or style groups. This function
  1.1153 + * can provide groups that emphasize differences between data.
  1.1154 + *
  1.1155 + * Being a dynamic approach, this algorithm is based on two matrices that
  1.1156 + * store incrementally-computed values for squared deviations and backtracking
  1.1157 + * indexes.
  1.1158 + *
  1.1159 + * This implementation is based on Ckmeans 3.4.6, which introduced a new divide
  1.1160 + * and conquer approach that improved runtime from O(kn^2) to O(kn log(n)).
  1.1161 + *
  1.1162 + * Unlike the [original implementation](https://cran.r-project.org/web/packages/Ckmeans.1d.dp/index.html),
  1.1163 + * this implementation does not include any code to automatically determine
  1.1164 + * the optimal number of clusters: this information needs to be explicitly
  1.1165 + * provided.
  1.1166 + *
  1.1167 + * ### References
  1.1168 + * _Ckmeans.1d.dp: Optimal k-means Clustering in One Dimension by Dynamic
  1.1169 + * Programming_ Haizhou Wang and Mingzhou Song ISSN 2073-4859
  1.1170 + *
  1.1171 + * from The R Journal Vol. 3/2, December 2011
  1.1172 + * @param {Array<number>} data input data, as an array of number values
  1.1173 + * @param {number} nClusters number of desired classes. This cannot be
  1.1174 + * greater than the number of values in the data array.
  1.1175 + * @returns {Array<Array<number>>} clustered input
  1.1176 + * @example
  1.1177 + * ckmeans([-1, 2, -1, 2, 4, 5, 6, -1, 2, -1], 3);
  1.1178 + * // The input, clustered into groups of similar numbers.
   1.1179 + * //= [[-1, -1, -1, -1], [2, 2, 2], [4, 5, 6]]
  1.1180 + */
  1.1181 +function ckmeans(data/*: Array<number> */, nClusters/*: number */)/*: Array<Array<number>> */ {
  1.1182 +
  1.1183 +    if (nClusters > data.length) {
  1.1184 +        throw new Error('Cannot generate more classes than there are data values');
  1.1185 +    }
  1.1186 +
  1.1187 +    var sorted = numericSort(data),
  1.1188 +        // we'll use this as the maximum number of clusters
  1.1189 +        uniqueCount = uniqueCountSorted(sorted);
  1.1190 +
  1.1191 +    // if all of the input values are identical, there's one cluster
  1.1192 +    // with all of the input in it.
  1.1193 +    if (uniqueCount === 1) {
  1.1194 +        return [sorted];
  1.1195 +    }
  1.1196 +
  1.1197 +    // named 'S' originally
  1.1198 +    var matrix = makeMatrix(nClusters, sorted.length),
  1.1199 +        // named 'J' originally
  1.1200 +        backtrackMatrix = makeMatrix(nClusters, sorted.length);
  1.1201 +
  1.1202 +    // This is a dynamic programming way to solve the problem of minimizing
  1.1203 +    // within-cluster sum of squares. It's similar to linear regression
  1.1204 +    // in this way, and this calculation incrementally computes the
  1.1205 +    // sum of squares that are later read.
  1.1206 +    fillMatrices(sorted, matrix, backtrackMatrix);
  1.1207 +
  1.1208 +    // The real work of Ckmeans clustering happens in the matrix generation:
  1.1209 +    // the generated matrices encode all possible clustering combinations, and
  1.1210 +    // once they're generated we can solve for the best clustering groups
  1.1211 +    // very quickly.
  1.1212 +    var clusters = [],
  1.1213 +        clusterRight = backtrackMatrix[0].length - 1;
  1.1214 +
  1.1215 +    // Backtrack the clusters from the dynamic programming matrix. This
  1.1216 +    // starts at the bottom-right corner of the matrix (if the top-left is 0, 0),
  1.1217 +    // and moves the cluster target with the loop.
  1.1218 +    for (var cluster = backtrackMatrix.length - 1; cluster >= 0; cluster--) {
  1.1219 +
  1.1220 +        var clusterLeft = backtrackMatrix[cluster][clusterRight];
  1.1221 +
  1.1222 +        // fill the cluster from the sorted input by taking a slice of the
  1.1223 +        // array. the backtrack matrix makes this easy - it stores the
  1.1224 +        // indexes where the cluster should start and end.
  1.1225 +        clusters[cluster] = sorted.slice(clusterLeft, clusterRight + 1);
  1.1226 +
  1.1227 +        if (cluster > 0) {
  1.1228 +            clusterRight = clusterLeft - 1;
  1.1229 +        }
  1.1230 +    }
  1.1231 +
  1.1232 +    return clusters;
  1.1233 +}
  1.1234 +
  1.1235 +module.exports = ckmeans;
  1.1236 +
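A sketch of the clustering output, assuming the bundle exposes this as `ss.ckmeans`: the result is `nClusters` sorted slices of the input with minimal within-cluster sum of squared deviations.

ss.ckmeans([-1, 2, -1, 2, 4, 5, 6, -1, 2, -1], 3);
// => [[-1, -1, -1, -1], [2, 2, 2], [4, 5, 6]]
ss.ckmeans([1, 2], 3); // throws: cannot generate more classes than there are data values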
  1.1237 +},{"34":34,"61":61}],10:[function(require,module,exports){
  1.1238 +/* @flow */
  1.1239 +'use strict';
  1.1240 +/**
  1.1241 + * Implementation of Combinations
  1.1242 + * Combinations are unique subsets of a collection - in this case, k elements from a collection at a time.
  1.1243 + * https://en.wikipedia.org/wiki/Combination
  1.1244 + * @param {Array} elements any type of data
  1.1245 + * @param {int} k the number of objects in each group (without replacement)
   1.1246 + * @returns {Array<Array>} array of combinations
  1.1247 + * @example
  1.1248 + * combinations([1, 2, 3], 2); // => [[1,2], [1,3], [2,3]]
  1.1249 + */
  1.1250 +
  1.1251 +function combinations(elements /*: Array<any> */, k/*: number */) {
  1.1252 +    var i;
  1.1253 +    var subI;
  1.1254 +    var combinationList = [];
  1.1255 +    var subsetCombinations;
  1.1256 +    var next;
  1.1257 +
  1.1258 +    for (i = 0; i < elements.length; i++) {
  1.1259 +        if (k === 1) {
  1.1260 +            combinationList.push([elements[i]])
  1.1261 +        } else {
  1.1262 +            subsetCombinations = combinations(elements.slice( i + 1, elements.length ), k - 1);
  1.1263 +            for (subI = 0; subI < subsetCombinations.length; subI++) {
  1.1264 +                next = subsetCombinations[subI];
  1.1265 +                next.unshift(elements[i]);
  1.1266 +                combinationList.push(next);
  1.1267 +            }
  1.1268 +        }
  1.1269 +    }
  1.1270 +    return combinationList;
  1.1271 +}
  1.1272 +
  1.1273 +module.exports = combinations;
  1.1274 +
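A sketch of the recursion above, assuming the bundle exposes this as `ss.combinations`: each element is paired with the combinations of the elements after it, so order never matters and no element repeats within a group.

ss.combinations(['a', 'b', 'c', 'd'], 3);
// => [['a', 'b', 'c'], ['a', 'b', 'd'], ['a', 'c', 'd'], ['b', 'c', 'd']]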
  1.1275 +},{}],11:[function(require,module,exports){
  1.1276 +/* @flow */
  1.1277 +'use strict';
  1.1278 +
  1.1279 +/**
  1.1280 + * Implementation of [Combinations](https://en.wikipedia.org/wiki/Combination) with replacement
  1.1281 + * Combinations are unique subsets of a collection - in this case, k elements from a collection at a time.
  1.1282 + * 'With replacement' means that a given element can be chosen multiple times.
  1.1283 + * Unlike permutation, order doesn't matter for combinations.
  1.1284 + * 
  1.1285 + * @param {Array} elements any type of data
   1.1286 + * @param {int} k the number of objects in each group (with replacement)
   1.1287 + * @returns {Array<Array>} array of combinations
  1.1288 + * @example
  1.1289 + * combinationsReplacement([1, 2], 2); // => [[1, 1], [1, 2], [2, 2]]
  1.1290 + */
  1.1291 +function combinationsReplacement(
  1.1292 +    elements /*: Array<any> */,
  1.1293 +    k /*: number */) {
  1.1294 +
  1.1295 +    var combinationList = [];
  1.1296 +
  1.1297 +    for (var i = 0; i < elements.length; i++) {
  1.1298 +        if (k === 1) {
  1.1299 +            // If we're requested to find only one element, we don't need
  1.1300 +            // to recurse: just push `elements[i]` onto the list of combinations.
  1.1301 +            combinationList.push([elements[i]])
  1.1302 +        } else {
  1.1303 +            // Otherwise, recursively find combinations, given `k - 1`. Note that
  1.1304 +            // we request `k - 1`, so if you were looking for k=3 combinations, we're
  1.1305 +            // requesting k=2. This -1 gets reversed in the for loop right after this
  1.1306 +            // code, since we concatenate `elements[i]` onto the selected combinations,
  1.1307 +            // bringing `k` back up to your requested level.
  1.1308 +            // This recursion may go many levels deep, since it only stops once
  1.1309 +            // k=1.
  1.1310 +            var subsetCombinations = combinationsReplacement(
  1.1311 +                elements.slice(i, elements.length),
  1.1312 +                k - 1);
  1.1313 +
  1.1314 +            for (var j = 0; j < subsetCombinations.length; j++) {
  1.1315 +                combinationList.push([elements[i]]
  1.1316 +                    .concat(subsetCombinations[j]));
  1.1317 +            }
  1.1318 +        }
  1.1319 +    }
  1.1320 +
  1.1321 +    return combinationList;
  1.1322 +}
  1.1323 +
  1.1324 +module.exports = combinationsReplacement;
  1.1325 +
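A sketch contrasting this with plain combinations, assuming the bundle exposes `ss.combinationsReplacement`: slicing from index `i` (rather than `i + 1`) in the recursive call is what lets an element be chosen more than once.

ss.combinationsReplacement([1, 2, 3], 2);
// => [[1, 1], [1, 2], [1, 3], [2, 2], [2, 3], [3, 3]]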
  1.1326 +},{}],12:[function(require,module,exports){
  1.1327 +'use strict';
  1.1328 +/* @flow */
  1.1329 +
  1.1330 +var standardNormalTable = require(55);
  1.1331 +
  1.1332 +/**
  1.1333 + * **[Cumulative Standard Normal Probability](http://en.wikipedia.org/wiki/Standard_normal_table)**
  1.1334 + *
  1.1335 + * Since probability tables cannot be
  1.1336 + * printed for every normal distribution, as there are an infinite variety
  1.1337 + * of normal distributions, it is common practice to convert a normal to a
  1.1338 + * standard normal and then use the standard normal table to find probabilities.
  1.1339 + *
  1.1340 + * You can use `.5 + .5 * errorFunction(x / Math.sqrt(2))` to calculate the probability
  1.1341 + * instead of looking it up in a table.
  1.1342 + *
   1.1343 + * @param {number} z the z-score, i.e. the number of standard deviations from the mean
  1.1344 + * @returns {number} cumulative standard normal probability
  1.1345 + */
  1.1346 +function cumulativeStdNormalProbability(z /*:number */)/*:number */ {
  1.1347 +
  1.1348 +    // Calculate the position of this value.
  1.1349 +    var absZ = Math.abs(z),
  1.1350 +        // Each row begins with a different
  1.1351 +        // significant digit: 0.5, 0.6, 0.7, and so on. Each value in the table
  1.1352 +        // corresponds to a range of 0.01 in the input values, so the value is
  1.1353 +        // multiplied by 100.
  1.1354 +        index = Math.min(Math.round(absZ * 100), standardNormalTable.length - 1);
  1.1355 +
  1.1356 +    // The index we calculate must be in the table as a positive value,
  1.1357 +    // but we still pay attention to whether the input is positive
  1.1358 +    // or negative, and flip the output value as a last step.
  1.1359 +    if (z >= 0) {
  1.1360 +        return standardNormalTable[index];
  1.1361 +    } else {
  1.1362 +        // due to floating-point arithmetic, values in the table with
  1.1363 +        // 4 significant figures can nevertheless end up as repeating
  1.1364 +        // fractions when they're computed here.
  1.1365 +        return +(1 - standardNormalTable[index]).toFixed(4);
  1.1366 +    }
  1.1367 +}
  1.1368 +
  1.1369 +module.exports = cumulativeStdNormalProbability;
  1.1370 +
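A sketch relating the table lookup to the error-function identity mentioned above, assuming the bundle exposes both methods; the two agree to the table's four significant figures.

ss.cumulativeStdNormalProbability(0);    // => 0.5
ss.cumulativeStdNormalProbability(1.96); // ≈ 0.975
0.5 + 0.5 * ss.errorFunction(1.96 / Math.sqrt(2)); // ≈ 0.975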
  1.1371 +},{"55":55}],13:[function(require,module,exports){
  1.1372 +'use strict';
  1.1373 +/* @flow */
  1.1374 +
  1.1375 +/**
  1.1376 + * We use `ε`, epsilon, as a stopping criterion when we want to iterate
  1.1377 + * until we're "close enough". Epsilon is a very small number: for
  1.1378 + * simple statistics, that number is **0.0001**
  1.1379 + *
  1.1380 + * This is used in calculations like the binomialDistribution, in which
  1.1381 + * the process of finding a value is [iterative](https://en.wikipedia.org/wiki/Iterative_method):
  1.1382 + * it progresses until it is close enough.
  1.1383 + *
  1.1384 + * Below is an example of using epsilon in [gradient descent](https://en.wikipedia.org/wiki/Gradient_descent),
  1.1385 + * where we're trying to find a local minimum of a function's derivative,
  1.1386 + * given by the `fDerivative` method.
  1.1387 + *
  1.1388 + * @example
  1.1389 + * // From calculation, we expect that the local minimum occurs at x=9/4
  1.1390 + * var x_old = 0;
  1.1391 + * // The algorithm starts at x=6
  1.1392 + * var x_new = 6;
  1.1393 + * var stepSize = 0.01;
  1.1394 + *
  1.1395 + * function fDerivative(x) {
  1.1396 + *   return 4 * Math.pow(x, 3) - 9 * Math.pow(x, 2);
  1.1397 + * }
  1.1398 + *
  1.1399 + * // The loop runs until the difference between the previous
  1.1400 + * // value and the current value is smaller than epsilon - a rough
  1.1401 + * // measure of 'close enough'
  1.1402 + * while (Math.abs(x_new - x_old) > ss.epsilon) {
  1.1403 + *   x_old = x_new;
  1.1404 + *   x_new = x_old - stepSize * fDerivative(x_old);
  1.1405 + * }
  1.1406 + *
  1.1407 + * console.log('Local minimum occurs at', x_new);
  1.1408 + */
  1.1409 +var epsilon = 0.0001;
  1.1410 +
  1.1411 +module.exports = epsilon;
  1.1412 +
  1.1413 +},{}],14:[function(require,module,exports){
  1.1414 +'use strict';
  1.1415 +/* @flow */
  1.1416 +
  1.1417 +var max = require(23),
  1.1418 +    min = require(29);
  1.1419 +
  1.1420 +/**
  1.1421 + * Given an array of data, this will find the extent of the
  1.1422 + * data and return an array of breaks that can be used
  1.1423 + * to categorize the data into a number of classes. The
  1.1424 + * returned array will always be 1 longer than the number of
  1.1425 + * classes because it includes the minimum value.
  1.1426 + *
  1.1427 + * @param {Array<number>} data input data, as an array of number values
  1.1428 + * @param {number} nClasses number of desired classes
  1.1429 + * @returns {Array<number>} array of class break positions
  1.1430 + * @example
  1.1431 + * equalIntervalBreaks([1, 2, 3, 4, 5, 6], 4); //= [1, 2.25, 3.5, 4.75, 6]
  1.1432 + */
  1.1433 +function equalIntervalBreaks(data/*: Array<number> */, nClasses/*:number*/)/*: Array<number> */ {
  1.1434 +
  1.1435 +    if (data.length <= 1) {
  1.1436 +        return data;
  1.1437 +    }
  1.1438 +
  1.1439 +    var theMin = min(data),
  1.1440 +        theMax = max(data); 
  1.1441 +
  1.1442 +    // the first break will always be the minimum value
  1.1443 +    // in the dataset
  1.1444 +    var breaks = [theMin];
  1.1445 +
  1.1446 +    // The size of each break is the full range of the data
  1.1447 +    // divided by the number of classes requested
  1.1448 +    var breakSize = (theMax - theMin) / nClasses;
  1.1449 +
  1.1450 +    // In the case of nClasses = 1, this loop won't run
  1.1451 +    // and the returned breaks will be [min, max]
  1.1452 +    for (var i = 1; i < nClasses; i++) {
  1.1453 +        breaks.push(breaks[0] + breakSize * i);
  1.1454 +    }
  1.1455 +
  1.1456 +    // the last break will always be the
  1.1457 +    // maximum.
  1.1458 +    breaks.push(theMax);
  1.1459 +
  1.1460 +    return breaks;
  1.1461 +}
  1.1462 +
  1.1463 +module.exports = equalIntervalBreaks;
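         +
         +// Illustrative sketch of the nClasses = 1 edge case noted above (assuming this
         +// bundle is loaded as `ss`): the loop is skipped entirely and only the minimum
         +// and maximum are returned.
         +//   ss.equalIntervalBreaks([1, 2, 3, 4, 5, 6], 1); // => [1, 6]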
  1.1464 +
  1.1465 +},{"23":23,"29":29}],15:[function(require,module,exports){
  1.1466 +'use strict';
  1.1467 +/* @flow */
  1.1468 +
  1.1469 +/**
  1.1470 + * **[Gaussian error function](http://en.wikipedia.org/wiki/Error_function)**
  1.1471 + *
  1.1472 + * The `errorFunction(x/(sd * Math.sqrt(2)))` is the probability that a value in a
  1.1473 + * normal distribution with standard deviation sd is within x of the mean.
  1.1474 + *
  1.1475 + * This function returns a numerical approximation to the exact value.
  1.1476 + *
  1.1477 + * @param {number} x input
  1.1478 + * @return {number} error estimation
  1.1479 + * @example
  1.1480 + * errorFunction(1).toFixed(2); // => '0.84'
  1.1481 + */
  1.1482 +function errorFunction(x/*: number */)/*: number */ {
  1.1483 +    var t = 1 / (1 + 0.5 * Math.abs(x));
  1.1484 +    var tau = t * Math.exp(-Math.pow(x, 2) -
  1.1485 +        1.26551223 +
  1.1486 +        1.00002368 * t +
  1.1487 +        0.37409196 * Math.pow(t, 2) +
  1.1488 +        0.09678418 * Math.pow(t, 3) -
  1.1489 +        0.18628806 * Math.pow(t, 4) +
  1.1490 +        0.27886807 * Math.pow(t, 5) -
  1.1491 +        1.13520398 * Math.pow(t, 6) +
  1.1492 +        1.48851587 * Math.pow(t, 7) -
  1.1493 +        0.82215223 * Math.pow(t, 8) +
  1.1494 +        0.17087277 * Math.pow(t, 9));
  1.1495 +    if (x >= 0) {
  1.1496 +        return 1 - tau;
  1.1497 +    } else {
  1.1498 +        return tau - 1;
  1.1499 +    }
  1.1500 +}
  1.1501 +
  1.1502 +module.exports = errorFunction;
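         +
         +// Illustrative sketch (assuming this bundle is loaded as `ss` and exposes
         +// errorFunction): the probability that a normally distributed value falls
         +// within one standard deviation of the mean, using the identity above.
         +//   ss.errorFunction(1 / Math.sqrt(2)); // ≈ 0.6827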
  1.1503 +
  1.1504 +},{}],16:[function(require,module,exports){
  1.1505 +'use strict';
  1.1506 +/* @flow */
  1.1507 +
  1.1508 +/**
  1.1509 + * A [Factorial](https://en.wikipedia.org/wiki/Factorial), usually written n!, is the product of all positive
  1.1510 + * integers less than or equal to n. Often factorial is implemented
  1.1511 + * recursively, but this iterative approach is significantly faster
  1.1512 + * and simpler.
  1.1513 + *
  1.1514 + * @param {number} n input
  1.1515 + * @returns {number} factorial: n!
  1.1516 + * @example
  1.1517 + * factorial(5); // => 120
  1.1518 + */
  1.1519 +function factorial(n /*: number */)/*: number */ {
  1.1520 +
  1.1521 +    // factorial is mathematically undefined for negative numbers
  1.1522 +    if (n < 0) { return NaN; }
  1.1523 +
  1.1524 +    // typically you'll expand the factorial function going down, like
  1.1525 +    // 5! = 5 * 4 * 3 * 2 * 1. This is going in the opposite direction,
  1.1526 +    // counting from 2 up to the number in question, and since anything
  1.1527 +    // multiplied by 1 is itself, the loop only needs to start at 2.
  1.1528 +    var accumulator = 1;
  1.1529 +    for (var i = 2; i <= n; i++) {
  1.1530 +        // for each number up to and including the number `n`, multiply
  1.1531 +        // the accumulator by that number.
  1.1532 +        accumulator *= i;
  1.1533 +    }
  1.1534 +    return accumulator;
  1.1535 +}
  1.1536 +
  1.1537 +module.exports = factorial;
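         +
         +// Illustrative sketch (assuming this bundle is loaded as `ss` and exposes
         +// factorial): because the loop starts at 2 with an accumulator of 1, the
         +// empty product 0! correctly comes out as 1.
         +//   ss.factorial(0); // => 1
         +//   ss.factorial(5); // => 120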
  1.1538 +
  1.1539 +},{}],17:[function(require,module,exports){
  1.1540 +'use strict';
  1.1541 +/* @flow */
  1.1542 +
  1.1543 +/**
  1.1544 + * The [Geometric Mean](https://en.wikipedia.org/wiki/Geometric_mean) is
  1.1545 + * a mean function that is more useful than the arithmetic mean for
  1.1546 + * values that span different ranges or compound multiplicatively.
  1.1547 + *
  1.1548 + * It is the nth root of the product of the n input numbers.
  1.1549 + *
  1.1550 + * The geometric mean is often useful for
  1.1551 + * **[proportional growth](https://en.wikipedia.org/wiki/Geometric_mean#Proportional_growth)**: given
  1.1552 + * growth rates for multiple years, like _80%, 16.66% and 42.85%_, a simple
  1.1553 + * mean will incorrectly estimate an average growth rate, whereas a geometric
  1.1554 + * mean will correctly estimate a growth rate that, over those years,
  1.1555 + * will yield the same end value.
  1.1556 + *
  1.1557 + * This runs in `O(n)`, linear time with respect to the length of the array.
  1.1558 + *
  1.1559 + * @param {Array<number>} x input array
  1.1560 + * @returns {number} geometric mean
  1.1561 + * @example
  1.1562 + * var growthRates = [1.80, 1.166666, 1.428571];
  1.1563 + * var averageGrowth = geometricMean(growthRates);
  1.1564 + * var averageGrowthRates = [averageGrowth, averageGrowth, averageGrowth];
  1.1565 + * var startingValue = 10;
  1.1566 + * var startingValueMean = 10;
  1.1567 + * growthRates.forEach(function(rate) {
  1.1568 + *   startingValue *= rate;
  1.1569 + * });
  1.1570 + * averageGrowthRates.forEach(function(rate) {
  1.1571 + *   startingValueMean *= rate;
  1.1572 + * });
  1.1573 + * startingValueMean === startingValue;
  1.1574 + */
  1.1575 +function geometricMean(x /*: Array<number> */) {
  1.1576 +    // The geometric mean of no numbers is undefined
  1.1577 +    if (x.length === 0) { return undefined; }
  1.1578 +
  1.1579 +    // the starting value.
  1.1580 +    var value = 1;
  1.1581 +
  1.1582 +    for (var i = 0; i < x.length; i++) {
  1.1583 +        // the geometric mean is only valid for positive numbers
  1.1584 +        if (x[i] <= 0) { return undefined; }
  1.1585 +
  1.1586 +        // repeatedly multiply the value by each number
  1.1587 +        value *= x[i];
  1.1588 +    }
  1.1589 +
  1.1590 +    return Math.pow(value, 1 / x.length);
  1.1591 +}
  1.1592 +
  1.1593 +module.exports = geometricMean;
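         +
         +// Illustrative sketch (assuming this bundle is loaded as `ss`): the nth root of
         +// the product, so two values whose product is 16 have a geometric mean of 4.
         +//   ss.geometricMean([2, 8]); // => 4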
  1.1594 +
  1.1595 +},{}],18:[function(require,module,exports){
  1.1596 +'use strict';
  1.1597 +/* @flow */
  1.1598 +
  1.1599 +/**
  1.1600 + * The [Harmonic Mean](https://en.wikipedia.org/wiki/Harmonic_mean) is
  1.1601 + * a mean function typically used to find the average of rates.
  1.1602 + * This mean is calculated by taking the reciprocal of the arithmetic mean
  1.1603 + * of the reciprocals of the input numbers.
  1.1604 + *
  1.1605 + * This is a [measure of central tendency](https://en.wikipedia.org/wiki/Central_tendency):
  1.1606 + * a method of finding a typical or central value of a set of numbers.
  1.1607 + *
  1.1608 + * This runs in `O(n)`, linear time with respect to the length of the array.
  1.1609 + *
  1.1610 + * @param {Array<number>} x input
  1.1611 + * @returns {number} harmonic mean
  1.1612 + * @example
  1.1613 + * harmonicMean([2, 3]).toFixed(2) // => '2.40'
  1.1614 + */
  1.1615 +function harmonicMean(x /*: Array<number> */) {
  1.1616 +    // The harmonic mean of no numbers is undefined
  1.1617 +    if (x.length === 0) { return undefined; }
  1.1618 +
  1.1619 +    var reciprocalSum = 0;
  1.1620 +
  1.1621 +    for (var i = 0; i < x.length; i++) {
  1.1622 +        // the harmonic mean is only valid for positive numbers
  1.1623 +        if (x[i] <= 0) { return undefined; }
  1.1624 +
  1.1625 +        reciprocalSum += 1 / x[i];
  1.1626 +    }
  1.1627 +
  1.1628 +    // divide n by the reciprocal sum
  1.1629 +    return x.length / reciprocalSum;
  1.1630 +}
  1.1631 +
  1.1632 +module.exports = harmonicMean;
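         +
         +// Illustrative sketch (assuming this bundle is loaded as `ss`): averaging rates,
         +// such as the average speed over two equal-length legs driven at 60 and 30.
         +//   ss.harmonicMean([60, 30]); // ≈ 40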
  1.1633 +
  1.1634 +},{}],19:[function(require,module,exports){
  1.1635 +'use strict';
  1.1636 +/* @flow */
  1.1637 +
  1.1638 +var quantile = require(40);
  1.1639 +
  1.1640 +/**
  1.1641 + * The [Interquartile range](http://en.wikipedia.org/wiki/Interquartile_range) is
  1.1642 + * a measure of statistical dispersion, or how scattered, spread, or
  1.1643 + * concentrated a distribution is. It's computed as the difference between
  1.1644 + * the third quartile and first quartile.
  1.1645 + *
  1.1646 + * @param {Array<number>} sample
  1.1647 + * @returns {number} interquartile range: the span between lower and upper quartile,
  1.1648 + * 0.25 and 0.75
  1.1649 + * @example
  1.1650 + * interquartileRange([0, 1, 2, 3]); // => 2
  1.1651 + */
  1.1652 +function interquartileRange(sample/*: Array<number> */) {
  1.1653 +    // Interquartile range is the span between the upper quartile,
  1.1654 +    // at `0.75`, and lower quartile, `0.25`
  1.1655 +    var q1 = quantile(sample, 0.75),
  1.1656 +        q2 = quantile(sample, 0.25);
  1.1657 +
  1.1658 +    if (typeof q1 === 'number' && typeof q2 === 'number') {
  1.1659 +        return q1 - q2;
  1.1660 +    }
  1.1661 +}
  1.1662 +
  1.1663 +module.exports = interquartileRange;
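         +
         +// Illustrative sketch (assuming this bundle is loaded as `ss`): for eight sorted
         +// values the quartiles land between elements, so the upper quartile is 6.5, the
         +// lower quartile is 2.5, and their difference is 4.
         +//   ss.interquartileRange([1, 2, 3, 4, 5, 6, 7, 8]); // => 4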
  1.1664 +
  1.1665 +},{"40":40}],20:[function(require,module,exports){
  1.1666 +'use strict';
  1.1667 +/* @flow */
  1.1668 +
  1.1669 +/**
  1.1670 + * The Inverse [Gaussian error function](http://en.wikipedia.org/wiki/Error_function)
  1.1671 + * returns a numerical approximation to the value that would have caused
  1.1672 + * `errorFunction()` to return x.
  1.1673 + *
  1.1674 + * @param {number} x value of error function
  1.1675 + * @returns {number} estimated inverted value
  1.1676 + */
  1.1677 +function inverseErrorFunction(x/*: number */)/*: number */ {
  1.1678 +    var a = (8 * (Math.PI - 3)) / (3 * Math.PI * (4 - Math.PI));
  1.1679 +
  1.1680 +    var inv = Math.sqrt(Math.sqrt(
  1.1681 +        Math.pow(2 / (Math.PI * a) + Math.log(1 - x * x) / 2, 2) -
  1.1682 +        Math.log(1 - x * x) / a) -
  1.1683 +        (2 / (Math.PI * a) + Math.log(1 - x * x) / 2));
  1.1684 +
  1.1685 +    if (x >= 0) {
  1.1686 +        return inv;
  1.1687 +    } else {
  1.1688 +        return -inv;
  1.1689 +    }
  1.1690 +}
  1.1691 +
  1.1692 +module.exports = inverseErrorFunction;
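         +
         +// Illustrative sketch (assuming this bundle is loaded as `ss` and exposes both
         +// functions): the inverse approximately undoes errorFunction, up to the error
         +// of the two approximations.
         +//   ss.inverseErrorFunction(ss.errorFunction(1)); // ≈ 1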
  1.1693 +
  1.1694 +},{}],21:[function(require,module,exports){
  1.1695 +'use strict';
  1.1696 +/* @flow */
  1.1697 +
  1.1698 +/**
  1.1699 + * [Simple linear regression](http://en.wikipedia.org/wiki/Simple_linear_regression)
  1.1700 + * is a simple way to find a fitted line
  1.1701 + * through a set of points. This algorithm finds the slope and y-intercept of a regression line
  1.1702 + * by minimizing the sum of squared errors (ordinary least squares).
  1.1703 + *
  1.1704 + * @param {Array<Array<number>>} data an array of two-element arrays,
  1.1705 + * like `[[0, 1], [2, 3]]`
  1.1706 + * @returns {Object} object containing slope and intercept of the regression line
  1.1707 + * @example
  1.1708 + * linearRegression([[0, 0], [1, 1]]); // => { m: 1, b: 0 }
  1.1709 + */
  1.1710 +function linearRegression(data/*: Array<Array<number>> */)/*: { m: number, b: number } */ {
  1.1711 +
  1.1712 +    var m, b;
  1.1713 +
  1.1714 +    // Store data length in a local variable to reduce
  1.1715 +    // repeated object property lookups
  1.1716 +    var dataLength = data.length;
  1.1717 +
  1.1718 +    //if there's only one point, arbitrarily choose a slope of 0
  1.1719 +    //and a y-intercept of whatever the y of the initial point is
  1.1720 +    if (dataLength === 1) {
  1.1721 +        m = 0;
  1.1722 +        b = data[0][1];
  1.1723 +    } else {
  1.1724 +        // Initialize our sums and scope the `m` and `b`
  1.1725 +        // variables that define the line.
  1.1726 +        var sumX = 0, sumY = 0,
  1.1727 +            sumXX = 0, sumXY = 0;
  1.1728 +
  1.1729 +        // Use local variables to grab point values
  1.1730 +        // with minimal object property lookups
  1.1731 +        var point, x, y;
  1.1732 +
  1.1733 +        // Gather the sum of all x values, the sum of all
  1.1734 +        // y values, and the sum of x^2 and (x*y) for each
  1.1735 +        // value.
  1.1736 +        //
  1.1737 +        // In math notation, these would be SS_x, SS_y, SS_xx, and SS_xy
  1.1738 +        for (var i = 0; i < dataLength; i++) {
  1.1739 +            point = data[i];
  1.1740 +            x = point[0];
  1.1741 +            y = point[1];
  1.1742 +
  1.1743 +            sumX += x;
  1.1744 +            sumY += y;
  1.1745 +
  1.1746 +            sumXX += x * x;
  1.1747 +            sumXY += x * y;
  1.1748 +        }
  1.1749 +
  1.1750 +        // `m` is the slope of the regression line
  1.1751 +        m = ((dataLength * sumXY) - (sumX * sumY)) /
  1.1752 +            ((dataLength * sumXX) - (sumX * sumX));
  1.1753 +
  1.1754 +        // `b` is the y-intercept of the line.
  1.1755 +        b = (sumY / dataLength) - ((m * sumX) / dataLength);
  1.1756 +    }
  1.1757 +
  1.1758 +    // Return both values as an object.
  1.1759 +    return {
  1.1760 +        m: m,
  1.1761 +        b: b
  1.1762 +    };
  1.1763 +}
  1.1764 +
  1.1765 +
  1.1766 +module.exports = linearRegression;
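         +
         +// Illustrative sketch (assuming this bundle is loaded as `ss`): two points with
         +// rise 4 over run 2 give slope m = 2, and the line crosses the y-axis at 1.
         +//   ss.linearRegression([[0, 1], [2, 5]]); // => { m: 2, b: 1 }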
  1.1767 +
  1.1768 +},{}],22:[function(require,module,exports){
  1.1769 +'use strict';
  1.1770 +/* @flow */
  1.1771 +
  1.1772 +/**
  1.1773 + * Given the output of `linearRegression`: an object
  1.1774 + * with `m` and `b` values indicating slope and intercept,
  1.1775 + * respectively, generate a line function that translates
  1.1776 + * x values into y values.
  1.1777 + *
  1.1778 + * @param {Object} mb object with `m` and `b` members, representing
  1.1779 + * slope and intercept of the desired line
  1.1780 + * @returns {Function} method that computes y-value at any given
  1.1781 + * x-value on the line.
  1.1782 + * @example
  1.1783 + * var l = linearRegressionLine(linearRegression([[0, 0], [1, 1]]));
  1.1784 + * l(0) // = 0
  1.1785 + * l(2) // = 2
  1.1786 + * linearRegressionLine({ b: 0, m: 1 })(1); // => 1
  1.1787 + * linearRegressionLine({ b: 1, m: 1 })(1); // => 2
  1.1788 + */
  1.1789 +function linearRegressionLine(mb/*: { b: number, m: number }*/)/*: Function */ {
  1.1790 +    // Return a function that computes a `y` value for each
  1.1791 +    // x value it is given, based on the values of `b` and `m`
  1.1792 +    // that we just computed.
  1.1793 +    return function(x) {
  1.1794 +        return mb.b + (mb.m * x);
  1.1795 +    };
  1.1796 +}
  1.1797 +
  1.1798 +module.exports = linearRegressionLine;
  1.1799 +
  1.1800 +},{}],23:[function(require,module,exports){
  1.1801 +'use strict';
  1.1802 +/* @flow */
  1.1803 +
  1.1804 +/**
  1.1805 + * This computes the maximum number in an array.
  1.1806 + *
  1.1807 + * This runs in `O(n)`, linear time with respect to the length of the array.
  1.1808 + *
  1.1809 + * @param {Array<number>} x input
  1.1810 + * @returns {number} maximum value
  1.1811 + * @example
  1.1812 + * max([1, 2, 3, 4]);
  1.1813 + * // => 4
  1.1814 + */
  1.1815 +function max(x /*: Array<number> */) /*:number*/ {
  1.1816 +    var value;
  1.1817 +    for (var i = 0; i < x.length; i++) {
  1.1818 +        // On the first iteration of this loop, value is
  1.1819 +        // undefined and is therefore set to the first element in the array
  1.1820 +        if (value === undefined || x[i] > value) {
  1.1821 +            value = x[i];
  1.1822 +        }
  1.1823 +    }
  1.1824 +    if (value === undefined) {
  1.1825 +        return NaN;
  1.1826 +    }
  1.1827 +    return value;
  1.1828 +}
  1.1829 +
  1.1830 +module.exports = max;
  1.1831 +
  1.1832 +},{}],24:[function(require,module,exports){
  1.1833 +'use strict';
  1.1834 +/* @flow */
  1.1835 +
  1.1836 +/**
  1.1837 + * The maximum is the highest number in the array. With a sorted array,
  1.1838 + * the last element in the array is always the largest, so this calculation
  1.1839 + * can be done in one step, or constant time.
  1.1840 + *
  1.1841 + * @param {Array<number>} x input
  1.1842 + * @returns {number} maximum value
  1.1843 + * @example
  1.1844 + * maxSorted([-100, -10, 1, 2, 5]); // => 5
  1.1845 + */
  1.1846 +function maxSorted(x /*: Array<number> */)/*:number*/ {
  1.1847 +    return x[x.length - 1];
  1.1848 +}
  1.1849 +
  1.1850 +module.exports = maxSorted;
  1.1851 +
  1.1852 +},{}],25:[function(require,module,exports){
  1.1853 +'use strict';
  1.1854 +/* @flow */
  1.1855 +
  1.1856 +var sum = require(56);
  1.1857 +
  1.1858 +/**
  1.1859 + * The mean, _also known as average_,
  1.1860 + * is the sum of all values over the number of values.
  1.1861 + * This is a [measure of central tendency](https://en.wikipedia.org/wiki/Central_tendency):
  1.1862 + * a method of finding a typical or central value of a set of numbers.
  1.1863 + *
  1.1864 + * This runs in `O(n)`, linear time with respect to the length of the array.
  1.1865 + *
  1.1866 + * @param {Array<number>} x input values
  1.1867 + * @returns {number} mean
  1.1868 + * @example
  1.1869 + * mean([0, 10]); // => 5
  1.1870 + */
  1.1871 +function mean(x /*: Array<number> */)/*:number*/ {
  1.1872 +    // The mean of no numbers is NaN
  1.1873 +    if (x.length === 0) { return NaN; }
  1.1874 +
  1.1875 +    return sum(x) / x.length;
  1.1876 +}
  1.1877 +
  1.1878 +module.exports = mean;
  1.1879 +
  1.1880 +},{"56":56}],26:[function(require,module,exports){
  1.1881 +'use strict';
  1.1882 +/* @flow */
  1.1883 +
  1.1884 +var quantile = require(40);
  1.1885 +
  1.1886 +/**
  1.1887 + * The [median](http://en.wikipedia.org/wiki/Median) is
  1.1888 + * the middle number of a list. This is often a good indicator of 'the middle'
  1.1889 + * when there are outliers that skew the `mean()` value.
  1.1890 + * This is a [measure of central tendency](https://en.wikipedia.org/wiki/Central_tendency):
  1.1891 + * a method of finding a typical or central value of a set of numbers.
  1.1892 + *
  1.1893 + * The median isn't necessarily one of the elements in the list: the value
  1.1894 + * can be the average of two elements if the list has an even length
  1.1895 + * and the two central values are different.
  1.1896 + *
  1.1897 + * @param {Array<number>} x input
  1.1898 + * @returns {number} median value
  1.1899 + * @example
  1.1900 + * median([10, 2, 5, 100, 2, 1]); // => 3.5
  1.1901 + */
  1.1902 +function median(x /*: Array<number> */)/*:number*/ {
  1.1903 +    return +quantile(x, 0.5);
  1.1904 +}
  1.1905 +
  1.1906 +module.exports = median;
  1.1907 +
  1.1908 +},{"40":40}],27:[function(require,module,exports){
  1.1909 +'use strict';
  1.1910 +/* @flow */
  1.1911 +
  1.1912 +var median = require(26);
  1.1913 +
  1.1914 +/**
  1.1915 + * The [Median Absolute Deviation](http://en.wikipedia.org/wiki/Median_absolute_deviation) is
  1.1916 + * a robust measure of statistical
  1.1917 + * dispersion. It is more resilient to outliers than the standard deviation.
  1.1918 + *
  1.1919 + * @param {Array<number>} x input array
  1.1920 + * @returns {number} median absolute deviation
  1.1921 + * @example
  1.1922 + * medianAbsoluteDeviation([1, 1, 2, 2, 4, 6, 9]); // => 1
  1.1923 + */
  1.1924 +function medianAbsoluteDeviation(x /*: Array<number> */) {
  1.1925 +    // The median absolute deviation of an empty list is NaN
  1.1926 +    var medianValue = median(x),
  1.1927 +        medianAbsoluteDeviations = [];
  1.1928 +
  1.1929 +    // Make a list of absolute deviations from the median
  1.1930 +    for (var i = 0; i < x.length; i++) {
  1.1931 +        medianAbsoluteDeviations.push(Math.abs(x[i] - medianValue));
  1.1932 +    }
  1.1933 +
  1.1934 +    // Find the median value of that list
  1.1935 +    return median(medianAbsoluteDeviations);
  1.1936 +}
  1.1937 +
  1.1938 +module.exports = medianAbsoluteDeviation;
  1.1939 +
  1.1940 +},{"26":26}],28:[function(require,module,exports){
  1.1941 +'use strict';
  1.1942 +/* @flow */
  1.1943 +
  1.1944 +var quantileSorted = require(41);
  1.1945 +
  1.1946 +/**
  1.1947 + * The [median](http://en.wikipedia.org/wiki/Median) is
  1.1948 + * the middle number of a list. This is often a good indicator of 'the middle'
  1.1949 + * when there are outliers that skew the `mean()` value.
  1.1950 + * This is a [measure of central tendency](https://en.wikipedia.org/wiki/Central_tendency):
  1.1951 + * a method of finding a typical or central value of a set of numbers.
  1.1952 + *
  1.1953 + * The median isn't necessarily one of the elements in the list: the value
  1.1954 + * can be the average of two elements if the list has an even length
  1.1955 + * and the two central values are different.
  1.1956 + *
  1.1957 + * @param {Array<number>} sorted input
  1.1958 + * @returns {number} median value
  1.1959 + * @example
  1.1960 + * medianSorted([10, 2, 5, 100, 2, 1]); // => 52.5
  1.1961 + */
  1.1962 +function medianSorted(sorted /*: Array<number> */)/*:number*/ {
  1.1963 +    return quantileSorted(sorted, 0.5);
  1.1964 +}
  1.1965 +
  1.1966 +module.exports = medianSorted;
  1.1967 +
  1.1968 +},{"41":41}],29:[function(require,module,exports){
  1.1969 +'use strict';
  1.1970 +/* @flow */
  1.1971 +
  1.1972 +/**
  1.1973 + * The min is the lowest number in the array. This runs in `O(n)`, linear time with respect to the length of the array.
  1.1974 + *
  1.1975 + * @param {Array<number>} x input
  1.1976 + * @returns {number} minimum value
  1.1977 + * @example
  1.1978 + * min([1, 5, -10, 100, 2]); // => -10
  1.1979 + */
  1.1980 +function min(x /*: Array<number> */)/*:number*/ {
  1.1981 +    var value;
  1.1982 +    for (var i = 0; i < x.length; i++) {
  1.1983 +        // On the first iteration of this loop, value is
  1.1984 +        // undefined and is therefore set to the first element in the array
  1.1985 +        if (value === undefined || x[i] < value) {
  1.1986 +            value = x[i];
  1.1987 +        }
  1.1988 +    }
  1.1989 +    if (value === undefined) {
  1.1990 +        return NaN;
  1.1991 +    }
  1.1992 +    return value;
  1.1993 +}
  1.1994 +
  1.1995 +module.exports = min;
  1.1996 +
  1.1997 +},{}],30:[function(require,module,exports){
  1.1998 +'use strict';
  1.1999 +/* @flow */
  1.2000 +
  1.2001 +/**
  1.2002 + * The minimum is the lowest number in the array. With a sorted array,
  1.2003 + * the first element in the array is always the smallest, so this calculation
  1.2004 + * can be done in one step, or constant time.
  1.2005 + *
  1.2006 + * @param {Array<number>} x input
  1.2007 + * @returns {number} minimum value
  1.2008 + * @example
  1.2009 + * minSorted([-100, -10, 1, 2, 5]); // => -100
  1.2010 + */
  1.2011 +function minSorted(x /*: Array<number> */)/*:number*/ {
  1.2012 +    return x[0];
  1.2013 +}
  1.2014 +
  1.2015 +module.exports = minSorted;
  1.2016 +
  1.2017 +},{}],31:[function(require,module,exports){
  1.2018 +'use strict';
  1.2019 +/* @flow */
  1.2020 +
  1.2021 +/**
  1.2022 + * **Mixin** simple_statistics to a single Array instance if provided
  1.2023 + * or the Array native object if not. This is an optional
  1.2024 + * feature that lets you treat simple_statistics as a native feature
  1.2025 + * of JavaScript.
  1.2026 + *
  1.2027 + * @param {Object} ss simple statistics
  1.2028 + * @param {Array} [array=] a single array instance which will be augmented
  1.2029 + * with the extra methods. If omitted, mixin will apply to all arrays
  1.2030 + * by changing the global `Array.prototype`.
  1.2031 + * @returns {*} the extended Array, or Array.prototype if no object
  1.2032 + * is given.
  1.2033 + *
  1.2034 + * @example
  1.2035 + * var myNumbers = [1, 2, 3];
  1.2036 + * mixin(ss, myNumbers);
  1.2037 + * console.log(myNumbers.sum()); // 6
  1.2038 + */
  1.2039 +function mixin(ss /*: Object */, array /*: ?Array<any> */)/*: any */ {
  1.2040 +    var support = !!(Object.defineProperty && Object.defineProperties);
  1.2041 +    // Coverage testing will never test this error.
  1.2042 +    /* istanbul ignore next */
  1.2043 +    if (!support) {
  1.2044 +        throw new Error('without defineProperty, simple-statistics cannot be mixed in');
  1.2045 +    }
  1.2046 +
  1.2047 +    // only methods which work on basic arrays in a single step
  1.2048 +    // are supported
  1.2049 +    var arrayMethods = ['median', 'standardDeviation', 'sum', 'product',
  1.2050 +        'sampleSkewness',
  1.2051 +        'mean', 'min', 'max', 'quantile', 'geometricMean',
  1.2052 +        'harmonicMean', 'rootMeanSquare'];
  1.2053 +
  1.2054 +    // create a closure with a method name so that a reference
  1.2055 +    // like `arrayMethods[i]` doesn't follow the loop increment
  1.2056 +    function wrap(method) {
  1.2057 +        return function() {
  1.2058 +            // cast any arguments into an array, since they're
  1.2059 +            // natively objects
  1.2060 +            var args = Array.prototype.slice.apply(arguments);
  1.2061 +            // make the first argument the array itself
  1.2062 +            args.unshift(this);
  1.2063 +            // return the result of the ss method
  1.2064 +            return ss[method].apply(ss, args);
  1.2065 +        };
  1.2066 +    }
  1.2067 +
  1.2068 +    // select object to extend
  1.2069 +    var extending;
  1.2070 +    if (array) {
  1.2071 +        // create a shallow copy of the array so that our internal
  1.2072 +        // operations do not change it by reference
  1.2073 +        extending = array.slice();
  1.2074 +    } else {
  1.2075 +        extending = Array.prototype;
  1.2076 +    }
  1.2077 +
  1.2078 +    // for each array function, define a function that gets
  1.2079 +    // the array as the first argument.
  1.2080 +    // We use [defineProperty](https://developer.mozilla.org/en-US/docs/JavaScript/Reference/Global_Objects/Object/defineProperty)
  1.2081 +    // because it allows these properties to be non-enumerable:
  1.2082 +    // `for (var in x)` loops will not run into problems with this
  1.2083 +    // implementation.
  1.2084 +    for (var i = 0; i < arrayMethods.length; i++) {
  1.2085 +        Object.defineProperty(extending, arrayMethods[i], {
  1.2086 +            value: wrap(arrayMethods[i]),
  1.2087 +            configurable: true,
  1.2088 +            enumerable: false,
  1.2089 +            writable: true
  1.2090 +        });
  1.2091 +    }
  1.2092 +
  1.2093 +    return extending;
  1.2094 +}
  1.2095 +
  1.2096 +module.exports = mixin;
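         +
         +// Illustrative sketch of the global form (assuming this bundle is loaded as
         +// `ss` and exposes mixin): calling it with no array argument extends
         +// Array.prototype, so any array then exposes the listed methods.
         +//   ss.mixin(ss);
         +//   [1, 2, 4].mean(); // ≈ 2.33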
  1.2097 +
  1.2098 +},{}],32:[function(require,module,exports){
  1.2099 +'use strict';
  1.2100 +/* @flow */
  1.2101 +
  1.2102 +var numericSort = require(34),
  1.2103 +    modeSorted = require(33);
  1.2104 +
  1.2105 +/**
  1.2106 + * The [mode](http://bit.ly/W5K4Yt) is the number that appears in a list the highest number of times.
  1.2107 + * There can be multiple modes in a list: in the event of a tie, this
  1.2108 + * algorithm returns the first of the tied values in sorted order (the smallest).
  1.2109 + *
  1.2110 + * This is a [measure of central tendency](https://en.wikipedia.org/wiki/Central_tendency):
  1.2111 + * a method of finding a typical or central value of a set of numbers.
  1.2112 + *
  1.2113 + * This runs in `O(n log n)` because it needs to sort the array internally
  1.2114 + * before running an `O(n)` search to find the mode.
  1.2115 + *
  1.2116 + * @param {Array<number>} x input
  1.2117 + * @returns {number} mode
  1.2118 + * @example
  1.2119 + * mode([0, 0, 1]); // => 0
  1.2120 + */
  1.2121 +function mode(x /*: Array<number> */)/*:number*/ {
  1.2122 +    // Sorting the array lets us iterate through it below and be sure
  1.2123 +    // that every time we see a new number it's new and we'll never
  1.2124 +    // see the same number twice
  1.2125 +    return modeSorted(numericSort(x));
  1.2126 +}
  1.2127 +
  1.2128 +module.exports = mode;
  1.2129 +
  1.2130 +},{"33":33,"34":34}],33:[function(require,module,exports){
  1.2131 +'use strict';
  1.2132 +/* @flow */
  1.2133 +
  1.2134 +/**
  1.2135 + * The [mode](http://bit.ly/W5K4Yt) is the number that appears in a list the highest number of times.
  1.2136 + * There can be multiple modes in a list: in the event of a tie, this
  1.2137 + * algorithm returns the first of the tied values in sorted order (the smallest).
  1.2138 + *
  1.2139 + * This is a [measure of central tendency](https://en.wikipedia.org/wiki/Central_tendency):
  1.2140 + * a method of finding a typical or central value of a set of numbers.
  1.2141 + *
  1.2142 + * This runs in `O(n)` because the input is sorted.
  1.2143 + *
  1.2144 + * @param {Array<number>} sorted input
  1.2145 + * @returns {number} mode
  1.2146 + * @example
  1.2147 + * modeSorted([0, 0, 1]); // => 0
  1.2148 + */
  1.2149 +function modeSorted(sorted /*: Array<number> */)/*:number*/ {
  1.2150 +
  1.2151 +    // Handle edge cases:
  1.2152 +    // The mode of an empty list is NaN
  1.2153 +    if (sorted.length === 0) { return NaN; }
  1.2154 +    else if (sorted.length === 1) { return sorted[0]; }
  1.2155 +
  1.2156 +    // This assumes it is dealing with an array of size > 1, since size
  1.2157 +    // 0 and 1 are handled immediately. Hence it starts at index 1 in the
  1.2158 +    // array.
  1.2159 +    var last = sorted[0],
  1.2160 +        // store the mode as we find new modes
  1.2161 +        value = NaN,
  1.2162 +        // store how many times we've seen the mode
  1.2163 +        maxSeen = 0,
  1.2164 +        // how many times the current candidate for the mode
  1.2165 +        // has been seen
  1.2166 +        seenThis = 1;
  1.2167 +
  1.2168 +    // end at sorted.length + 1 to fix the case in which the mode is
  1.2169 +    // the highest number that occurs in the sequence. the last iteration
  1.2170 +    // compares sorted[i], which is undefined, to the highest number
  1.2171 +    // in the series
  1.2172 +    for (var i = 1; i < sorted.length + 1; i++) {
  1.2173 +        // we're seeing a new number pass by
  1.2174 +        if (sorted[i] !== last) {
  1.2175 +            // the last number is the new mode since we saw it more
  1.2176 +            // often than the old one
  1.2177 +            if (seenThis > maxSeen) {
  1.2178 +                maxSeen = seenThis;
  1.2179 +                value = last;
  1.2180 +            }
  1.2181 +            seenThis = 1;
  1.2182 +            last = sorted[i];
  1.2183 +        // if this isn't a new number, it's one more occurrence of
  1.2184 +        // the potential mode
  1.2185 +        } else { seenThis++; }
  1.2186 +    }
  1.2187 +    return value;
  1.2188 +}
  1.2189 +
  1.2190 +module.exports = modeSorted;
  1.2191 +
  1.2192 +},{}],34:[function(require,module,exports){
  1.2193 +'use strict';
  1.2194 +/* @flow */
  1.2195 +
  1.2196 +/**
  1.2197 + * Sort an array of numbers by their numeric value, ensuring that the
  1.2198 + * array is not changed in place.
  1.2199 + *
  1.2200 + * This is necessary because the default behavior of .sort
  1.2201 + * in JavaScript is to sort arrays as string values
  1.2202 + *
  1.2203 + *     [1, 10, 12, 102, 20].sort()
  1.2204 + *     // output
  1.2205 + *     [1, 10, 102, 12, 20]
  1.2206 + *
  1.2207 + * @param {Array<number>} array input array
  1.2208 + * @return {Array<number>} sorted array
  1.2209 + * @private
  1.2210 + * @example
  1.2211 + * numericSort([3, 2, 1]) // => [1, 2, 3]
  1.2212 + */
  1.2213 +function numericSort(array /*: Array<number> */) /*: Array<number> */ {
  1.2214 +    return array
  1.2215 +        // ensure the array is not changed in-place
  1.2216 +        .slice()
  1.2217 +        // comparator function that treats input as numeric
  1.2218 +        .sort(function(a, b) {
  1.2219 +            return a - b;
  1.2220 +        });
  1.2221 +}
  1.2222 +
  1.2223 +module.exports = numericSort;
  1.2224 +
  1.2225 +},{}],35:[function(require,module,exports){
  1.2226 +'use strict';
  1.2227 +/* @flow */
  1.2228 +
  1.2229 +/**
  1.2230 + * This is a single-layer [Perceptron Classifier](http://en.wikipedia.org/wiki/Perceptron) that takes
  1.2231 + * arrays of numbers and predicts whether they should be classified
  1.2232 + * as either 0 or 1 (negative or positive examples).
  1.2233 + * @class
  1.2234 + * @example
  1.2235 + * // Create the model
  1.2236 + * var p = new PerceptronModel();
  1.2237 + * // Train the model with input with a diagonal boundary.
  1.2238 + * for (var i = 0; i < 5; i++) {
  1.2239 + *     p.train([1, 1], 1);
  1.2240 + *     p.train([0, 1], 0);
  1.2241 + *     p.train([1, 0], 0);
  1.2242 + *     p.train([0, 0], 0);
  1.2243 + * }
  1.2244 + * p.predict([0, 0]); // 0
  1.2245 + * p.predict([0, 1]); // 0
  1.2246 + * p.predict([1, 0]); // 0
  1.2247 + * p.predict([1, 1]); // 1
  1.2248 + */
  1.2249 +function PerceptronModel() {
  1.2250 +    // The weights, or coefficients of the model;
  1.2251 +    // weights are only populated when training with data.
  1.2252 +    this.weights = [];
  1.2253 +    // The bias term, or intercept; it is also a weight but
  1.2254 +    // it's stored separately for convenience as it is always
  1.2255 +    // multiplied by one.
  1.2256 +    this.bias = 0;
  1.2257 +}
  1.2258 +
  1.2259 +/**
  1.2260 + * **Predict**: Use an array of features with the weight array and bias
  1.2261 + * to predict whether an example is labeled 0 or 1.
  1.2262 + *
  1.2263 + * @param {Array<number>} features an array of features as numbers
  1.2264 + * @returns {number} 1 if the score is over 0, otherwise 0
  1.2265 + */
  1.2266 +PerceptronModel.prototype.predict = function(features) {
  1.2267 +
  1.2268 +    // Only predict if previously trained
  1.2269 +    // on the same size feature array(s).
  1.2270 +    if (features.length !== this.weights.length) { return null; }
  1.2271 +
  1.2272 +    // Calculate the sum of features times weights,
  1.2273 +    // with the bias added (implicitly times one).
  1.2274 +    var score = 0;
  1.2275 +    for (var i = 0; i < this.weights.length; i++) {
  1.2276 +        score += this.weights[i] * features[i];
  1.2277 +    }
  1.2278 +    score += this.bias;
  1.2279 +
  1.2280 +    // Classify as 1 if the score is over 0, otherwise 0.
  1.2281 +    if (score > 0) {
  1.2282 +        return 1;
  1.2283 +    } else {
  1.2284 +        return 0;
  1.2285 +    }
  1.2286 +};
  1.2287 +
  1.2288 +/**
  1.2289 + * **Train** the classifier with a new example, which is
  1.2290 + * a numeric array of features and a 0 or 1 label.
  1.2291 + *
  1.2292 + * @param {Array<number>} features an array of features as numbers
  1.2293 + * @param {number} label either 0 or 1
  1.2294 + * @returns {PerceptronModel} this
  1.2295 + */
  1.2296 +PerceptronModel.prototype.train = function(features, label) {
  1.2297 +    // Require that only labels of 0 or 1 are considered.
  1.2298 +    if (label !== 0 && label !== 1) { return null; }
  1.2299 +    // The length of the feature array determines
  1.2300 +    // the length of the weight array.
  1.2301 +    // The perceptron will continue learning as long as
  1.2302 +    // it keeps seeing feature arrays of the same length.
  1.2303 +    // When it sees a new data shape, it initializes.
  1.2304 +    if (features.length !== this.weights.length) {
  1.2305 +        this.weights = features;
  1.2306 +        this.bias = 1;
  1.2307 +    }
  1.2308 +    // Make a prediction based on current weights.
  1.2309 +    var prediction = this.predict(features);
  1.2310 +    // Update the weights if the prediction is wrong.
  1.2311 +    if (prediction !== label) {
  1.2312 +        var gradient = label - prediction;
  1.2313 +        for (var i = 0; i < this.weights.length; i++) {
  1.2314 +            this.weights[i] += gradient * features[i];
  1.2315 +        }
  1.2316 +        this.bias += gradient;
  1.2317 +    }
  1.2318 +    return this;
  1.2319 +};
  1.2320 +
  1.2321 +module.exports = PerceptronModel;
  1.2322 +
  1.2323 +},{}],36:[function(require,module,exports){
  1.2324 +/* @flow */
  1.2325 +
  1.2326 +'use strict';
  1.2327 +
  1.2328 +/**
  1.2329 + * Implementation of [Heap's Algorithm](https://en.wikipedia.org/wiki/Heap%27s_algorithm)
  1.2330 + * for generating permutations.
  1.2331 + *
  1.2332 + * @param {Array} elements any type of data
  1.2333 + * @returns {Array<Array>} array of permutations
  1.2334 + */
  1.2335 +function permutationsHeap/*:: <T> */(elements /*: Array<T> */)/*: Array<Array<T>> */ {
  1.2336 +    var indexes = new Array(elements.length);
  1.2337 +    var permutations = [elements.slice()];
  1.2338 +
  1.2339 +    for (var i = 0; i < elements.length; i++) {
  1.2340 +        indexes[i] = 0;
  1.2341 +    }
  1.2342 +
  1.2343 +    for (i = 0; i < elements.length;) {
  1.2344 +        if (indexes[i] < i) {
  1.2345 +
  1.2346 +            // At odd indexes, swap from indexes[i] instead
  1.2347 +            // of from the beginning of the array
  1.2348 +            var swapFrom = 0;
  1.2349 +            if (i % 2 !== 0) {
  1.2350 +                swapFrom = indexes[i];
  1.2351 +            }
  1.2352 +
  1.2353 +            // swap between swapFrom and i, using
  1.2354 +            // a temporary variable as storage.
  1.2355 +            var temp = elements[swapFrom];
  1.2356 +            elements[swapFrom] = elements[i];
  1.2357 +            elements[i] = temp;
  1.2358 +
  1.2359 +            permutations.push(elements.slice());
  1.2360 +            indexes[i]++;
  1.2361 +            i = 0;
  1.2362 +
  1.2363 +        } else {
  1.2364 +            indexes[i] = 0;
  1.2365 +            i++;
  1.2366 +        }
  1.2367 +    }
  1.2368 +
  1.2369 +    return permutations;
  1.2370 +}
  1.2371 +
  1.2372 +module.exports = permutationsHeap;
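         +
         +// Illustrative sketch (assuming this bundle is loaded as `ss`): three elements
         +// yield 3! = 6 permutations; note that the input array itself is reordered by
         +// the in-place swaps above.
         +//   ss.permutationsHeap(['a', 'b', 'c']).length; // => 6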
  1.2373 +
  1.2374 +},{}],37:[function(require,module,exports){
  1.2375 +'use strict';
  1.2376 +/* @flow */
  1.2377 +
  1.2378 +var epsilon = require(13);
  1.2379 +var factorial = require(16);
  1.2380 +
  1.2381 +/**
  1.2382 + * The [Poisson Distribution](http://en.wikipedia.org/wiki/Poisson_distribution)
  1.2383 + * is a discrete probability distribution that expresses the probability
  1.2384 + * of a given number of events occurring in a fixed interval of time
  1.2385 + * and/or space if these events occur with a known average rate and
  1.2386 + * independently of the time since the last event.
  1.2387 + *
  1.2388 + * The Poisson Distribution is characterized by the strictly positive
  1.2389 + * mean arrival or occurrence rate, `λ`.
  1.2390 + *
  1.2391 + * @param {number} lambda location poisson distribution
  1.2392 + * @returns {number} value of poisson distribution at that point
  1.2393 + */
  1.2394 +function poissonDistribution(lambda/*: number */) {
  1.2395 +    // Check that lambda is strictly positive
  1.2396 +    if (lambda <= 0) { return undefined; }
  1.2397 +
  1.2398 +    // our current place in the distribution
  1.2399 +    var x = 0,
  1.2400 +        // and we keep track of the current cumulative probability, in
  1.2401 +        // order to know when to stop calculating chances.
  1.2402 +        cumulativeProbability = 0,
  1.2403 +        // the calculated cells to be returned
  1.2404 +        cells = {};
  1.2405 +
  1.2406 +    // This algorithm iterates through each potential outcome,
  1.2407 +    // until the `cumulativeProbability` is very close to 1, at
  1.2408 +    // which point we've defined the vast majority of outcomes
  1.2409 +    do {
  1.2410 +        // a [probability mass function](https://en.wikipedia.org/wiki/Probability_mass_function)
  1.2411 +        cells[x] = (Math.pow(Math.E, -lambda) * Math.pow(lambda, x)) / factorial(x);
  1.2412 +        cumulativeProbability += cells[x];
  1.2413 +        x++;
  1.2414 +    // when the cumulativeProbability is nearly 1, we've calculated
  1.2415 +    // the useful range of this distribution
  1.2416 +    } while (cumulativeProbability < 1 - epsilon);
  1.2417 +
  1.2418 +    return cells;
  1.2419 +}
  1.2420 +
  1.2421 +module.exports = poissonDistribution;
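         +
         +// Illustrative sketch (assuming this bundle is loaded as `ss` and exposes
         +// poissonDistribution): with lambda = 2 the chance of exactly zero events is
         +// e^-2, and cells are generated until the cumulative probability is within
         +// epsilon of 1.
         +//   ss.poissonDistribution(2)[0]; // ≈ 0.1353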
  1.2422 +
  1.2423 +},{"13":13,"16":16}],38:[function(require,module,exports){
  1.2424 +'use strict';
  1.2425 +/* @flow */
  1.2426 +
  1.2427 +var epsilon = require(13);
  1.2428 +var inverseErrorFunction = require(20);
  1.2429 +
  1.2430 +/**
  1.2431 + * The [Probit](http://en.wikipedia.org/wiki/Probit)
  1.2432 + * is the inverse of cumulativeStdNormalProbability(),
  1.2433 + * and is also known as the normal quantile function.
  1.2434 + *
  1.2435 + * It returns the number of standard deviations from the mean
  1.2436 + * where the p'th quantile of values can be found in a normal distribution.
  1.2437 + * So, for example, probit(0.5 + 0.6827/2) ≈ 1 because 68.27% of values are
  1.2438 + * normally found within 1 standard deviation above or below the mean.
  1.2439 + *
  1.2440 + * @param {number} p
  1.2441 + * @returns {number} probit
  1.2442 + */
  1.2443 +function probit(p /*: number */)/*: number */ {
  1.2444 +    if (p === 0) {
  1.2445 +        p = epsilon;
  1.2446 +    } else if (p >= 1) {
  1.2447 +        p = 1 - epsilon;
  1.2448 +    }
  1.2449 +    return Math.sqrt(2) * inverseErrorFunction(2 * p - 1);
  1.2450 +}
  1.2451 +
  1.2452 +module.exports = probit;
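         +
         +// Illustrative sketch (assuming this bundle is loaded as `ss` and exposes
         +// probit): the familiar two-sided 95% z value, within the error of the
         +// inverseErrorFunction approximation.
         +//   ss.probit(0.975); // ≈ 1.96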
  1.2453 +
  1.2454 +},{"13":13,"20":20}],39:[function(require,module,exports){
  1.2455 +'use strict';
  1.2456 +/* @flow */
  1.2457 +
  1.2458 +/**
  1.2459 + * The [product](https://en.wikipedia.org/wiki/Product_(mathematics)) of an array
  1.2460 + * is the result of multiplying all numbers together, starting with one, the multiplicative identity.
  1.2461 + *
  1.2462 + * This runs in `O(n)`, linear time with respect to the length of the array.
  1.2463 + *
  1.2464 + * @param {Array<number>} x input
  1.2465 + * @return {number} product of all input numbers
  1.2466 + * @example
  1.2467 + * product([1, 2, 3, 4]); // => 24
  1.2468 + */
  1.2469 +function product(x/*: Array<number> */)/*: number */ {
  1.2470 +    var value = 1;
  1.2471 +    for (var i = 0; i < x.length; i++) {
  1.2472 +        value *= x[i];
  1.2473 +    }
  1.2474 +    return value;
  1.2475 +}
  1.2476 +
  1.2477 +module.exports = product;
  1.2478 +
  1.2479 +},{}],40:[function(require,module,exports){
  1.2480 +'use strict';
  1.2481 +/* @flow */
  1.2482 +
  1.2483 +var quantileSorted = require(41);
  1.2484 +var quickselect = require(42);
  1.2485 +
  1.2486 +/**
  1.2487 + * The [quantile](https://en.wikipedia.org/wiki/Quantile):
  1.2488 + * this is a population quantile, since we assume to know the entire
  1.2489 + * dataset in this library. This is an implementation of the
  1.2490 + * [Quantiles of a Population](http://en.wikipedia.org/wiki/Quantile#Quantiles_of_a_population)
  1.2491 + * algorithm from wikipedia.
  1.2492 + *
  1.2493 + * Sample is a one-dimensional array of numbers,
  1.2494 + * and p is either a decimal number from 0 to 1 or an array of decimal
  1.2495 + * numbers from 0 to 1.
  1.2496 + * In terms of a k/q quantile, p = k/q: it is the same ratio expressed
  1.2497 + * as a decimal.
  1.2498 + * When p is an array, the result of the function is also an array containing the appropriate
  1.2499 + * quantiles in input order
  1.2500 + *
  1.2501 + * @param {Array<number>} sample a sample from the population
  1.2502 + * @param {number} p the desired quantile, as a number between 0 and 1
  1.2503 + * @returns {number} quantile
  1.2504 + * @example
  1.2505 + * quantile([3, 6, 7, 8, 8, 9, 10, 13, 15, 16, 20], 0.5); // => 9
  1.2506 + */
  1.2507 +function quantile(sample /*: Array<number> */, p /*: Array<number> | number */) {
  1.2508 +    var copy = sample.slice();
  1.2509 +
  1.2510 +    if (Array.isArray(p)) {
  1.2511 +        // rearrange elements so that each element corresponding to a requested
  1.2512 +        // quantile is on a place it would be if the array was fully sorted
  1.2513 +        multiQuantileSelect(copy, p);
  1.2514 +        // Initialize the result array
  1.2515 +        var results = [];
  1.2516 +        // For each requested quantile
  1.2517 +        for (var i = 0; i < p.length; i++) {
  1.2518 +            results[i] = quantileSorted(copy, p[i]);
  1.2519 +        }
  1.2520 +        return results;
  1.2521 +    } else {
  1.2522 +        var idx = quantileIndex(copy.length, p);
  1.2523 +        quantileSelect(copy, idx, 0, copy.length - 1);
  1.2524 +        return quantileSorted(copy, p);
  1.2525 +    }
  1.2526 +}
  1.2527 +
  1.2528 +function quantileSelect(arr, k, left, right) {
  1.2529 +    if (k % 1 === 0) {
  1.2530 +        quickselect(arr, k, left, right);
  1.2531 +    } else {
  1.2532 +        k = Math.floor(k);
  1.2533 +        quickselect(arr, k, left, right);
  1.2534 +        quickselect(arr, k + 1, k + 1, right);
  1.2535 +    }
  1.2536 +}
  1.2537 +
  1.2538 +function multiQuantileSelect(arr, p) {
  1.2539 +    var indices = [0];
  1.2540 +    for (var i = 0; i < p.length; i++) {
  1.2541 +        indices.push(quantileIndex(arr.length, p[i]));
  1.2542 +    }
  1.2543 +    indices.push(arr.length - 1);
  1.2544 +    indices.sort(compare);
  1.2545 +
  1.2546 +    var stack = [0, indices.length - 1];
  1.2547 +
  1.2548 +    while (stack.length) {
  1.2549 +        var r = Math.ceil(stack.pop());
  1.2550 +        var l = Math.floor(stack.pop());
  1.2551 +        if (r - l <= 1) continue;
  1.2552 +
  1.2553 +        var m = Math.floor((l + r) / 2);
  1.2554 +        quantileSelect(arr, indices[m], indices[l], indices[r]);
  1.2555 +
  1.2556 +        stack.push(l, m, m, r);
  1.2557 +    }
  1.2558 +}
  1.2559 +
  1.2560 +function compare(a, b) {
  1.2561 +    return a - b;
  1.2562 +}
  1.2563 +
  1.2564 +function quantileIndex(len /*: number */, p /*: number */)/*:number*/ {
  1.2565 +    var idx = len * p;
  1.2566 +    if (p === 1) {
  1.2567 +        // If p is 1, directly return the last index
  1.2568 +        return len - 1;
  1.2569 +    } else if (p === 0) {
  1.2570 +        // If p is 0, directly return the first index
  1.2571 +        return 0;
  1.2572 +    } else if (idx % 1 !== 0) {
  1.2573 +        // If index is not integer, return the next index in array
  1.2574 +        return Math.ceil(idx) - 1;
  1.2575 +    } else if (len % 2 === 0) {
  1.2576 +        // If the list has even-length, we'll return the middle of two indices
  1.2577 +        // around quantile to indicate that we need an average value of the two
  1.2578 +        return idx - 0.5;
  1.2579 +    } else {
  1.2580 +        // Finally, in the simple case of an integer index
  1.2581 +        // with an odd-length list, return the index
  1.2582 +        return idx;
  1.2583 +    }
  1.2584 +}
  1.2585 +
  1.2586 +module.exports = quantile;
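         +
         +// Illustrative sketch (assuming this bundle is loaded as `ss`): passing an
         +// array of p values returns the matching quantiles in input order, as
         +// described above.
         +//   ss.quantile([3, 6, 7, 8, 8, 9, 10, 13, 15, 16, 20], [0.25, 0.75]); // => [7, 15]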
  1.2587 +
  1.2588 +},{"41":41,"42":42}],41:[function(require,module,exports){
  1.2589 +'use strict';
  1.2590 +/* @flow */
  1.2591 +
  1.2592 +/**
  1.2593 + * This is the internal implementation of quantiles: when you know
  1.2594 + * that the order is sorted, you don't need to re-sort it, and the computations
  1.2595 + * are faster.
  1.2596 + *
  1.2597 + * @param {Array<number>} sample input data
  1.2598 + * @param {number} p desired quantile: a number between 0 and 1, inclusive
  1.2599 + * @returns {number} quantile value
  1.2600 + * @example
  1.2601 + * quantileSorted([3, 6, 7, 8, 8, 9, 10, 13, 15, 16, 20], 0.5); // => 9
  1.2602 + */
  1.2603 +function quantileSorted(sample /*: Array<number> */, p /*: number */)/*:number*/ {
  1.2604 +    var idx = sample.length * p;
  1.2605 +    if (p < 0 || p > 1) {
  1.2606 +        return NaN;
  1.2607 +    } else if (p === 1) {
  1.2608 +        // If p is 1, directly return the last element
  1.2609 +        return sample[sample.length - 1];
  1.2610 +    } else if (p === 0) {
  1.2611 +        // If p is 0, directly return the first element
  1.2612 +        return sample[0];
  1.2613 +    } else if (idx % 1 !== 0) {
  1.2614 +        // If the index is not an integer, return the next element in the array
  1.2615 +        return sample[Math.ceil(idx) - 1];
  1.2616 +    } else if (sample.length % 2 === 0) {
  1.2617 +        // If the list has even-length, we'll take the average of this number
  1.2618 +        // and the next value, if there is one
  1.2619 +        return (sample[idx - 1] + sample[idx]) / 2;
  1.2620 +    } else {
  1.2621 +        // Finally, in the simple case of an integer value
  1.2622 +        // with an odd-length list, return the sample value at the index.
  1.2623 +        return sample[idx];
  1.2624 +    }
  1.2625 +}
  1.2626 +
  1.2627 +module.exports = quantileSorted;
  1.2628 +
  1.2629 +},{}],42:[function(require,module,exports){
  1.2630 +'use strict';
  1.2631 +/* @flow */
  1.2632 +
  1.2633 +module.exports = quickselect;
  1.2634 +
  1.2635 +/**
  1.2636 + * Rearrange items in `arr` so that all items in `[left, k]` range are the smallest.
  1.2637 + * The `k`-th element will have the `(k - left + 1)`-th smallest value in `[left, right]`.
  1.2638 + *
  1.2639 + * Implements Floyd-Rivest selection algorithm https://en.wikipedia.org/wiki/Floyd-Rivest_algorithm
  1.2640 + *
  1.2641 + * @private
  1.2642 + * @param {Array<number>} arr input array
  1.2643 + * @param {number} k pivot index
  1.2644 + * @param {number} left left index
  1.2645 + * @param {number} right right index
  1.2646 + * @returns {undefined}
  1.2647 + * @example
  1.2648 + * var arr = [65, 28, 59, 33, 21, 56, 22, 95, 50, 12, 90, 53, 28, 77, 39];
  1.2649 + * quickselect(arr, 8);
  1.2650 + * // = [39, 28, 28, 33, 21, 12, 22, 50, 53, 56, 59, 65, 90, 77, 95]
  1.2651 + */
  1.2652 +function quickselect(arr /*: Array<number> */, k /*: number */, left /*: number */, right /*: number */) {
  1.2653 +    left = left || 0;
  1.2654 +    right = right || (arr.length - 1);
  1.2655 +
  1.2656 +    while (right > left) {
  1.2657 +        // 600 and 0.5 are arbitrary constants chosen in the original paper to minimize execution time
  1.2658 +        if (right - left > 600) {
  1.2659 +            var n = right - left + 1;
  1.2660 +            var m = k - left + 1;
  1.2661 +            var z = Math.log(n);
  1.2662 +            var s = 0.5 * Math.exp(2 * z / 3);
  1.2663 +            var sd = 0.5 * Math.sqrt(z * s * (n - s) / n);
  1.2664 +            if (m - n / 2 < 0) sd *= -1;
  1.2665 +            var newLeft = Math.max(left, Math.floor(k - m * s / n + sd));
  1.2666 +            var newRight = Math.min(right, Math.floor(k + (n - m) * s / n + sd));
  1.2667 +            quickselect(arr, k, newLeft, newRight);
  1.2668 +        }
  1.2669 +
  1.2670 +        var t = arr[k];
  1.2671 +        var i = left;
  1.2672 +        var j = right;
  1.2673 +
  1.2674 +        swap(arr, left, k);
  1.2675 +        if (arr[right] > t) swap(arr, left, right);
  1.2676 +
  1.2677 +        while (i < j) {
  1.2678 +            swap(arr, i, j);
  1.2679 +            i++;
  1.2680 +            j--;
  1.2681 +            while (arr[i] < t) i++;
  1.2682 +            while (arr[j] > t) j--;
  1.2683 +        }
  1.2684 +
  1.2685 +        if (arr[left] === t) swap(arr, left, j);
  1.2686 +        else {
  1.2687 +            j++;
  1.2688 +            swap(arr, j, right);
  1.2689 +        }
  1.2690 +
  1.2691 +        if (j <= k) left = j + 1;
  1.2692 +        if (k <= j) right = j - 1;
  1.2693 +    }
  1.2694 +}
  1.2695 +
  1.2696 +function swap(arr, i, j) {
  1.2697 +    var tmp = arr[i];
  1.2698 +    arr[i] = arr[j];
  1.2699 +    arr[j] = tmp;
  1.2700 +}
  1.2701 +
  1.2702 +},{}],43:[function(require,module,exports){
  1.2703 +'use strict';
  1.2704 +/* @flow */
  1.2705 +
  1.2706 +/**
  1.2707 + * The [R Squared](http://en.wikipedia.org/wiki/Coefficient_of_determination)
  1.2708 + * value of data compared with a function `f`
  1.2709 + * measures how well the function fits the data: it is one minus the ratio of
  1.2710 + * the sum of squared prediction errors to the total sum of squares.
  1.2711 + *
  1.2712 + * @param {Array<Array<number>>} data input data: this should be doubly-nested
  1.2713 + * @param {Function} func function called on `[i][0]` values within the dataset
  1.2714 + * @returns {number} r-squared value
  1.2715 + * @example
  1.2716 + * var samples = [[0, 0], [1, 1]];
  1.2717 + * var regressionLine = linearRegressionLine(linearRegression(samples));
  1.2718 + * rSquared(samples, regressionLine); // = 1 this line is a perfect fit
  1.2719 + */
  1.2720 +function rSquared(data /*: Array<Array<number>> */, func /*: Function */) /*: number */ {
  1.2721 +    if (data.length < 2) { return 1; }
  1.2722 +
  1.2723 +    // Compute the average y value for the actual
  1.2724 +    // data set in order to compute the
  1.2725 +    // _total sum of squares_
  1.2726 +    var sum = 0, average;
  1.2727 +    for (var i = 0; i < data.length; i++) {
  1.2728 +        sum += data[i][1];
  1.2729 +    }
  1.2730 +    average = sum / data.length;
  1.2731 +
  1.2732 +    // Compute the total sum of squares - the
  1.2733 +    // squared difference between each point
  1.2734 +    // and the average of all points.
  1.2735 +    var sumOfSquares = 0;
  1.2736 +    for (var j = 0; j < data.length; j++) {
  1.2737 +        sumOfSquares += Math.pow(average - data[j][1], 2);
  1.2738 +    }
  1.2739 +
  1.2740 +    // Finally estimate the error: the squared
  1.2741 +    // difference between the estimate and the actual data
  1.2742 +    // value at each point.
  1.2743 +    var err = 0;
  1.2744 +    for (var k = 0; k < data.length; k++) {
  1.2745 +        err += Math.pow(data[k][1] - func(data[k][0]), 2);
  1.2746 +    }
  1.2747 +
  1.2748 +    // As the error grows larger, its ratio to the
  1.2749 +    // sum of squares increases and the r squared
  1.2750 +    // value grows lower.
  1.2751 +    return 1 - err / sumOfSquares;
  1.2752 +}
  1.2753 +
  1.2754 +module.exports = rSquared;
  1.2755 +
  1.2756 +},{}],44:[function(require,module,exports){
  1.2757 +'use strict';
  1.2758 +/* @flow */
  1.2759 +
  1.2760 +/**
  1.2761 + * The Root Mean Square (RMS) is
  1.2762 + * a mean function used as a measure of the magnitude of a set
  1.2763 + * of numbers, regardless of their sign.
  1.2764 + * This is the square root of the mean of the squares of the
  1.2765 + * input numbers.
  1.2766 + * This runs in `O(n)`, linear time with respect to the length of the array.
  1.2767 + *
  1.2768 + * @param {Array<number>} x input
  1.2769 + * @returns {number} root mean square
  1.2770 + * @example
  1.2771 + * rootMeanSquare([-1, 1, -1, 1]); // => 1
  1.2772 + */
  1.2773 +function rootMeanSquare(x /*: Array<number> */)/*:number*/ {
  1.2774 +    if (x.length === 0) { return NaN; }
  1.2775 +
  1.2776 +    var sumOfSquares = 0;
  1.2777 +    for (var i = 0; i < x.length; i++) {
  1.2778 +        sumOfSquares += Math.pow(x[i], 2);
  1.2779 +    }
  1.2780 +
  1.2781 +    return Math.sqrt(sumOfSquares / x.length);
  1.2782 +}
  1.2783 +
  1.2784 +module.exports = rootMeanSquare;
  1.2785 +
  1.2786 +},{}],45:[function(require,module,exports){
  1.2787 +'use strict';
  1.2788 +/* @flow */
  1.2789 +
  1.2790 +var shuffle = require(51);
  1.2791 +
  1.2792 +/**
  1.2793 + * Create a [simple random sample](http://en.wikipedia.org/wiki/Simple_random_sample)
  1.2794 + * from a given array of `n` elements.
  1.2795 + *
  1.2796 + * The sampled values will be in any order, not necessarily the order
  1.2797 + * they appear in the input.
  1.2798 + *
  1.2799 + * @param {Array} array input array. can contain any type
  1.2800 + * @param {number} n count of how many elements to take
  1.2801 + * @param {Function} [randomSource=Math.random] an optional source of entropy
  1.2802 + * instead of Math.random
  1.2803 + * @return {Array} subset of n elements in original array
  1.2804 + * @example
  1.2805 + * var values = [1, 2, 4, 5, 6, 7, 8, 9];
  1.2806 + * sample(values, 3); // returns 3 random values, like [2, 5, 8];
  1.2807 + */
  1.2808 +function sample/*:: <T> */(
  1.2809 +    array /*: Array<T> */,
  1.2810 +    n /*: number */,
  1.2811 +    randomSource /*: Function */) /*: Array<T> */ {
  1.2812 +    // shuffle the original array using a fisher-yates shuffle
  1.2813 +    var shuffled = shuffle(array, randomSource);
  1.2814 +
  1.2815 +    // and then return a subset of it - the first `n` elements.
  1.2816 +    return shuffled.slice(0, n);
  1.2817 +}
  1.2818 +
  1.2819 +module.exports = sample;
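         +
         +// A usage sketch with a deterministic entropy source (assumption: any
         +// zero-argument function returning values in [0, 1) can stand in for
         +// Math.random), which makes the drawn subset reproducible in tests.
         +//
         +//   var fixedSource = function () { return 0; };
         +//   sample([1, 2, 3, 4], 2, fixedSource); // always the same two elements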
  1.2820 +
  1.2821 +},{"51":51}],46:[function(require,module,exports){
  1.2822 +'use strict';
  1.2823 +/* @flow */
  1.2824 +
  1.2825 +var sampleCovariance = require(47);
  1.2826 +var sampleStandardDeviation = require(49);
  1.2827 +
  1.2828 +/**
  1.2829 + * The [correlation](http://en.wikipedia.org/wiki/Correlation_and_dependence) is
  1.2830 + * a measure of how strongly two datasets vary together, ranging from -1 to 1.
  1.2831 + *
  1.2832 + * @param {Array<number>} x first input
  1.2833 + * @param {Array<number>} y second input
  1.2834 + * @returns {number} sample correlation
  1.2835 + * @example
  1.2836 + * sampleCorrelation([1, 2, 3, 4, 5, 6], [2, 2, 3, 4, 5, 60]).toFixed(2);
  1.2837 + * // => '0.69'
  1.2838 + */
  1.2839 +function sampleCorrelation(x/*: Array<number> */, y/*: Array<number> */)/*:number*/ {
  1.2840 +    var cov = sampleCovariance(x, y),
  1.2841 +        xstd = sampleStandardDeviation(x),
  1.2842 +        ystd = sampleStandardDeviation(y);
  1.2843 +
  1.2844 +    return cov / xstd / ystd;
  1.2845 +}
  1.2846 +
  1.2847 +module.exports = sampleCorrelation;
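         +
         +// The same quantity written out (illustrative only): correlation is the sample
         +// covariance rescaled by both standard deviations, which makes it unitless.
         +//
         +//   var x = [1, 2, 3], y = [6, 5, 4];
         +//   sampleCovariance(x, y) /
         +//       (sampleStandardDeviation(x) * sampleStandardDeviation(y)); // => -1
         +//   sampleCorrelation(x, y);                                       // => -1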
  1.2848 +
  1.2849 +},{"47":47,"49":49}],47:[function(require,module,exports){
  1.2850 +'use strict';
  1.2851 +/* @flow */
  1.2852 +
  1.2853 +var mean = require(25);
  1.2854 +
  1.2855 +/**
  1.2856 + * [Sample covariance](https://en.wikipedia.org/wiki/Sample_mean_and_covariance) of two datasets:
  1.2857 + * how much do the two datasets move together?
  1.2858 + * x and y are two datasets, represented as arrays of numbers.
  1.2859 + *
  1.2860 + * @param {Array<number>} x first input
  1.2861 + * @param {Array<number>} y second input
  1.2862 + * @returns {number} sample covariance
  1.2863 + * @example
  1.2864 + * sampleCovariance([1, 2, 3, 4, 5, 6], [6, 5, 4, 3, 2, 1]); // => -3.5
  1.2865 + */
  1.2866 +function sampleCovariance(x /*:Array<number>*/, y /*:Array<number>*/)/*:number*/ {
  1.2867 +
  1.2868 +    // The two datasets must have the same length which must be more than 1
  1.2869 +    if (x.length <= 1 || x.length !== y.length) {
  1.2870 +        return NaN;
  1.2871 +    }
  1.2872 +
  1.2873 +    // determine the mean of each dataset so that we can judge each
  1.2874 +    // value of the dataset fairly as the difference from the mean. this
  1.2875 +    // way, if one dataset is [1, 2, 3] and [2, 3, 4], their covariance
  1.2876 +    // does not suffer because of the difference in absolute values
  1.2877 +    var xmean = mean(x),
  1.2878 +        ymean = mean(y),
  1.2879 +        sum = 0;
  1.2880 +
  1.2881 +    // for each pair of values, the covariance increases when their
  1.2882 +    // difference from the mean is associated - if both are well above
  1.2883 +    // or if both are well below
  1.2884 +    // the mean, the covariance increases significantly.
  1.2885 +    for (var i = 0; i < x.length; i++) {
  1.2886 +        sum += (x[i] - xmean) * (y[i] - ymean);
  1.2887 +    }
  1.2888 +
  1.2889 +    // this is Bessel's correction: an adjustment made to sample statistics
  1.2890 +    // that allows for the reduced degrees of freedom entailed in calculating
  1.2891 +    // values from samples rather than complete populations.
  1.2892 +    var besselsCorrection = x.length - 1;
  1.2893 +
  1.2894 +    // the covariance is scaled by one less than the length of the datasets.
  1.2895 +    return sum / besselsCorrection;
  1.2896 +}
  1.2897 +
  1.2898 +module.exports = sampleCovariance;
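         +
         +// A hand-worked check of the loop above (illustrative only): two inputs that
         +// move together in lockstep have only non-negative products of deviations.
         +//
         +//   sampleCovariance([1, 2, 3], [4, 5, 6]);
         +//   // deviations from the means (2 and 5): [-1, 0, 1] and [-1, 0, 1]
         +//   // sum of products: 1 + 0 + 1 = 2, divided by n - 1 = 2  // => 1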
  1.2899 +
  1.2900 +},{"25":25}],48:[function(require,module,exports){
  1.2901 +'use strict';
  1.2902 +/* @flow */
  1.2903 +
  1.2904 +var sumNthPowerDeviations = require(57);
  1.2905 +var sampleStandardDeviation = require(49);
  1.2906 +
  1.2907 +/**
  1.2908 + * [Skewness](http://en.wikipedia.org/wiki/Skewness) is
  1.2909 + * a measure of the extent to which a probability distribution of a
  1.2910 + * real-valued random variable "leans" to one side of the mean.
  1.2911 + * The skewness value can be positive or negative, or even undefined.
  1.2912 + *
  1.2913 + * Implementation is based on the adjusted Fisher-Pearson standardized
  1.2914 + * moment coefficient, which is the version found in Excel and several
  1.2915 + * statistical packages including Minitab, SAS and SPSS.
  1.2916 + *
  1.2917 + * @param {Array<number>} x input
  1.2918 + * @returns {number} sample skewness
  1.2919 + * @example
  1.2920 + * sampleSkewness([2, 4, 6, 3, 1]); // => 0.590128656384365
  1.2921 + */
  1.2922 +function sampleSkewness(x /*: Array<number> */)/*:number*/ {
  1.2923 +    // The skewness of fewer than three values is not defined, so return NaN
  1.2924 +    var theSampleStandardDeviation = sampleStandardDeviation(x);
  1.2925 +
  1.2926 +    if (isNaN(theSampleStandardDeviation) || x.length < 3) {
  1.2927 +        return NaN;
  1.2928 +    }
  1.2929 +
  1.2930 +    var n = x.length,
  1.2931 +        cubedS = Math.pow(theSampleStandardDeviation, 3),
  1.2932 +        sumCubedDeviations = sumNthPowerDeviations(x, 3);
  1.2933 +
  1.2934 +    return n * sumCubedDeviations / ((n - 1) * (n - 2) * cubedS);
  1.2935 +}
  1.2936 +
  1.2937 +module.exports = sampleSkewness;
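         +
         +// A quick sanity check of the formula above (illustrative only): data that is
         +// symmetric around its mean has cubed deviations that cancel out, so its
         +// skewness is zero.
         +//
         +//   sampleSkewness([1, 2, 3]);
         +//   // deviations: [-1, 0, 1]; cubed and summed: 0  // => 0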
  1.2938 +
  1.2939 +},{"49":49,"57":57}],49:[function(require,module,exports){
  1.2940 +'use strict';
  1.2941 +/* @flow */
  1.2942 +
  1.2943 +var sampleVariance = require(50);
  1.2944 +
  1.2945 +/**
  1.2946 + * The [standard deviation](http://en.wikipedia.org/wiki/Standard_deviation)
  1.2947 + * is the square root of the variance.
  1.2948 + *
  1.2949 + * @param {Array<number>} x input array
  1.2950 + * @returns {number} sample standard deviation
  1.2951 + * @example
  1.2952 + * sampleStandardDeviation([2, 4, 4, 4, 5, 5, 7, 9]).toFixed(2);
  1.2953 + * // => '2.14'
  1.2954 + */
  1.2955 +function sampleStandardDeviation(x/*:Array<number>*/)/*:number*/ {
  1.2956 +    // The sample standard deviation of fewer than two numbers is NaN
  1.2957 +    var sampleVarianceX = sampleVariance(x);
  1.2958 +    if (isNaN(sampleVarianceX)) { return NaN; }
  1.2959 +    return Math.sqrt(sampleVarianceX);
  1.2960 +}
  1.2961 +
  1.2962 +module.exports = sampleStandardDeviation;
  1.2963 +
  1.2964 +},{"50":50}],50:[function(require,module,exports){
  1.2965 +'use strict';
  1.2966 +/* @flow */
  1.2967 +
  1.2968 +var sumNthPowerDeviations = require(57);
  1.2969 +
  1.2970 +/*
  1.2971 + * The [sample variance](https://en.wikipedia.org/wiki/Variance#Sample_variance)
  1.2972 + * is the sum of squared deviations from the mean divided by the length minus
  1.2973 + * one. It is distinguished from the (population) variance by the usage of
  1.2974 + * [Bessel's Correction](https://en.wikipedia.org/wiki/Bessel's_correction):
  1.2975 + * dividing by the length minus one rather than the length corrects the bias
  1.2976 + * in estimating a value from a set that you don't know in full.
  1.2977 + *
  1.2978 + * References:
  1.2979 + * * [Wolfram MathWorld on Sample Variance](http://mathworld.wolfram.com/SampleVariance.html)
  1.2980 + *
  1.2981 + * @param {Array<number>} x input array
  1.2982 + * @return {number} sample variance
  1.2983 + * @example
  1.2984 + * sampleVariance([1, 2, 3, 4, 5]); // => 2.5
  1.2985 + */
  1.2986 +function sampleVariance(x /*: Array<number> */)/*:number*/ {
  1.2987 +    // The sample variance of fewer than two numbers is NaN
  1.2988 +    if (x.length <= 1) { return NaN; }
  1.2989 +
  1.2990 +    var sumSquaredDeviationsValue = sumNthPowerDeviations(x, 2);
  1.2991 +
  1.2992 +    // this is Bessel's correction: an adjustment made to sample statistics
  1.2993 +    // that allows for the reduced degrees of freedom entailed in calculating
  1.2994 +    // values from samples rather than complete populations.
  1.2995 +    var besselsCorrection = x.length - 1;
  1.2996 +
  1.2997 +    // Divide by the corrected count to get the sample variance
  1.2998 +    return sumSquaredDeviationsValue / besselsCorrection;
  1.2999 +}
  1.3000 +
  1.3001 +module.exports = sampleVariance;
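         +
         +// The doc example above, written out (illustrative only):
         +//
         +//   sampleVariance([1, 2, 3, 4, 5]);
         +//   // mean: 3; squared deviations: 4 + 1 + 0 + 1 + 4 = 10
         +//   // Bessel's correction divides by n - 1 = 4  // => 2.5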
  1.3002 +
  1.3003 +},{"57":57}],51:[function(require,module,exports){
  1.3004 +'use strict';
  1.3005 +/* @flow */
  1.3006 +
  1.3007 +var shuffleInPlace = require(52);
  1.3008 +
  1.3009 +/*
  1.3010 + * A [Fisher-Yates shuffle](http://en.wikipedia.org/wiki/Fisher%E2%80%93Yates_shuffle)
  1.3011 + * is a fast way to create a random permutation of a finite set. This is
  1.3012 + * a wrapper around `shuffleInPlace` that adds the guarantee that
  1.3013 + * it will not modify its input.
  1.3014 + *
  1.3015 + * @param {Array} sample an array of any kind of element
  1.3016 + * @param {Function} [randomSource=Math.random] an optional entropy source
  1.3017 + * @return {Array} shuffled version of input
  1.3018 + * @example
  1.3019 + * var shuffled = shuffle([1, 2, 3, 4]);
  1.3020 + * shuffled; // = [2, 3, 1, 4] or any other random permutation
  1.3021 + */
  1.3022 +function shuffle/*::<T>*/(sample/*:Array<T>*/, randomSource/*:Function*/) {
  1.3023 +    // slice the original array so that it is not modified
  1.3024 +    sample = sample.slice();
  1.3025 +
  1.3026 +    // and then shuffle that shallow copy, in place
  1.3027 +    return shuffleInPlace(sample, randomSource);
  1.3028 +}
  1.3029 +
  1.3030 +module.exports = shuffle;
  1.3031 +
  1.3032 +},{"52":52}],52:[function(require,module,exports){
  1.3033 +'use strict';
  1.3034 +/* @flow */
  1.3035 +
  1.3036 +/*
  1.3037 + * A [Fisher-Yates shuffle](http://en.wikipedia.org/wiki/Fisher%E2%80%93Yates_shuffle)
  1.3038 + * in-place - which means that it **will change the order of the original
  1.3039 + * array by reference**.
  1.3040 + *
  1.3041 + * This is an algorithm that generates a random [permutation](https://en.wikipedia.org/wiki/Permutation)
  1.3042 + * of a set.
  1.3043 + *
  1.3044 + * @param {Array} sample input array
  1.3045 + * @param {Function} [randomSource=Math.random] an optional source of entropy
  1.3046 + * @returns {Array} sample
  1.3047 + * @example
  1.3048 + * var sample = [1, 2, 3, 4];
  1.3049 + * shuffleInPlace(sample);
  1.3050 + * // sample is shuffled to a value like [2, 1, 4, 3]
  1.3051 + */
  1.3052 +function shuffleInPlace(sample/*:Array<any>*/, randomSource/*:Function*/)/*:Array<any>*/ {
  1.3053 +
  1.3055 +    // a custom random number source can be provided if you want to use
  1.3056 +    // a fixed seed or another random number generator, like
  1.3057 +    // [random-js](https://www.npmjs.org/package/random-js)
  1.3058 +    randomSource = randomSource || Math.random;
  1.3059 +
  1.3060 +    // store the current length of the sample to determine
  1.3061 +    // when no elements remain to shuffle.
  1.3062 +    var length = sample.length;
  1.3063 +
  1.3064 +    // temporary is used to hold an item when it is being
  1.3065 +    // swapped between indices.
  1.3066 +    var temporary;
  1.3067 +
  1.3068 +    // The index to swap at each stage.
  1.3069 +    var index;
  1.3070 +
  1.3071 +    // While there are still items to shuffle
  1.3072 +    while (length > 0) {
  1.3073 +        // choose a random index within the subset of the array
  1.3074 +        // that is not yet shuffled
  1.3075 +        index = Math.floor(randomSource() * length--);
  1.3076 +
  1.3077 +        // store the value that we'll move temporarily
  1.3078 +        temporary = sample[length];
  1.3079 +
  1.3080 +        // swap the value at `sample[length]` with `sample[index]`
  1.3081 +        sample[length] = sample[index];
  1.3082 +        sample[index] = temporary;
  1.3083 +    }
  1.3084 +
  1.3085 +    return sample;
  1.3086 +}
  1.3087 +
  1.3088 +module.exports = shuffleInPlace;
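         +
         +// A usage sketch with a deterministic entropy source (assumption: any
         +// zero-argument function returning values in [0, 1) can stand in for
         +// Math.random), handy for reproducible tests. Note that the input array
         +// itself is reordered:
         +//
         +//   var deck = [1, 2, 3, 4];
         +//   shuffleInPlace(deck, function () { return 0; });
         +//   // with a constant-zero source every pass swaps with index 0,
         +//   // so deck ends up as [2, 3, 4, 1]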
  1.3089 +
  1.3090 +},{}],53:[function(require,module,exports){
  1.3091 +'use strict';
  1.3092 +/* @flow */
  1.3093 +
  1.3094 +/**
  1.3095 + * [Sign](https://en.wikipedia.org/wiki/Sign_function) is a function
  1.3096 + * that extracts the sign of a real number.
  1.3097 + *
  1.3098 + * @param {number} x input value
  1.3099 + * @returns {number} sign value: either 1, 0 or -1
  1.3100 + * @throws {TypeError} if the input argument x is not a number
  1.3101 + * @private
  1.3102 + *
  1.3103 + * @example
  1.3104 + * sign(2); // => 1
  1.3105 + */
  1.3106 +function sign(x/*: number */)/*: number */ {
  1.3107 +    if (typeof x === 'number') {
  1.3108 +        if (x < 0) {
  1.3109 +            return -1;
  1.3110 +        } else if (x === 0) {
  1.3111 +            return 0;
  1.3112 +        } else {
  1.3113 +            return 1;
  1.3114 +        }
  1.3115 +    } else {
  1.3116 +        throw new TypeError('not a number');
  1.3117 +    }
  1.3118 +}
  1.3119 +
  1.3120 +module.exports = sign;
  1.3121 +
  1.3122 +},{}],54:[function(require,module,exports){
  1.3123 +'use strict';
  1.3124 +/* @flow */
  1.3125 +
  1.3126 +var variance = require(62);
  1.3127 +
  1.3128 +/**
  1.3129 + * The [standard deviation](http://en.wikipedia.org/wiki/Standard_deviation)
  1.3130 + * is the square root of the variance. It's useful for measuring the amount
  1.3131 + * of variation or dispersion in a set of values.
  1.3132 + *
  1.3133 + * Standard deviation is only appropriate for full-population knowledge: for
  1.3134 + * samples of a population, {@link sampleStandardDeviation} is
  1.3135 + * more appropriate.
  1.3136 + *
  1.3137 + * @param {Array<number>} x input
  1.3138 + * @returns {number} standard deviation
  1.3139 + * @example
  1.3140 + * variance([2, 4, 4, 4, 5, 5, 7, 9]); // => 4
  1.3141 + * standardDeviation([2, 4, 4, 4, 5, 5, 7, 9]); // => 2
  1.3142 + */
  1.3143 +function standardDeviation(x /*: Array<number> */)/*:number*/ {
  1.3144 +    // The standard deviation of no numbers is taken to be 0 here
  1.3145 +    var v = variance(x);
  1.3146 +    if (isNaN(v)) { return 0; }
  1.3147 +    return Math.sqrt(v);
  1.3148 +}
  1.3149 +
  1.3150 +module.exports = standardDeviation;
  1.3151 +
  1.3152 +},{"62":62}],55:[function(require,module,exports){
  1.3153 +'use strict';
  1.3154 +/* @flow */
  1.3155 +
  1.3156 +var SQRT_2PI = Math.sqrt(2 * Math.PI);
  1.3157 +
  1.3158 +function cumulativeDistribution(z) {
  1.3159 +    var sum = z,
  1.3160 +        tmp = z;
  1.3161 +
  1.3162 +    // 15 iterations are enough for 4-digit precision
  1.3163 +    for (var i = 1; i < 15; i++) {
  1.3164 +        tmp *= z * z / (2 * i + 1);
  1.3165 +        sum += tmp;
  1.3166 +    }
  1.3167 +    return Math.round((0.5 + (sum / SQRT_2PI) * Math.exp(-z * z / 2)) * 1e4) / 1e4;
  1.3168 +}
  1.3169 +
  1.3170 +/**
  1.3171 + * A standard normal table, also called the unit normal table or Z table,
  1.3172 + * is a mathematical table for the values of Φ (phi), which are the values of
  1.3173 + * the cumulative distribution function of the normal distribution.
  1.3174 + * It is used to find the probability that a statistic is observed below,
  1.3175 + * above, or between values on the standard normal distribution, and by
  1.3176 + * extension, any normal distribution.
  1.3177 + *
  1.3178 + * The probabilities are calculated using the
  1.3179 + * [Cumulative distribution function](https://en.wikipedia.org/wiki/Normal_distribution#Cumulative_distribution_function).
  1.3180 + * The table used is the full cumulative one, not the cumulative-from-0-to-mean
  1.3181 + * table (even though the latter has 5 digits of precision, instead of 4).
  1.3182 + */
  1.3183 +var standardNormalTable/*: Array<number> */ = [];
  1.3184 +
  1.3185 +for (var z = 0; z <= 3.09; z += 0.01) {
  1.3186 +    standardNormalTable.push(cumulativeDistribution(z));
  1.3187 +}
  1.3188 +
  1.3189 +module.exports = standardNormalTable;
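         +
         +// A usage sketch (illustrative only): the table holds Φ(z) for z from 0 to
         +// 3.09 in steps of 0.01, so a z value can be looked up by scaling it to an
         +// index.
         +//
         +//   standardNormalTable[0];                       // => 0.5 (Φ(0))
         +//   standardNormalTable[Math.round(1.96 * 100)];  // roughly 0.975 (Φ(1.96))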
  1.3190 +
  1.3191 +},{}],56:[function(require,module,exports){
  1.3192 +'use strict';
  1.3193 +/* @flow */
  1.3194 +
  1.3195 +/**
  1.3196 + * Our default sum is the [Kahan summation algorithm](https://en.wikipedia.org/wiki/Kahan_summation_algorithm),
  1.3197 + * a method for computing the sum of a list of numbers while correcting
  1.3198 + * for floating-point errors. Traditionally, sums are calculated as many
  1.3199 + * successive additions, each one with its own floating-point roundoff. These
  1.3200 + * losses in precision add up as the number of numbers increases. This alternative
  1.3201 + * algorithm is more accurate than the simple way of calculating sums by simple
  1.3202 + * addition.
  1.3203 + *
  1.3204 + * This runs in `O(n)`, linear time with respect to the length of the array.
  1.3205 + *
  1.3206 + * @param {Array<number>} x input
  1.3207 + * @return {number} sum of all input numbers
  1.3208 + * @example
  1.3209 + * sum([1, 2, 3]); // => 6
  1.3210 + */
  1.3211 +function sum(x/*: Array<number> */)/*: number */ {
  1.3212 +
  1.3213 +    // like the traditional sum algorithm, we keep a running
  1.3214 +    // count of the current sum.
  1.3215 +    var sum = 0;
  1.3216 +
  1.3217 +    // but we also keep three extra variables as bookkeeping:
  1.3218 +    // most importantly, an error correction value. This will be a very
  1.3219 +    // small number that is the opposite of the floating point precision loss.
  1.3220 +    var errorCompensation = 0;
  1.3221 +
  1.3222 +    // this will be each number in the list corrected with the compensation value.
  1.3223 +    var correctedCurrentValue;
  1.3224 +
  1.3225 +    // and this will be the next sum
  1.3226 +    var nextSum;
  1.3227 +
  1.3228 +    for (var i = 0; i < x.length; i++) {
  1.3229 +        // first correct the value that we're going to add to the sum
  1.3230 +        correctedCurrentValue = x[i] - errorCompensation;
  1.3231 +
  1.3232 +        // compute the next sum. sum is likely a much larger number
  1.3233 +        // than correctedCurrentValue, so we'll lose precision here,
  1.3234 +        // and measure how much precision is lost in the next step
  1.3235 +        nextSum = sum + correctedCurrentValue;
  1.3236 +
  1.3237 +        // we intentionally didn't assign sum immediately, but stored
  1.3238 +        // it for now so we can ask: is ((sum + correctedCurrentValue) - sum)
  1.3239 +        // - correctedCurrentValue equal to 0? ideally it would be, but in
  1.3240 +        // practice it won't: it will be some very small number, and that's
  1.3241 +        // what we record as errorCompensation.
  1.3242 +        errorCompensation = nextSum - sum - correctedCurrentValue;
  1.3243 +
  1.3244 +        // now that we've computed how much we'll correct for in the next
  1.3245 +        // loop, start treating the nextSum as the current sum.
  1.3246 +        sum = nextSum;
  1.3247 +    }
  1.3248 +
  1.3249 +    return sum;
  1.3250 +}
  1.3251 +
  1.3252 +module.exports = sum;
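         +
         +// A quick sanity check of the compensation (illustrative only; the exact
         +// figures assume IEEE-754 double rounding): adding 0.1 ten times drifts with
         +// plain accumulation, while the compensated loop recovers the correctly
         +// rounded result.
         +//
         +//   sumSimple([0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1]);
         +//   // => 0.9999999999999999
         +//   sum([0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1]);
         +//   // => 1, because errorCompensation folds the lost low bits back in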
  1.3253 +
  1.3254 +},{}],57:[function(require,module,exports){
  1.3255 +'use strict';
  1.3256 +/* @flow */
  1.3257 +
  1.3258 +var mean = require(25);
  1.3259 +
  1.3260 +/**
  1.3261 + * The sum of deviations to the Nth power.
  1.3262 + * When n=2 it's the sum of squared deviations.
  1.3263 + * When n=3 it's the sum of cubed deviations.
  1.3264 + *
  1.3265 + * @param {Array<number>} x
  1.3266 + * @param {number} n power
  1.3267 + * @returns {number} sum of nth power deviations
  1.3268 + * @example
  1.3269 + * var input = [1, 2, 3];
  1.3270 + * // since the variance of a set is the mean squared
  1.3271 + * // deviations, we can calculate that with sumNthPowerDeviations:
  1.3272 + * var variance = sumNthPowerDeviations(input, 2) / input.length;
  1.3273 + */
  1.3274 +function sumNthPowerDeviations(x/*: Array<number> */, n/*: number */)/*:number*/ {
  1.3275 +    var meanValue = mean(x),
  1.3276 +        sum = 0;
  1.3277 +
  1.3278 +    for (var i = 0; i < x.length; i++) {
  1.3279 +        sum += Math.pow(x[i] - meanValue, n);
  1.3280 +    }
  1.3281 +
  1.3282 +    return sum;
  1.3283 +}
  1.3284 +
  1.3285 +module.exports = sumNthPowerDeviations;
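         +
         +// The doc example above, written out (illustrative only):
         +//
         +//   sumNthPowerDeviations([1, 2, 3], 2);
         +//   // mean: 2; squared deviations: 1 + 0 + 1  // => 2
         +//   // so variance([1, 2, 3]) is 2 / 3, roughly 0.667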
  1.3286 +
  1.3287 +},{"25":25}],58:[function(require,module,exports){
  1.3288 +'use strict';
  1.3289 +/* @flow */
  1.3290 +
  1.3291 +/**
  1.3292 + * The simple [sum](https://en.wikipedia.org/wiki/Summation) of an array
  1.3293 + * is the result of adding all numbers together, starting from zero.
  1.3294 + *
  1.3295 + * This runs in `O(n)`, linear time with respect to the length of the array.
  1.3296 + *
  1.3297 + * @param {Array<number>} x input
  1.3298 + * @return {number} sum of all input numbers
  1.3299 + * @example
  1.3300 + * sumSimple([1, 2, 3]); // => 6
  1.3301 + */
  1.3302 +function sumSimple(x/*: Array<number> */)/*: number */ {
  1.3303 +    var value = 0;
  1.3304 +    for (var i = 0; i < x.length; i++) {
  1.3305 +        value += x[i];
  1.3306 +    }
  1.3307 +    return value;
  1.3308 +}
  1.3309 +
  1.3310 +module.exports = sumSimple;
  1.3311 +
  1.3312 +},{}],59:[function(require,module,exports){
  1.3313 +'use strict';
  1.3314 +/* @flow */
  1.3315 +
  1.3316 +var standardDeviation = require(54);
  1.3317 +var mean = require(25);
  1.3318 +
  1.3319 +/**
  1.3320 + * This is to compute [a one-sample t-test](https://en.wikipedia.org/wiki/Student%27s_t-test#One-sample_t-test), comparing the mean
  1.3321 + * of a sample to a known value, x.
  1.3322 + *
  1.3323 + * in this case, we're trying to determine whether the
  1.3324 + * population mean is equal to the value that we know, which is `x`
  1.3325 + * here. usually the results here are used to look up a
  1.3326 + * [p-value](http://en.wikipedia.org/wiki/P-value), which, for
  1.3327 + * a certain level of significance, will let you determine that the
  1.3328 + * null hypothesis can or cannot be rejected.
  1.3329 + *
  1.3330 + * @param {Array<number>} sample an array of numbers as input
  1.3331 + * @param {number} x expected value of the population mean
  1.3332 + * @returns {number} value
  1.3333 + * @example
  1.3334 + * tTest([1, 2, 3, 4, 5, 6], 3.385).toFixed(2); // => '0.16'
  1.3335 + */
  1.3336 +function tTest(sample/*: Array<number> */, x/*: number */)/*:number*/ {
  1.3337 +    // The mean of the sample
  1.3338 +    var sampleMean = mean(sample);
  1.3339 +
  1.3340 +    // The standard deviation of the sample
  1.3341 +    var sd = standardDeviation(sample);
  1.3342 +
  1.3343 +    // Square root the length of the sample
  1.3344 +    var rootN = Math.sqrt(sample.length);
  1.3345 +
  1.3346 +    // returning the t value
  1.3347 +    return (sampleMean - x) / (sd / rootN);
  1.3348 +}
  1.3349 +
  1.3350 +module.exports = tTest;
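         +
         +// The doc example above, written out (illustrative only):
         +//
         +//   tTest([1, 2, 3, 4, 5, 6], 3.385);
         +//   // sample mean: 3.5; population standard deviation: roughly 1.708
         +//   // t = (3.5 - 3.385) / (1.708 / Math.sqrt(6)), roughly 0.16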
  1.3351 +
  1.3352 +},{"25":25,"54":54}],60:[function(require,module,exports){
  1.3353 +'use strict';
  1.3354 +/* @flow */
  1.3355 +
  1.3356 +var mean = require(25);
  1.3357 +var sampleVariance = require(50);
  1.3358 +
  1.3359 +/**
  1.3360 + * This is to compute [two sample t-test](http://en.wikipedia.org/wiki/Student's_t-test).
  1.3361 + * It tests whether "mean(X) - mean(Y) = difference" (in the most common
  1.3362 + * case, `difference == 0`, which tests whether the two samples are likely to
  1.3363 + * be drawn from populations with the same mean value), with
  1.3364 + * no prior knowledge of the standard deviations of either sample
  1.3365 + * other than the fact that they are assumed to be equal.
  1.3366 + *
  1.3367 + * Usually the results here are used to look up a
  1.3368 + * [p-value](http://en.wikipedia.org/wiki/P-value), which, for
  1.3369 + * a certain level of significance, will let you determine that the
  1.3370 + * null hypothesis can or cannot be rejected.
  1.3371 + *
  1.3372 + * `diff` can be omitted if it equals 0.
  1.3373 + *
  1.3374 + * [This is used to confirm or deny](http://www.monarchlab.org/Lab/Research/Stats/2SampleT.aspx)
  1.3375 + * a null hypothesis that the means of the two populations sampled into
  1.3376 + * `sampleX` and `sampleY` are equal to each other.
  1.3377 + *
  1.3378 + * @param {Array<number>} sampleX a sample as an array of numbers
  1.3379 + * @param {Array<number>} sampleY a sample as an array of numbers
  1.3380 + * @param {number} [difference=0] the hypothesized difference between the population means
  1.3381 + * @returns {number} test result
  1.3382 + * @example
  1.3383 + * ss.tTestTwoSample([1, 2, 3, 4], [3, 4, 5, 6], 0); //= -2.1908902300206643
  1.3384 + */
  1.3385 +function tTestTwoSample(
  1.3386 +    sampleX/*: Array<number> */,
  1.3387 +    sampleY/*: Array<number> */,
  1.3388 +    difference/*: number */) {
  1.3389 +    var n = sampleX.length,
  1.3390 +        m = sampleY.length;
  1.3391 +
  1.3392 +    // If either sample doesn't actually have any values, we can't
  1.3393 +    // compute this at all, so we return `null`.
  1.3394 +    if (!n || !m) { return null; }
  1.3395 +
  1.3396 +    // default difference (mu) is zero
  1.3397 +    if (!difference) {
  1.3398 +        difference = 0;
  1.3399 +    }
  1.3400 +
  1.3401 +    var meanX = mean(sampleX),
  1.3402 +        meanY = mean(sampleY),
  1.3403 +        sampleVarianceX = sampleVariance(sampleX),
  1.3404 +        sampleVarianceY = sampleVariance(sampleY);
  1.3405 +
  1.3406 +    if (typeof meanX === 'number' &&
  1.3407 +        typeof meanY === 'number' &&
  1.3408 +        typeof sampleVarianceX === 'number' &&
  1.3409 +        typeof sampleVarianceY === 'number') {
  1.3410 +        var weightedVariance = ((n - 1) * sampleVarianceX +
  1.3411 +            (m - 1) * sampleVarianceY) / (n + m - 2);
  1.3412 +
  1.3413 +        return (meanX - meanY - difference) /
  1.3414 +            Math.sqrt(weightedVariance * (1 / n + 1 / m));
  1.3415 +    }
  1.3416 +}
  1.3417 +
  1.3418 +module.exports = tTestTwoSample;
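         +
         +// The doc example above, written out (illustrative only):
         +//
         +//   tTestTwoSample([1, 2, 3, 4], [3, 4, 5, 6], 0);
         +//   // means: 2.5 and 4.5; both sample variances are 5 / 3
         +//   // pooled variance: (3 * 5/3 + 3 * 5/3) / (4 + 4 - 2) = 5 / 3
         +//   // t = (2.5 - 4.5 - 0) / Math.sqrt((5 / 3) * (1/4 + 1/4)), roughly -2.19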
  1.3419 +
  1.3420 +},{"25":25,"50":50}],61:[function(require,module,exports){
  1.3421 +'use strict';
  1.3422 +/* @flow */
  1.3423 +
  1.3424 +/**
  1.3425 + * For a sorted input, counting the number of unique values
  1.3426 + * is possible in linear time and constant memory. This is
  1.3427 + * a simple implementation of the algorithm.
  1.3428 + *
  1.3429 + * Values are compared with `===`, so objects and other non-primitive values
  1.3430 + * are not handled in any special way.
  1.3431 + *
  1.3432 + * @param {Array} input an array of primitive values.
  1.3433 + * @returns {number} count of unique values
  1.3434 + * @example
  1.3435 + * uniqueCountSorted([1, 2, 3]); // => 3
  1.3436 + * uniqueCountSorted([1, 1, 1]); // => 1
  1.3437 + */
  1.3438 +function uniqueCountSorted(input/*: Array<any>*/)/*: number */ {
  1.3439 +    var uniqueValueCount = 0,
  1.3440 +        lastSeenValue;
  1.3441 +    for (var i = 0; i < input.length; i++) {
  1.3442 +        if (i === 0 || input[i] !== lastSeenValue) {
  1.3443 +            lastSeenValue = input[i];
  1.3444 +            uniqueValueCount++;
  1.3445 +        }
  1.3446 +    }
  1.3447 +    return uniqueValueCount;
  1.3448 +}
  1.3449 +
  1.3450 +module.exports = uniqueCountSorted;
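         +
         +// A caveat worth spelling out (illustrative only): the loop counts runs of
         +// equal neighbours, so the input really does need to be sorted first.
         +//
         +//   uniqueCountSorted([1, 2, 2, 3]); // => 3
         +//   uniqueCountSorted([1, 2, 1]);    // => 3, even though there are only
         +//                                    //    two distinct values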
  1.3451 +
  1.3452 +},{}],62:[function(require,module,exports){
  1.3453 +'use strict';
  1.3454 +/* @flow */
  1.3455 +
  1.3456 +var sumNthPowerDeviations = require(57);
  1.3457 +
  1.3458 +/**
  1.3459 + * The [variance](http://en.wikipedia.org/wiki/Variance)
  1.3460 + * is the mean of squared deviations from the mean.
  1.3461 + *
  1.3462 + * This is an implementation of variance, not sample variance:
  1.3463 + * see the `sampleVariance` method if you want a sample measure.
  1.3464 + *
  1.3465 + * @param {Array<number>} x a population
  1.3466 + * @returns {number} variance: a value greater than or equal to zero.
  1.3467 + * zero indicates that all values are identical.
  1.3468 + * @example
  1.3469 + * variance([1, 2, 3, 4, 5, 6]); // => 2.9166666666666665
  1.3470 + */
  1.3471 +function variance(x/*: Array<number> */)/*:number*/ {
  1.3472 +    // The variance of no numbers is NaN
  1.3473 +    if (x.length === 0) { return NaN; }
  1.3474 +
  1.3475 +    // Find the mean of squared deviations between the
  1.3476 +    // mean value and each value.
  1.3477 +    return sumNthPowerDeviations(x, 2) / x.length;
  1.3478 +}
  1.3479 +
  1.3480 +module.exports = variance;
  1.3481 +
  1.3482 +},{"57":57}],63:[function(require,module,exports){
  1.3483 +'use strict';
  1.3484 +/* @flow */
  1.3485 +
  1.3486 +/**
  1.3487 + * The [Z-Score, or Standard Score](http://en.wikipedia.org/wiki/Standard_score).
  1.3488 + *
  1.3489 + * The standard score is the number of standard deviations an observation
  1.3490 + * or datum is above or below the mean. Thus, a positive standard score
  1.3491 + * represents a datum above the mean, while a negative standard score
  1.3492 + * represents a datum below the mean. It is a dimensionless quantity
  1.3493 + * obtained by subtracting the population mean from an individual raw
  1.3494 + * score and then dividing the difference by the population standard
  1.3495 + * deviation.
  1.3496 + *
  1.3497 + * The z-score is only defined if one knows the population parameters;
  1.3498 + * if one only has a sample set, then the analogous computation with
  1.3499 + * sample mean and sample standard deviation yields the
  1.3500 + * Student's t-statistic.
  1.3501 + *
  1.3502 + * @param {number} x
  1.3503 + * @param {number} mean
  1.3504 + * @param {number} standardDeviation
  1.3505 + * @return {number} z score
  1.3506 + * @example
  1.3507 + * zScore(78, 80, 5); // => -0.4
  1.3508 + */
  1.3509 +function zScore(x/*:number*/, mean/*:number*/, standardDeviation/*:number*/)/*:number*/ {
  1.3510 +    return (x - mean) / standardDeviation;
  1.3511 +}
  1.3512 +
  1.3513 +module.exports = zScore;
  1.3514 +
  1.3515 +},{}]},{},[1])(1)
  1.3516 +});
  1.3517 +//# sourceMappingURL=simple-statistics.js.map