/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.commons.math3.stat.inference;

import java.math.BigDecimal;
import java.util.Arrays;
import java.util.Iterator;

import org.apache.commons.math3.distribution.RealDistribution;
import org.apache.commons.math3.exception.InsufficientDataException;
import org.apache.commons.math3.exception.MathArithmeticException;
import org.apache.commons.math3.exception.NullArgumentException;
import org.apache.commons.math3.exception.NumberIsTooLargeException;
import org.apache.commons.math3.exception.OutOfRangeException;
import org.apache.commons.math3.exception.TooManyIterationsException;
import org.apache.commons.math3.exception.util.LocalizedFormats;
import org.apache.commons.math3.fraction.BigFraction;
import org.apache.commons.math3.fraction.BigFractionField;
import org.apache.commons.math3.fraction.FractionConversionException;
import org.apache.commons.math3.linear.Array2DRowFieldMatrix;
import org.apache.commons.math3.linear.FieldMatrix;
import org.apache.commons.math3.linear.MatrixUtils;
import org.apache.commons.math3.linear.RealMatrix;
import org.apache.commons.math3.random.RandomGenerator;
import org.apache.commons.math3.random.Well19937c;
import org.apache.commons.math3.util.CombinatoricsUtils;
import org.apache.commons.math3.util.FastMath;
import org.apache.commons.math3.util.MathArrays;

import static org.apache.commons.math3.util.MathUtils.PI_SQUARED;
import static org.apache.commons.math3.util.FastMath.PI;

/**
 * Implementation of the <a href="http://en.wikipedia.org/wiki/Kolmogorov-Smirnov_test">
 * Kolmogorov-Smirnov (K-S) test</a> for equality of continuous distributions.
 * <p>
 * The K-S test uses a statistic based on the maximum deviation of the empirical distribution of
 * sample data points from the distribution expected under the null hypothesis. For one-sample tests
 * evaluating the null hypothesis that a set of sample data points follow a given distribution, the
 * test statistic is \(D_n=\sup_x |F_n(x)-F(x)|\), where \(F\) is the expected distribution and
 * \(F_n\) is the empirical distribution of the \(n\) sample data points. The distribution of
 * \(D_n\) is estimated using a method based on [1] with certain quick decisions for extreme values
 * given in [2].
 * </p>
 * <p>
 * Two-sample tests are also supported, evaluating the null hypothesis that the two samples
 * {@code x} and {@code y} come from the same underlying distribution. In this case, the test
 * statistic is \(D_{n,m}=\sup_t | F_n(t)-F_m(t)|\) where \(n\) is the length of {@code x}, \(m\) is
 * the length of {@code y}, \(F_n\) is the empirical distribution that puts mass \(1/n\) at each of
 * the values in {@code x} and \(F_m\) is the empirical distribution of the {@code y} values. The
 * default 2-sample test method, {@link #kolmogorovSmirnovTest(double[], double[])}, works as
 * follows:
 * <ul>
 * <li>For very small samples (where the product of the sample sizes is less than
 * {@value #SMALL_SAMPLE_PRODUCT}), the exact distribution is used to compute the p-value for the
 * 2-sample test.</li>
 * <li>For mid-size samples (product of sample sizes greater than or equal to
 * {@value #SMALL_SAMPLE_PRODUCT} but less than {@value #LARGE_SAMPLE_PRODUCT}), Monte Carlo
 * simulation is used to compute the p-value. The simulation randomly generates partitions of \(m +
 * n\) into an \(m\)-set and an \(n\)-set and reports the proportion that give \(D\) values
 * exceeding the observed value.</li>
 * <li>When the product of the sample sizes exceeds {@value #LARGE_SAMPLE_PRODUCT}, the asymptotic
 * distribution of \(D_{n,m}\) is used. See {@link #approximateP(double, int, int)} for details on
 * the approximation.</li>
 * </ul>
 * </p>
 * <p>
 * In the two-sample case, \(D_{n,m}\) has a discrete distribution. This makes the p-value
 * associated with the null hypothesis \(H_0 : D_{n,m} \ge d \) differ from \(H_0 : D_{n,m} > d \)
 * by the mass of the observed value \(d\). To distinguish these, the two-sample tests use a boolean
 * {@code strict} parameter. This parameter is ignored for large samples.
 * </p>
 * <p>
 * The methods used by the 2-sample default implementation are also exposed directly:
 * <ul>
 * <li>{@link #exactP(double, int, int, boolean)} computes exact 2-sample p-values</li>
 * <li>{@link #monteCarloP(double, int, int, boolean, int)} computes 2-sample p-values by Monte
 * Carlo simulation</li>
 * <li>{@link #approximateP(double, int, int)} uses the asymptotic distribution</li>
 * </ul>
 * The {@code boolean} arguments in the first two methods allow the probability used to estimate
 * the p-value to be expressed using strict or non-strict inequality. See
 * {@link #kolmogorovSmirnovTest(double[], double[], boolean)}.
 * </p>
 * <p>
 * References:
 * <ul>
 * <li>[1] <a href="http://www.jstatsoft.org/v08/i18/"> Evaluating Kolmogorov's Distribution</a> by
 * George Marsaglia, Wai Wan Tsang, and Jingbo Wang</li>
 * <li>[2] <a href="http://www.jstatsoft.org/v39/i11/"> Computing the Two-Sided Kolmogorov-Smirnov
 * Distribution</a> by Richard Simard and Pierre L'Ecuyer</li>
 * </ul>
 * <br/>
 * Note that [1] contains an error in computing h, refer to <a
 * href="https://issues.apache.org/jira/browse/MATH-437">MATH-437</a> for details.
 * </p>
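 * <p>
 * A minimal usage sketch (the sample values and the {@code NormalDistribution} reference
 * distribution below are purely illustrative):
 * </p>
 * <pre>{@code
 * KolmogorovSmirnovTest test = new KolmogorovSmirnovTest();
 *
 * // One-sample test of the data against a hypothesized distribution
 * double[] sample = {0.2, -0.5, 1.3, 0.7, -1.1, 0.4};
 * double pOneSample = test.kolmogorovSmirnovTest(new NormalDistribution(0, 1), sample);
 *
 * // Two-sample test; the p-value method is chosen automatically from the sample sizes
 * double[] x = {0.2, -0.5, 1.3, 0.7, -1.1, 0.4};
 * double[] y = {1.6, 0.8, 2.1, 0.3, 1.2, -0.2};
 * double pTwoSample = test.kolmogorovSmirnovTest(x, y);
 * }</pre>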
 *
 * @since 3.3
 */
public class KolmogorovSmirnovTest {

    /**
     * Bound on the number of partial sums in {@link #ksSum(double, double, int)}
     */
    protected static final int MAXIMUM_PARTIAL_SUM_COUNT = 100000;

    /** Convergence criterion for {@link #ksSum(double, double, int)} */
    protected static final double KS_SUM_CAUCHY_CRITERION = 1E-20;

    /** Convergence criterion for the sums in {@link #pelzGood(double, int)} */
    protected static final double PG_SUM_RELATIVE_ERROR = 1.0e-10;

    /** When product of sample sizes is less than this value, 2-sample K-S test is exact */
    protected static final int SMALL_SAMPLE_PRODUCT = 200;

    /**
     * When product of sample sizes exceeds this value, 2-sample K-S test uses asymptotic
     * distribution for strict inequality p-value.
     */
    protected static final int LARGE_SAMPLE_PRODUCT = 10000;

    /** Default number of iterations used by {@link #monteCarloP(double, int, int, boolean, int)} */
    protected static final int MONTE_CARLO_ITERATIONS = 1000000;

    /** Random data generator used by {@link #monteCarloP(double, int, int, boolean, int)} */
    private final RandomGenerator rng;

    /**
     * Construct a KolmogorovSmirnovTest instance with a default random data generator.
     */
    public KolmogorovSmirnovTest() {
        rng = new Well19937c();
    }

    /**
     * Construct a KolmogorovSmirnovTest with the provided random data generator.
     *
     * @param rng random data generator used by {@link #monteCarloP(double, int, int, boolean, int)}
     */
    public KolmogorovSmirnovTest(RandomGenerator rng) {
        this.rng = rng;
    }

    /**
     * Computes the <i>p-value</i>, or <i>observed significance level</i>, of a one-sample <a
     * href="http://en.wikipedia.org/wiki/Kolmogorov-Smirnov_test"> Kolmogorov-Smirnov test</a>
     * evaluating the null hypothesis that {@code data} conforms to {@code distribution}. If
     * {@code exact} is true, the distribution used to compute the p-value is computed using
     * extended precision. See {@link #cdfExact(double, int)}.
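     * <p>
     * A minimal sketch for comparing the exact and approximate p-values (the sample values and
     * the {@code UniformRealDistribution} below are illustrative only):
     * </p>
     * <pre>{@code
     * KolmogorovSmirnovTest test = new KolmogorovSmirnovTest();
     * double[] data = {0.1, 0.6, 0.3, 0.9, 0.5};
     * RealDistribution unif = new UniformRealDistribution(0, 1);
     * double pApprox = test.kolmogorovSmirnovTest(unif, data, false);
     * double pExact  = test.kolmogorovSmirnovTest(unif, data, true);
     * }</pre>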
     *
     * @param distribution reference distribution
     * @param data sample being evaluated
     * @param exact whether or not to force exact computation of the p-value
     * @return the p-value associated with the null hypothesis that {@code data} is a sample from
     *         {@code distribution}
     * @throws InsufficientDataException if {@code data} does not have length at least 2
     * @throws NullArgumentException if {@code data} is null
     */
    public double kolmogorovSmirnovTest(RealDistribution distribution, double[] data, boolean exact) {
        return 1d - cdf(kolmogorovSmirnovStatistic(distribution, data), data.length, exact);
    }

    /**
     * Computes the one-sample Kolmogorov-Smirnov test statistic, \(D_n=\sup_x |F_n(x)-F(x)|\) where
     * \(F\) is the distribution (cdf) function associated with {@code distribution}, \(n\) is the
     * length of {@code data} and \(F_n\) is the empirical distribution that puts mass \(1/n\) at
     * each of the values in {@code data}.
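     * <p>
     * As a small worked sketch (the values are illustrative), for the sample {@code {0.1, 0.4, 0.8}}
     * tested against the uniform distribution on [0, 1], \(F_n\) steps by 1/3 at each sample point
     * and the largest gap to the uniform cdf is \(2/3 - 0.4 = 4/15\):
     * </p>
     * <pre>{@code
     * RealDistribution unif = new UniformRealDistribution(0, 1);
     * double d = new KolmogorovSmirnovTest()
     *     .kolmogorovSmirnovStatistic(unif, new double[] {0.1, 0.4, 0.8});
     * // d is 4/15, approximately 0.267
     * }</pre>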
     *
     * @param distribution reference distribution
     * @param data sample being evaluated
     * @return Kolmogorov-Smirnov statistic \(D_n\)
     * @throws InsufficientDataException if {@code data} does not have length at least 2
     * @throws NullArgumentException if {@code data} is null
     */
    public double kolmogorovSmirnovStatistic(RealDistribution distribution, double[] data) {
        checkArray(data);
        final int n = data.length;
        final double nd = n;
        final double[] dataCopy = new double[n];
        System.arraycopy(data, 0, dataCopy, 0, n);
        Arrays.sort(dataCopy);
        double d = 0d;
        for (int i = 1; i <= n; i++) {
            final double yi = distribution.cumulativeProbability(dataCopy[i - 1]);
            final double currD = FastMath.max(yi - (i - 1) / nd, i / nd - yi);
            if (currD > d) {
                d = currD;
            }
        }
        return d;
    }

    /**
     * Computes the <i>p-value</i>, or <i>observed significance level</i>, of a two-sample <a
     * href="http://en.wikipedia.org/wiki/Kolmogorov-Smirnov_test"> Kolmogorov-Smirnov test</a>
     * evaluating the null hypothesis that {@code x} and {@code y} are samples drawn from the same
     * probability distribution. Specifically, what is returned is an estimate of the probability
     * that the {@link #kolmogorovSmirnovStatistic(double[], double[])} associated with a randomly
     * selected partition of the combined sample into subsamples of sizes {@code x.length} and
     * {@code y.length} will strictly exceed (if {@code strict} is {@code true}) or be at least as
     * large as (if {@code strict} is {@code false}) {@code kolmogorovSmirnovStatistic(x, y)}.
     * <ul>
     * <li>For very small samples (where the product of the sample sizes is less than
     * {@value #SMALL_SAMPLE_PRODUCT}), the exact distribution is used to compute the p-value. This
     * is accomplished by enumerating all partitions of the combined sample into two subsamples of
     * the respective sample sizes, computing \(D_{n,m}\) for each partition and returning the
     * proportion of partitions that give \(D\) values exceeding the observed value.</li>
     * <li>For mid-size samples (product of sample sizes greater than or equal to
     * {@value #SMALL_SAMPLE_PRODUCT} but less than {@value #LARGE_SAMPLE_PRODUCT}), Monte Carlo
     * simulation is used to compute the p-value. The simulation randomly generates partitions and
     * reports the proportion that give \(D\) values exceeding the observed value.</li>
     * <li>When the product of the sample sizes exceeds {@value #LARGE_SAMPLE_PRODUCT}, the
     * asymptotic distribution of \(D_{n,m}\) is used. See {@link #approximateP(double, int, int)}
     * for details on the approximation.</li>
     * </ul>
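     * <p>
     * For example (the sample values are illustrative), the strict and non-strict p-values can
     * differ for small samples because \(D_{n,m}\) has a discrete distribution:
     * </p>
     * <pre>{@code
     * KolmogorovSmirnovTest test = new KolmogorovSmirnovTest();
     * double[] x = {1.0, 2.0, 3.0, 4.0};
     * double[] y = {2.5, 4.5, 6.5, 8.5};
     * double pStrict    = test.kolmogorovSmirnovTest(x, y, true);
     * double pNonStrict = test.kolmogorovSmirnovTest(x, y, false); // pNonStrict >= pStrict
     * }</pre>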
     *
     * @param x first sample dataset
     * @param y second sample dataset
     * @param strict whether or not the probability to compute is expressed as a strict inequality
     *        (ignored for large samples)
     * @return p-value associated with the null hypothesis that {@code x} and {@code y} represent
     *         samples from the same distribution
     * @throws InsufficientDataException if either {@code x} or {@code y} does not have length at
     *         least 2
     * @throws NullArgumentException if either {@code x} or {@code y} is null
     */
    public double kolmogorovSmirnovTest(double[] x, double[] y, boolean strict) {
        if (x.length * y.length < SMALL_SAMPLE_PRODUCT) {
            return exactP(kolmogorovSmirnovStatistic(x, y), x.length, y.length, strict);
        }
        if (x.length * y.length < LARGE_SAMPLE_PRODUCT) {
            return monteCarloP(kolmogorovSmirnovStatistic(x, y), x.length, y.length, strict, MONTE_CARLO_ITERATIONS);
        }
        return approximateP(kolmogorovSmirnovStatistic(x, y), x.length, y.length);
    }

    /**
     * Computes the <i>p-value</i>, or <i>observed significance level</i>, of a two-sample <a
     * href="http://en.wikipedia.org/wiki/Kolmogorov-Smirnov_test"> Kolmogorov-Smirnov test</a>
     * evaluating the null hypothesis that {@code x} and {@code y} are samples drawn from the same
     * probability distribution. Assumes the strict form of the inequality used to compute the
     * p-value. See {@link #kolmogorovSmirnovTest(double[], double[], boolean)}.
     *
     * @param x first sample dataset
     * @param y second sample dataset
     * @return p-value associated with the null hypothesis that {@code x} and {@code y} represent
     *         samples from the same distribution
     * @throws InsufficientDataException if either {@code x} or {@code y} does not have length at
     *         least 2
     * @throws NullArgumentException if either {@code x} or {@code y} is null
     */
    public double kolmogorovSmirnovTest(double[] x, double[] y) {
        return kolmogorovSmirnovTest(x, y, true);
    }

    /**
     * Computes the two-sample Kolmogorov-Smirnov test statistic, \(D_{n,m}=\sup_x |F_n(x)-F_m(x)|\)
     * where \(n\) is the length of {@code x}, \(m\) is the length of {@code y}, \(F_n\) is the
     * empirical distribution that puts mass \(1/n\) at each of the values in {@code x} and \(F_m\)
     * is the empirical distribution of the {@code y} values.
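     * <p>
     * As a small worked sketch (values are illustrative): for {@code x = {1, 2, 3}} and
     * {@code y = {2.5, 3.5, 4.5}}, the largest gap between the two empirical cdfs occurs at
     * \(t = 2\), where \(F_n = 2/3\) and \(F_m = 0\), so \(D_{n,m} = 2/3\):
     * </p>
     * <pre>{@code
     * double d = new KolmogorovSmirnovTest()
     *     .kolmogorovSmirnovStatistic(new double[] {1, 2, 3}, new double[] {2.5, 3.5, 4.5});
     * // d is 2/3 here
     * }</pre>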
     *
     * @param x first sample
     * @param y second sample
     * @return test statistic \(D_{n,m}\) used to evaluate the null hypothesis that {@code x} and
     *         {@code y} represent samples from the same underlying distribution
     * @throws InsufficientDataException if either {@code x} or {@code y} does not have length at
     *         least 2
     * @throws NullArgumentException if either {@code x} or {@code y} is null
     */
    public double kolmogorovSmirnovStatistic(double[] x, double[] y) {
        checkArray(x);
        checkArray(y);
        // Copy and sort the sample arrays
        final double[] sx = MathArrays.copyOf(x);
        final double[] sy = MathArrays.copyOf(y);
        Arrays.sort(sx);
        Arrays.sort(sy);
        final int n = sx.length;
        final int m = sy.length;

        // Find the max difference between cdf_x and cdf_y
        double supD = 0d;
        // First walk x points
        for (int i = 0; i < n; i++) {
            final double cdf_x = (i + 1d) / n;
            final int yIndex = Arrays.binarySearch(sy, sx[i]);
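            // When sx[i] is not present in sy, binarySearch returns -(insertionPoint) - 1, so
            // (-yIndex - 1) is the number of y values strictly less than sx[i]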
            final double cdf_y = yIndex >= 0 ? (yIndex + 1d) / m : (-yIndex - 1d) / m;
            final double curD = FastMath.abs(cdf_x - cdf_y);
            if (curD > supD) {
                supD = curD;
            }
        }
        // Now look at y
        for (int i = 0; i < m; i++) {
            final double cdf_y = (i + 1d) / m;
            final int xIndex = Arrays.binarySearch(sx, sy[i]);
            final double cdf_x = xIndex >= 0 ? (xIndex + 1d) / n : (-xIndex - 1d) / n;
            final double curD = FastMath.abs(cdf_x - cdf_y);
            if (curD > supD) {
                supD = curD;
            }
        }
        return supD;
    }

    /**
     * Computes the <i>p-value</i>, or <i>observed significance level</i>, of a one-sample <a
     * href="http://en.wikipedia.org/wiki/Kolmogorov-Smirnov_test"> Kolmogorov-Smirnov test</a>
     * evaluating the null hypothesis that {@code data} conforms to {@code distribution}.
     *
     * @param distribution reference distribution
     * @param data sample being evaluated
     * @return the p-value associated with the null hypothesis that {@code data} is a sample from
     *         {@code distribution}
     * @throws InsufficientDataException if {@code data} does not have length at least 2
     * @throws NullArgumentException if {@code data} is null
     */
    public double kolmogorovSmirnovTest(RealDistribution distribution, double[] data) {
        return kolmogorovSmirnovTest(distribution, data, false);
    }

    /**
     * Performs a <a href="http://en.wikipedia.org/wiki/Kolmogorov-Smirnov_test"> Kolmogorov-Smirnov
     * test</a> evaluating the null hypothesis that {@code data} conforms to {@code distribution}.
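     * <p>
     * For example (the sample values are illustrative), to test goodness of fit to a standard
     * normal distribution at the 5% significance level:
     * </p>
     * <pre>{@code
     * double[] data = {0.2, -0.5, 1.3, 0.7, -1.1, 0.4};
     * boolean rejected = new KolmogorovSmirnovTest()
     *     .kolmogorovSmirnovTest(new NormalDistribution(0, 1), data, 0.05);
     * }</pre>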
     *
     * @param distribution reference distribution
     * @param data sample being evaluated
     * @param alpha significance level of the test
     * @return true iff the null hypothesis that {@code data} is a sample from {@code distribution}
     *         can be rejected with confidence 1 - {@code alpha}
     * @throws InsufficientDataException if {@code data} does not have length at least 2
     * @throws NullArgumentException if {@code data} is null
     */
    public boolean kolmogorovSmirnovTest(RealDistribution distribution, double[] data, double alpha) {
        if ((alpha <= 0) || (alpha > 0.5)) {
            throw new OutOfRangeException(LocalizedFormats.OUT_OF_BOUND_SIGNIFICANCE_LEVEL, alpha, 0, 0.5);
        }
        return kolmogorovSmirnovTest(distribution, data) < alpha;
    }

    /**
     * Calculates \(P(D_n < d)\) using the method described in [1] with quick decisions for extreme
     * values given in [2] (see above). The result is not exact as with
     * {@link #cdfExact(double, int)} because calculations are based on
     * {@code double} rather than {@link org.apache.commons.math3.fraction.BigFraction}.
     *
     * @param d statistic
     * @param n sample size
     * @return \(P(D_n < d)\)
     * @throws MathArithmeticException if algorithm fails to convert {@code h} to a
     *         {@link org.apache.commons.math3.fraction.BigFraction} in expressing {@code d} as \((k
     *         - h) / m\) for integer {@code k, m} and \(0 \le h < 1\)
     */
    public double cdf(double d, int n)
        throws MathArithmeticException {
        return cdf(d, n, false);
    }

    /**
     * Calculates {@code P(D_n < d)}. The result is exact in the sense that BigFraction/BigReal is
     * used everywhere at the expense of very slow execution time. Almost never choose this in real
     * applications; it is intended almost solely for verification purposes. Normally, you would
     * choose {@link #cdf(double, int)}. See the class javadoc for definitions and algorithm
     * description.
     *
     * @param d statistic
     * @param n sample size
     * @return \(P(D_n < d)\)
     * @throws MathArithmeticException if the algorithm fails to convert {@code h} to a
     *         {@link org.apache.commons.math3.fraction.BigFraction} in expressing {@code d} as \((k
     *         - h) / m\) for integer {@code k, m} and \(0 \le h < 1\)
     */
    public double cdfExact(double d, int n)
        throws MathArithmeticException {
        return cdf(d, n, true);
    }

    /**
     * Calculates {@code P(D_n < d)} using the method described in [1] with quick decisions for
     * extreme values given in [2] (see above).
     *
     * @param d statistic
     * @param n sample size
     * @param exact whether the probability should be calculated exactly using
     *        {@link org.apache.commons.math3.fraction.BigFraction} everywhere at the expense of
     *        very slow execution time, or if {@code double} should be used in convenient places to
     *        gain speed. Almost never choose {@code true} in real applications; {@code true} is
     *        intended almost solely for verification purposes.
     * @return \(P(D_n < d)\)
     * @throws MathArithmeticException if algorithm fails to convert {@code h} to a
     *         {@link org.apache.commons.math3.fraction.BigFraction} in expressing {@code d} as \((k
     *         - h) / m\) for integer {@code k, m} and \(0 \le h < 1\).
     */
    public double cdf(double d, int n, boolean exact)
        throws MathArithmeticException {

        final double ninv = 1 / ((double) n);
        final double ninvhalf = 0.5 * ninv;

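        /*
         * Quick decisions for extreme values of d (cf. [2] in the class javadoc): for d <= 1/(2n)
         * the probability is 0, on (1/(2n), 1/n] it is n!(2d - 1/n)^n, on [1 - 1/n, 1) it is
         * 1 - 2(1 - d)^n, and for d >= 1 it is 1.
         */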
        if (d <= ninvhalf) {
            return 0;
        } else if (ninvhalf < d && d <= ninv) {
            double res = 1;
            final double f = 2 * d - ninv;
            // n! f^n = n*f * (n-1)*f * ... * 1*f
            for (int i = 1; i <= n; ++i) {
                res *= i * f;
            }
            return res;
        } else if (1 - ninv <= d && d < 1) {
            return 1 - 2 * Math.pow(1 - d, n);
        } else if (1 <= d) {
            return 1;
        }
        if (exact) {
            return exactK(d, n);
        }
        if (n <= 140) {
            return roundedK(d, n);
        }
        return pelzGood(d, n);
    }

    /**
     * Calculates the exact value of {@code P(D_n < d)} using the method described in [1] (reference
     * in class javadoc above) and {@link org.apache.commons.math3.fraction.BigFraction} (see
     * above).
     *
     * @param d statistic
     * @param n sample size
     * @return the two-sided probability \(P(D_n < d)\)
     * @throws MathArithmeticException if algorithm fails to convert {@code h} to a
     *         {@link org.apache.commons.math3.fraction.BigFraction} in expressing {@code d} as \((k
     *         - h) / m\) for integer {@code k, m} and \(0 \le h < 1\).
     */
    private double exactK(double d, int n)
        throws MathArithmeticException {

        final int k = (int) Math.ceil(n * d);

        final FieldMatrix<BigFraction> H = this.createExactH(d, n);
        final FieldMatrix<BigFraction> Hpower = H.power(n);

        BigFraction pFrac = Hpower.getEntry(k - 1, k - 1);

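        // Scale the (k, k) entry of H^n by n!/n^n, as prescribed in [1], one factor i/n at a time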
        for (int i = 1; i <= n; ++i) {
            pFrac = pFrac.multiply(i).divide(n);
        }

        /*
         * BigFraction.doubleValue converts the numerator to double and the denominator to double
         * and divides afterwards. That yields NaN quite easily. This does not (scale is the number
         * of digits):
         */
        return pFrac.bigDecimalValue(20, BigDecimal.ROUND_HALF_UP).doubleValue();
    }

    /**
     * Calculates {@code P(D_n < d)} using the method described in [1] and doubles (see above).
     *
     * @param d statistic
     * @param n sample size
     * @return \(P(D_n < d)\)
     */
    private double roundedK(double d, int n) {

        final int k = (int) Math.ceil(n * d);
        final RealMatrix H = this.createRoundedH(d, n);
        final RealMatrix Hpower = H.power(n);

        double pFrac = Hpower.getEntry(k - 1, k - 1);
        for (int i = 1; i <= n; ++i) {
            pFrac *= (double) i / (double) n;
        }

        return pFrac;
    }

    /**
     * Computes the Pelz-Good approximation for \(P(D_n < d)\) as described in [2] in the class javadoc.
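     * <p>
     * In outline, the approximation evaluates \(K_0(z) + K_1(z)/\sqrt{n} + K_2(z)/n + K_3(z)/n^{3/2}\)
     * with \(z = d\sqrt{n}\), where each \(K_i\) is a truncated series as given in [2].
     * </p>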
     *
     * @param d value of d-statistic (x in [2])
     * @param n sample size
     * @return \(P(D_n < d)\)
     * @since 3.4
     */
    public double pelzGood(double d, int n) {

        // Change the variable since approximation is for the distribution evaluated at d / sqrt(n)
        final double sqrtN = FastMath.sqrt(n);
        final double z = d * sqrtN;
        final double z2 = d * d * n;
        final double z4 = z2 * z2;
        final double z6 = z4 * z2;
        final double z8 = z4 * z4;

        // Eventual return value
        double ret = 0;

        // Compute K_0(z)
        double sum = 0;
        double increment = 0;
        double kTerm = 0;
        double z2Term = PI_SQUARED / (8 * z2);
        int k = 1;
        for (; k < MAXIMUM_PARTIAL_SUM_COUNT; k++) {
            kTerm = 2 * k - 1;
            increment = FastMath.exp(-z2Term * kTerm * kTerm);
            sum += increment;
            if (increment <= PG_SUM_RELATIVE_ERROR * sum) {
                break;
            }
        }
        if (k == MAXIMUM_PARTIAL_SUM_COUNT) {
            throw new TooManyIterationsException(MAXIMUM_PARTIAL_SUM_COUNT);
        }
        ret = sum * FastMath.sqrt(2 * FastMath.PI) / z;

        // K_1(z)
        // Sum is -inf to inf, but k term is always (k + 1/2) ^ 2, so really have
        // twice the sum from k = 0 to inf (k = -1 is same as 0, -2 same as 1, ...)
        final double twoZ2 = 2 * z2;
        sum = 0;
        kTerm = 0;
        double kTerm2 = 0;
        for (k = 0; k < MAXIMUM_PARTIAL_SUM_COUNT; k++) {
            kTerm = k + 0.5;
            kTerm2 = kTerm * kTerm;
            increment = (PI_SQUARED * kTerm2 - z2) * FastMath.exp(-PI_SQUARED * kTerm2 / twoZ2);
            sum += increment;
            if (FastMath.abs(increment) < PG_SUM_RELATIVE_ERROR * FastMath.abs(sum)) {
                break;
            }
        }
        if (k == MAXIMUM_PARTIAL_SUM_COUNT) {
            throw new TooManyIterationsException(MAXIMUM_PARTIAL_SUM_COUNT);
        }
        final double sqrtHalfPi = FastMath.sqrt(PI / 2);
        // Instead of doubling sum, divide by 3 instead of 6
        ret += sum * sqrtHalfPi / (3 * z4 * sqrtN);

        // K_2(z)
        // Same drill as K_1, but with two doubly infinite sums, all k terms are even powers.
        final double z4Term = 2 * z4;
        final double z6Term = 6 * z6;
        z2Term = 5 * z2;
        final double pi4 = PI_SQUARED * PI_SQUARED;
        sum = 0;
        kTerm = 0;
        kTerm2 = 0;
        for (k = 0; k < MAXIMUM_PARTIAL_SUM_COUNT; k++) {
            kTerm = k + 0.5;
            kTerm2 = kTerm * kTerm;
            increment =  (z6Term + z4Term + PI_SQUARED * (z4Term - z2Term) * kTerm2 +
                    pi4 * (1 - twoZ2) * kTerm2 * kTerm2) * FastMath.exp(-PI_SQUARED * kTerm2 / twoZ2);
            sum += increment;
            if (FastMath.abs(increment) < PG_SUM_RELATIVE_ERROR * FastMath.abs(sum)) {
                break;
            }
        }
        if (k == MAXIMUM_PARTIAL_SUM_COUNT) {
            throw new TooManyIterationsException(MAXIMUM_PARTIAL_SUM_COUNT);
        }
        double sum2 = 0;
        kTerm2 = 0;
        for (k = 1; k < MAXIMUM_PARTIAL_SUM_COUNT; k++) {
            kTerm2 = k * k;
            increment = PI_SQUARED * kTerm2 * FastMath.exp(-PI_SQUARED * kTerm2 / twoZ2);
            sum2 += increment;
            if (FastMath.abs(increment) < PG_SUM_RELATIVE_ERROR * FastMath.abs(sum2)) {
                break;
            }
        }
        if (k == MAXIMUM_PARTIAL_SUM_COUNT) {
            throw new TooManyIterationsException(MAXIMUM_PARTIAL_SUM_COUNT);
        }
        // Again, adjust coefficients instead of doubling sum, sum2
        ret += (sqrtHalfPi / n) * (sum / (36 * z2 * z2 * z2 * z) - sum2 / (18 * z2 * z));

        // K_3(z) One more time with feeling - two doubly infinite sums, all k powers even.
        // Multiply coefficient denominators by 2, so omit doubling sums.
        final double pi6 = pi4 * PI_SQUARED;
        sum = 0;
        double kTerm4 = 0;
        double kTerm6 = 0;
        for (k = 0; k < MAXIMUM_PARTIAL_SUM_COUNT; k++) {
            kTerm = k + 0.5;
            kTerm2 = kTerm * kTerm;
            kTerm4 = kTerm2 * kTerm2;
            kTerm6 = kTerm4 * kTerm2;
            increment = (pi6 * kTerm6 * (5 - 30 * z2) + pi4 * kTerm4 * (-60 * z2 + 212 * z4) +
                    PI_SQUARED * kTerm2 * (135 * z4 - 96 * z6) - 30 * z6 - 90 * z8) *
                    FastMath.exp(-PI_SQUARED * kTerm2 / twoZ2);
            sum += increment;
            if (FastMath.abs(increment) < PG_SUM_RELATIVE_ERROR * FastMath.abs(sum)) {
                break;
            }
        }
        if (k == MAXIMUM_PARTIAL_SUM_COUNT) {
            throw new TooManyIterationsException(MAXIMUM_PARTIAL_SUM_COUNT);
        }
        sum2 = 0;
        for (k = 1; k < MAXIMUM_PARTIAL_SUM_COUNT; k++) {
            kTerm2 = k * k;
            kTerm4 = kTerm2 * kTerm2;
            increment = (-pi4 * kTerm4 + 3 * PI_SQUARED * kTerm2 * z2) *
                    FastMath.exp(-PI_SQUARED * kTerm2 / twoZ2);
            sum2 += increment;
            if (FastMath.abs(increment) < PG_SUM_RELATIVE_ERROR * FastMath.abs(sum2)) {
                break;
            }
        }
        if (k == MAXIMUM_PARTIAL_SUM_COUNT) {
            throw new TooManyIterationsException(MAXIMUM_PARTIAL_SUM_COUNT);
        }
        return ret + (sqrtHalfPi / (sqrtN * n)) * (sum / (3240 * z6 * z4) +
                sum2 / (108 * z6));

    }

    /***
     * Creates {@code H} of size {@code m x m} as described in [1] (see above).
     *
     * @param d statistic
     * @param n sample size
     * @return H matrix
     * @throws NumberIsTooLargeException if fractional part is greater than or equal to 1
     * @throws FractionConversionException if algorithm fails to convert {@code h} to a
     *         {@link org.apache.commons.math3.fraction.BigFraction} in expressing {@code d} as \((k
     *         - h) / m\) for integer {@code k, m} and \(0 \le h < 1\).
     */
    private FieldMatrix<BigFraction> createExactH(double d, int n)
        throws NumberIsTooLargeException, FractionConversionException {

        final int k = (int) Math.ceil(n * d);
        final int m = 2 * k - 1;
        final double hDouble = k - n * d;
        if (hDouble >= 1) {
            throw new NumberIsTooLargeException(hDouble, 1.0, false);
        }
        BigFraction h = null;
        try {
            h = new BigFraction(hDouble, 1.0e-20, 10000);
        } catch (final FractionConversionException e1) {
            try {
                h = new BigFraction(hDouble, 1.0e-10, 10000);
            } catch (final FractionConversionException e2) {
                h = new BigFraction(hDouble, 1.0e-5, 10000);
            }
        }
        final BigFraction[][] Hdata = new BigFraction[m][m];

        /*
         * Start by filling everything with either 0 or 1.
         */
        for (int i = 0; i < m; ++i) {
            for (int j = 0; j < m; ++j) {
                if (i - j + 1 < 0) {
                    Hdata[i][j] = BigFraction.ZERO;
                } else {
                    Hdata[i][j] = BigFraction.ONE;
                }
            }
        }

        /*
         * Setting up power-array to avoid calculating the same value twice: hPowers[0] = h^1 ...
         * hPowers[m-1] = h^m
         */
        final BigFraction[] hPowers = new BigFraction[m];
        hPowers[0] = h;
        for (int i = 1; i < m; ++i) {
            hPowers[i] = h.multiply(hPowers[i - 1]);
        }

        /*
         * The first column and the last row have special values (each the reverse of the other).
         */
        for (int i = 0; i < m; ++i) {
            Hdata[i][0] = Hdata[i][0].subtract(hPowers[i]);
            Hdata[m - 1][i] = Hdata[m - 1][i].subtract(hPowers[m - i - 1]);
        }

        /*
         * [1] states: "For 1/2 < h < 1 the bottom left element of the matrix should be (1 - 2*h^m +
         * (2h - 1)^m )/m!" Since 0 <= h < 1, it is sufficient to check whether h > 1/2:
         */
        if (h.compareTo(BigFraction.ONE_HALF) == 1) {
            Hdata[m - 1][0] = Hdata[m - 1][0].add(h.multiply(2).subtract(1).pow(m));
        }

        /*
         * Aside from the first column and last row, the (i, j)-th element is 1/(i - j + 1)! if i -
         * j + 1 >= 0, else 0. The 1's and 0's are already in place, so the entries holding 1 only
         * need to be divided by (i - j + 1)!. There is no need to calculate (i - j + 1)! and then
         * divide - dividing in small steps avoids overflow. Note that i - j + 1 > 0 <=> i + 1 > j,
         * so j only needs to run up to i rather than all the way to m - 1. Also note that g starts
         * at 2 because dividing by 1 is unnecessary.
         */
        for (int i = 0; i < m; ++i) {
            for (int j = 0; j < i + 1; ++j) {
                if (i - j + 1 > 0) {
                    for (int g = 2; g <= i - j + 1; ++g) {
                        Hdata[i][j] = Hdata[i][j].divide(g);
                    }
                }
            }
        }
        return new Array2DRowFieldMatrix<BigFraction>(BigFractionField.getInstance(), Hdata);
    }

    /***
     * Creates {@code H} of size {@code m x m} as described in [1] (see above)
     * using double-precision.
     *
     * @param d statistic
     * @param n sample size
     * @return H matrix
     * @throws NumberIsTooLargeException if fractional part is greater than or equal to 1
     */
    private RealMatrix createRoundedH(double d, int n)
        throws NumberIsTooLargeException {

        final int k = (int) Math.ceil(n * d);
        final int m = 2 * k - 1;
        final double h = k - n * d;
        if (h >= 1) {
            throw new NumberIsTooLargeException(h, 1.0, false);
        }
        final double[][] Hdata = new double[m][m];

        /*
         * Start by filling everything with either 0 or 1.
         */
        for (int i = 0; i < m; ++i) {
            for (int j = 0; j < m; ++j) {
                if (i - j + 1 < 0) {
                    Hdata[i][j] = 0;
                } else {
                    Hdata[i][j] = 1;
                }
            }
        }

        /*
         * Setting up power-array to avoid calculating the same value twice: hPowers[0] = h^1 ...
         * hPowers[m-1] = h^m
         */
        final double[] hPowers = new double[m];
        hPowers[0] = h;
        for (int i = 1; i < m; ++i) {
            hPowers[i] = h * hPowers[i - 1];
        }

        /*
         * The first column and the last row have special values (each the reverse of the other).
         */
        for (int i = 0; i < m; ++i) {
            Hdata[i][0] = Hdata[i][0] - hPowers[i];
            Hdata[m - 1][i] -= hPowers[m - i - 1];
        }

        /*
         * [1] states: "For 1/2 < h < 1 the bottom left element of the matrix should be (1 - 2*h^m +
         * (2h - 1)^m )/m!" Since 0 <= h < 1, it is sufficient to check whether h > 1/2:
         */
        if (Double.compare(h, 0.5) > 0) {
            Hdata[m - 1][0] += FastMath.pow(2 * h - 1, m);
        }

        /*
         * Aside from the first column and last row, the (i, j)-th element is 1/(i - j + 1)! if i -
         * j + 1 >= 0, else 0. The 1's and 0's are already in place, so the entries holding 1 only
         * need to be divided by (i - j + 1)!. There is no need to calculate (i - j + 1)! and then
         * divide - dividing in small steps avoids overflow. Note that i - j + 1 > 0 <=> i + 1 > j,
         * so j only needs to run up to i rather than all the way to m - 1. Also note that g starts
         * at 2 because dividing by 1 is unnecessary.
         */
        for (int i = 0; i < m; ++i) {
            for (int j = 0; j < i + 1; ++j) {
                if (i - j + 1 > 0) {
                    for (int g = 2; g <= i - j + 1; ++g) {
                        Hdata[i][j] /= g;
                    }
                }
            }
        }
        return MatrixUtils.createRealMatrix(Hdata);
    }

    /**
     * Verifies that {@code array} has length at least 2.
     *
     * @param array array to test
     * @throws NullArgumentException if array is null
     * @throws InsufficientDataException if array is too short
     */
    private void checkArray(double[] array) {
        if (array == null) {
            throw new NullArgumentException(LocalizedFormats.NULL_NOT_ALLOWED);
        }
        if (array.length < 2) {
            throw new InsufficientDataException(LocalizedFormats.INSUFFICIENT_OBSERVED_POINTS_IN_SAMPLE, array.length,
                                                2);
        }
    }

    /**
     * Computes \( 1 + 2 \sum_{i=1}^\infty (-1)^i e^{-2 i^2 t^2} \) stopping when successive partial
     * sums are within {@code tolerance} of one another, or when {@code maxIterations} partial sums
     * have been computed. If the sum does not converge before {@code maxIterations} iterations, a
     * {@link TooManyIterationsException} is thrown.
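     * <p>
     * For reference, \(K(t) = 1 + 2 \sum_{i=1}^\infty (-1)^i e^{-2 i^2 t^2}\) is the cdf of the
     * Kolmogorov distribution; for example, {@code ksSum(1.0, 1e-20, 100000)} is approximately
     * {@code 0.73}.
     * </p>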
     *
     * @param t argument
     * @param tolerance Cauchy criterion for partial sums
     * @param maxIterations maximum number of partial sums to compute
     * @return Kolmogorov sum evaluated at t
     * @throws TooManyIterationsException if the series does not converge
     */
    public double ksSum(double t, double tolerance, int maxIterations) {
        // TODO: for small t (say less than 1), the alternative expansion in part 3 of [1]
        // from class javadoc should be used.
        final double x = -2 * t * t;
        int sign = -1;
        long i = 1;
        double partialSum = 0.5d;
        double delta = 1;
        while (delta > tolerance && i < maxIterations) {
            delta = FastMath.exp(x * i * i);
            partialSum += sign * delta;
            sign *= -1;
            i++;
        }
        if (i == maxIterations) {
            throw new TooManyIterationsException(maxIterations);
        }
        return partialSum * 2;
    }

    /**
     * Computes \(P(D_{n,m} > d)\) if {@code strict} is {@code true}; otherwise \(P(D_{n,m} \ge
     * d)\), where \(D_{n,m}\) is the 2-sample Kolmogorov-Smirnov statistic. See
     * {@link #kolmogorovSmirnovStatistic(double[], double[])} for the definition of \(D_{n,m}\).
     * <p>
     * The returned probability is exact, obtained by enumerating all partitions of {@code m + n}
     * into {@code m} and {@code n} sets, computing \(D_{n,m}\) for each partition and counting the
     * number of partitions that yield \(D_{n,m}\) values exceeding (resp. greater than or equal to)
     * {@code d}.
     * </p>
     * <p>
     * <strong>USAGE NOTE</strong>: Since this method enumerates all \({m+n} \choose {n}\)
     * combinations, it is very slow if called for large {@code m, n}. For this reason,
     * {@link #kolmogorovSmirnovTest(double[], double[])} uses this only for {@code m * n < }
     * {@value #SMALL_SAMPLE_PRODUCT}.
     * </p>
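     * <p>
     * A typical call (with illustrative sample values) computes the statistic first and then its
     * exact p-value:
     * </p>
     * <pre>{@code
     * KolmogorovSmirnovTest test = new KolmogorovSmirnovTest();
     * double[] x = {1.0, 2.0, 3.0, 4.0};
     * double[] y = {2.5, 4.5, 6.5, 8.5};
     * double d = test.kolmogorovSmirnovStatistic(x, y);
     * double p = test.exactP(d, x.length, y.length, true);
     * }</pre>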
     *
     * @param d D-statistic value
     * @param n first sample size
     * @param m second sample size
     * @param strict whether or not the probability to compute is expressed as a strict inequality
     * @return probability that a randomly selected m-n partition of m + n generates \(D_{n,m}\)
     *         greater than (resp. greater than or equal to) {@code d}
     */
    public double exactP(double d, int n, int m, boolean strict) {
        Iterator<int[]> combinationsIterator = CombinatoricsUtils.combinationsIterator(n + m, n);
        long tail = 0;
        final double[] nSet = new double[n];
        final double[] mSet = new double[m];
        while (combinationsIterator.hasNext()) {
            // Generate an n-set
            final int[] nSetI = combinationsIterator.next();
            // Copy the n-set to nSet and its complement to mSet
            int j = 0;
            int k = 0;
            for (int i = 0; i < n + m; i++) {
                if (j < n && nSetI[j] == i) {
                    nSet[j++] = i;
                } else {
                    mSet[k++] = i;
                }
            }
            final double curD = kolmogorovSmirnovStatistic(nSet, mSet);
            if (curD > d) {
                tail++;
            } else if (curD == d && !strict) {
                tail++;
            }
        }
        return (double) tail / (double) CombinatoricsUtils.binomialCoefficient(n + m, n);
    }

    /**
     * Uses the Kolmogorov-Smirnov distribution to approximate \(P(D_{n,m} > d)\) where \(D_{n,m}\)
     * is the 2-sample Kolmogorov-Smirnov statistic. See
     * {@link #kolmogorovSmirnovStatistic(double[], double[])} for the definition of \(D_{n,m}\).
     * <p>
     * Specifically, what is returned is \(1 - k(d \sqrt{mn / (m + n)})\) where \(k(t) = 1 + 2
     * \sum_{i=1}^\infty (-1)^i e^{-2 i^2 t^2}\). See {@link #ksSum(double, double, int)} for
     * details on how convergence of the sum is determined. This implementation passes {@code ksSum}
     * {@value #KS_SUM_CAUCHY_CRITERION} as {@code tolerance} and
     * {@value #MAXIMUM_PARTIAL_SUM_COUNT} as {@code maxIterations}.
     * </p>
     *
     * @param d D-statistic value
     * @param n first sample size
     * @param m second sample size
     * @return approximate probability that a randomly selected m-n partition of m + n generates
     *         \(D_{n,m}\) greater than {@code d}
     */
    public double approximateP(double d, int n, int m) {
        final double dm = m;
        final double dn = n;
        return 1 - ksSum(d * FastMath.sqrt((dm * dn) / (dm + dn)), KS_SUM_CAUCHY_CRITERION, MAXIMUM_PARTIAL_SUM_COUNT);
    }

    /**
     * Uses Monte Carlo simulation to approximate \(P(D_{n,m} > d)\) where \(D_{n,m}\) is the
     * 2-sample Kolmogorov-Smirnov statistic. See
     * {@link #kolmogorovSmirnovStatistic(double[], double[])} for the definition of \(D_{n,m}\).
     * <p>
     * The simulation generates {@code iterations} random partitions of {@code m + n} into an
     * {@code n} set and an {@code m} set, computing \(D_{n,m}\) for each partition and returning
     * the proportion of values that are greater than {@code d}, or greater than or equal to
     * {@code d} if {@code strict} is {@code false}.
     * </p>
     *
     * @param d D-statistic value
     * @param n first sample size
     * @param m second sample size
     * @param strict whether or not the probability to compute is expressed as a strict inequality
     * @param iterations number of random partitions to generate
     * @return proportion of randomly generated m-n partitions of m + n that result in \(D_{n,m}\)
     *         greater than (resp. greater than or equal to) {@code d}
     */
    public double monteCarloP(double d, int n, int m, boolean strict, int iterations) {
        final int[] nPlusMSet = MathArrays.natural(m + n);
        final double[] nSet = new double[n];
        final double[] mSet = new double[m];
        int tail = 0;
        for (int i = 0; i < iterations; i++) {
            copyPartition(nSet, mSet, nPlusMSet, n, m);
            final double curD = kolmogorovSmirnovStatistic(nSet, mSet);
            if (curD > d) {
                tail++;
            } else if (curD == d && !strict) {
                tail++;
            }
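            // Shuffle, then sort the first n entries so that they form a valid ascending n-set
            // index array for copyPartition on the next iteration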
            MathArrays.shuffle(nPlusMSet, rng);
            Arrays.sort(nPlusMSet, 0, n);
        }
        return (double) tail / iterations;
    }

    /**
     * Copies the first {@code n} elements of {@code nSetI} into {@code nSet} and its complement
     * relative to {@code m + n} into {@code mSet}. For example, if {@code m = 3}, {@code n = 3} and
     * {@code nSetI = [1,4,5,2,3,0]} then after this method returns, we will have
     * {@code nSet = [1,4,5], mSet = [0,2,3]}.
     * <p>
     * <strong>Precondition:</strong> The first {@code n} elements of {@code nSetI} must be sorted
     * in ascending order.
     * </p>
     *
     * @param nSet array to fill with the first {@code n} elements of {@code nSetI}
     * @param mSet array to fill with the {@code m} complementary elements of {@code nSet} relative
     *        to {@code m + n}
     * @param nSetI array whose first {@code n} elements specify the members of {@code nSet}
     * @param n number of elements in the first output array
     * @param m number of elements in the second output array
     */
    private void copyPartition(double[] nSet, double[] mSet, int[] nSetI, int n, int m) {
        int j = 0;
        int k = 0;
        for (int i = 0; i < n + m; i++) {
            if (j < n && nSetI[j] == i) {
                nSet[j++] = i;
            } else {
                mSet[k++] = i;
            }
        }
    }
}