1 /*
2 * Licensed to the Apache Software Foundation (ASF) under one or more
3 * contributor license agreements. See the NOTICE file distributed with
4 * this work for additional information regarding copyright ownership.
5 * The ASF licenses this file to You under the Apache License, Version 2.0
6 * (the "License"); you may not use this file except in compliance with
7 * the License. You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18 package org.apache.commons.statistics.inference;
19
20 import java.util.Arrays;
21 import org.apache.commons.numbers.combinatorics.Factorial;
22 import org.apache.commons.numbers.combinatorics.LogFactorial;
23 import org.apache.commons.numbers.core.DD;
24 import org.apache.commons.numbers.core.DDMath;
25 import org.apache.commons.numbers.core.Sum;
26 import org.apache.commons.statistics.inference.SquareMatrixSupport.RealSquareMatrix;
27
28 /**
29 * Computes the complementary probability for the one-sample Kolmogorov-Smirnov distribution.
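 *
 * <p>Illustrative usage from within this package (the class is package-private; the
 * values are hypothetical):
 * <pre>{@code
 * // Two-sided: P[D_n >= x] for sample size n = 100 and statistic x = 0.12
 * double p2 = KolmogorovSmirnovDistribution.Two.sf(0.12, 100);
 * // One-sided: P[D_n^+ >= x]
 * double p1 = KolmogorovSmirnovDistribution.One.sf(0.12, 100);
 * }</pre>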
30 *
31 * @since 1.1
32 */
33 final class KolmogorovSmirnovDistribution {
34 /** pi^2. */
35 private static final double PI2 = 9.8696044010893586188344909;
36 /** sqrt(2*pi). */
37 private static final double ROOT_TWO_PI = 2.5066282746310005024157652;
38 /** Value of x when the KS sum is 0.5. */
39 private static final double X_KS_HALF = 0.8275735551899077;
40 /** Value of x when the KS sum is 1.0. */
41 private static final double X_KS_ONE = 0.1754243674345323;
42 /** Machine epsilon, 2^-52. */
43 private static final double EPS = 0x1.0p-52;
44
45 /** No instances. */
46 private KolmogorovSmirnovDistribution() {}
47
48 /**
49 * Computes the complementary probability {@code P[D_n >= x]}, or survival function (SF),
50 * for the two-sided one-sample Kolmogorov-Smirnov distribution.
51 *
52 * <pre>
53 * D_n = sup_x |F(x) - CDF_n(x)|
54 * </pre>
55 *
56 * <p>where {@code n} is the sample size; {@code CDF_n(x)} is an empirical
57 * cumulative distribution function; and {@code F(x)} is the expected
58 * distribution.
59 *
60 * <p>
61 * References:
62 * <ol>
63 * <li>Simard, R., & L’Ecuyer, P. (2011).
64 * <a href="https://doi.org/10.18637/jss.v039.i11">Computing the Two-Sided Kolmogorov-Smirnov Distribution.</a>
65 * Journal of Statistical Software, 39(11), 1–18.
66 * <li>
67 * Marsaglia, G., Tsang, W. W., & Wang, J. (2003).
68 * <a href="https://doi.org/10.18637/jss.v008.i18">Evaluating Kolmogorov's Distribution.</a>
69 * Journal of Statistical Software, 8(18), 1–4.
70 * </ol>
71 *
72 * <p>Note that [2] contains an error in computing h, refer to <a
73 * href="https://issues.apache.org/jira/browse/MATH-437">MATH-437</a> for details.
74 *
75 * @since 1.1
76 */
77 static final class Two {
78 /** pi^2. */
79 private static final double PI2 = 9.8696044010893586188344909;
80 /** pi^4. */
81 private static final double PI4 = 97.409091034002437236440332;
82 /** pi^6. */
83 private static final double PI6 = 961.38919357530443703021944;
84 /** sqrt(2*pi). */
85 private static final double ROOT_TWO_PI = 2.5066282746310005024157652;
86 /** sqrt(pi/2). */
87 private static final double ROOT_HALF_PI = 1.2533141373155002512078826;
88 /** Threshold for Pelz-Good where the 1 - CDF == 1.
89 * Occurs when sqrt(2pi/z) exp(-pi^2 / (8 z^2)) is far below 2^-53.
90 * Threshold set at exp(-pi^2 / (8 z^2)) = 2^-80. */
91 private static final double LOG_PG_MIN = -55.451774444795625;
92 /** Factor 4a in the quadratic equation to solve max k: log(2^-52) * 8. */
93 private static final double FOUR_A = -288.3492271129372;
94 /** The scaling threshold in the MTW algorithm. Marsaglia used 1e-140. This uses 2^-400 ~ 3.87e-121. */
95 private static final double MTW_SCALE_THRESHOLD = 0x1.0p-400;
96 /** The up-scaling factor in the MTW algorithm. Marsaglia used 1e140. This uses 2^400 ~ 2.58e120. */
97 private static final double MTW_UP_SCALE = 0x1.0p400;
98 /** The power-of-2 of the up-scaling factor in the MTW algorithm, n if the up-scale factor is 2^n. */
99 private static final int MTW_UP_SCALE_POWER = 400;
100 /** The scaling threshold in the Pomeranz algorithm. */
101 private static final double P_DOWN_SCALE = 0x1.0p-128;
102 /** The up-scaling factor in the Pomeranz algorithm. */
103 private static final double P_UP_SCALE = 0x1.0p128;
104 /** The power-of-2 of the up-scaling factor in the Pomeranz algorithm, n if the up-scale factor is 2^n. */
105 private static final int P_SCALE_POWER = 128;
106 /** Maximum finite factorial. */
107 private static final int MAX_FACTORIAL = 170;
108 /** Approximate threshold for ln(MIN_NORMAL). */
109 private static final int LOG_MIN_NORMAL = -708;
110 /** 140, n threshold for small n for the sf computation.*/
111 private static final int N140 = 140;
112 /** 0.754693, nxx threshold for small n Durbin matrix sf computation. */
113 private static final double NXX_0_754693 = 0.754693;
114 /** 4, nxx threshold for small n Pomeranz sf computation. */
115 private static final int NXX_4 = 4;
116 /** 2.2, nxx threshold for large n Miller approximation sf computation. */
117 private static final double NXX_2_2 = 2.2;
118 /** 100000, n threshold for large n Durbin matrix sf computation. */
119 private static final int N_100000 = 100000;
120 /** 1.4, nx^(3/2) threshold for large n Durbin matrix sf computation. */
121 private static final double NX32_1_4 = 1.4;
122 /** 1/2. */
123 private static final double HALF = 0.5;
124
125 /** No instances. */
126 private Two() {}
127
128 /**
129 * Calculates complementary probability {@code P[D_n >= x]} for the two-sided
130 * one-sample Kolmogorov-Smirnov distribution.
131 *
132 * @param x Statistic.
133 * @param n Sample size (assumed to be positive).
134 * @return \(P(D_n ≥ x)\)
135 */
136 static double sf(double x, int n) {
137 final double p = sfExact(x, n);
138 if (p >= 0) {
139 return p;
140 }
141
142 // The computation is divided based on the x-n plane.
143 final double nxx = n * x * x;
144 if (n <= N140) {
145 // 10 decimal digits of precision
146
147 // nx^2 < 4 use 1 - CDF(x).
148 if (nxx < NXX_0_754693) {
149 // Durbin matrix (MTW)
150 return 1 - durbinMTW(x, n);
151 }
152 if (nxx < NXX_4) {
153 // Pomeranz
154 return 1 - pomeranz(x, n);
155 }
156 // Miller approximation: 2 * one-sided D+ computation
157 return 2 * One.sf(x, n);
158 }
159 // n > 140
160 if (nxx >= NXX_2_2) {
161 // 6 decimal digits of precision
162
163 // Miller approximation: 2 * one-sided D+ computation
164 return 2 * One.sf(x, n);
165 }
166 // nx^2 < 2.2 use 1 - CDF(x).
167 // 5 decimal digits of precision (for n < 200000)
168
169 // nx^1.5 <= 1.4
170 if (n <= N_100000 && n * Math.pow(x, 1.5) < NX32_1_4) {
171 // Durbin matrix (MTW)
172 return 1 - durbinMTW(x, n);
173 }
174 // Pelz-Good, algorithm modified to sum negative terms from 1 for the SF.
175 // (precision increases with n)
176 return pelzGood(x, n);
177 }
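
        // Illustrative branch selection (hypothetical values, checked against the thresholds
        // above): n = 100, x = 0.08 gives nxx = 0.64 < 0.754693 so durbinMTW is used;
        // n = 100, x = 0.15 gives nxx = 2.25 in [0.754693, 4) so pomeranz is used;
        // n = 100, x = 0.25 gives nxx = 6.25 >= 4 so the Miller approximation is used.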
178
179 /**
180 * Calculates exact cases for the complementary probability
181 * {@code P[D_n >= x]} the two-sided one-sample Kolmogorov-Smirnov distribution.
182 *
183 * <p>Exact cases handle x not in [0, 1]. It is assumed n is positive.
184 *
185 * @param x Statistic.
186 * @param n Sample size (assumed to be positive).
187 * @return \(P(D_n ≥ x)\)
188 */
189 private static double sfExact(double x, int n) {
190 if (n * x * x >= 370 || x >= 1) {
191 // p would underflow, or x is out of the domain
192 return 0;
193 }
194 final double nx = x * n;
195 if (nx <= 1) {
196 // x <= 1/(2n)
197 if (nx <= HALF) {
198 // Also detects x <= 0 (iff n is positive)
199 return 1;
200 }
201 if (n == 1) {
202 // Simplification of:
203 // 1 - (n! (2x - 1/n)^n) == 1 - (2x - 1)
204 return 2.0 - 2.0 * x;
205 }
206 // 1/(2n) < x <= 1/n
207 // 1 - (n! (2x - 1/n)^n)
208 final double f = 2 * x - 1.0 / n;
209 // Switch threshold where (2x - 1/n)^n is sub-normal
210 // Max factorial threshold is n=170
211 final double logf = Math.log(f);
212 if (n <= MAX_FACTORIAL && n * logf > LOG_MIN_NORMAL) {
213 return 1 - Factorial.doubleValue(n) * Math.pow(f, n);
214 }
215 return -Math.expm1(LogFactorial.create().value(n) + n * logf);
216 }
217 // 1 - 1/n <= x < 1
218 if (n - 1 <= nx) {
219 // 2 * (1-x)^n
220 return 2 * Math.pow(1 - x, n);
221 }
222
223 return -1;
224 }
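
        // Worked examples of the exact cases (hypothetical values): n = 2, x = 0.75 has
        // nx = 1.5 >= n - 1 so p = 2 * (1 - 0.75)^2 = 0.125; n = 4, x = 0.2 has
        // nx = 0.8 in (1/2, 1] so p = 1 - 4! * (2 * 0.2 - 1/4)^4 = 1 - 24 * 0.15^4 ~ 0.98785.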
225
226 /**
227 * Computes the Durbin matrix approximation for {@code P(D_n < d)} using the method
228 * of Marsaglia, Tsang and Wang (2003).
229 *
230 * @param x Statistic.
231 * @param n Sample size (assumed to be positive).
232 * @return \(P(D_n < x)\)
233 */
234 private static double durbinMTW(double x, int n) {
235 final int k = (int) Math.ceil(n * x);
236 final RealSquareMatrix h = createH(x, n).power(n);
237
238 // Use scaling as per Marsaglia's code to avoid underflow.
239 double pFrac = h.get(k - 1, k - 1);
240 int scale = h.scale();
241 // Omit i == n as this is a no-op
242 for (int i = 1; i < n; ++i) {
243 pFrac *= (double) i / n;
244 if (pFrac < MTW_SCALE_THRESHOLD) {
245 pFrac *= MTW_UP_SCALE;
246 scale -= MTW_UP_SCALE_POWER;
247 }
248 }
249 // Return the CDF
250 return clipProbability(Math.scalb(pFrac, scale));
251 }
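
        // Note on the scaled representation (illustrative): the probability is carried as
        // pFrac * 2^scale, e.g. pFrac = 0.75 with scale = -60 represents Math.scalb(0.75, -60).
        // Multiplying pFrac by 2^400 while subtracting 400 from scale leaves the value unchanged.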
252
        /**
254 * Creates {@code H} of size {@code m x m} as described in [1].
255 *
256 * @param x Statistic.
257 * @param n Sample size (assumed to be positive).
258 * @return H matrix
259 */
260 private static RealSquareMatrix createH(double x, int n) {
261 // MATH-437:
262 // This is *not* (int) (n * x) + 1.
263 // This is only ever called when 1/n < x < 1 - 1/n.
264 // => h cannot be >= 1 when using ceil. h can be 0 if nx is integral.
265 final int k = (int) Math.ceil(n * x);
266 final double h = k - n * x;
267
268 final int m = 2 * k - 1;
269 final double[] data = new double[m * m];
270 // Start by filling everything with either 0 or 1.
271 for (int i = 0; i < m; ++i) {
272 // h[i][j] = i - j + 1 < 0 ? 0 : 1
273 // => h[i][j<=i+1] = 1
274 final int jend = Math.min(m - 1, i + 1);
275 for (int j = i * m; j <= i * m + jend; j++) {
276 data[j] = 1;
277 }
278 }
279
280 // Setting up power-array to avoid calculating the same value twice:
281 // hp[0] = h^1, ..., hp[m-1] = h^m
282 final double[] hp = new double[m];
283 hp[0] = h;
284 for (int i = 1; i < m; ++i) {
                // Avoid the compound rounding error of the recursion h * hp[i - 1] by using
                // Math.pow, which is within 1 ulp of the exact result
287 hp[i] = Math.pow(h, i + 1);
288 }
289
            // The first column and last row have special values (each the reverse of the other).
291 for (int i = 0; i < m; ++i) {
292 data[i * m] -= hp[i];
293 data[(m - 1) * m + i] -= hp[m - i - 1];
294 }
295
296 // [1] states: "For 1/2 < h < 1 the bottom left element of the matrix should be
297 // (1 - 2*h^m + (2h - 1)^m )/m!"
298 if (2 * h - 1 > 0) {
299 data[(m - 1) * m] += Math.pow(2 * h - 1, m);
300 }
301
302 // Aside from the first column and last row, the (i, j)-th element is 1/(i - j + 1)! if i -
303 // j + 1 >= 0, else 0. 1's and 0's are already put, so only division with (i - j + 1)! is
            // needed in the elements that have 1's. Note that i - j + 1 > 0 <=> j < i + 1, so the
            // inner loop need only run j up to i instead of all the way to m.
            // Also note that we can use pre-computed factorials given
306 // the limits where this method is called.
307 for (int i = 0; i < m; ++i) {
308 final int im = i * m;
309 for (int j = 0; j < i + 1; ++j) {
310 // Here (i - j + 1 > 0)
311 // Divide by (i - j + 1)!
312 // Note: This method is used when:
313 // n <= 140; nxx < 0.754693
314 // n <= 100000; n x^1.5 < 1.4
315 // max m ~ 2nx ~ (1.4/1e5)^(2/3) * 2e5 = 116
316 // Use a tabulated factorial
317 data[im + j] /= Factorial.doubleValue(i - j + 1);
318 }
319 }
320 return SquareMatrixSupport.create(m, data);
321 }
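
        // Illustrative example (not used by the code): for k = 2 the matrix has m = 3 and,
        // writing c = (2h - 1)^3 when h > 1/2 (else c = 0), the method builds
        //         [ (1 - h)/1!          1             0          ]
        //     H = [ (1 - h^2)/2!        1/1!          1          ]
        //         [ (1 - 2h^3 + c)/3!   (1 - h^2)/2!  (1 - h)/1! ]
        // including the adjusted bottom-left element noted above.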
322
323 /**
324 * Computes the Pomeranz approximation for {@code P(D_n < d)} using the method
325 * as described in Simard and L’Ecuyer (2011).
326 *
327 * <p>Modifications have been made to the scaling of the intermediate values.
328 *
329 * @param x Statistic.
330 * @param n Sample size (assumed to be positive).
331 * @return \(P(D_n < x)\)
332 */
333 private static double pomeranz(double x, int n) {
334 final double t = n * x;
335 // Store floor(A-t) and ceil(A+t). This does not require computing A.
336 final int[] amt = new int[2 * n + 3];
337 final int[] apt = new int[2 * n + 3];
338 computeA(n, t, amt, apt);
339 // Precompute ((A[i] - A[i-1])/n)^(j-k) / (j-k)!
340 // A[i] - A[i-1] has 4 possible values (based on multiples of A2)
341 // A1 - A0 = 0 - 0 = 0
342 // A2 - A1 = A2 - 0 = A2
343 // A3 - A2 = (1 - A2) - A2 = 1 - 2 * A2
344 // A4 - A3 = (A2 + 1) - (1 - A2) = 2 * A2
345 // A5 - A4 = (1 - A2 + 1) - (A2 + 1) = 1 - 2 * A2
346 // A6 - A5 = (A2 + 1 + 1) - (1 - A2 + 1) = 2 * A2
347 // A7 - A6 = (1 - A2 + 1 + 1) - (A2 + 1 + 1) = 1 - 2 * A2
348 // A8 - A7 = (A2 + 1 + 1 + 1) - (1 - A2 + 1 + 1) = 2 * A2
349 // ...
350 // Ai - Ai-1 = ((i-1)/2 - A2) - (A2 + (i-2)/2) = 1 - 2 * A2 ; i = odd
351 // Ai - Ai-1 = (A2 + (i-1)/2) - ((i-2)/2 - A2) = 2 * A2 ; i = even
352 // ...
353 // A2n+2 - A2n+1 = n - (n - A2) = A2
354
355 // ap[][j - k] = ((A[i] - A[i-1])/n)^(j-k) / (j-k)!
356 // for each case: A[i] - A[i-1] in [A2, 1 - 2 * A2, 2 * A2]
357 // Ignore case 0 as this is not used. Factors are ap[0] = 1, else 0.
358 // If A2==0.5 then this is computed as a no-op due to multiplication by zero.
359 final int n2 = n + 2;
360 final double[][] ap = new double[3][n2];
361 final double a2 = Math.min(t - Math.floor(t), Math.ceil(t) - t);
362 computeAP(ap[0], a2 / n);
363 computeAP(ap[1], (1 - 2 * a2) / n);
364 computeAP(ap[2], (2 * a2) / n);
365
366 // Current and previous V
367 double[] vc = new double[n2];
368 double[] vp = new double[n2];
369 // Count of re-scaling
370 int scale = 0;
371
372 // V_1,1 = 1
373 vc[1] = 1;
374
375 for (int i = 2; i <= 2 * n + 2; i++) {
376 final double[] v = vc;
377 vc = vp;
378 vp = v;
                // Clearing vc is useful for following the current values of vc
                // as only [jmin, jmax] is written below.
380 Arrays.fill(vc, 0);
381
382 // Select (A[i] - A[i-1]) factor
383 final double[] p;
384 if (i == 2 || i == 2 * n + 2) {
385 // First or last
386 p = ap[0];
387 } else {
                    // odd: [1] 1 - 2 * A2
389 // even: [2] 2 * A2
390 p = ap[2 - (i & 1)];
391 }
392
393 // Set limits.
394 // j is the ultimate bound for k and should be in [1, n+1]
395 final int jmin = Math.max(1, amt[i] + 2);
396 final int jmax = Math.min(n + 1, apt[i]);
397 final int k1 = Math.max(1, amt[i - 1] + 2);
398
399 // All numbers will reduce in size.
400 // Maintain the largest close to 1.0.
401 // This is a change from Simard and L’Ecuyer which scaled based on the smallest.
402 double max = 0;
403 for (int j = jmin; j <= jmax; j++) {
404 final int k2 = Math.min(j, apt[i - 1]);
405 // Accurate sum.
406 // vp[high] is smaller
407 // p[high] is smaller
408 // Sum ascending has smaller products first.
409 double sum = 0;
410 for (int k = k1; k <= k2; k++) {
411 sum += vp[k] * p[j - k];
412 }
413 vc[j] = sum;
414 if (max < sum) {
415 // Note: max *may* always be the first sum: vc[jmin]
416 max = sum;
417 }
418 }
419
420 // Rescale if too small
421 if (max < P_DOWN_SCALE) {
422 // Only scale in current range from V
423 for (int j = jmin; j <= jmax; j++) {
424 vc[j] *= P_UP_SCALE;
425 }
426 scale -= P_SCALE_POWER;
427 }
428 }
429
430 // F_n(x) = n! V_{2n+2,n+1}
431 double v = vc[n + 1];
432
            // This method is used when n <= 140 where all n! are finite.
434 // v is below 1 so we can directly compute the result without using logs.
435 v *= Factorial.doubleValue(n);
436 // Return the CDF (rescaling as required)
437 return Math.scalb(v, scale);
438 }
439
440 /**
441 * Compute the power factors.
442 * <pre>
443 * factor[j] = z^j / j!
444 * </pre>
445 *
446 * @param p Power factors.
447 * @param z (A[i] - A[i-1]) / n
448 */
449 private static void computeAP(double[] p, double z) {
450 // Note z^0 / 0! = 1 for any z
451 p[0] = 1;
452 p[1] = z;
453 for (int j = 2; j < p.length; j++) {
454 // Only used when n <= 140 and can use the tabulated values of n!
455 // This avoids using recursion: p[j] = z * p[j-1] / j.
456 // Direct computation more closely agrees with the recursion using BigDecimal
457 // with 200 digits of precision.
458 p[j] = Math.pow(z, j) / Factorial.doubleValue(j);
459 }
460 }
461
462 /**
463 * Compute the factors floor(A-t) and ceil(A+t).
464 * Arrays should have length 2n+3.
465 *
466 * @param n Sample size.
467 * @param t Statistic x multiplied by n.
468 * @param amt floor(A-t)
469 * @param apt ceil(A+t)
470 */
471 // package-private for testing
472 static void computeA(int n, double t, int[] amt, int[] apt) {
473 final int l = (int) Math.floor(t);
474 final double f = t - l;
475 final int limit = 2 * n + 2;
476
477 // 3-cases
478 if (f > HALF) {
479 // Case (iii): 1/2 < f < 1
480 // for i = 1, 2, ...
481 for (int j = 2; j <= limit; j += 2) {
482 final int i = j >>> 1;
483 amt[j] = i - 2 - l;
484 apt[j] = i + l;
485 }
486 // for i = 0, 1, 2, ...
487 for (int j = 1; j <= limit; j += 2) {
488 final int i = j >>> 1;
489 amt[j] = i - 1 - l;
490 apt[j] = i + 1 + l;
491 }
492 } else if (f > 0) {
493 // Case (ii): 0 < f <= 1/2
494 amt[1] = -l - 1;
495 apt[1] = l + 1;
496 // for i = 1, 2, ...
497 for (int j = 2; j <= limit; j++) {
498 final int i = j >>> 1;
499 amt[j] = i - 1 - l;
500 apt[j] = i + l;
501 }
502 } else {
503 // Case (i): f = 0
504 // for i = 1, 2, ...
505 for (int j = 2; j <= limit; j += 2) {
506 final int i = j >>> 1;
507 amt[j] = i - 1 - l;
508 apt[j] = i - 1 + l;
509 }
510 // for i = 0, 1, 2, ...
511 for (int j = 1; j <= limit; j += 2) {
512 final int i = j >>> 1;
513 amt[j] = i - l;
514 apt[j] = i + l;
515 }
516 }
517 }
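
        // Illustrative trace (hypothetical input): n = 2, t = 0.6 has l = 0 and f = 0.6
        // (case iii), giving amt[1..6] = {-1, -1, 0, 0, 1, 1} and apt[1..6] = {1, 1, 2, 2, 3, 3}.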
518
519 /**
520 * Computes the Pelz-Good approximation for {@code P(D_n >= d)} as described in
521 * Simard and L’Ecuyer (2011).
522 *
523 * <p>This has been modified to compute the complementary CDF by subtracting the
524 * terms k0, k1, k2, k3 from 1. For use in computing the CDF the method should
525 * be updated to return the sum of k0 ... k3.
526 *
527 * @param x Statistic.
528 * @param n Sample size (assumed to be positive).
529 * @return \(P(D_n ≥ x)\)
530 * @throws ArithmeticException if the series does not converge
531 */
532 // package-private for testing
533 static double pelzGood(double x, int n) {
            // Change the variable to z = x * sqrt(n) since the approximation is for the
            // distribution of sqrt(n) D_n, i.e. D_n evaluated at z / sqrt(n).
535 final double z2 = x * x * n;
536
537 double lne = -PI2 / (8 * z2);
538 // Final result is ~ (1 - K0) ~ 1 - sqrt(2pi/z) exp(-pi^2 / (8 z^2))
539 // Do not compute when the exp value is far below eps.
540 if (lne < LOG_PG_MIN) {
541 // z ~ sqrt(-pi^2/(8*min)) ~ 0.1491
542 return 1;
543 }
544 // Note that summing K1, ..., K3 over all k with factor
545 // (k + 1/2) is equivalent to summing over all k with
546 // 2 (k - 1/2) / 2 == (2k - 1) / 2
547 // This is the form for K0.
548 // Compute all together over odd integers and divide factors
549 // of (k + 1/2)^b by 2^b.
550 double k0 = 0;
551 double k1 = 0;
552 double k2 = 0;
553 double k3 = 0;
554
555 final double rootN = Math.sqrt(n);
556 final double z = x * rootN;
557 final double z3 = z * z2;
558 final double z4 = z2 * z2;
559 final double z6 = Math.pow(z2, 3);
560 final double z7 = Math.pow(z2, 3.5);
561 final double z8 = Math.pow(z2, 4);
562 final double z10 = Math.pow(z2, 5);
563
564 final double a1 = PI2 / 4;
565
566 final double a2 = 6 * z6 + 2 * z4;
567 final double b2 = (PI2 * (2 * z4 - 5 * z2)) / 4;
568 final double c2 = (PI4 * (1 - 2 * z2)) / 16;
569
570 final double a3 = (PI6 * (5 - 30 * z2)) / 64;
571 final double b3 = (PI4 * (-60 * z2 + 212 * z4)) / 16;
572 final double c3 = (PI2 * (135 * z4 - 96 * z6)) / 4;
573 final double d3 = -(30 * z6 + 90 * z8);
574
575 // Iterate j=(2k - 1) for k=1, 2, ...
576 // Terms reduce in size. Stop when:
577 // exp(-pi^2 / 8z^2) * eps = exp((2k-1)^2 * -pi^2 / 8z^2)
578 // (2k-1)^2 = 1 - log(eps) * 8z^2 / pi^2
579 // 0 = k^2 - k + log(eps) * 2z^2 / pi^2
580 // Solve using quadratic equation and eps = ulp(1.0): 4a ~ -288
581 final int max = (int) Math.ceil((1 + Math.sqrt(1 - FOUR_A * z2 / PI2)) / 2);
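            // Illustrative check (hypothetical value): z = 1 gives
            // max = ceil((1 + sqrt(1 + 288.35 / 9.87)) / 2) = ceil(3.25) = 4 terms.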
582 // Sum smallest terms first
583 for (int k = max; k > 0; k--) {
584 final int j = 2 * k - 1;
585 // Create (2k-1)^2; (2k-1)^4; (2k-1)^6
586 final double j2 = (double) j * j;
587 final double j4 = Math.pow(j, 4);
588 final double j6 = Math.pow(j, 6);
589 // exp(-pi^2 * (2k-1)^2 / 8z^2)
590 final double e = Math.exp(lne * j2);
591 k0 += e;
592 k1 += (a1 * j2 - z2) * e;
593 k2 += (a2 + b2 * j2 + c2 * j4) * e;
594 k3 += (a3 * j6 + b3 * j4 + c3 * j2 + d3) * e;
595 }
596 k0 *= ROOT_TWO_PI / z;
597 // Factors are halved as the sum is for k in -inf to +inf
598 k1 *= ROOT_HALF_PI / (3 * z4);
599 k2 *= ROOT_HALF_PI / (36 * z7);
600 k3 *= ROOT_HALF_PI / (3240 * z10);
601
602 // Compute additional K2,K3 terms
603 double k2b = 0;
604 double k3b = 0;
605
606 // -pi^2 / (2z^2)
607 lne *= 4;
608
609 final double a3b = 3 * PI2 * z2;
610
611 // Iterate for j=1, 2, ...
612 // Note: Here max = sqrt(1 - FOUR_A z^2 / (4 pi^2)).
613 // This is marginally smaller so we reuse the same value.
614 for (int j = max; j > 0; j--) {
615 final double j2 = (double) j * j;
616 final double j4 = Math.pow(j, 4);
617 // exp(-pi^2 * k^2 / 2z^2)
618 final double e = Math.exp(lne * j2);
619 k2b += PI2 * j2 * e;
620 k3b += (-PI4 * j4 + a3b * j2) * e;
621 }
622 // Factors are halved as the sum is for k in -inf to +inf
623 k2b *= ROOT_HALF_PI / (18 * z3);
624 k3b *= ROOT_HALF_PI / (108 * z6);
625
626 // Series: K0(z) + K1(z)/n^0.5 + K2(z)/n + K3(z)/n^1.5 + O(1/n^2)
627 k1 /= rootN;
628 k2 /= n;
629 k3 /= n * rootN;
630 k2b /= n;
631 k3b /= n * rootN;
632
633 // Return (1 - CDF) with an extended precision sum in order of descending magnitude
634 return clipProbability(Sum.of(1, -k0, -k1, -k2, -k3, +k2b, -k3b).getAsDouble());
635 }
636 }
637
638 /**
639 * Computes the complementary probability {@code P[D_n^+ >= x]} for the one-sided
640 * one-sample Kolmogorov-Smirnov distribution.
641 *
642 * <pre>
643 * D_n^+ = sup_x {CDF_n(x) - F(x)}
644 * </pre>
645 *
646 * <p>where {@code n} is the sample size; {@code CDF_n(x)} is an empirical
647 * cumulative distribution function; and {@code F(x)} is the expected
648 * distribution. The computation uses Smirnov's stable formula:
649 *
650 * <pre>
     *                        floor(n(1-x))   (n)  ( j     )^(j-1)  (         j )^(n-j)
     * P[D_n^+ >= x] = x           Sum        ( )  ( - + x )        ( 1 - x - - )
     *                             j=0        (j)  ( n     )        (         n )
654 * </pre>
655 *
656 * <p>Computing using logs is not as accurate as direct multiplication when n is large.
657 * However the terms are very large and small. Multiplication uses a scaled representation
658 * with a separate exponent term to support the extreme range. Extended precision
659 * representation of the numbers reduces the error in the power terms. Details in
660 * van Mulbregt (2018).
661 *
662 * <p>
663 * References:
664 * <ol>
665 * <li>
666 * van Mulbregt, P. (2018).
667 * <a href="https://doi.org/10.48550/arxiv.1802.06966">Computing the Cumulative Distribution Function and Quantiles of the One-sided Kolmogorov-Smirnov Statistic</a>
668 * arxiv:1802.06966.
     * <li>Maag & Dicaire (1971).
670 * <a href="https://doi.org/10.1093/biomet/58.3.653">On Kolmogorov-Smirnov Type One-Sample Statistics</a>
671 * Biometrika 58.3 pp. 653–656.
672 * </ol>
673 *
674 * @since 1.1
675 */
676 static final class One {
        /** "Very large" n to use an asymptotic limiting form.
678 * [1] suggests 1e12 but this is reduced to avoid excess
679 * computation time. */
680 private static final int VERY_LARGE_N = 1000000;
        /** Maximum number of terms for the Smirnov-Dwass algorithm. */
682 private static final int SD_MAX_TERMS = 3;
683 /** Minimum sample size for the Smirnov-Dwass algorithm. */
684 private static final int SD_MIN_N = 8;
685 /** Number of bits of precision in the sum of terms Aj.
686 * This does not have to be the full 106 bits of a double-double as the final result
687 * is used as a double. The terms are represented as fractions with an exponent:
688 * <pre>
689 * Aj = 2^b * f
690 * f of sum(A) in [0.5, 1)
691 * f of Aj in [0.25, 2]
692 * </pre>
693 * <p>The terms can be added if their exponents overlap. The bits of precision must
694 * account for the extra range of the fractional part of Aj by 1 bit. Note that
695 * additional bits are added to this dynamically based on the number of terms. */
696 private static final int SUM_PRECISION_BITS = 53;
697 /** Number of bits of precision in the sum of terms Aj.
698 * For Smirnov-Dwass we use the full 106 bits of a double-double due to the summation
699 * of terms that cancel. Account for the extra range of the fractional part of Aj by 1 bit. */
700 private static final int SD_SUM_PRECISION_BITS = 107;
701 /** Proxy for the default choice of the scaled power function.
702 * The actual choice is based on the chosen algorithm. */
703 private static final ScaledPower POWER_DEFAULT = null;
704
705 /**
706 * Defines a scaled power function.
         * Package-private to allow the main sf method to be called directly in testing.
708 */
709 @FunctionalInterface
710 interface ScaledPower {
711 /**
712 * Compute the number {@code x} raised to the power {@code n}.
713 *
714 * <p>The value is returned as fractional {@code f} and integral
715 * {@code 2^exp} components.
716 * <pre>
717 * (x+xx)^n = (f+ff) * 2^exp
718 * </pre>
719 *
720 * @param x x.
721 * @param n Power.
722 * @param exp Result power of two scale factor (integral exponent).
723 * @return Fraction part.
724 * @see DD#frexp(int[])
725 * @see DD#pow(int, long[])
726 * @see DDMath#pow(DD, int, long[])
727 */
728 DD pow(DD x, int n, long[] exp);
729 }
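
        // Both DD::pow and DDMath::pow conform to this interface (see the sf method below).
        // A hypothetical snippet: given long[] exp = {0}, then
        //   ScaledPower p = DD::pow;
        //   DD f = p.pow(DD.ofSum(1, 0.5), 3, exp);
        // yields f.doubleValue() * 2^exp[0] == 1.5^3 == 3.375.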
730
731 /** No instances. */
732 private One() {}
733
734 /**
735 * Calculates complementary probability {@code P[D_n^+ >= x]}, or survival
736 * function (SF), for the one-sided one-sample Kolmogorov-Smirnov distribution.
737 *
738 * @param x Statistic.
739 * @param n Sample size (assumed to be positive).
740 * @return \(P(D_n^+ ≥ x)\)
741 */
742 static double sf(double x, int n) {
743 final double p = sfExact(x, n);
744 if (p >= 0) {
745 return p;
746 }
747 // Note: This is not referring to N = floor(n*x).
748 // Here n is the sample size and a suggested limit 10^12 is noted on pp.15 in [1].
749 // This uses a lower threshold where the full computation takes ~ 1 second.
750 if (n > VERY_LARGE_N) {
751 return sfAsymptotic(x, n);
752 }
753 return sf(x, n, POWER_DEFAULT);
754 }
755
756 /**
757 * Calculates exact cases for the complementary probability
758 * {@code P[D_n^+ >= x]} the one-sided one-sample Kolmogorov-Smirnov distribution.
759 *
760 * <p>Exact cases handle x not in [0, 1]. It is assumed n is positive.
761 *
762 * @param x Statistic.
763 * @param n Sample size (assumed to be positive).
764 * @return \(P(D_n^+ ≥ x)\)
765 */
766 private static double sfExact(double x, int n) {
767 if (n * x * x >= 372.5 || x >= 1) {
768 // p would underflow, or x is out of the domain
769 return 0;
770 }
771 if (x <= 0) {
772 // edge-of, or out-of, the domain
773 return 1;
774 }
775 if (n == 1) {
776 return x;
777 }
778 // x <= 1/n
779 // [1] Equation (33)
780 final double nx = n * x;
781 if (nx <= 1) {
782 // 1 - x (1+x)^(n-1): here x may be small so use log1p
783 return 1 - x * Math.exp((n - 1) * Math.log1p(x));
784 }
785 // 1 - 1/n <= x < 1
786 // [1] Equation (16)
787 if (n - 1 <= nx) {
788 // (1-x)^n: here x > 0.5 and 1-x is exact
789 return Math.pow(1 - x, n);
790 }
791 return -1;
792 }
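
        // Worked examples of the exact cases (hypothetical values): n = 2, x = 0.75 has
        // nx = 1.5 >= n - 1 so p = (1 - 0.75)^2 = 0.0625; n = 4, x = 0.2 has nx = 0.8 <= 1
        // so p = 1 - 0.2 * 1.2^3 = 0.6544.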
793
794 /**
795 * Calculates complementary probability {@code P[D_n^+ >= x]}, or survival
796 * function (SF), for the one-sided one-sample Kolmogorov-Smirnov distribution.
797 *
798 * <p>Computes the result using the asymptotic formula Eq 5 in [1].
799 *
800 * @param x Statistic.
801 * @param n Sample size (assumed to be positive).
802 * @return \(P(D_n^+ ≥ x)\)
803 */
804 private static double sfAsymptotic(double x, int n) {
            // Maag & Dicaire (1971) limiting form
806 return Math.exp(-Math.pow(6.0 * n * x + 1, 2) / (18.0 * n));
807 }
808
809 /**
810 * Calculates complementary probability {@code P[D_n^+ >= x]}, or survival
811 * function (SF), for the one-sided one-sample Kolmogorov-Smirnov distribution.
812 *
813 * <p>Computes the result using double-double arithmetic. The power function
814 * can use a fast approximation or a full power computation.
815 *
816 * <p>This function is safe for {@code x > 1/n}. When {@code x} approaches
817 * sub-normal then division or multiplication by x can under/overflow. The
818 * case of {@code x < 1/n} can be computed in {@code sfExact}.
819 *
820 * @param x Statistic (typically in (1/n, 1 - 1/n)).
821 * @param n Sample size (assumed to be positive).
822 * @param power Function to compute the scaled power (can be null).
823 * @return \(P(D_n^+ ≥ x)\)
824 * @see DD#pow(int, long[])
825 * @see DDMath#pow(DD, int, long[])
826 */
827 static double sf(double x, int n, ScaledPower power) {
828 // Compute only the SF using Algorithm 1 pp 12.
829
830 // Compute: k = floor(n*x), alpha = nx - k; x = (k+alpha)/n with 0 <= alpha < 1
831 final double[] alpha = {0};
832 final int k = splitX(n, x, alpha);
833
834 // Choose the algorithm:
835 // Eq (13) Smirnov/Birnbaum-Tingey; or Smirnov/Dwass Eq (31)
836 // Eq. 13 sums j = 0 : floor( n(1-x) ) = n - 1 - floor(nx) iff alpha != 0; else n - floor(nx)
837 // Eq. 31 sums j = ceil( n(1-x) ) : n = n - floor(nx)
            // Drop a term if x = (n-j)/n. Equates to shifting the floor* down and ceil* up:
839 // Eq. 13 N = floor*( n(1-x) ) = n - k - ((alpha!=0) ? 1 : 0) - ((alpha==0) ? 1 : 0)
840 // Eq. 31 N = n - ceil*( n(1-x) ) = k - ((alpha==0) ? 1 : 0)
841 // Where N is the number of terms - 1. This differs from Algorithm 1 by dropping
842 // a SD term when it should be zero (to working precision).
843 final int regN = n - k - 1;
844 final int sdN = k - ((alpha[0] == 0) ? 1 : 0);
845
846 // SD : Figure 3 (c) (pp. 6)
847 // Terms Aj (j = n -> 0) have alternating signs through the range and may involve
848 // numbers much bigger than 1 causing cancellation; magnitudes increase then decrease.
849 // Section 3.3: Extra digits of precision required
850 // grows like Order(sqrt(n)). E.g. sf=0.7 (x ~ 0.4/sqrt(n)) loses 8 digits.
851 //
852 // Regular : Figure 3 (a, b)
853 // Terms Aj can have similar magnitude through the range; when x >= 1/sqrt(n)
854 // the final few terms can be magnitudes smaller and could be ignored.
855 // Section 3.4: As x increases the magnitude of terms becomes more peaked,
856 // centred at j = (n-nx)/2, i.e. 50% of the terms.
857 //
858 // As n -> inf the sf for x = k/n agrees with the asymptote Eq 5 in log2(n) bits.
859 //
860 // Figure 4 has lines at x = 1/n and x = 3/sqrt(n).
861 // Point between is approximately x = 4/n, i.e. nx < 4 : k <= 3.
            // SD is only faster when x < 0.5; combined with requiring nx ~ 4 this implies n >= 8.
863 //
864 // Note: If SD accuracy scales with sqrt(n) then we could use 1 / sqrt(n).
865 // That threshold is always above 4 / n when n is 16 (4/n = 1/sqrt(n) : n = 4^2).
866 // So the current thresholds are conservative.
867 boolean sd = false;
868 if (sdN < regN) {
869 // Here x < 0.5 and SD has fewer terms
                // Always choose when we only have one additional term (i.e. x < 2/n)
871 sd = sdN <= 1;
872 // Otherwise when x < 4 / n
873 sd |= sdN <= SD_MAX_TERMS && n >= SD_MIN_N;
874 }
875
876 final int maxN = sd ? sdN : regN;
877
878 // Note: if N > "very large" use the asymptotic approximation.
879 // Currently this check is done on n (sample size) in the calling function.
880 // This provides a monotonic p-value for all x with the same n.
881
882 // Configure the algorithm.
883 // The error of double-double addition and multiplication is low (< 2^-102).
884 // The error in Aj is mainly from the power function.
885 // fastPow error is around 2^-52, pow error is ~ 2^-70 or lower.
            // Smirnov-Dwass has a sum of terms that cancel and requires higher precision.
887 // The power can optionally be specified.
888 final ScaledPower fpow;
889 if (power == POWER_DEFAULT) {
890 // SD has only a few terms. Use a high accuracy power.
891 fpow = sd ? DDMath::pow : DD::pow;
892 } else {
893 fpow = power;
894 }
895 // For the regular summation we must sum at least 50% of the terms. The number
896 // of required bits to sum remaining terms of the same magnitude is log2(N/2).
            // These guard bits are conservative and > ~99% of terms are typically used.
898 final int sumBits = sd ? SD_SUM_PRECISION_BITS : SUM_PRECISION_BITS + log2(maxN >> 1);
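            // Illustrative (hypothetical) guard bits: maxN = 1000 gives log2(500) = 8,
            // so the regular summation uses sumBits = 53 + 8 = 61.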
899
900 // Working variable for the exponent of scaled values
901 final int[] ie = {0};
902 final long[] le = {0};
903
904 // The terms Aj may over/underflow.
905 // This is handled by maintaining the sum(Aj) using a fractional representation.
906 // sum(Aj) maintained as 2^e * f with f in [0.5, 1)
907 DD sum;
908 long esum;
909
910 // Compute A0
911 if (sd) {
912 // A0 = (1+x)^(n-1)
913 sum = fpow.pow(DD.ofSum(1, x), n - 1, le);
914 esum = le[0];
915 } else {
916 // A0 = (1-x)^n / x
917 sum = fpow.pow(DD.ofDifference(1, x), n, le);
918 esum = le[0];
919 // x in (1/n, 1 - 1/n) so the divide of the fraction is safe
920 sum = sum.divide(x).frexp(ie);
921 esum += ie[0];
922 }
923
924 // Binomial coefficient c(n, j) maintained as 2^e * f with f in [1, 2)
925 // This value is integral but maintained to limited precision
926 DD c = DD.ONE;
927 long ec = 0;
928 for (int i = 1; i <= maxN; i++) {
929 // c(n, j) = c(n, j-1) * (n-j+1) / j
930 c = c.multiply(DD.fromQuotient(n - i + 1, i));
931 // Here we maintain c in [1, 2) to restrict the scaled Aj term to [0.25, 2].
932 final int b = Math.getExponent(c.hi());
933 if (b != 0) {
934 c = c.scalb(-b);
935 ec += b;
936 }
937 // Compute Aj
938 final int j = sd ? n - i : i;
939 // Algorithm 4 pp. 27
940 // S = ((j/n) + x)^(j-1)
941 // T = ((n-j)/n - x)^(n-j)
942 final DD s = fpow.pow(DD.fromQuotient(j, n).add(x), j - 1, le);
943 final long es = le[0];
944 final DD t = fpow.pow(DD.fromQuotient(n - j, n).subtract(x), n - j, le);
945 final long et = le[0];
946 // Aj = C(n, j) * T * S
947 // = 2^e * [1, 2] * [0.5, 1] * [0.5, 1]
948 // = 2^e * [0.25, 2]
949 final long eaj = ec + es + et;
950 // Only compute and add to the sum when the exponents overlap by n-bits.
951 if (eaj > esum - sumBits) {
952 DD aj = c.multiply(t).multiply(s);
953 // Scaling must offset by the scale of the sum
954 aj = aj.scalb((int) (eaj - esum));
955 sum = sum.add(aj);
956 } else {
957 // Terms are expected to increase in magnitude then reduce.
958 // Here the terms are insignificant and we can stop.
959 // Effectively Aj -> eps * sum, and most of the computation is done.
960 break;
961 }
962
963 // Re-scale the sum
964 sum = sum.frexp(ie);
965 esum += ie[0];
966 }
967
968 // p = x * sum(Ai). Since the sum is normalised
969 // this is safe as long as x does not approach a sub-normal.
970 // Typically x in (1/n, 1 - 1/n).
971 sum = sum.multiply(x);
972 // Rescale the result
973 sum = sum.scalb((int) esum);
974 if (sd) {
975 // SF = 1 - CDF
976 sum = sum.negate().add(1);
977 }
978 return clipProbability(sum.doubleValue());
979 }
980
981 /**
982 * Compute exactly {@code x = (k + alpha) / n} with {@code k} an integer and
983 * {@code alpha in [0, 1)}. Note that {@code k ~ floor(nx)} but may be rounded up
984 * if {@code alpha -> 1} within working precision.
985 *
986 * <p>This computation is a significant source of increased error if performed in
987 * 64-bit arithmetic. Although the value alpha is only used for the PDF computation
988 * a value of {@code alpha == 0} indicates the final term of the SF summation can be
989 * dropped due to the cancellation of a power term {@code (x + j/n)} to zero with
990 * {@code x = (n-j)/n}. That is if {@code alpha == 0} then x is the fraction {@code k/n}
991 * and one Aj term is zero.
992 *
993 * @param n Sample size.
994 * @param x Statistic.
995 * @param alpha Output alpha.
996 * @return k
997 */
998 static int splitX(int n, double x, double[] alpha) {
999 // Described on page 14 in van Mulbregt [1].
1000 // nx = U+V (exact)
1001 DD z = DD.ofProduct(n, x);
1002 // Integer part of nx is *almost* the integer part of U.
1003 // Compute k = floor((U,V)) (changed from the listing of floor(U)).
1004 int k = (int) z.floor().hi();
1005 // nx = k + ((U - k) + V) = k + (U1 + V1)
1006 // alpha = (U1, V1) = z - k
1007 z = z.subtract(k);
1008 // alpha is in [0, 1) in double-double precision.
1009 // Ensure the high part is in [0, 1) (i.e. in double precision).
1010 if (z.hi() == 1) {
1011 // Here alpha is ~ 1.0-eps.
1012 // This occurs when x ~ j/n and n is large.
1013 k += 1;
1014 alpha[0] = 0;
1015 } else {
1016 alpha[0] = z.hi();
1017 }
1018 return k;
1019 }
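
        // Illustrative (hypothetical) splits: n = 10, x = 0.25 gives nx = 2.5 exactly so
        // k = 2 and alpha = 0.5; n = 8, x = 0.25 gives nx = 2.0 exactly so k = 2 and
        // alpha = 0 (the case where an Aj term is dropped from the summation).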
1020
1021 /**
1022 * Returns {@code floor(log2(n))}.
1023 *
1024 * @param n Value.
1025 * @return approximate log2(n)
1026 */
1027 private static int log2(int n) {
1028 return 31 - Integer.numberOfLeadingZeros(n);
1029 }
1030 }
1031
1032 /**
1033 * Computes {@code P(sqrt(n) D_n > x)}, the limiting form for the distribution of
1034 * Kolmogorov's D_n as described in Simard and L’Ecuyer (2011) (Eq. 5, or K0 Eq. 6).
1035 *
     * <p>Computes \( 2 \sum_{i=1}^\infty (-1)^{i-1} e^{-2 i^2 x^2} \), or
     * \( 1 - (\sqrt{2 \pi} / x) \sum_{i=1}^\infty e^{-(2i-1)^2 \pi^2 / (8x^2)} \)
1038 * when x is small.
1039 *
1040 * <p>Note: This computes the upper Kolmogorov sum.
1041 *
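     * <p>Illustrative reference values (approximate, to 4 significant figures):
     * {@code ksSum(1.0)} ≈ 0.2700 and {@code ksSum(0.5)} ≈ 0.9639.
     *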
1042 * @param x Argument x = sqrt(n) * d
1043 * @return Upper Kolmogorov sum evaluated at x
1044 */
1045 static double ksSum(double x) {
1046 // Switch computation when p ~ 0.5
1047 if (x < X_KS_HALF) {
1048 // When x -> 0 the result is 1
1049 if (x < X_KS_ONE) {
1050 return 1;
1051 }
1052
1053 // t = exp(-pi^2/8x^2)
1054 // p = 1 - sqrt(2pi)/x * (t + t^9 + t^25 + t^49 + t^81 + ...)
1055 // = 1 - sqrt(2pi)/x * t * (1 + t^8 + t^24 + t^48 + t^80 + ...)
1056
1057 final double logt = -PI2 / (8 * x * x);
1058 final double t = Math.exp(logt);
1059 final double s = ROOT_TWO_PI / x;
1060
1061 final double t8 = Math.pow(t, 8);
1062 if (t8 < EPS) {
1063 // Cannot compute 1 + t^8.
1064 // 1 - sqrt(2pi)/x * exp(-pi^2/8x^2)
1065 // 1 - exp(log(sqrt(2pi)/x) - pi^2/8x^2)
1066 return -Math.expm1(Math.log(s) + logt);
1067 }
1068
1069 // sum = t^((2i-1)^2 - 1), i=1, 2, 3, 4, 5, ...
1070 // = 1 + t^8 + t^24 + t^48 + t^80 + ...
1071 // With x = 0.82757... the smallest terms cannot be added when i==5
1072 // i.e. t^48 + t^80 == t^48
1073 // sum = 1 + (t^8 * (1 + t^16 * (1 + t^24)))
1074 final double sum = 1 + (t8 * (1 + t8 * t8 * (1 + t8 * t8 * t8)));
1075 return 1 - s * t * sum;
1076 }
1077
1078 // t = exp(-2 x^2)
1079 // p = 2 * (t - t^4 + t^9 - t^16 + ...)
        // sum = (-1)^(i-1) t^(i^2), i = 1, 2, 3, ...
1081
1082 // Sum of alternating terms of reducing magnitude:
1083 // Will converge when exp(-2x^2) * eps >= exp(-2x^2)^(i^2)
1084 // When x = 0.82757... this requires max i==5
1085 // i.e. t * eps >= t^36 (i=6)
1086 final double t = Math.exp(-2 * x * x);
1087
1088 // (t - t^4 + t^9 - t^16 + t^25)
1089 // t * (1 - t^3 * (1 - t^5 * (1 - t^7 * (1 - t^9))))
1090 final double t2 = t * t;
1091 final double t3 = t * t * t;
1092 final double t4 = t2 * t2;
1093 final double sum = t * (1 - t3 * (1 - t2 * t3 * (1 - t3 * t4 * (1 - t2 * t3 * t4))));
1094 return clipProbability(2 * sum);
1095 }
1096
1097 /**
1098 * Clip the probability to the range [0, 1].
1099 *
1100 * @param p Probability.
1101 * @return p in [0, 1]
1102 */
1103 static double clipProbability(double p) {
1104 return Math.min(1, Math.max(0, p));
1105 }
1106 }