/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.commons.math3.optim.nonlinear.scalar.noderiv;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

import org.apache.commons.math3.exception.DimensionMismatchException;
import org.apache.commons.math3.exception.NotPositiveException;
import org.apache.commons.math3.exception.NotStrictlyPositiveException;
import org.apache.commons.math3.exception.OutOfRangeException;
import org.apache.commons.math3.exception.TooManyEvaluationsException;
import org.apache.commons.math3.linear.Array2DRowRealMatrix;
import org.apache.commons.math3.linear.EigenDecomposition;
import org.apache.commons.math3.linear.MatrixUtils;
import org.apache.commons.math3.linear.RealMatrix;
import org.apache.commons.math3.optim.ConvergenceChecker;
import org.apache.commons.math3.optim.OptimizationData;
import org.apache.commons.math3.optim.PointValuePair;
import org.apache.commons.math3.optim.nonlinear.scalar.GoalType;
import org.apache.commons.math3.optim.nonlinear.scalar.MultivariateOptimizer;
import org.apache.commons.math3.random.RandomGenerator;
import org.apache.commons.math3.util.FastMath;
import org.apache.commons.math3.util.MathArrays;

/**
 * An implementation of the active Covariance Matrix Adaptation Evolution Strategy (CMA-ES)
 * for non-linear, non-convex, non-smooth, global function minimization.
 * <p>
 * The CMA-Evolution Strategy (CMA-ES) is a reliable stochastic optimization method
 * which should be applied if derivative-based methods, e.g. quasi-Newton BFGS or
 * conjugate gradient, fail due to a rugged search landscape (e.g. noise, local
 * optima, outliers) of the objective function. Like a
 * quasi-Newton method, the CMA-ES learns and applies a variable metric
 * on the underlying search space. Unlike a quasi-Newton method, the
 * CMA-ES neither estimates nor uses gradients, making it considerably more
 * reliable in terms of finding a good, or even close to optimal, solution.
 * <p>
 * In general, on smooth objective functions the CMA-ES is roughly ten times
 * slower than BFGS (counting objective function evaluations, no gradients provided).
 * For up to <math>N=10</math> variables, the derivative-free simplex
 * direct search method (Nelder and Mead) can also be faster, but it is
 * far less reliable than CMA-ES.
 * <p>
 * The CMA-ES is particularly well suited for non-separable
 * and/or badly conditioned problems. Observing the advantage of CMA over
 * a conventional evolution strategy usually takes about
 * <math>30 N</math> function evaluations. On difficult problems the complete
 * optimization (a single run) is expected to take <em>roughly</em> between
 * <math>30 N</math> and <math>300 N<sup>2</sup></math>
 * function evaluations.
 * <p>
 * This implementation is translated and adapted from the Matlab version
 * of the CMA-ES algorithm as implemented in module {@code cmaes.m} version 3.51.
 * <p>
 * For more information, please refer to the following links:
 * <ul>
 *  <li><a href="http://www.lri.fr/~hansen/cmaes.m">Matlab code</a></li>
 *  <li><a href="http://www.lri.fr/~hansen/cmaesintro.html">Introduction to CMA-ES</a></li>
 *  <li><a href="http://en.wikipedia.org/wiki/CMA-ES">Wikipedia</a></li>
 * </ul>
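 * <p>
 * A minimal usage sketch follows; the objective function, bounds, and
 * parameter values are illustrative assumptions, not recommendations:
 * <pre>{@code
 * CMAESOptimizer optimizer
 *     = new CMAESOptimizer(30000, 0, true, 10, 0,
 *                          new MersenneTwister(), false, null);
 * PointValuePair result
 *     = optimizer.optimize(new MaxEval(100000),
 *                          new ObjectiveFunction(myFunction), // any MultivariateFunction
 *                          GoalType.MINIMIZE,
 *                          new InitialGuess(new double[] { 0.1, 0.1 }),
 *                          new SimpleBounds(new double[] { -5, -5 },
 *                                           new double[] { 5, 5 }),
 *                          new CMAESOptimizer.Sigma(new double[] { 0.5, 0.5 }),
 *                          new CMAESOptimizer.PopulationSize(8));
 * }</pre>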
 *
 * @version $Id: CMAESOptimizer.java 1573506 2014-03-03 09:58:29Z luc $
 * @since 3.0
 */
public class CMAESOptimizer
    extends MultivariateOptimizer {
    // global search parameters
    /**
     * Population size, offspring number. The primary strategy parameter to play
     * with, which can be increased from its default value. Increasing the
     * population size improves global search properties at the expense of speed.
     * Speed decreases, as a rule, at most linearly with increasing population
     * size. It is advisable to begin with the default small population size.
     */
    private int lambda; // population size
    /**
     * Covariance update mechanism, default is active CMA. isActiveCMA = true
     * turns on "active CMA" with a negative update of the covariance matrix and
     * checks for positive definiteness. OPTS.CMA.active = 2 does not check for
     * pos. def. and is numerically faster. Active CMA usually speeds up the
     * adaptation.
     */
    private final boolean isActiveCMA;
    /**
     * Determines how often new random offspring are generated in case they
     * are not feasible (i.e. beyond the defined limits); default is 0.
     */
    private final int checkFeasableCount;
    /**
     * @see Sigma
     */
    private double[] inputSigma;
    /** Number of objective variables/problem dimension. */
    private int dimension;
    /**
     * Defines the number of initial iterations, where the covariance matrix
     * remains diagonal and the algorithm has internally linear time complexity.
     * diagonalOnly = 1 means keeping the covariance matrix always diagonal and
     * this setting also exhibits linear space complexity. This can be
     * particularly useful for dimension > 100.
     * @see <a href="http://hal.archives-ouvertes.fr/inria-00287367/en">A Simple Modification in CMA-ES</a>
     */
    private int diagonalOnly;
    /** Indicates whether the optimization goal is minimization. */
    private boolean isMinimize = true;
    /** Indicates whether statistic data is collected. */
    private final boolean generateStatistics;

    // termination criteria
    /** Maximal number of iterations allowed. */
    private final int maxIterations;
    /** Limit for fitness value. */
    private final double stopFitness;
    /** Stop if x-changes are larger than stopTolUpX. */
    private double stopTolUpX;
    /** Stop if x-changes are smaller than stopTolX. */
    private double stopTolX;
    /** Stop if function value changes are smaller than stopTolFun. */
    private double stopTolFun;
    /** Stop if past function value changes are smaller than stopTolHistFun. */
    private double stopTolHistFun;

    // selection strategy parameters
    /** Number of parents/points for recombination. */
    private int mu;
    /** log(mu + 0.5), stored for efficiency. */
    private double logMu2;
    /** Array for weighted recombination. */
    private RealMatrix weights;
    /** Variance-effectiveness of sum w_i x_i. */
    private double mueff;

    // dynamic strategy parameters and constants
    /** Overall standard deviation - search volume. */
    private double sigma;
    /** Cumulation constant. */
    private double cc;
    /** Cumulation constant for step-size. */
    private double cs;
    /** Damping for step-size. */
    private double damps;
    /** Learning rate for rank-one update. */
    private double ccov1;
    /** Learning rate for rank-mu update. */
    private double ccovmu;
    /** Expectation of ||N(0,I)|| == norm(randn(N,1)). */
    private double chiN;
    /** Learning rate for rank-one update - diagonalOnly. */
    private double ccov1Sep;
    /** Learning rate for rank-mu update - diagonalOnly. */
    private double ccovmuSep;

    // CMA internal values - updated each generation
    /** Objective variables. */
    private RealMatrix xmean;
    /** Evolution path. */
    private RealMatrix pc;
    /** Evolution path for sigma. */
    private RealMatrix ps;
    /** Norm of ps, stored for efficiency. */
    private double normps;
    /** Coordinate system. */
    private RealMatrix B;
    /** Scaling. */
    private RealMatrix D;
    /** B*D, stored for efficiency. */
    private RealMatrix BD;
    /** Diagonal of sqrt(D), stored for efficiency. */
    private RealMatrix diagD;
    /** Covariance matrix. */
    private RealMatrix C;
    /** Diagonal of C, used for diagonalOnly. */
    private RealMatrix diagC;
    /** Number of iterations already performed. */
    private int iterations;

    /** History queue of best values. */
    private double[] fitnessHistory;
    /** Size of history queue of best values. */
    private int historySize;

    /** Random generator. */
    private final RandomGenerator random;

    /** History of sigma values. */
    private final List<Double> statisticsSigmaHistory = new ArrayList<Double>();
    /** History of mean matrix. */
    private final List<RealMatrix> statisticsMeanHistory = new ArrayList<RealMatrix>();
    /** History of fitness values. */
    private final List<Double> statisticsFitnessHistory = new ArrayList<Double>();
    /** History of D matrix. */
    private final List<RealMatrix> statisticsDHistory = new ArrayList<RealMatrix>();

    /**
     * @param maxIterations Maximal number of iterations.
     * @param stopFitness Stop if objective function value is smaller than
     * {@code stopFitness}.
     * @param isActiveCMA Chooses the covariance matrix update method.
     * @param diagonalOnly Number of initial iterations, where the covariance matrix
     * remains diagonal.
     * @param checkFeasableCount Determines how often new random objective variables are
     * generated in case they are out of bounds.
     * @param random Random generator.
     * @param generateStatistics Whether statistic data is collected.
     * @param checker Convergence checker.
     *
     * @since 3.1
     */
    public CMAESOptimizer(int maxIterations,
                          double stopFitness,
                          boolean isActiveCMA,
                          int diagonalOnly,
                          int checkFeasableCount,
                          RandomGenerator random,
                          boolean generateStatistics,
                          ConvergenceChecker<PointValuePair> checker) {
        super(checker);
        this.maxIterations = maxIterations;
        this.stopFitness = stopFitness;
        this.isActiveCMA = isActiveCMA;
        this.diagonalOnly = diagonalOnly;
        this.checkFeasableCount = checkFeasableCount;
        this.random = random;
        this.generateStatistics = generateStatistics;
    }

    /**
     * @return History of sigma values.
     */
    public List<Double> getStatisticsSigmaHistory() {
        return statisticsSigmaHistory;
    }

    /**
     * @return History of mean matrix.
     */
    public List<RealMatrix> getStatisticsMeanHistory() {
        return statisticsMeanHistory;
    }

    /**
     * @return History of fitness values.
     */
    public List<Double> getStatisticsFitnessHistory() {
        return statisticsFitnessHistory;
    }

    /**
     * @return History of D matrix.
     */
    public List<RealMatrix> getStatisticsDHistory() {
        return statisticsDHistory;
    }

    /**
     * Input sigma values.
     * They define the initial coordinate-wise standard deviations for
     * sampling new search points around the initial guess.
     * It is suggested to set them to the estimated distance from the
     * initial to the desired optimum.
     * Small values induce the search to be more local (and very small
     * values are more likely to find a local optimum close to the initial
     * guess).
     * Too small values might however lead to early termination.
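     * <p>
     * For instance, if the optimum is expected within about one unit of the
     * initial guess in each coordinate (an illustrative assumption):
     * <pre>{@code
     * OptimizationData sigma = new CMAESOptimizer.Sigma(new double[] { 1.0, 1.0 });
     * }</pre>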
     */
    public static class Sigma implements OptimizationData {
        /** Sigma values. */
        private final double[] sigma;

        /**
         * @param s Sigma values.
         * @throws NotPositiveException if any of the array entries is smaller
         * than zero.
         */
        public Sigma(double[] s)
            throws NotPositiveException {
            for (int i = 0; i < s.length; i++) {
                if (s[i] < 0) {
                    throw new NotPositiveException(s[i]);
                }
            }

            sigma = s.clone();
        }

        /**
         * @return the sigma values.
         */
        public double[] getSigma() {
            return sigma.clone();
        }
    }

    /**
     * Population size.
     * The number of offspring is the primary strategy parameter.
     * In the absence of better clues, a good default could be an
     * integer close to {@code 4 + 3 ln(n)}, where {@code n} is the
     * number of optimized parameters.
     * Increasing the population size improves global search properties
     * at the expense of speed (which in general decreases at most
     * linearly with increasing population size).
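     * <p>
     * A worked instance of that default, as a sketch (the names are illustrative):
     * <pre>{@code
     * int n = 20;                                 // number of optimized parameters
     * int size = 4 + (int) (3 * FastMath.log(n)); // 4 + (int) 8.98... = 12
     * OptimizationData popSize = new CMAESOptimizer.PopulationSize(size);
     * }</pre>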
     */
    public static class PopulationSize implements OptimizationData {
        /** Population size. */
        private final int lambda;

        /**
         * @param size Population size.
         * @throws NotStrictlyPositiveException if {@code size <= 0}.
         */
        public PopulationSize(int size)
            throws NotStrictlyPositiveException {
            if (size <= 0) {
                throw new NotStrictlyPositiveException(size);
            }
            lambda = size;
        }

        /**
         * @return the population size.
         */
        public int getPopulationSize() {
            return lambda;
        }
    }

    /**
     * {@inheritDoc}
     *
     * @param optData Optimization data. In addition to those documented in
     * {@link MultivariateOptimizer#parseOptimizationData(OptimizationData[])
     * MultivariateOptimizer}, this method will register the following data:
     * <ul>
     *  <li>{@link Sigma}</li>
     *  <li>{@link PopulationSize}</li>
     * </ul>
     * @return {@inheritDoc}
     * @throws TooManyEvaluationsException if the maximal number of
     * evaluations is exceeded.
     * @throws DimensionMismatchException if the initial guess, target, and weight
     * arguments have inconsistent dimensions.
     */
    @Override
    public PointValuePair optimize(OptimizationData... optData)
        throws TooManyEvaluationsException,
               DimensionMismatchException {
        // Set up base class and perform computation.
        return super.optimize(optData);
    }

    /** {@inheritDoc} */
    @Override
    protected PointValuePair doOptimize() {
        // -------------------- Initialization --------------------------------
        isMinimize = getGoalType().equals(GoalType.MINIMIZE);
        final FitnessFunction fitfun = new FitnessFunction();
        final double[] guess = getStartPoint();
        // number of objective variables/problem dimension
        dimension = guess.length;
        initializeCMA(guess);
        iterations = 0;
        ValuePenaltyPair valuePenalty = fitfun.value(guess);
        double bestValue = valuePenalty.value + valuePenalty.penalty;
        push(fitnessHistory, bestValue);
        PointValuePair optimum
            = new PointValuePair(getStartPoint(),
                                 isMinimize ? bestValue : -bestValue);
        PointValuePair lastResult = null;

        // -------------------- Generation Loop --------------------------------

        generationLoop:
        for (iterations = 1; iterations <= maxIterations; iterations++) {
            incrementIterationCount();

            // Generate and evaluate lambda offspring
            final RealMatrix arz = randn1(dimension, lambda);
            final RealMatrix arx = zeros(dimension, lambda);
            final double[] fitness = new double[lambda];
            final ValuePenaltyPair[] valuePenaltyPairs = new ValuePenaltyPair[lambda];
            // generate random offspring
            for (int k = 0; k < lambda; k++) {
                RealMatrix arxk = null;
                for (int i = 0; i < checkFeasableCount + 1; i++) {
                    if (diagonalOnly <= 0) {
                        arxk = xmean.add(BD.multiply(arz.getColumnMatrix(k))
                                         .scalarMultiply(sigma)); // m + sig * Normal(0,C)
                    } else {
                        arxk = xmean.add(times(diagD, arz.getColumnMatrix(k))
                                         .scalarMultiply(sigma));
                    }
                    if (i >= checkFeasableCount ||
                        fitfun.isFeasible(arxk.getColumn(0))) {
                        break;
                    }
                    // regenerate random arguments for row
                    arz.setColumn(k, randn(dimension));
                }
                copyColumn(arxk, 0, arx, k);
                try {
                    valuePenaltyPairs[k] = fitfun.value(arx.getColumn(k)); // compute fitness
                } catch (TooManyEvaluationsException e) {
                    break generationLoop;
                }
            }

            // Compute fitnesses by adding value and penalty after scaling by value range.
            double valueRange = valueRange(valuePenaltyPairs);
            for (int iValue = 0; iValue < valuePenaltyPairs.length; iValue++) {
                fitness[iValue] = valuePenaltyPairs[iValue].value + valuePenaltyPairs[iValue].penalty * valueRange;
            }

            // Sort by fitness and compute weighted mean into xmean
            final int[] arindex = sortedIndices(fitness);
            // Calculate new xmean, this is selection and recombination
            final RealMatrix xold = xmean; // for speed up of Eq. (2) and (3)
            final RealMatrix bestArx = selectColumns(arx, MathArrays.copyOf(arindex, mu));
            xmean = bestArx.multiply(weights);
            final RealMatrix bestArz = selectColumns(arz, MathArrays.copyOf(arindex, mu));
            final RealMatrix zmean = bestArz.multiply(weights);
            final boolean hsig = updateEvolutionPaths(zmean, xold);
            if (diagonalOnly <= 0) {
                updateCovariance(hsig, bestArx, arz, arindex, xold);
            } else {
                updateCovarianceDiagonalOnly(hsig, bestArz);
            }
            // Adapt step size sigma - Eq. (5)
            sigma *= FastMath.exp(FastMath.min(1, (normps / chiN - 1) * cs / damps));
            final double bestFitness = fitness[arindex[0]];
            final double worstFitness = fitness[arindex[arindex.length - 1]];
            if (bestValue > bestFitness) {
                bestValue = bestFitness;
                lastResult = optimum;
                optimum = new PointValuePair(fitfun.repair(bestArx.getColumn(0)),
                                             isMinimize ? bestFitness : -bestFitness);
                if (getConvergenceChecker() != null && lastResult != null &&
                    getConvergenceChecker().converged(iterations, optimum, lastResult)) {
                    break generationLoop;
                }
            }
            // handle termination criteria
            // Break, if fitness is good enough
            if (stopFitness != 0 && bestFitness < (isMinimize ? stopFitness : -stopFitness)) {
                break generationLoop;
            }
            final double[] sqrtDiagC = sqrt(diagC).getColumn(0);
            final double[] pcCol = pc.getColumn(0);
            for (int i = 0; i < dimension; i++) {
                if (sigma * FastMath.max(FastMath.abs(pcCol[i]), sqrtDiagC[i]) > stopTolX) {
                    break;
                }
                if (i >= dimension - 1) {
                    break generationLoop;
                }
            }
            for (int i = 0; i < dimension; i++) {
                if (sigma * sqrtDiagC[i] > stopTolUpX) {
                    break generationLoop;
                }
            }
            final double historyBest = min(fitnessHistory);
            final double historyWorst = max(fitnessHistory);
            if (iterations > 2 &&
                FastMath.max(historyWorst, worstFitness) -
                FastMath.min(historyBest, bestFitness) < stopTolFun) {
                break generationLoop;
            }
            if (iterations > fitnessHistory.length &&
                historyWorst - historyBest < stopTolHistFun) {
                break generationLoop;
            }
            // condition number of the covariance matrix exceeds 1e14
            // (diagD holds the standard deviations, i.e. the square roots of
            // the eigenvalues of C, hence the 1e7 threshold on their ratio)
            if (max(diagD) / min(diagD) > 1e7) {
                break generationLoop;
            }
            // user defined termination
            if (getConvergenceChecker() != null) {
                final PointValuePair current
                    = new PointValuePair(bestArx.getColumn(0),
                                         isMinimize ? bestFitness : -bestFitness);
                if (lastResult != null &&
                    getConvergenceChecker().converged(iterations, current, lastResult)) {
                    break generationLoop;
                }
                lastResult = current;
            }
            // Adjust step size in case of equal function values (flat fitness)
            if (bestValue == fitness[arindex[(int) (0.1 + lambda / 4.)]]) {
                sigma *= FastMath.exp(0.2 + cs / damps);
            }
            if (iterations > 2 && FastMath.max(historyWorst, bestFitness) -
                FastMath.min(historyBest, bestFitness) == 0) {
                sigma *= FastMath.exp(0.2 + cs / damps);
            }
            // store best in history
            push(fitnessHistory, bestFitness);
            if (generateStatistics) {
                statisticsSigmaHistory.add(sigma);
                statisticsFitnessHistory.add(bestFitness);
                statisticsMeanHistory.add(xmean.transpose());
                statisticsDHistory.add(diagD.transpose().scalarMultiply(1E5));
            }
        }
        return optimum;
    }

    /**
     * Scans the list of (required and optional) optimization data that
     * characterize the problem.
     *
     * @param optData Optimization data. The following data will be looked for:
     * <ul>
     *  <li>{@link Sigma}</li>
     *  <li>{@link PopulationSize}</li>
     * </ul>
     */
    @Override
    protected void parseOptimizationData(OptimizationData... optData) {
        // Allow base class to register its own data.
        super.parseOptimizationData(optData);

        // The existing values (as set by the previous call) are reused if
        // not provided in the argument list.
        for (OptimizationData data : optData) {
            if (data instanceof Sigma) {
                inputSigma = ((Sigma) data).getSigma();
                continue;
            }
            if (data instanceof PopulationSize) {
                lambda = ((PopulationSize) data).getPopulationSize();
                continue;
            }
        }

        checkParameters();
    }

    /**
     * Checks dimensions and values of boundaries and inputSigma if defined.
     */
    private void checkParameters() {
        final double[] init = getStartPoint();
        final double[] lB = getLowerBound();
        final double[] uB = getUpperBound();

        if (inputSigma != null) {
            if (inputSigma.length != init.length) {
                throw new DimensionMismatchException(inputSigma.length, init.length);
            }
            for (int i = 0; i < init.length; i++) {
                if (inputSigma[i] > uB[i] - lB[i]) {
                    throw new OutOfRangeException(inputSigma[i], 0, uB[i] - lB[i]);
                }
            }
        }
    }

    /**
     * Initialization of the dynamic search parameters.
     *
     * @param guess Initial guess for the arguments of the fitness function.
     */
    private void initializeCMA(double[] guess) {
        if (lambda <= 0) {
            throw new NotStrictlyPositiveException(lambda);
        }
        // initialize sigma
        final double[][] sigmaArray = new double[guess.length][1];
        for (int i = 0; i < guess.length; i++) {
            sigmaArray[i][0] = inputSigma[i];
        }
        final RealMatrix insigma = new Array2DRowRealMatrix(sigmaArray, false);
        sigma = max(insigma); // overall standard deviation

        // initialize termination criteria
        stopTolUpX = 1e3 * max(insigma);
        stopTolX = 1e-11 * max(insigma);
        stopTolFun = 1e-12;
        stopTolHistFun = 1e-13;

        // initialize selection strategy parameters
        mu = lambda / 2; // number of parents/points for recombination
        logMu2 = FastMath.log(mu + 0.5);
        weights = log(sequence(1, mu, 1)).scalarMultiply(-1).scalarAdd(logMu2);
        double sumw = 0;
        double sumwq = 0;
        for (int i = 0; i < mu; i++) {
            double w = weights.getEntry(i, 0);
            sumw += w;
            sumwq += w * w;
        }
        weights = weights.scalarMultiply(1 / sumw);
        mueff = sumw * sumw / sumwq; // variance-effectiveness of sum w_i x_i

        // initialize dynamic strategy parameters and constants
        cc = (4 + mueff / dimension) /
                (dimension + 4 + 2 * mueff / dimension);
        cs = (mueff + 2) / (dimension + mueff + 3.);
        damps = (1 + 2 * FastMath.max(0, FastMath.sqrt((mueff - 1) /
                                                       (dimension + 1)) - 1)) *
            FastMath.max(0.3,
                         1 - dimension / (1e-6 + maxIterations)) + cs; // minor increment
        ccov1 = 2 / ((dimension + 1.3) * (dimension + 1.3) + mueff);
        ccovmu = FastMath.min(1 - ccov1, 2 * (mueff - 2 + 1 / mueff) /
                              ((dimension + 2) * (dimension + 2) + mueff));
        ccov1Sep = FastMath.min(1, ccov1 * (dimension + 1.5) / 3);
        ccovmuSep = FastMath.min(1 - ccov1, ccovmu * (dimension + 1.5) / 3);
        chiN = FastMath.sqrt(dimension) *
                (1 - 1 / ((double) 4 * dimension) + 1 / ((double) 21 * dimension * dimension));
        // initialize CMA internal values - updated each generation
        xmean = MatrixUtils.createColumnRealMatrix(guess); // objective variables
        diagD = insigma.scalarMultiply(1 / sigma);
        diagC = square(diagD);
        pc = zeros(dimension, 1); // evolution path for C
        ps = zeros(dimension, 1); // evolution path for sigma
        normps = ps.getFrobeniusNorm();

        B = eye(dimension, dimension); // B defines the coordinate system
        D = ones(dimension, 1); // diagonal D defines the scaling
        BD = times(B, repmat(diagD.transpose(), dimension, 1));
        C = B.multiply(diag(square(D)).multiply(B.transpose())); // covariance
        historySize = 10 + (int) (3 * 10 * dimension / (double) lambda);
        fitnessHistory = new double[historySize]; // history of fitness values
        for (int i = 0; i < historySize; i++) {
            fitnessHistory[i] = Double.MAX_VALUE;
        }
    }

    /**
     * Update of the evolution paths ps and pc.
     *
     * @param zmean Weighted row matrix of the gaussian random numbers generating
     * the current offspring.
     * @param xold xmean matrix of the previous generation.
     * @return hsig flag indicating a small correction.
     */
    private boolean updateEvolutionPaths(RealMatrix zmean, RealMatrix xold) {
        ps = ps.scalarMultiply(1 - cs).add(
                B.multiply(zmean).scalarMultiply(
                        FastMath.sqrt(cs * (2 - cs) * mueff)));
        normps = ps.getFrobeniusNorm();
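        // hsig stalls the pc update below when ||ps|| is large; this is the
        // Heaviside correction from the CMA-ES tutorial, which prevents a too
        // fast increase of the axes of C when the step size is too small.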
        final boolean hsig = normps /
            FastMath.sqrt(1 - FastMath.pow(1 - cs, 2 * iterations)) /
            chiN < 1.4 + 2 / ((double) dimension + 1);
        pc = pc.scalarMultiply(1 - cc);
        if (hsig) {
            pc = pc.add(xmean.subtract(xold).scalarMultiply(FastMath.sqrt(cc * (2 - cc) * mueff) / sigma));
        }
        return hsig;
    }

    /**
     * Update of the covariance matrix C for diagonalOnly > 0.
     *
     * @param hsig Flag indicating a small correction.
     * @param bestArz Fitness-sorted matrix of the gaussian random values of the
     * current offspring.
     */
    private void updateCovarianceDiagonalOnly(boolean hsig,
                                              final RealMatrix bestArz) {
        // minor correction if hsig==false
        double oldFac = hsig ? 0 : ccov1Sep * cc * (2 - cc);
        oldFac += 1 - ccov1Sep - ccovmuSep;
        diagC = diagC.scalarMultiply(oldFac) // regard old matrix
            .add(square(pc).scalarMultiply(ccov1Sep)) // plus rank one update
            .add((times(diagC, square(bestArz).multiply(weights))) // plus rank mu update
                 .scalarMultiply(ccovmuSep));
        diagD = sqrt(diagC); // replaces eig(C)
        if (diagonalOnly > 1 &&
            iterations > diagonalOnly) {
            // full covariance matrix from now on
            diagonalOnly = 0;
            B = eye(dimension, dimension);
            BD = diag(diagD);
            C = diag(diagC);
        }
    }

    /**
     * Update of the covariance matrix C.
     *
     * @param hsig Flag indicating a small correction.
     * @param bestArx Fitness-sorted matrix of the argument vectors producing the
     * current offspring.
     * @param arz Unsorted matrix containing the gaussian random values of the
     * current offspring.
     * @param arindex Indices indicating the fitness-order of the current offspring.
     * @param xold xmean matrix of the previous generation.
     */
    private void updateCovariance(boolean hsig, final RealMatrix bestArx,
                                  final RealMatrix arz, final int[] arindex,
                                  final RealMatrix xold) {
        double negccov = 0;
        if (ccov1 + ccovmu > 0) {
            final RealMatrix arpos = bestArx.subtract(repmat(xold, 1, mu))
                .scalarMultiply(1 / sigma); // mu difference vectors
            final RealMatrix roneu = pc.multiply(pc.transpose())
                .scalarMultiply(ccov1); // rank one update
            // minor correction if hsig==false
            double oldFac = hsig ? 0 : ccov1 * cc * (2 - cc);
            oldFac += 1 - ccov1 - ccovmu;
            if (isActiveCMA) {
                // Adapt covariance matrix C active CMA
                negccov = (1 - ccovmu) * 0.25 * mueff /
                    (FastMath.pow(dimension + 2, 1.5) + 2 * mueff);
                // keep at least 0.66 in all directions, small popsize are most
                // critical
                final double negminresidualvariance = 0.66;
                // where to make up for the variance loss
                final double negalphaold = 0.5;
                // prepare vectors, compute negative updating matrix Cneg
                final int[] arReverseIndex = reverse(arindex);
                RealMatrix arzneg = selectColumns(arz, MathArrays.copyOf(arReverseIndex, mu));
                RealMatrix arnorms = sqrt(sumRows(square(arzneg)));
                final int[] idxnorms = sortedIndices(arnorms.getRow(0));
                final RealMatrix arnormsSorted = selectColumns(arnorms, idxnorms);
                final int[] idxReverse = reverse(idxnorms);
                final RealMatrix arnormsReverse = selectColumns(arnorms, idxReverse);
                arnorms = divide(arnormsReverse, arnormsSorted);
                final int[] idxInv = inverse(idxnorms);
                final RealMatrix arnormsInv = selectColumns(arnorms, idxInv);
                // check and set learning rate negccov
                final double negcovMax = (1 - negminresidualvariance) /
                    square(arnormsInv).multiply(weights).getEntry(0, 0);
                if (negccov > negcovMax) {
                    negccov = negcovMax;
                }
                arzneg = times(arzneg, repmat(arnormsInv, dimension, 1));
                final RealMatrix artmp = BD.multiply(arzneg);
                final RealMatrix Cneg = artmp.multiply(diag(weights)).multiply(artmp.transpose());
                oldFac += negalphaold * negccov;
                C = C.scalarMultiply(oldFac) // regard old matrix
                    .add(roneu) // plus rank one update
                    .add(arpos.scalarMultiply( // plus rank mu update
                                              ccovmu + (1 - negalphaold) * negccov)
                         .multiply(times(repmat(weights, 1, dimension),
                                         arpos.transpose())))
                    .subtract(Cneg.scalarMultiply(negccov));
            } else {
                // Adapt covariance matrix C - nonactive
                C = C.scalarMultiply(oldFac) // regard old matrix
                    .add(roneu) // plus rank one update
                    .add(arpos.scalarMultiply(ccovmu) // plus rank mu update
                         .multiply(times(repmat(weights, 1, dimension),
                                         arpos.transpose())));
            }
        }
        updateBD(negccov);
    }

    /**
     * Update B and D from C.
     *
     * @param negccov Negative covariance factor.
     */
    private void updateBD(double negccov) {
        if (ccov1 + ccovmu + negccov > 0 &&
            (iterations % 1. / (ccov1 + ccovmu + negccov) / dimension / 10.) < 1) {
            // to achieve O(N^2)
            C = triu(C, 0).add(triu(C, 1).transpose());
            // enforce symmetry to prevent complex numbers
            final EigenDecomposition eig = new EigenDecomposition(C);
            B = eig.getV(); // eigen decomposition, B==normalized eigenvectors
            D = eig.getD();
            diagD = diag(D);
            if (min(diagD) <= 0) {
                for (int i = 0; i < dimension; i++) {
                    if (diagD.getEntry(i, 0) < 0) {
                        diagD.setEntry(i, 0, 0);
                    }
                }
                final double tfac = max(diagD) / 1e14;
                C = C.add(eye(dimension, dimension).scalarMultiply(tfac));
                diagD = diagD.add(ones(dimension, 1).scalarMultiply(tfac));
            }
            if (max(diagD) > 1e14 * min(diagD)) {
                final double tfac = max(diagD) / 1e14 - min(diagD);
                C = C.add(eye(dimension, dimension).scalarMultiply(tfac));
                diagD = diagD.add(ones(dimension, 1).scalarMultiply(tfac));
            }
            diagC = diag(C);
            diagD = sqrt(diagD); // D contains standard deviations now
            BD = times(B, repmat(diagD.transpose(), dimension, 1)); // O(n^2)
        }
    }

    /**
     * Pushes the current best fitness value in a history queue.
     *
     * @param vals History queue.
     * @param val Current best fitness value.
     */
    private static void push(double[] vals, double val) {
        for (int i = vals.length - 1; i > 0; i--) {
            vals[i] = vals[i - 1];
        }
        vals[0] = val;
    }

    /**
     * Sorts fitness values.
     *
     * @param doubles Array of values to be sorted.
     * @return a sorted array of indices pointing into doubles.
     */
    private int[] sortedIndices(final double[] doubles) {
        final DoubleIndex[] dis = new DoubleIndex[doubles.length];
        for (int i = 0; i < doubles.length; i++) {
            dis[i] = new DoubleIndex(doubles[i], i);
        }
        Arrays.sort(dis);
        final int[] indices = new int[doubles.length];
        for (int i = 0; i < doubles.length; i++) {
            indices[i] = dis[i].index;
        }
        return indices;
    }

    /**
     * Get range of values.
     *
     * @param vpPairs Array of valuePenaltyPairs to get range from.
     * @return a double equal to maximum value minus minimum value.
     */
    private double valueRange(final ValuePenaltyPair[] vpPairs) {
        double max = Double.NEGATIVE_INFINITY;
        double min = Double.MAX_VALUE;
        for (ValuePenaltyPair vpPair : vpPairs) {
            if (vpPair.value > max) {
                max = vpPair.value;
            }
            if (vpPair.value < min) {
                min = vpPair.value;
            }
        }
        return max - min;
    }

    /**
     * Used to sort fitness values. Sorting is always in lower value first
     * order.
     */
    private static class DoubleIndex implements Comparable<DoubleIndex> {
        /** Value to compare. */
        private final double value;
        /** Index into sorted array. */
        private final int index;

        /**
         * @param value Value to compare.
         * @param index Index into sorted array.
         */
        DoubleIndex(double value, int index) {
            this.value = value;
            this.index = index;
        }

        /** {@inheritDoc} */
        public int compareTo(DoubleIndex o) {
            return Double.compare(value, o.value);
        }

        /** {@inheritDoc} */
        @Override
        public boolean equals(Object other) {

            if (this == other) {
                return true;
            }

            if (other instanceof DoubleIndex) {
                return Double.compare(value, ((DoubleIndex) other).value) == 0;
            }

            return false;
        }

        /** {@inheritDoc} */
        @Override
        public int hashCode() {
            long bits = Double.doubleToLongBits(value);
            return (int) ((1438542 ^ (bits >>> 32) ^ bits) & 0xffffffff);
        }
    }

    /**
     * Stores the value and penalty (for repair of out-of-bounds points).
     */
    private static class ValuePenaltyPair {
        /** Objective function value. */
        private double value;
        /** Penalty value for repair of out-of-bounds points. */
        private double penalty;

        /**
         * @param value Function value.
         * @param penalty Out-of-bounds penalty.
         */
        public ValuePenaltyPair(final double value, final double penalty) {
            this.value   = value;
            this.penalty = penalty;
        }
    }

    /**
     * Evaluates the objective function, repairing the objective variables if
     * they are out of bounds and adding a penalty according to the violation.
     */
    private class FitnessFunction {
        /**
         * Flag indicating whether the objective variables are forced into their
         * bounds if defined.
         */
        private final boolean isRepairMode;

        /** Simple constructor. */
        public FitnessFunction() {
            isRepairMode = true;
        }

        /**
         * @param point Normalized objective variables.
         * @return the objective value + penalty for violated bounds.
         */
        public ValuePenaltyPair value(final double[] point) {
            double value;
            double penalty = 0.0;
            if (isRepairMode) {
                double[] repaired = repair(point);
                value = CMAESOptimizer.this.computeObjectiveValue(repaired);
                penalty = penalty(point, repaired);
            } else {
                value = CMAESOptimizer.this.computeObjectiveValue(point);
            }
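            // Fitness is minimized internally, so flip the signs for maximization.
            // Note that penalty(...) already flipped the penalty sign, so the
            // second flip below leaves the penalty non-negative in both cases.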
            value = isMinimize ? value : -value;
            penalty = isMinimize ? penalty : -penalty;
            return new ValuePenaltyPair(value, penalty);
        }

        /**
         * @param x Normalized objective variables.
         * @return {@code true} if in bounds.
         */
        public boolean isFeasible(final double[] x) {
            final double[] lB = CMAESOptimizer.this.getLowerBound();
            final double[] uB = CMAESOptimizer.this.getUpperBound();

            for (int i = 0; i < x.length; i++) {
                if (x[i] < lB[i]) {
                    return false;
                }
                if (x[i] > uB[i]) {
                    return false;
                }
            }
            return true;
        }

        /**
         * @param x Normalized objective variables.
         * @return the repaired (i.e. all in bounds) objective variables.
         */
        private double[] repair(final double[] x) {
            final double[] lB = CMAESOptimizer.this.getLowerBound();
            final double[] uB = CMAESOptimizer.this.getUpperBound();

            final double[] repaired = new double[x.length];
            for (int i = 0; i < x.length; i++) {
                if (x[i] < lB[i]) {
                    repaired[i] = lB[i];
                } else if (x[i] > uB[i]) {
                    repaired[i] = uB[i];
                } else {
                    repaired[i] = x[i];
                }
            }
            return repaired;
        }

        /**
         * @param x Normalized objective variables.
         * @param repaired Repaired objective variables.
         * @return Penalty value according to the violation of the bounds.
         */
        private double penalty(final double[] x, final double[] repaired) {
            double penalty = 0;
            for (int i = 0; i < x.length; i++) {
                double diff = FastMath.abs(x[i] - repaired[i]);
                penalty += diff;
            }
            return isMinimize ? penalty : -penalty;
        }
    }

    // -----Matrix utility functions similar to the Matlab built-in functions------

    /**
     * @param m Input matrix.
     * @return Matrix representing the element-wise logarithm of m.
     */
    private static RealMatrix log(final RealMatrix m) {
        final double[][] d = new double[m.getRowDimension()][m.getColumnDimension()];
        for (int r = 0; r < m.getRowDimension(); r++) {
            for (int c = 0; c < m.getColumnDimension(); c++) {
                d[r][c] = FastMath.log(m.getEntry(r, c));
            }
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
     * @param m Input matrix.
     * @return Matrix representing the element-wise square root of m.
     */
    private static RealMatrix sqrt(final RealMatrix m) {
        final double[][] d = new double[m.getRowDimension()][m.getColumnDimension()];
        for (int r = 0; r < m.getRowDimension(); r++) {
            for (int c = 0; c < m.getColumnDimension(); c++) {
                d[r][c] = FastMath.sqrt(m.getEntry(r, c));
            }
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
     * @param m Input matrix.
     * @return Matrix representing the element-wise square of m.
     */
    private static RealMatrix square(final RealMatrix m) {
        final double[][] d = new double[m.getRowDimension()][m.getColumnDimension()];
        for (int r = 0; r < m.getRowDimension(); r++) {
            for (int c = 0; c < m.getColumnDimension(); c++) {
                double e = m.getEntry(r, c);
                d[r][c] = e * e;
            }
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
     * @param m Input matrix 1.
     * @param n Input matrix 2.
     * @return the matrix where the elements of m and n are element-wise multiplied.
     */
    private static RealMatrix times(final RealMatrix m, final RealMatrix n) {
        final double[][] d = new double[m.getRowDimension()][m.getColumnDimension()];
        for (int r = 0; r < m.getRowDimension(); r++) {
            for (int c = 0; c < m.getColumnDimension(); c++) {
                d[r][c] = m.getEntry(r, c) * n.getEntry(r, c);
            }
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
     * @param m Input matrix 1.
     * @param n Input matrix 2.
     * @return Matrix where the elements of m and n are element-wise divided.
     */
    private static RealMatrix divide(final RealMatrix m, final RealMatrix n) {
        final double[][] d = new double[m.getRowDimension()][m.getColumnDimension()];
        for (int r = 0; r < m.getRowDimension(); r++) {
            for (int c = 0; c < m.getColumnDimension(); c++) {
                d[r][c] = m.getEntry(r, c) / n.getEntry(r, c);
            }
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
     * @param m Input matrix.
     * @param cols Columns to select.
     * @return Matrix representing the selected columns.
     */
    private static RealMatrix selectColumns(final RealMatrix m, final int[] cols) {
        final double[][] d = new double[m.getRowDimension()][cols.length];
        for (int r = 0; r < m.getRowDimension(); r++) {
            for (int c = 0; c < cols.length; c++) {
                d[r][c] = m.getEntry(r, cols[c]);
            }
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
     * @param m Input matrix.
     * @param k Diagonal position.
     * @return Upper triangular part of matrix.
     */
    private static RealMatrix triu(final RealMatrix m, int k) {
        final double[][] d = new double[m.getRowDimension()][m.getColumnDimension()];
        for (int r = 0; r < m.getRowDimension(); r++) {
            for (int c = 0; c < m.getColumnDimension(); c++) {
                d[r][c] = r <= c - k ? m.getEntry(r, c) : 0;
            }
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
     * @param m Input matrix.
     * @return Row matrix representing the sums of the rows.
     */
    private static RealMatrix sumRows(final RealMatrix m) {
        final double[][] d = new double[1][m.getColumnDimension()];
        for (int c = 0; c < m.getColumnDimension(); c++) {
            double sum = 0;
            for (int r = 0; r < m.getRowDimension(); r++) {
                sum += m.getEntry(r, c);
            }
            d[0][c] = sum;
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
     * @param m Input matrix.
     * @return the diagonal n-by-n matrix if m is a column matrix or the column
     * matrix representing the diagonal if m is a n-by-n matrix.
     */
    private static RealMatrix diag(final RealMatrix m) {
        if (m.getColumnDimension() == 1) {
            final double[][] d = new double[m.getRowDimension()][m.getRowDimension()];
            for (int i = 0; i < m.getRowDimension(); i++) {
                d[i][i] = m.getEntry(i, 0);
            }
            return new Array2DRowRealMatrix(d, false);
        } else {
            final double[][] d = new double[m.getRowDimension()][1];
            for (int i = 0; i < m.getColumnDimension(); i++) {
                d[i][0] = m.getEntry(i, i);
            }
            return new Array2DRowRealMatrix(d, false);
        }
    }

    /**
     * Copies a column from m1 to m2.
     *
     * @param m1 Source matrix.
     * @param col1 Source column.
     * @param m2 Target matrix.
     * @param col2 Target column.
     */
    private static void copyColumn(final RealMatrix m1, int col1,
                                   RealMatrix m2, int col2) {
        for (int i = 0; i < m1.getRowDimension(); i++) {
            m2.setEntry(i, col2, m1.getEntry(i, col1));
        }
    }

    /**
     * @param n Number of rows.
     * @param m Number of columns.
     * @return n-by-m matrix filled with 1.
     */
    private static RealMatrix ones(int n, int m) {
        final double[][] d = new double[n][m];
        for (int r = 0; r < n; r++) {
            Arrays.fill(d[r], 1);
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
     * @param n Number of rows.
     * @param m Number of columns.
     * @return n-by-m matrix of 0 values out of diagonal, and 1 values on
     * the diagonal.
     */
    private static RealMatrix eye(int n, int m) {
        final double[][] d = new double[n][m];
        for (int r = 0; r < n; r++) {
            if (r < m) {
                d[r][r] = 1;
            }
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
     * @param n Number of rows.
     * @param m Number of columns.
     * @return n-by-m matrix of zero values.
     */
    private static RealMatrix zeros(int n, int m) {
        return new Array2DRowRealMatrix(n, m);
    }

    /**
     * @param mat Input matrix.
     * @param n Number of row replicates.
     * @param m Number of column replicates.
     * @return a matrix which replicates the input matrix in both directions.
     */
    private static RealMatrix repmat(final RealMatrix mat, int n, int m) {
        final int rd = mat.getRowDimension();
        final int cd = mat.getColumnDimension();
        final double[][] d = new double[n * rd][m * cd];
        for (int r = 0; r < n * rd; r++) {
            for (int c = 0; c < m * cd; c++) {
                d[r][c] = mat.getEntry(r % rd, c % cd);
            }
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
     * @param start Start value.
     * @param end End value.
     * @param step Step size.
     * @return a sequence as column matrix.
     */
    private static RealMatrix sequence(double start, double end, double step) {
        final int size = (int) ((end - start) / step + 1);
        final double[][] d = new double[size][1];
        double value = start;
        for (int r = 0; r < size; r++) {
            d[r][0] = value;
            value += step;
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
     * @param m Input matrix.
     * @return the maximum of the matrix element values.
     */
    private static double max(final RealMatrix m) {
        double max = -Double.MAX_VALUE;
        for (int r = 0; r < m.getRowDimension(); r++) {
            for (int c = 0; c < m.getColumnDimension(); c++) {
                double e = m.getEntry(r, c);
                if (max < e) {
                    max = e;
                }
            }
        }
        return max;
    }

    /**
     * @param m Input matrix.
     * @return the minimum of the matrix element values.
     */
    private static double min(final RealMatrix m) {
        double min = Double.MAX_VALUE;
        for (int r = 0; r < m.getRowDimension(); r++) {
            for (int c = 0; c < m.getColumnDimension(); c++) {
                double e = m.getEntry(r, c);
                if (min > e) {
                    min = e;
                }
            }
        }
        return min;
    }

    /**
     * @param m Input array.
     * @return the maximum of the array values.
     */
    private static double max(final double[] m) {
        double max = -Double.MAX_VALUE;
        for (int r = 0; r < m.length; r++) {
            if (max < m[r]) {
                max = m[r];
            }
        }
        return max;
    }

    /**
     * @param m Input array.
     * @return the minimum of the array values.
     */
    private static double min(final double[] m) {
        double min = Double.MAX_VALUE;
        for (int r = 0; r < m.length; r++) {
            if (min > m[r]) {
                min = m[r];
            }
        }
        return min;
    }

    /**
     * @param indices Input index array.
     * @return the inverse of the mapping defined by indices.
     */
    private static int[] inverse(final int[] indices) {
        final int[] inverse = new int[indices.length];
        for (int i = 0; i < indices.length; i++) {
            inverse[indices[i]] = i;
        }
        return inverse;
    }

    /**
     * @param indices Input index array.
     * @return the indices in inverse order (last is first).
     */
    private static int[] reverse(final int[] indices) {
        final int[] reverse = new int[indices.length];
        for (int i = 0; i < indices.length; i++) {
            reverse[i] = indices[indices.length - i - 1];
        }
        return reverse;
    }

    /**
     * @param size Length of random array.
     * @return an array of Gaussian random numbers.
     */
    private double[] randn(int size) {
        final double[] randn = new double[size];
        for (int i = 0; i < size; i++) {
            randn[i] = random.nextGaussian();
        }
        return randn;
    }

    /**
     * @param size Number of rows.
     * @param popSize Population size.
     * @return a 2-dimensional matrix of Gaussian random numbers.
     */
    private RealMatrix randn1(int size, int popSize) {
        final double[][] d = new double[size][popSize];
        for (int r = 0; r < size; r++) {
            for (int c = 0; c < popSize; c++) {
                d[r][c] = random.nextGaussian();
            }
        }
        return new Array2DRowRealMatrix(d, false);
    }
}
1355 }