/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.commons.math3.optim.nonlinear.scalar.noderiv;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

import org.apache.commons.math3.exception.DimensionMismatchException;
import org.apache.commons.math3.exception.NotPositiveException;
import org.apache.commons.math3.exception.NotStrictlyPositiveException;
import org.apache.commons.math3.exception.OutOfRangeException;
import org.apache.commons.math3.exception.TooManyEvaluationsException;
import org.apache.commons.math3.linear.Array2DRowRealMatrix;
import org.apache.commons.math3.linear.EigenDecomposition;
import org.apache.commons.math3.linear.MatrixUtils;
import org.apache.commons.math3.linear.RealMatrix;
import org.apache.commons.math3.optim.ConvergenceChecker;
import org.apache.commons.math3.optim.OptimizationData;
import org.apache.commons.math3.optim.PointValuePair;
import org.apache.commons.math3.optim.nonlinear.scalar.GoalType;
import org.apache.commons.math3.optim.nonlinear.scalar.MultivariateOptimizer;
import org.apache.commons.math3.random.RandomGenerator;
import org.apache.commons.math3.util.FastMath;
import org.apache.commons.math3.util.MathArrays;

/**
 * An implementation of the active Covariance Matrix Adaptation Evolution Strategy (CMA-ES)
 * for non-linear, non-convex, non-smooth, global function minimization.
 * <p>
 * The CMA-Evolution Strategy (CMA-ES) is a reliable stochastic optimization method
 * which should be applied if derivative-based methods, e.g. quasi-Newton BFGS or
 * conjugate gradient, fail due to a rugged search landscape (e.g. noise, local
 * optima, outliers, etc.) of the objective function. Like a
 * quasi-Newton method, the CMA-ES learns and applies a variable metric
 * on the underlying search space. Unlike a quasi-Newton method, the
 * CMA-ES neither estimates nor uses gradients, making it considerably more
 * reliable in terms of finding a good, or even close to optimal, solution.
 * <p>
 * In general, on smooth objective functions the CMA-ES is roughly ten times
 * slower than BFGS (counting objective function evaluations, no gradients provided).
 * For up to <math>N=10</math> variables, the derivative-free simplex
 * direct search method (Nelder and Mead) can also be faster, but it is
 * far less reliable than CMA-ES.
 * <p>
 * The CMA-ES is particularly well suited for non-separable
 * and/or badly conditioned problems. To observe the advantage of CMA compared
 * to a conventional evolution strategy, it will usually take about
 * <math>30 N</math> function evaluations. On difficult problems the complete
 * optimization (a single run) is expected to take <em>roughly</em> between
 * <math>30 N</math> and <math>300 N<sup>2</sup></math>
 * function evaluations.
 * <p>
 * This implementation is translated and adapted from the Matlab version
 * of the CMA-ES algorithm as implemented in module {@code cmaes.m} version 3.51.
 * <p>
 * For more information, please refer to the following links:
 * <ul>
 *  <li><a href="http://www.lri.fr/~hansen/cmaes.m">Matlab code</a></li>
 *  <li><a href="http://www.lri.fr/~hansen/cmaesintro.html">Introduction to CMA-ES</a></li>
 *  <li><a href="http://en.wikipedia.org/wiki/CMA-ES">Wikipedia</a></li>
 * </ul>
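 * <p>
 * A minimal usage sketch is shown below; the objective function, bounds and
 * parameter values are illustrative assumptions only, not tuned
 * recommendations:
 * <pre>{@code
 * CMAESOptimizer optimizer
 *     = new CMAESOptimizer(30000, 0, true, 10, 0,
 *                          new MersenneTwister(), false, null);
 * PointValuePair result
 *     = optimizer.optimize(new MaxEval(100000),
 *                          new ObjectiveFunction(new MultivariateFunction() {
 *                                  public double value(double[] x) {
 *                                      return x[0] * x[0] + x[1] * x[1];
 *                                  }
 *                              }),
 *                          GoalType.MINIMIZE,
 *                          new InitialGuess(new double[] { 1, 1 }),
 *                          new SimpleBounds(new double[] { -10, -10 },
 *                                           new double[] { 10, 10 }),
 *                          new CMAESOptimizer.Sigma(new double[] { 0.5, 0.5 }),
 *                          new CMAESOptimizer.PopulationSize(10));
 * }</pre>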
 *
 * @since 3.0
 */
public class CMAESOptimizer
    extends MultivariateOptimizer {
    // global search parameters
    /**
     * Population size, offspring number. The primary strategy parameter to play
     * with, which can be increased from its default value. Increasing the
     * population size improves global search properties at the expense of speed,
     * which, as a rule, decreases at most linearly with increasing population
     * size. It is advisable to begin with the default small population size.
     */
    private int lambda; // population size
    /**
     * Covariance update mechanism, default is active CMA. isActiveCMA = true
     * turns on "active CMA" with a negative update of the covariance matrix and
     * checks for positive definiteness. The Matlab option OPTS.CMA.active = 2
     * does not check for positive definiteness and is numerically faster.
     * Active CMA usually speeds up the adaptation.
     */
    private final boolean isActiveCMA;
    /**
     * Determines how often a new random offspring is generated in case it is
     * not feasible (i.e. beyond the defined limits); default is 0.
     */
    private final int checkFeasableCount;
    /**
     * @see Sigma
     */
    private double[] inputSigma;
    /** Number of objective variables/problem dimension. */
    private int dimension;
    /**
     * Defines the number of initial iterations, where the covariance matrix
     * remains diagonal and the algorithm has internally linear time complexity.
     * diagonalOnly = 1 means keeping the covariance matrix always diagonal and
     * this setting also exhibits linear space complexity. This can be
     * particularly useful for dimension > 100.
     * @see <a href="http://hal.archives-ouvertes.fr/inria-00287367/en">A Simple Modification in CMA-ES</a>
     */
    private int diagonalOnly;
    /** Indicates whether the goal is to minimize (rather than maximize) the objective. */
    private boolean isMinimize = true;
    /** Indicates whether statistic data is collected. */
    private final boolean generateStatistics;

    // termination criteria
    /** Maximal number of iterations allowed. */
    private final int maxIterations;
    /** Limit for fitness value. */
    private final double stopFitness;
    /** Stop if x-change is larger than stopTolUpX. */
    private double stopTolUpX;
    /** Stop if x-change is smaller than stopTolX. */
    private double stopTolX;
    /** Stop if fitness change is smaller than stopTolFun. */
    private double stopTolFun;
    /** Stop if the range of the recorded best fitness values is smaller than stopTolHistFun. */
    private double stopTolHistFun;

    // selection strategy parameters
    /** Number of parents/points for recombination. */
    private int mu;
    /** log(mu + 0.5), stored for efficiency. */
    private double logMu2;
    /** Array for weighted recombination. */
    private RealMatrix weights;
    /** Variance-effectiveness of sum w_i x_i. */
    private double mueff;

    // dynamic strategy parameters and constants
    /** Overall standard deviation - search volume. */
    private double sigma;
    /** Cumulation constant. */
    private double cc;
    /** Cumulation constant for step-size. */
    private double cs;
    /** Damping for step-size. */
    private double damps;
    /** Learning rate for rank-one update. */
    private double ccov1;
    /** Learning rate for rank-mu update. */
    private double ccovmu;
    /** Expectation of ||N(0,I)|| == norm(randn(N,1)). */
    private double chiN;
    /** Learning rate for rank-one update - diagonalOnly. */
    private double ccov1Sep;
    /** Learning rate for rank-mu update - diagonalOnly. */
    private double ccovmuSep;

    // CMA internal values - updated each generation
    /** Objective variables. */
    private RealMatrix xmean;
    /** Evolution path. */
    private RealMatrix pc;
    /** Evolution path for sigma. */
    private RealMatrix ps;
    /** Norm of ps, stored for efficiency. */
    private double normps;
    /** Coordinate system. */
    private RealMatrix B;
    /** Scaling. */
    private RealMatrix D;
    /** B*D, stored for efficiency. */
    private RealMatrix BD;
    /** Diagonal of sqrt(D), stored for efficiency. */
    private RealMatrix diagD;
    /** Covariance matrix. */
    private RealMatrix C;
    /** Diagonal of C, used for diagonalOnly. */
    private RealMatrix diagC;
    /** Number of iterations already performed. */
    private int iterations;

    /** History queue of best values. */
    private double[] fitnessHistory;
    /** Size of history queue of best values. */
    private int historySize;

    /** Random generator. */
    private final RandomGenerator random;

    /** History of sigma values. */
    private final List<Double> statisticsSigmaHistory = new ArrayList<Double>();
    /** History of mean matrix. */
    private final List<RealMatrix> statisticsMeanHistory = new ArrayList<RealMatrix>();
    /** History of fitness values. */
    private final List<Double> statisticsFitnessHistory = new ArrayList<Double>();
    /** History of D matrix. */
    private final List<RealMatrix> statisticsDHistory = new ArrayList<RealMatrix>();

    /**
     * @param maxIterations Maximal number of iterations.
     * @param stopFitness Stop if the objective function value is smaller than
     * {@code stopFitness}.
     * @param isActiveCMA Chooses the covariance matrix update method.
     * @param diagonalOnly Number of initial iterations, where the covariance matrix
     * remains diagonal.
     * @param checkFeasableCount Determines how often new random objective variables are
     * generated in case they are out of bounds.
     * @param random Random generator.
     * @param generateStatistics Whether statistic data is collected.
     * @param checker Convergence checker.
     *
     * @since 3.1
     */
    public CMAESOptimizer(int maxIterations,
                          double stopFitness,
                          boolean isActiveCMA,
                          int diagonalOnly,
                          int checkFeasableCount,
                          RandomGenerator random,
                          boolean generateStatistics,
                          ConvergenceChecker<PointValuePair> checker) {
        super(checker);
        this.maxIterations = maxIterations;
        this.stopFitness = stopFitness;
        this.isActiveCMA = isActiveCMA;
        this.diagonalOnly = diagonalOnly;
        this.checkFeasableCount = checkFeasableCount;
        this.random = random;
        this.generateStatistics = generateStatistics;
    }

    /**
     * @return History of sigma values.
     */
    public List<Double> getStatisticsSigmaHistory() {
        return statisticsSigmaHistory;
    }

    /**
     * @return History of mean matrix.
     */
    public List<RealMatrix> getStatisticsMeanHistory() {
        return statisticsMeanHistory;
    }

    /**
     * @return History of fitness values.
     */
    public List<Double> getStatisticsFitnessHistory() {
        return statisticsFitnessHistory;
    }

    /**
     * @return History of D matrix.
     */
    public List<RealMatrix> getStatisticsDHistory() {
        return statisticsDHistory;
    }

    /**
     * Input sigma values.
     * They define the initial coordinate-wise standard deviations for
     * sampling new search points around the initial guess.
     * It is suggested to set them to the estimated distance from the
     * initial to the desired optimum.
     * Small values induce the search to be more local (and very small
     * values are more likely to find a local optimum close to the initial
     * guess).
     * Too small values might however lead to early termination.
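     * <p>
     * For instance (an illustrative sketch, assuming a 2-dimensional
     * problem where the optimum is believed to lie within about one unit
     * of the initial guess):
     * <pre>{@code
     * OptimizationData sigma = new CMAESOptimizer.Sigma(new double[] { 1.0, 1.0 });
     * }</pre>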
     */
    public static class Sigma implements OptimizationData {
        /** Sigma values. */
        private final double[] sigma;

        /**
         * @param s Sigma values.
         * @throws NotPositiveException if any of the array entries is smaller
         * than zero.
         */
        public Sigma(double[] s)
            throws NotPositiveException {
            for (int i = 0; i < s.length; i++) {
                if (s[i] < 0) {
                    throw new NotPositiveException(s[i]);
                }
            }

            sigma = s.clone();
        }

        /**
         * @return the sigma values.
         */
        public double[] getSigma() {
            return sigma.clone();
        }
    }

    /**
     * Population size.
     * The number of offspring is the primary strategy parameter.
     * In the absence of better clues, a good default could be an
     * integer close to {@code 4 + 3 ln(n)}, where {@code n} is the
     * number of optimized parameters.
     * Increasing the population size improves global search properties
     * at the expense of speed (which in general decreases at most
     * linearly with increasing population size).
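     * <p>
     * As a sketch, the default suggested above can be computed as follows
     * (assuming {@code n} holds the number of optimized parameters):
     * <pre>{@code
     * int lambda = 4 + (int) (3 * FastMath.log(n));
     * OptimizationData popSize = new CMAESOptimizer.PopulationSize(lambda);
     * }</pre>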
     */
    public static class PopulationSize implements OptimizationData {
        /** Population size. */
        private final int lambda;

        /**
         * @param size Population size.
         * @throws NotStrictlyPositiveException if {@code size <= 0}.
         */
        public PopulationSize(int size)
            throws NotStrictlyPositiveException {
            if (size <= 0) {
                throw new NotStrictlyPositiveException(size);
            }
            lambda = size;
        }

        /**
         * @return the population size.
         */
        public int getPopulationSize() {
            return lambda;
        }
    }

    /**
     * {@inheritDoc}
     *
     * @param optData Optimization data. In addition to those documented in
     * {@link MultivariateOptimizer#parseOptimizationData(OptimizationData[])
     * MultivariateOptimizer}, this method will register the following data:
     * <ul>
     *  <li>{@link Sigma}</li>
     *  <li>{@link PopulationSize}</li>
     * </ul>
     * @return {@inheritDoc}
     * @throws TooManyEvaluationsException if the maximal number of
     * evaluations is exceeded.
     * @throws DimensionMismatchException if the initial guess, target, and weight
     * arguments have inconsistent dimensions.
     */
    @Override
    public PointValuePair optimize(OptimizationData... optData)
        throws TooManyEvaluationsException,
               DimensionMismatchException {
        // Set up base class and perform computation.
        return super.optimize(optData);
    }

    /** {@inheritDoc} */
    @Override
    protected PointValuePair doOptimize() {
        // -------------------- Initialization --------------------------------
        isMinimize = getGoalType().equals(GoalType.MINIMIZE);
        final FitnessFunction fitfun = new FitnessFunction();
        final double[] guess = getStartPoint();
        // number of objective variables/problem dimension
        dimension = guess.length;
        initializeCMA(guess);
        iterations = 0;
        ValuePenaltyPair valuePenalty = fitfun.value(guess);
        double bestValue = valuePenalty.value + valuePenalty.penalty;
        push(fitnessHistory, bestValue);
        PointValuePair optimum
            = new PointValuePair(getStartPoint(),
                                 isMinimize ? bestValue : -bestValue);
        PointValuePair lastResult = null;

        // -------------------- Generation Loop --------------------------------

        generationLoop:
        for (iterations = 1; iterations <= maxIterations; iterations++) {
            incrementIterationCount();

            // Generate and evaluate lambda offspring
            final RealMatrix arz = randn1(dimension, lambda);
            final RealMatrix arx = zeros(dimension, lambda);
            final double[] fitness = new double[lambda];
            final ValuePenaltyPair[] valuePenaltyPairs = new ValuePenaltyPair[lambda];
            // generate random offspring
            for (int k = 0; k < lambda; k++) {
                RealMatrix arxk = null;
                for (int i = 0; i < checkFeasableCount + 1; i++) {
                    if (diagonalOnly <= 0) {
                        arxk = xmean.add(BD.multiply(arz.getColumnMatrix(k))
                                         .scalarMultiply(sigma)); // m + sig * Normal(0,C)
                    } else {
                        arxk = xmean.add(times(diagD, arz.getColumnMatrix(k))
                                         .scalarMultiply(sigma));
                    }
                    if (i >= checkFeasableCount ||
                        fitfun.isFeasible(arxk.getColumn(0))) {
                        break;
                    }
                    // regenerate random arguments for row
                    arz.setColumn(k, randn(dimension));
                }
                copyColumn(arxk, 0, arx, k);
                try {
                    valuePenaltyPairs[k] = fitfun.value(arx.getColumn(k)); // compute fitness
                } catch (TooManyEvaluationsException e) {
                    break generationLoop;
                }
            }

            // Compute fitnesses by adding value and penalty after scaling by value range.
            double valueRange = valueRange(valuePenaltyPairs);
            for (int iValue = 0; iValue < valuePenaltyPairs.length; iValue++) {
                fitness[iValue] = valuePenaltyPairs[iValue].value + valuePenaltyPairs[iValue].penalty * valueRange;
            }

            // Sort by fitness and compute weighted mean into xmean
            final int[] arindex = sortedIndices(fitness);
            // Calculate new xmean, this is selection and recombination
            final RealMatrix xold = xmean; // for speed up of Eq. (2) and (3)
            final RealMatrix bestArx = selectColumns(arx, MathArrays.copyOf(arindex, mu));
            xmean = bestArx.multiply(weights);
            final RealMatrix bestArz = selectColumns(arz, MathArrays.copyOf(arindex, mu));
            final RealMatrix zmean = bestArz.multiply(weights);
            final boolean hsig = updateEvolutionPaths(zmean, xold);
            if (diagonalOnly <= 0) {
                updateCovariance(hsig, bestArx, arz, arindex, xold);
            } else {
                updateCovarianceDiagonalOnly(hsig, bestArz);
            }
            // Adapt step size sigma - Eq. (5)
            sigma *= FastMath.exp(FastMath.min(1, (normps / chiN - 1) * cs / damps));
            final double bestFitness = fitness[arindex[0]];
            final double worstFitness = fitness[arindex[arindex.length - 1]];
            if (bestValue > bestFitness) {
                bestValue = bestFitness;
                lastResult = optimum;
                optimum = new PointValuePair(fitfun.repair(bestArx.getColumn(0)),
                                             isMinimize ? bestFitness : -bestFitness);
                if (getConvergenceChecker() != null && lastResult != null &&
                    getConvergenceChecker().converged(iterations, optimum, lastResult)) {
                    break generationLoop;
                }
            }
            // handle termination criteria
            // Break, if fitness is good enough
            if (stopFitness != 0 && bestFitness < (isMinimize ? stopFitness : -stopFitness)) {
                break generationLoop;
            }
            // Break, if the step size is too small in every coordinate direction
            final double[] sqrtDiagC = sqrt(diagC).getColumn(0);
            final double[] pcCol = pc.getColumn(0);
            for (int i = 0; i < dimension; i++) {
                if (sigma * FastMath.max(FastMath.abs(pcCol[i]), sqrtDiagC[i]) > stopTolX) {
                    break;
                }
                if (i >= dimension - 1) {
                    break generationLoop;
                }
            }
            // Break, if the step size has grown too large in some direction
            for (int i = 0; i < dimension; i++) {
                if (sigma * sqrtDiagC[i] > stopTolUpX) {
                    break generationLoop;
                }
            }
            final double historyBest = min(fitnessHistory);
            final double historyWorst = max(fitnessHistory);
            if (iterations > 2 &&
                FastMath.max(historyWorst, worstFitness) -
                FastMath.min(historyBest, bestFitness) < stopTolFun) {
                break generationLoop;
            }
            if (iterations > fitnessHistory.length &&
                historyWorst - historyBest < stopTolHistFun) {
                break generationLoop;
            }
            // Break, if the condition number of the covariance matrix exceeds 1e14
            // (diagD holds standard deviations, hence the 1e7 threshold on their ratio).
            if (max(diagD) / min(diagD) > 1e7) {
                break generationLoop;
            }
            // user defined termination
            if (getConvergenceChecker() != null) {
                final PointValuePair current
                    = new PointValuePair(bestArx.getColumn(0),
                                         isMinimize ? bestFitness : -bestFitness);
                if (lastResult != null &&
                    getConvergenceChecker().converged(iterations, current, lastResult)) {
                    break generationLoop;
                }
                lastResult = current;
            }
            // Adjust step size in case of equal function values (flat fitness)
            if (bestValue == fitness[arindex[(int) (0.1 + lambda / 4.)]]) {
                sigma *= FastMath.exp(0.2 + cs / damps);
            }
            if (iterations > 2 && FastMath.max(historyWorst, bestFitness) -
                FastMath.min(historyBest, bestFitness) == 0) {
                sigma *= FastMath.exp(0.2 + cs / damps);
            }
            // store best in history
            push(fitnessHistory, bestFitness);
            if (generateStatistics) {
                statisticsSigmaHistory.add(sigma);
                statisticsFitnessHistory.add(bestFitness);
                statisticsMeanHistory.add(xmean.transpose());
                statisticsDHistory.add(diagD.transpose().scalarMultiply(1E5));
            }
        }
        return optimum;
    }

    /**
     * Scans the list of (required and optional) optimization data that
     * characterize the problem.
     *
     * @param optData Optimization data. The following data will be looked for:
     * <ul>
     *  <li>{@link Sigma}</li>
     *  <li>{@link PopulationSize}</li>
     * </ul>
     */
    @Override
    protected void parseOptimizationData(OptimizationData... optData) {
        // Allow base class to register its own data.
        super.parseOptimizationData(optData);

        // The existing values (as set by the previous call) are reused if
        // not provided in the argument list.
        for (OptimizationData data : optData) {
            if (data instanceof Sigma) {
                inputSigma = ((Sigma) data).getSigma();
                continue;
            }
            if (data instanceof PopulationSize) {
                lambda = ((PopulationSize) data).getPopulationSize();
                continue;
            }
        }

        checkParameters();
    }

    /**
     * Checks dimensions and values of boundaries and inputSigma if defined.
     */
    private void checkParameters() {
        final double[] init = getStartPoint();
        final double[] lB = getLowerBound();
        final double[] uB = getUpperBound();

        if (inputSigma != null) {
            if (inputSigma.length != init.length) {
                throw new DimensionMismatchException(inputSigma.length, init.length);
            }
            for (int i = 0; i < init.length; i++) {
                if (inputSigma[i] > uB[i] - lB[i]) {
                    throw new OutOfRangeException(inputSigma[i], 0, uB[i] - lB[i]);
                }
            }
        }
    }

    /**
     * Initialization of the dynamic search parameters.
     *
     * @param guess Initial guess for the arguments of the fitness function.
     */
    private void initializeCMA(double[] guess) {
        if (lambda <= 0) {
            throw new NotStrictlyPositiveException(lambda);
        }
        // initialize sigma
        final double[][] sigmaArray = new double[guess.length][1];
        for (int i = 0; i < guess.length; i++) {
            sigmaArray[i][0] = inputSigma[i];
        }
        final RealMatrix insigma = new Array2DRowRealMatrix(sigmaArray, false);
        sigma = max(insigma); // overall standard deviation

        // initialize termination criteria
        stopTolUpX = 1e3 * max(insigma);
        stopTolX = 1e-11 * max(insigma);
        stopTolFun = 1e-12;
        stopTolHistFun = 1e-13;

        // initialize selection strategy parameters
        mu = lambda / 2; // number of parents/points for recombination
        logMu2 = FastMath.log(mu + 0.5);
        weights = log(sequence(1, mu, 1)).scalarMultiply(-1).scalarAdd(logMu2);
        double sumw = 0;
        double sumwq = 0;
        for (int i = 0; i < mu; i++) {
            double w = weights.getEntry(i, 0);
            sumw += w;
            sumwq += w * w;
        }
        weights = weights.scalarMultiply(1 / sumw);
        mueff = sumw * sumw / sumwq; // variance-effectiveness of sum w_i x_i

        // initialize dynamic strategy parameters and constants
        cc = (4 + mueff / dimension) /
                (dimension + 4 + 2 * mueff / dimension);
        cs = (mueff + 2) / (dimension + mueff + 3.);
        damps = (1 + 2 * FastMath.max(0, FastMath.sqrt((mueff - 1) /
                                                       (dimension + 1)) - 1)) *
            FastMath.max(0.3,
                         1 - dimension / (1e-6 + maxIterations)) + cs; // minor increment
        ccov1 = 2 / ((dimension + 1.3) * (dimension + 1.3) + mueff);
        ccovmu = FastMath.min(1 - ccov1, 2 * (mueff - 2 + 1 / mueff) /
                              ((dimension + 2) * (dimension + 2) + mueff));
        ccov1Sep = FastMath.min(1, ccov1 * (dimension + 1.5) / 3);
        ccovmuSep = FastMath.min(1 - ccov1, ccovmu * (dimension + 1.5) / 3);
        chiN = FastMath.sqrt(dimension) *
                (1 - 1 / ((double) 4 * dimension) + 1 / ((double) 21 * dimension * dimension));
        // initialize CMA internal values - updated each generation
        xmean = MatrixUtils.createColumnRealMatrix(guess); // objective variables
        diagD = insigma.scalarMultiply(1 / sigma);
        diagC = square(diagD);
        pc = zeros(dimension, 1); // evolution path for C
        ps = zeros(dimension, 1); // evolution path for sigma
        normps = ps.getFrobeniusNorm();

        B = eye(dimension, dimension); // B defines the coordinate system
        D = ones(dimension, 1); // diagonal D defines the scaling
        BD = times(B, repmat(diagD.transpose(), dimension, 1));
        C = B.multiply(diag(square(D)).multiply(B.transpose())); // covariance
        historySize = 10 + (int) (3 * 10 * dimension / (double) lambda);
        fitnessHistory = new double[historySize]; // history of fitness values
        for (int i = 0; i < historySize; i++) {
            fitnessHistory[i] = Double.MAX_VALUE;
        }
    }

    /**
     * Update of the evolution paths ps and pc.
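     * <p>
     * In pseudo-code, the updates performed by the method body below are:
     * <pre>
     * ps = (1 - cs) * ps + sqrt(cs * (2 - cs) * mueff) * B * zmean
     * pc = (1 - cc) * pc
     *      + hsig * sqrt(cc * (2 - cc) * mueff) / sigma * (xmean - xold)
     * </pre>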
     *
     * @param zmean Weighted row matrix of the Gaussian random numbers generating
     * the current offspring.
     * @param xold xmean matrix of the previous generation.
     * @return hsig flag indicating a small correction.
     */
    private boolean updateEvolutionPaths(RealMatrix zmean, RealMatrix xold) {
        ps = ps.scalarMultiply(1 - cs).add(
                B.multiply(zmean).scalarMultiply(
                        FastMath.sqrt(cs * (2 - cs) * mueff)));
        normps = ps.getFrobeniusNorm();
        final boolean hsig = normps /
            FastMath.sqrt(1 - FastMath.pow(1 - cs, 2 * iterations)) /
            chiN < 1.4 + 2 / ((double) dimension + 1);
        pc = pc.scalarMultiply(1 - cc);
        if (hsig) {
            pc = pc.add(xmean.subtract(xold).scalarMultiply(FastMath.sqrt(cc * (2 - cc) * mueff) / sigma));
        }
        return hsig;
    }

    /**
     * Update of the covariance matrix C for diagonalOnly > 0.
     *
     * @param hsig Flag indicating a small correction.
     * @param bestArz Fitness-sorted matrix of the Gaussian random values of the
     * current offspring.
     */
    private void updateCovarianceDiagonalOnly(boolean hsig,
                                              final RealMatrix bestArz) {
        // minor correction if hsig==false
        double oldFac = hsig ? 0 : ccov1Sep * cc * (2 - cc);
        oldFac += 1 - ccov1Sep - ccovmuSep;
        diagC = diagC.scalarMultiply(oldFac) // regard old matrix
            .add(square(pc).scalarMultiply(ccov1Sep)) // plus rank-one update
            .add((times(diagC, square(bestArz).multiply(weights))) // plus rank-mu update
                 .scalarMultiply(ccovmuSep));
        diagD = sqrt(diagC); // replaces eig(C)
        if (diagonalOnly > 1 &&
            iterations > diagonalOnly) {
            // full covariance matrix from now on
            diagonalOnly = 0;
            B = eye(dimension, dimension);
            BD = diag(diagD);
            C = diag(diagC);
        }
    }

    /**
     * Update of the covariance matrix C.
     *
     * @param hsig Flag indicating a small correction.
     * @param bestArx Fitness-sorted matrix of the argument vectors producing the
     * current offspring.
     * @param arz Unsorted matrix containing the Gaussian random values of the
     * current offspring.
     * @param arindex Indices indicating the fitness-order of the current offspring.
     * @param xold xmean matrix of the previous generation.
     */
    private void updateCovariance(boolean hsig, final RealMatrix bestArx,
                                  final RealMatrix arz, final int[] arindex,
                                  final RealMatrix xold) {
        double negccov = 0;
        if (ccov1 + ccovmu > 0) {
            final RealMatrix arpos = bestArx.subtract(repmat(xold, 1, mu))
                .scalarMultiply(1 / sigma); // mu difference vectors
            final RealMatrix roneu = pc.multiply(pc.transpose())
                .scalarMultiply(ccov1); // rank-one update
            // minor correction if hsig==false
            double oldFac = hsig ? 0 : ccov1 * cc * (2 - cc);
            oldFac += 1 - ccov1 - ccovmu;
            if (isActiveCMA) {
                // Adapt covariance matrix C active CMA
                negccov = (1 - ccovmu) * 0.25 * mueff /
                    (FastMath.pow(dimension + 2, 1.5) + 2 * mueff);
                // keep at least 0.66 in all directions, small population sizes
                // are most critical
                final double negminresidualvariance = 0.66;
                // where to make up for the variance loss
                final double negalphaold = 0.5;
                // prepare vectors, compute negative updating matrix Cneg
                final int[] arReverseIndex = reverse(arindex);
                RealMatrix arzneg = selectColumns(arz, MathArrays.copyOf(arReverseIndex, mu));
                RealMatrix arnorms = sqrt(sumRows(square(arzneg)));
                final int[] idxnorms = sortedIndices(arnorms.getRow(0));
                final RealMatrix arnormsSorted = selectColumns(arnorms, idxnorms);
                final int[] idxReverse = reverse(idxnorms);
                final RealMatrix arnormsReverse = selectColumns(arnorms, idxReverse);
                arnorms = divide(arnormsReverse, arnormsSorted);
                final int[] idxInv = inverse(idxnorms);
                final RealMatrix arnormsInv = selectColumns(arnorms, idxInv);
                // check and set learning rate negccov
                final double negcovMax = (1 - negminresidualvariance) /
                    square(arnormsInv).multiply(weights).getEntry(0, 0);
                if (negccov > negcovMax) {
                    negccov = negcovMax;
                }
                arzneg = times(arzneg, repmat(arnormsInv, dimension, 1));
                final RealMatrix artmp = BD.multiply(arzneg);
                final RealMatrix Cneg = artmp.multiply(diag(weights)).multiply(artmp.transpose());
                oldFac += negalphaold * negccov;
                C = C.scalarMultiply(oldFac) // regard old matrix
                    .add(roneu) // plus rank-one update
                    .add(arpos.scalarMultiply( // plus rank-mu update
                                              ccovmu + (1 - negalphaold) * negccov)
                         .multiply(times(repmat(weights, 1, dimension),
                                         arpos.transpose())))
                    .subtract(Cneg.scalarMultiply(negccov));
            } else {
                // Adapt covariance matrix C - nonactive
                C = C.scalarMultiply(oldFac) // regard old matrix
                    .add(roneu) // plus rank-one update
                    .add(arpos.scalarMultiply(ccovmu) // plus rank-mu update
                         .multiply(times(repmat(weights, 1, dimension),
                                         arpos.transpose())));
            }
        }
        updateBD(negccov);
    }

    /**
     * Update B and D from C.
     *
     * @param negccov Negative covariance factor.
     */
    private void updateBD(double negccov) {
        if (ccov1 + ccovmu + negccov > 0 &&
            (iterations % 1. / (ccov1 + ccovmu + negccov) / dimension / 10.) < 1) {
            // to achieve O(N^2), the eigendecomposition is intended to run
            // only every 1/((ccov1+ccovmu+negccov)*dimension*10) iterations
            // (note: "iterations % 1." is always 0 here, so the decomposition
            // is in fact performed on every call)
            C = triu(C, 0).add(triu(C, 1).transpose());
            // enforce symmetry to prevent complex numbers
            final EigenDecomposition eig = new EigenDecomposition(C);
            B = eig.getV(); // eigen decomposition, B==normalized eigenvectors
            D = eig.getD();
            diagD = diag(D);
            if (min(diagD) <= 0) {
                for (int i = 0; i < dimension; i++) {
                    if (diagD.getEntry(i, 0) < 0) {
                        diagD.setEntry(i, 0, 0);
                    }
                }
                final double tfac = max(diagD) / 1e14;
                C = C.add(eye(dimension, dimension).scalarMultiply(tfac));
                diagD = diagD.add(ones(dimension, 1).scalarMultiply(tfac));
            }
            if (max(diagD) > 1e14 * min(diagD)) {
                final double tfac = max(diagD) / 1e14 - min(diagD);
                C = C.add(eye(dimension, dimension).scalarMultiply(tfac));
                diagD = diagD.add(ones(dimension, 1).scalarMultiply(tfac));
            }
            diagC = diag(C);
            diagD = sqrt(diagD); // D contains standard deviations now
            BD = times(B, repmat(diagD.transpose(), dimension, 1)); // O(n^2)
        }
    }

    /**
     * Pushes the current best fitness value in a history queue.
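     * For example, pushing {@code 5} onto {@code [3, 2, 1]} yields
     * {@code [5, 3, 2]}: all values shift one slot towards the end, the
     * oldest value is discarded and the new value is stored at index 0.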
     *
     * @param vals History queue.
     * @param val Current best fitness value.
     */
    private static void push(double[] vals, double val) {
        for (int i = vals.length - 1; i > 0; i--) {
            vals[i] = vals[i - 1];
        }
        vals[0] = val;
    }

    /**
     * Sorts fitness values.
     *
     * @param doubles Array of values to be sorted.
     * @return a sorted array of indices pointing into doubles.
     */
    private int[] sortedIndices(final double[] doubles) {
        final DoubleIndex[] dis = new DoubleIndex[doubles.length];
        for (int i = 0; i < doubles.length; i++) {
            dis[i] = new DoubleIndex(doubles[i], i);
        }
        Arrays.sort(dis);
        final int[] indices = new int[doubles.length];
        for (int i = 0; i < doubles.length; i++) {
            indices[i] = dis[i].index;
        }
        return indices;
    }

    /**
     * Get range of values.
     *
     * @param vpPairs Array of valuePenaltyPairs to get range from.
     * @return a double equal to maximum value minus minimum value.
     */
    private double valueRange(final ValuePenaltyPair[] vpPairs) {
        double max = Double.NEGATIVE_INFINITY;
        double min = Double.MAX_VALUE;
        for (ValuePenaltyPair vpPair : vpPairs) {
            if (vpPair.value > max) {
                max = vpPair.value;
            }
            if (vpPair.value < min) {
                min = vpPair.value;
            }
        }
        return max - min;
    }

    /**
     * Used to sort fitness values. Sorting is always in lower value first
     * order.
     */
    private static class DoubleIndex implements Comparable<DoubleIndex> {
        /** Value to compare. */
        private final double value;
        /** Index into sorted array. */
        private final int index;

        /**
         * @param value Value to compare.
         * @param index Index into sorted array.
         */
        DoubleIndex(double value, int index) {
            this.value = value;
            this.index = index;
        }

        /** {@inheritDoc} */
        public int compareTo(DoubleIndex o) {
            return Double.compare(value, o.value);
        }

        /** {@inheritDoc} */
        @Override
        public boolean equals(Object other) {
            if (this == other) {
                return true;
            }

            if (other instanceof DoubleIndex) {
                return Double.compare(value, ((DoubleIndex) other).value) == 0;
            }

            return false;
        }

        /** {@inheritDoc} */
        @Override
        public int hashCode() {
            long bits = Double.doubleToLongBits(value);
            return (int) ((1438542 ^ (bits >>> 32) ^ bits) & 0xffffffff);
        }
    }

    /**
     * Stores the value and penalty (for repair of out-of-bounds points).
     */
    private static class ValuePenaltyPair {
        /** Objective function value. */
        private double value;
        /** Penalty value for repair of out-of-bounds points. */
        private double penalty;

        /**
         * @param value Function value.
         * @param penalty Out-of-bounds penalty.
         */
        ValuePenaltyPair(final double value, final double penalty) {
            this.value   = value;
            this.penalty = penalty;
        }
    }

    /**
     * Evaluates the objective function, repairing out-of-bounds points if
     * needed and computing a penalty proportional to the amount by which
     * the bounds were violated.
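     * <p>
     * As an illustrative example: with bounds {@code [0, 1]} in each
     * dimension, the point {@code [1.3, -0.2]} is repaired to
     * {@code [1.0, 0.0]} and the raw penalty is
     * {@code |1.3 - 1.0| + |-0.2 - 0.0| = 0.5}; in the generation loop this
     * penalty is scaled by the value range of the current population before
     * being added to the objective value.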
     */
    private class FitnessFunction {
        /**
         * Flag indicating whether the objective variables are forced into their
         * bounds if defined.
         */
        private final boolean isRepairMode;

        /** Simple constructor. */
        FitnessFunction() {
            isRepairMode = true;
        }

        /**
         * @param point Normalized objective variables.
         * @return the objective value + penalty for violated bounds.
         */
        public ValuePenaltyPair value(final double[] point) {
            double value;
            double penalty = 0.0;
            if (isRepairMode) {
                double[] repaired = repair(point);
                value = CMAESOptimizer.this.computeObjectiveValue(repaired);
                penalty = penalty(point, repaired);
            } else {
                value = CMAESOptimizer.this.computeObjectiveValue(point);
            }
            value = isMinimize ? value : -value;
            penalty = isMinimize ? penalty : -penalty;
            return new ValuePenaltyPair(value, penalty);
        }

        /**
         * @param x Normalized objective variables.
         * @return {@code true} if in bounds.
         */
        public boolean isFeasible(final double[] x) {
            final double[] lB = CMAESOptimizer.this.getLowerBound();
            final double[] uB = CMAESOptimizer.this.getUpperBound();

            for (int i = 0; i < x.length; i++) {
                if (x[i] < lB[i]) {
                    return false;
                }
                if (x[i] > uB[i]) {
                    return false;
                }
            }
            return true;
        }

        /**
         * @param x Normalized objective variables.
         * @return the repaired (i.e. all in bounds) objective variables.
         */
        private double[] repair(final double[] x) {
            final double[] lB = CMAESOptimizer.this.getLowerBound();
            final double[] uB = CMAESOptimizer.this.getUpperBound();

            final double[] repaired = new double[x.length];
            for (int i = 0; i < x.length; i++) {
                if (x[i] < lB[i]) {
                    repaired[i] = lB[i];
                } else if (x[i] > uB[i]) {
                    repaired[i] = uB[i];
                } else {
                    repaired[i] = x[i];
                }
            }
            return repaired;
        }

        /**
         * @param x Normalized objective variables.
         * @param repaired Repaired objective variables.
         * @return Penalty value according to the violation of the bounds.
         */
        private double penalty(final double[] x, final double[] repaired) {
            double penalty = 0;
            for (int i = 0; i < x.length; i++) {
                double diff = FastMath.abs(x[i] - repaired[i]);
                penalty += diff;
            }
            return isMinimize ? penalty : -penalty;
        }
    }

    // -----Matrix utility functions similar to the Matlab built-in functions------

    /**
     * @param m Input matrix.
     * @return Matrix representing the element-wise logarithm of m.
     */
    private static RealMatrix log(final RealMatrix m) {
        final double[][] d = new double[m.getRowDimension()][m.getColumnDimension()];
        for (int r = 0; r < m.getRowDimension(); r++) {
            for (int c = 0; c < m.getColumnDimension(); c++) {
                d[r][c] = FastMath.log(m.getEntry(r, c));
            }
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
     * @param m Input matrix.
     * @return Matrix representing the element-wise square root of m.
     */
    private static RealMatrix sqrt(final RealMatrix m) {
        final double[][] d = new double[m.getRowDimension()][m.getColumnDimension()];
        for (int r = 0; r < m.getRowDimension(); r++) {
            for (int c = 0; c < m.getColumnDimension(); c++) {
                d[r][c] = FastMath.sqrt(m.getEntry(r, c));
            }
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
     * @param m Input matrix.
     * @return Matrix representing the element-wise square of m.
     */
    private static RealMatrix square(final RealMatrix m) {
        final double[][] d = new double[m.getRowDimension()][m.getColumnDimension()];
        for (int r = 0; r < m.getRowDimension(); r++) {
            for (int c = 0; c < m.getColumnDimension(); c++) {
                double e = m.getEntry(r, c);
                d[r][c] = e * e;
            }
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
     * @param m Input matrix 1.
     * @param n Input matrix 2.
     * @return the matrix where the elements of m and n are element-wise multiplied.
     */
    private static RealMatrix times(final RealMatrix m, final RealMatrix n) {
        final double[][] d = new double[m.getRowDimension()][m.getColumnDimension()];
        for (int r = 0; r < m.getRowDimension(); r++) {
            for (int c = 0; c < m.getColumnDimension(); c++) {
                d[r][c] = m.getEntry(r, c) * n.getEntry(r, c);
            }
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
     * @param m Input matrix 1.
     * @param n Input matrix 2.
     * @return Matrix where the elements of m and n are element-wise divided.
     */
    private static RealMatrix divide(final RealMatrix m, final RealMatrix n) {
        final double[][] d = new double[m.getRowDimension()][m.getColumnDimension()];
        for (int r = 0; r < m.getRowDimension(); r++) {
            for (int c = 0; c < m.getColumnDimension(); c++) {
                d[r][c] = m.getEntry(r, c) / n.getEntry(r, c);
            }
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
     * @param m Input matrix.
     * @param cols Columns to select.
     * @return Matrix representing the selected columns.
     */
    private static RealMatrix selectColumns(final RealMatrix m, final int[] cols) {
        final double[][] d = new double[m.getRowDimension()][cols.length];
        for (int r = 0; r < m.getRowDimension(); r++) {
            for (int c = 0; c < cols.length; c++) {
                d[r][c] = m.getEntry(r, cols[c]);
            }
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
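     * Extracts the elements on and above the {@code k}-th diagonal of a
     * matrix; as a worked example of the loop below:
     * <pre>
     * triu([1 2; 3 4], 0) = [1 2; 0 4]
     * triu([1 2; 3 4], 1) = [0 2; 0 0]
     * </pre>
     *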
     * @param m Input matrix.
     * @param k Diagonal position.
     * @return Upper triangular part of matrix.
     */
    private static RealMatrix triu(final RealMatrix m, int k) {
        final double[][] d = new double[m.getRowDimension()][m.getColumnDimension()];
        for (int r = 0; r < m.getRowDimension(); r++) {
            for (int c = 0; c < m.getColumnDimension(); c++) {
                d[r][c] = r <= c - k ? m.getEntry(r, c) : 0;
            }
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
     * @param m Input matrix.
     * @return Row matrix containing, for each column of m, the sum of its
     * entries.
     */
    private static RealMatrix sumRows(final RealMatrix m) {
        final double[][] d = new double[1][m.getColumnDimension()];
        for (int c = 0; c < m.getColumnDimension(); c++) {
            double sum = 0;
            for (int r = 0; r < m.getRowDimension(); r++) {
                sum += m.getEntry(r, c);
            }
            d[0][c] = sum;
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
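     * Mimics Matlab's {@code diag}, in both directions. For example:
     * <pre>
     * diag([1; 2])     = [1 0; 0 2]
     * diag([1 2; 3 4]) = [1; 4]
     * </pre>
     *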
     * @param m Input matrix.
     * @return the diagonal n-by-n matrix if m is a column matrix or the column
     * matrix representing the diagonal if m is an n-by-n matrix.
     */
    private static RealMatrix diag(final RealMatrix m) {
        if (m.getColumnDimension() == 1) {
            final double[][] d = new double[m.getRowDimension()][m.getRowDimension()];
            for (int i = 0; i < m.getRowDimension(); i++) {
                d[i][i] = m.getEntry(i, 0);
            }
            return new Array2DRowRealMatrix(d, false);
        } else {
            final double[][] d = new double[m.getRowDimension()][1];
            for (int i = 0; i < m.getColumnDimension(); i++) {
                d[i][0] = m.getEntry(i, i);
            }
            return new Array2DRowRealMatrix(d, false);
        }
    }

    /**
     * Copies a column from m1 to m2.
     *
     * @param m1 Source matrix.
     * @param col1 Source column.
     * @param m2 Target matrix.
     * @param col2 Target column.
     */
    private static void copyColumn(final RealMatrix m1, int col1,
                                   RealMatrix m2, int col2) {
        for (int i = 0; i < m1.getRowDimension(); i++) {
            m2.setEntry(i, col2, m1.getEntry(i, col1));
        }
    }

    /**
     * @param n Number of rows.
     * @param m Number of columns.
     * @return n-by-m matrix filled with 1.
     */
    private static RealMatrix ones(int n, int m) {
        final double[][] d = new double[n][m];
        for (int r = 0; r < n; r++) {
            Arrays.fill(d[r], 1);
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
     * @param n Number of rows.
     * @param m Number of columns.
     * @return n-by-m matrix of 0 values out of diagonal, and 1 values on
     * the diagonal.
     */
    private static RealMatrix eye(int n, int m) {
        final double[][] d = new double[n][m];
        for (int r = 0; r < n; r++) {
            if (r < m) {
                d[r][r] = 1;
            }
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
     * @param n Number of rows.
     * @param m Number of columns.
     * @return n-by-m matrix of zero values.
     */
    private static RealMatrix zeros(int n, int m) {
        return new Array2DRowRealMatrix(n, m);
    }

    /**
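     * Mimics Matlab's {@code repmat}. For example:
     * <pre>
     * repmat([1 2], 2, 2) = [1 2 1 2; 1 2 1 2]
     * </pre>
     *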
     * @param mat Input matrix.
     * @param n Number of row replicates.
     * @param m Number of column replicates.
     * @return a matrix which replicates the input matrix in both directions.
     */
    private static RealMatrix repmat(final RealMatrix mat, int n, int m) {
        final int rd = mat.getRowDimension();
        final int cd = mat.getColumnDimension();
        final double[][] d = new double[n * rd][m * cd];
        for (int r = 0; r < n * rd; r++) {
            for (int c = 0; c < m * cd; c++) {
                d[r][c] = mat.getEntry(r % rd, c % cd);
            }
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
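     * For example, {@code sequence(1, 3, 1)} is the column matrix
     * {@code [1; 2; 3]}.
     *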
     * @param start Start value.
     * @param end End value.
     * @param step Step size.
     * @return a sequence as column matrix.
     */
    private static RealMatrix sequence(double start, double end, double step) {
        final int size = (int) ((end - start) / step + 1);
        final double[][] d = new double[size][1];
        double value = start;
        for (int r = 0; r < size; r++) {
            d[r][0] = value;
            value += step;
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
     * @param m Input matrix.
     * @return the maximum of the matrix element values.
     */
    private static double max(final RealMatrix m) {
        double max = -Double.MAX_VALUE;
        for (int r = 0; r < m.getRowDimension(); r++) {
            for (int c = 0; c < m.getColumnDimension(); c++) {
                double e = m.getEntry(r, c);
                if (max < e) {
                    max = e;
                }
            }
        }
        return max;
    }

    /**
     * @param m Input matrix.
     * @return the minimum of the matrix element values.
     */
    private static double min(final RealMatrix m) {
        double min = Double.MAX_VALUE;
        for (int r = 0; r < m.getRowDimension(); r++) {
            for (int c = 0; c < m.getColumnDimension(); c++) {
                double e = m.getEntry(r, c);
                if (min > e) {
                    min = e;
                }
            }
        }
        return min;
    }

    /**
     * @param m Input array.
     * @return the maximum of the array values.
     */
    private static double max(final double[] m) {
        double max = -Double.MAX_VALUE;
        for (int r = 0; r < m.length; r++) {
            if (max < m[r]) {
                max = m[r];
            }
        }
        return max;
    }

    /**
     * @param m Input array.
     * @return the minimum of the array values.
     */
    private static double min(final double[] m) {
        double min = Double.MAX_VALUE;
        for (int r = 0; r < m.length; r++) {
            if (min > m[r]) {
                min = m[r];
            }
        }
        return min;
    }

    /**
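     * For example, the inverse of the permutation {@code [1, 2, 0]} is
     * {@code [2, 0, 1]}: since {@code indices} maps position 0 to 1,
     * 1 to 2 and 2 to 0, the inverse maps 0 back to 2, 1 to 0 and 2 to 1.
     *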
     * @param indices Input index array.
     * @return the inverse of the mapping defined by indices.
     */
    private static int[] inverse(final int[] indices) {
        final int[] inverse = new int[indices.length];
        for (int i = 0; i < indices.length; i++) {
            inverse[indices[i]] = i;
        }
        return inverse;
    }

    /**
     * @param indices Input index array.
     * @return the indices in inverse order (last is first).
     */
    private static int[] reverse(final int[] indices) {
        final int[] reverse = new int[indices.length];
        for (int i = 0; i < indices.length; i++) {
            reverse[i] = indices[indices.length - i - 1];
        }
        return reverse;
    }

    /**
     * @param size Length of random array.
     * @return an array of Gaussian random numbers.
     */
    private double[] randn(int size) {
        final double[] randn = new double[size];
        for (int i = 0; i < size; i++) {
            randn[i] = random.nextGaussian();
        }
        return randn;
    }

    /**
     * @param size Number of rows.
     * @param popSize Population size.
     * @return a 2-dimensional matrix of Gaussian random numbers.
     */
    private RealMatrix randn1(int size, int popSize) {
        final double[][] d = new double[size][popSize];
        for (int r = 0; r < size; r++) {
            for (int c = 0; c < popSize; c++) {
                d[r][c] = random.nextGaussian();
            }
        }
        return new Array2DRowRealMatrix(d, false);
    }
}