/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.commons.math4.optim.nonlinear.scalar.noderiv;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

import org.apache.commons.math4.exception.DimensionMismatchException;
import org.apache.commons.math4.exception.NotPositiveException;
import org.apache.commons.math4.exception.NotStrictlyPositiveException;
import org.apache.commons.math4.exception.OutOfRangeException;
import org.apache.commons.math4.exception.TooManyEvaluationsException;
import org.apache.commons.math4.linear.Array2DRowRealMatrix;
import org.apache.commons.math4.linear.EigenDecomposition;
import org.apache.commons.math4.linear.MatrixUtils;
import org.apache.commons.math4.linear.RealMatrix;
import org.apache.commons.math4.optim.ConvergenceChecker;
import org.apache.commons.math4.optim.OptimizationData;
import org.apache.commons.math4.optim.PointValuePair;
import org.apache.commons.math4.optim.nonlinear.scalar.GoalType;
import org.apache.commons.math4.optim.nonlinear.scalar.MultivariateOptimizer;
import org.apache.commons.math4.util.FastMath;
import org.apache.commons.rng.UniformRandomProvider;
import org.apache.commons.statistics.distribution.ContinuousDistribution;
import org.apache.commons.statistics.distribution.NormalDistribution;

/**
 * An implementation of the active Covariance Matrix Adaptation Evolution Strategy (CMA-ES)
 * for non-linear, non-convex, non-smooth, global function minimization.
 * <p>
 * The CMA-Evolution Strategy (CMA-ES) is a reliable stochastic optimization method
 * which should be applied if derivative-based methods, e.g. quasi-Newton BFGS or
 * conjugate gradient, fail due to a rugged search landscape (e.g. noise, local
 * optima, outliers, etc.) of the objective function. Like a
 * quasi-Newton method, the CMA-ES learns and applies a variable metric
 * on the underlying search space. Unlike a quasi-Newton method, the
 * CMA-ES neither estimates nor uses gradients, making it considerably more
 * reliable in terms of finding a good, or even close to optimal, solution.
 * <p>
 * In general, on smooth objective functions the CMA-ES is roughly ten times
 * slower than BFGS (counting objective function evaluations, no gradients provided).
 * For up to <code>N=10</code> variables, the derivative-free simplex
 * direct search method (Nelder and Mead) can also be faster, but it is
 * far less reliable than CMA-ES.
 * <p>
 * The CMA-ES is particularly well suited for non-separable
 * and/or badly conditioned problems. Observing the advantage of CMA over
 * a conventional evolution strategy usually takes about
 * <code>30 N</code> function evaluations. On difficult problems the complete
 * optimization (a single run) is expected to take <em>roughly</em> between
 * <code>30 N</code> and <code>300 N<sup>2</sup></code>
 * function evaluations.
 * <p>
 * This implementation is translated and adapted from the Matlab version
 * of the CMA-ES algorithm as implemented in module {@code cmaes.m} version 3.51.
 * <p>
 * For more information, please refer to the following links:
 * <ul>
 *  <li><a href="http://www.lri.fr/~hansen/cmaes.m">Matlab code</a></li>
 *  <li><a href="http://www.lri.fr/~hansen/cmaesintro.html">Introduction to CMA-ES</a></li>
 *  <li><a href="http://en.wikipedia.org/wiki/CMA-ES">Wikipedia</a></li>
 * </ul>
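 * <p>
 * A minimal usage sketch. The objective function, bounds and all parameter
 * values below are illustrative only; {@code RandomSource} is assumed to come
 * from the {@code commons-rng-simple} module:
 * <pre>{@code
 * // Sphere function; any MultivariateFunction will do.
 * MultivariateFunction fn = x -> x[0] * x[0] + x[1] * x[1];
 * CMAESOptimizer optimizer =
 *     new CMAESOptimizer(30000, 0, true, 0, 0,
 *                        RandomSource.create(RandomSource.MT_64),
 *                        false, null);
 * PointValuePair result =
 *     optimizer.optimize(new MaxEval(100000),
 *                        new ObjectiveFunction(fn),
 *                        GoalType.MINIMIZE,
 *                        new InitialGuess(new double[] { 1, 1 }),
 *                        new SimpleBounds(new double[] { -5, -5 },
 *                                         new double[] { 5, 5 }),
 *                        new CMAESOptimizer.Sigma(new double[] { 0.3, 0.3 }),
 *                        new CMAESOptimizer.PopulationSize(10));
 * }</pre>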
 *
 * @since 3.0
 */
public class CMAESOptimizer
    extends MultivariateOptimizer {
    // global search parameters
    /**
     * Population size, offspring number. The primary strategy parameter to play
     * with, which can be increased from its default value. Increasing the
     * population size improves global search properties at the expense of speed.
     * Speed decreases, as a rule, at most linearly with increasing population
     * size. It is advisable to begin with the default small population size.
     */
    private int lambda; // population size
    /**
     * Covariance update mechanism, default is active CMA. isActiveCMA = true
     * turns on "active CMA" with a negative update of the covariance matrix and
     * checks for positive definiteness. OPTS.CMA.active = 2 does not check for
     * pos. def. and is numerically faster. Active CMA usually speeds up the
     * adaptation.
     */
    private final boolean isActiveCMA;
    /**
     * Determines how often a new random offspring is generated in case it is
     * not feasible / beyond the defined limits, default is 0.
     */
    private final int checkFeasableCount;
    /**
     * @see Sigma
     */
    private double[] inputSigma;
    /** Number of objective variables/problem dimension. */
    private int dimension;
    /**
     * Defines the number of initial iterations, where the covariance matrix
     * remains diagonal and the algorithm has internally linear time complexity.
     * diagonalOnly = 1 means keeping the covariance matrix always diagonal and
     * this setting also exhibits linear space complexity. This can be
     * particularly useful for dimension > 100.
     * @see <a href="http://hal.archives-ouvertes.fr/inria-00287367/en">A Simple Modification in CMA-ES</a>
     */
    private int diagonalOnly;
    /** Indicates whether the goal is minimization (as opposed to maximization). */
    private boolean isMinimize = true;
    /** Indicates whether statistic data is collected. */
    private final boolean generateStatistics;

    // termination criteria
    /** Maximal number of iterations allowed. */
    private final int maxIterations;
    /** Limit for fitness value. */
    private final double stopFitness;
    /** Stop if the x-change is larger than stopTolUpX. */
    private double stopTolUpX;
    /** Stop if the x-change is smaller than stopTolX. */
    private double stopTolX;
    /** Stop if the change in function value is smaller than stopTolFun. */
    private double stopTolFun;
    /** Stop if the range of historic function values is smaller than stopTolHistFun. */
    private double stopTolHistFun;

    // selection strategy parameters
    /** Number of parents/points for recombination. */
    private int mu;
    /** log(mu + 0.5), stored for efficiency. */
    private double logMu2;
    /** Array for weighted recombination. */
    private RealMatrix weights;
    /** Variance-effectiveness of sum w_i x_i. */
    private double mueff;

    // dynamic strategy parameters and constants
    /** Overall standard deviation - search volume. */
    private double sigma;
    /** Cumulation constant. */
    private double cc;
    /** Cumulation constant for step-size. */
    private double cs;
    /** Damping for step-size. */
    private double damps;
    /** Learning rate for rank-one update. */
    private double ccov1;
    /** Learning rate for rank-mu update. */
    private double ccovmu;
    /** Expectation of ||N(0,I)|| == norm(randn(N,1)). */
    private double chiN;
    /** Learning rate for rank-one update - diagonalOnly. */
    private double ccov1Sep;
    /** Learning rate for rank-mu update - diagonalOnly. */
    private double ccovmuSep;

    // CMA internal values - updated each generation
    /** Objective variables. */
    private RealMatrix xmean;
    /** Evolution path. */
    private RealMatrix pc;
    /** Evolution path for sigma. */
    private RealMatrix ps;
    /** Norm of ps, stored for efficiency. */
    private double normps;
    /** Coordinate system. */
    private RealMatrix B;
    /** Scaling. */
    private RealMatrix D;
    /** B*D, stored for efficiency. */
    private RealMatrix BD;
    /** Diagonal of sqrt(D), stored for efficiency. */
    private RealMatrix diagD;
    /** Covariance matrix. */
    private RealMatrix C;
    /** Diagonal of C, used for diagonalOnly. */
    private RealMatrix diagC;
    /** Number of iterations already performed. */
    private int iterations;

    /** History queue of best values. */
    private double[] fitnessHistory;
    /** Size of history queue of best values. */
    private int historySize;

    /** Gaussian sampler. */
    private final ContinuousDistribution.Sampler random;

    /** History of sigma values. */
    private final List<Double> statisticsSigmaHistory = new ArrayList<>();
    /** History of mean matrix. */
    private final List<RealMatrix> statisticsMeanHistory = new ArrayList<>();
    /** History of fitness values. */
    private final List<Double> statisticsFitnessHistory = new ArrayList<>();
    /** History of D matrix. */
    private final List<RealMatrix> statisticsDHistory = new ArrayList<>();

    /**
     * @param maxIterations Maximal number of iterations.
     * @param stopFitness Stop if the objective function value is smaller than
     * {@code stopFitness} (a value of {@code 0} disables this criterion).
     * @param isActiveCMA Chooses the covariance matrix update method.
     * @param diagonalOnly Number of initial iterations, where the covariance matrix
     * remains diagonal.
     * @param checkFeasableCount Determines how often new random objective variables are
     * generated in case they are out of bounds.
     * @param rng Random generator.
     * @param generateStatistics Whether statistic data is collected.
     * @param checker Convergence checker.
     *
     * @since 3.1
     */
    public CMAESOptimizer(int maxIterations,
                          double stopFitness,
                          boolean isActiveCMA,
                          int diagonalOnly,
                          int checkFeasableCount,
                          UniformRandomProvider rng,
                          boolean generateStatistics,
                          ConvergenceChecker<PointValuePair> checker) {
        super(checker);
        this.maxIterations = maxIterations;
        this.stopFitness = stopFitness;
        this.isActiveCMA = isActiveCMA;
        this.diagonalOnly = diagonalOnly;
        this.checkFeasableCount = checkFeasableCount;
        this.random = new NormalDistribution(0, 1).createSampler(rng);
        this.generateStatistics = generateStatistics;
    }

    /**
     * @return History of sigma values.
     */
    public List<Double> getStatisticsSigmaHistory() {
        return statisticsSigmaHistory;
    }

    /**
     * @return History of mean matrix.
     */
    public List<RealMatrix> getStatisticsMeanHistory() {
        return statisticsMeanHistory;
    }

    /**
     * @return History of fitness values.
     */
    public List<Double> getStatisticsFitnessHistory() {
        return statisticsFitnessHistory;
    }

    /**
     * @return History of D matrix.
     */
    public List<RealMatrix> getStatisticsDHistory() {
        return statisticsDHistory;
    }

    /**
     * Input sigma values.
     * They define the initial coordinate-wise standard deviations for
     * sampling new search points around the initial guess.
     * It is suggested to set them to the estimated distance from the
     * initial to the desired optimum.
     * Small values induce a more local search (and very small
     * values are more likely to find a local optimum close to the initial
     * guess).
     * Too small values, however, might lead to early termination.
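     * <p>
     * For example (values are illustrative only):
     * {@code new Sigma(new double[] { 0.3, 0.3 })} for a two-dimensional
     * problem where the optimum is expected within about 0.3 of the guess.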
     */
    public static class Sigma implements OptimizationData {
        /** Sigma values. */
        private final double[] sigma;

        /**
         * @param s Sigma values.
         * @throws NotPositiveException if any of the array entries is smaller
         * than zero.
         */
        public Sigma(double[] s)
            throws NotPositiveException {
            for (int i = 0; i < s.length; i++) {
                if (s[i] < 0) {
                    throw new NotPositiveException(s[i]);
                }
            }

            sigma = s.clone();
        }

        /**
         * @return the sigma values.
         */
        public double[] getSigma() {
            return sigma.clone();
        }
    }

    /**
     * Population size.
     * The number of offspring is the primary strategy parameter.
     * In the absence of better clues, a good default could be an
     * integer close to {@code 4 + 3 ln(n)}, where {@code n} is the
     * number of optimized parameters.
     * Increasing the population size improves global search properties
     * at the expense of speed (which in general decreases at most
     * linearly with increasing population size).
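     * <p>
     * A sketch of that suggested default (illustrative only, for some
     * problem dimension {@code n}):
     * <pre>{@code
     * int lambda = 4 + (int) (3 * FastMath.log(n));
     * }</pre>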
     */
    public static class PopulationSize implements OptimizationData {
        /** Population size. */
        private final int lambda;

        /**
         * @param size Population size.
         * @throws NotStrictlyPositiveException if {@code size <= 0}.
         */
        public PopulationSize(int size)
            throws NotStrictlyPositiveException {
            if (size <= 0) {
                throw new NotStrictlyPositiveException(size);
            }
            lambda = size;
        }

        /**
         * @return the population size.
         */
        public int getPopulationSize() {
            return lambda;
        }
    }

    /**
     * {@inheritDoc}
     *
     * @param optData Optimization data. In addition to those documented in
     * {@link MultivariateOptimizer#parseOptimizationData(OptimizationData[])
     * MultivariateOptimizer}, this method will register the following data:
     * <ul>
     *  <li>{@link Sigma}</li>
     *  <li>{@link PopulationSize}</li>
     * </ul>
     * @return {@inheritDoc}
     * @throws TooManyEvaluationsException if the maximal number of
     * evaluations is exceeded.
     * @throws DimensionMismatchException if the initial guess, target, and weight
     * arguments have inconsistent dimensions.
     */
    @Override
    public PointValuePair optimize(OptimizationData... optData)
        throws TooManyEvaluationsException,
               DimensionMismatchException {
        // Set up base class and perform computation.
        return super.optimize(optData);
    }

    /** {@inheritDoc} */
    @Override
    protected PointValuePair doOptimize() {
        // -------------------- Initialization --------------------------------
        isMinimize = getGoalType().equals(GoalType.MINIMIZE);
        final FitnessFunction fitfun = new FitnessFunction();
        final double[] guess = getStartPoint();
        // number of objective variables/problem dimension
        dimension = guess.length;
        initializeCMA(guess);
        iterations = 0;
        ValuePenaltyPair valuePenalty = fitfun.value(guess);
        double bestValue = valuePenalty.value + valuePenalty.penalty;
        push(fitnessHistory, bestValue);
        PointValuePair optimum
            = new PointValuePair(getStartPoint(),
                                 isMinimize ? bestValue : -bestValue);
        PointValuePair lastResult = null;

        // -------------------- Generation Loop --------------------------------

        generationLoop:
        for (iterations = 1; iterations <= maxIterations; iterations++) {
            incrementIterationCount();

            // Generate and evaluate lambda offspring
            final RealMatrix arz = randn1(dimension, lambda);
            final RealMatrix arx = zeros(dimension, lambda);
            final double[] fitness = new double[lambda];
            final ValuePenaltyPair[] valuePenaltyPairs = new ValuePenaltyPair[lambda];
            // generate random offspring
            for (int k = 0; k < lambda; k++) {
                RealMatrix arxk = null;
                for (int i = 0; i < checkFeasableCount + 1; i++) {
                    if (diagonalOnly <= 0) {
                        arxk = xmean.add(BD.multiply(arz.getColumnMatrix(k))
                                         .scalarMultiply(sigma)); // m + sig * Normal(0,C)
                    } else {
                        arxk = xmean.add(times(diagD, arz.getColumnMatrix(k))
                                         .scalarMultiply(sigma));
                    }
                    if (i >= checkFeasableCount ||
                        fitfun.isFeasible(arxk.getColumn(0))) {
                        break;
                    }
                    // regenerate random arguments for row
                    arz.setColumn(k, randn(dimension));
                }
                copyColumn(arxk, 0, arx, k);
                try {
                    valuePenaltyPairs[k] = fitfun.value(arx.getColumn(k)); // compute fitness
                } catch (TooManyEvaluationsException e) {
                    break generationLoop;
                }
            }

            // Compute fitnesses by adding value and penalty after scaling by value range.
            double valueRange = valueRange(valuePenaltyPairs);
            for (int iValue = 0; iValue < valuePenaltyPairs.length; iValue++) {
                fitness[iValue] = valuePenaltyPairs[iValue].value + valuePenaltyPairs[iValue].penalty * valueRange;
            }

            // Sort by fitness and compute weighted mean into xmean
            final int[] arindex = sortedIndices(fitness);
            // Calculate new xmean, this is selection and recombination
            final RealMatrix xold = xmean; // for speed up of Eq. (2) and (3)
            final RealMatrix bestArx = selectColumns(arx, Arrays.copyOf(arindex, mu));
            xmean = bestArx.multiply(weights);
            final RealMatrix bestArz = selectColumns(arz, Arrays.copyOf(arindex, mu));
            final RealMatrix zmean = bestArz.multiply(weights);
            final boolean hsig = updateEvolutionPaths(zmean, xold);
            if (diagonalOnly <= 0) {
                updateCovariance(hsig, bestArx, arz, arindex, xold);
            } else {
                updateCovarianceDiagonalOnly(hsig, bestArz);
            }
            // Adapt step size sigma - Eq. (5)
            sigma *= FastMath.exp(FastMath.min(1, (normps / chiN - 1) * cs / damps));
            final double bestFitness = fitness[arindex[0]];
            final double worstFitness = fitness[arindex[arindex.length - 1]];
            if (bestValue > bestFitness) {
                bestValue = bestFitness;
                lastResult = optimum;
                optimum = new PointValuePair(fitfun.repair(bestArx.getColumn(0)),
                                             isMinimize ? bestFitness : -bestFitness);
                if (getConvergenceChecker() != null && lastResult != null &&
                    getConvergenceChecker().converged(iterations, optimum, lastResult)) {
                    break generationLoop;
                }
            }
            // handle termination criteria
            // Break, if fitness is good enough
            if (stopFitness != 0 && bestFitness < (isMinimize ? stopFitness : -stopFitness)) {
                break generationLoop;
            }
            final double[] sqrtDiagC = sqrt(diagC).getColumn(0);
            final double[] pcCol = pc.getColumn(0);
            for (int i = 0; i < dimension; i++) {
                if (sigma * FastMath.max(FastMath.abs(pcCol[i]), sqrtDiagC[i]) > stopTolX) {
                    break;
                }
                if (i >= dimension - 1) {
                    break generationLoop;
                }
            }
            for (int i = 0; i < dimension; i++) {
                if (sigma * sqrtDiagC[i] > stopTolUpX) {
                    break generationLoop;
                }
            }
            final double historyBest = min(fitnessHistory);
            final double historyWorst = max(fitnessHistory);
            if (iterations > 2 &&
                FastMath.max(historyWorst, worstFitness) -
                FastMath.min(historyBest, bestFitness) < stopTolFun) {
                break generationLoop;
            }
            if (iterations > fitnessHistory.length &&
                historyWorst - historyBest < stopTolHistFun) {
                break generationLoop;
            }
            // Stop if the condition number of the covariance matrix exceeds 1e14
            // (diagD holds the square roots of the eigenvalues of C, hence the
            // threshold of 1e7 on their ratio).
            if (max(diagD) / min(diagD) > 1e7) {
                break generationLoop;
            }
            // user defined termination
            if (getConvergenceChecker() != null) {
                final PointValuePair current
                    = new PointValuePair(bestArx.getColumn(0),
                                         isMinimize ? bestFitness : -bestFitness);
                if (lastResult != null &&
                    getConvergenceChecker().converged(iterations, current, lastResult)) {
                    break generationLoop;
                }
                lastResult = current;
            }
            // Adjust step size in case of equal function values (flat fitness)
            if (bestValue == fitness[arindex[(int) (0.1 + lambda / 4.)]]) {
                sigma *= FastMath.exp(0.2 + cs / damps);
            }
            if (iterations > 2 && FastMath.max(historyWorst, bestFitness) -
                FastMath.min(historyBest, bestFitness) == 0) {
                sigma *= FastMath.exp(0.2 + cs / damps);
            }
            // store best in history
            push(fitnessHistory, bestFitness);
            if (generateStatistics) {
                statisticsSigmaHistory.add(sigma);
                statisticsFitnessHistory.add(bestFitness);
                statisticsMeanHistory.add(xmean.transpose());
                statisticsDHistory.add(diagD.transpose().scalarMultiply(1E5));
            }
        }
        return optimum;
    }

    /**
     * Scans the list of (required and optional) optimization data that
     * characterize the problem.
     *
     * @param optData Optimization data. The following data will be looked for:
     * <ul>
     *  <li>{@link Sigma}</li>
     *  <li>{@link PopulationSize}</li>
     * </ul>
     */
    @Override
    protected void parseOptimizationData(OptimizationData... optData) {
        // Allow base class to register its own data.
        super.parseOptimizationData(optData);

        // The existing values (as set by the previous call) are reused if
        // not provided in the argument list.
        for (OptimizationData data : optData) {
            if (data instanceof Sigma) {
                inputSigma = ((Sigma) data).getSigma();
                continue;
            }
            if (data instanceof PopulationSize) {
                lambda = ((PopulationSize) data).getPopulationSize();
                continue;
            }
        }

        checkParameters();
    }

    /**
     * Checks dimensions and values of boundaries and inputSigma if defined.
     */
    private void checkParameters() {
        if (inputSigma != null) {
            final double[] init = getStartPoint();

            if (inputSigma.length != init.length) {
                throw new DimensionMismatchException(inputSigma.length, init.length);
            }

            final double[] lB = getLowerBound();
            final double[] uB = getUpperBound();

            for (int i = 0; i < init.length; i++) {
                if (inputSigma[i] > uB[i] - lB[i]) {
                    throw new OutOfRangeException(inputSigma[i], 0, uB[i] - lB[i]);
                }
            }
        }
    }

    /**
     * Initialization of the dynamic search parameters.
     *
     * @param guess Initial guess for the arguments of the fitness function.
     */
    private void initializeCMA(double[] guess) {
        if (lambda <= 0) {
            throw new NotStrictlyPositiveException(lambda);
        }
        // initialize sigma
        final double[][] sigmaArray = new double[guess.length][1];
        for (int i = 0; i < guess.length; i++) {
            sigmaArray[i][0] = inputSigma[i];
        }
        final RealMatrix insigma = new Array2DRowRealMatrix(sigmaArray, false);
        sigma = max(insigma); // overall standard deviation

        // initialize termination criteria
        stopTolUpX = 1e3 * max(insigma);
        stopTolX = 1e-11 * max(insigma);
        stopTolFun = 1e-12;
        stopTolHistFun = 1e-13;

        // initialize selection strategy parameters
        mu = lambda / 2; // number of parents/points for recombination
        logMu2 = FastMath.log(mu + 0.5);
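        // Recombination weights: w_i proportional to log(mu + 0.5) - log(i)
        // for i = 1..mu; normalized below so that they sum to 1.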
        weights = log(sequence(1, mu, 1)).scalarMultiply(-1).scalarAdd(logMu2);
        double sumw = 0;
        double sumwq = 0;
        for (int i = 0; i < mu; i++) {
            double w = weights.getEntry(i, 0);
            sumw += w;
            sumwq += w * w;
        }
        weights = weights.scalarMultiply(1 / sumw);
        mueff = sumw * sumw / sumwq; // variance-effectiveness of sum w_i x_i

        // initialize dynamic strategy parameters and constants
        cc = (4 + mueff / dimension) /
                (dimension + 4 + 2 * mueff / dimension);
        cs = (mueff + 2) / (dimension + mueff + 3.);
        damps = (1 + 2 * FastMath.max(0, FastMath.sqrt((mueff - 1) /
                                                       (dimension + 1)) - 1)) *
            FastMath.max(0.3,
                         1 - dimension / (1e-6 + maxIterations)) + cs; // minor increment
        ccov1 = 2 / ((dimension + 1.3) * (dimension + 1.3) + mueff);
        ccovmu = FastMath.min(1 - ccov1, 2 * (mueff - 2 + 1 / mueff) /
                              ((dimension + 2) * (dimension + 2) + mueff));
        ccov1Sep = FastMath.min(1, ccov1 * (dimension + 1.5) / 3);
        ccovmuSep = FastMath.min(1 - ccov1, ccovmu * (dimension + 1.5) / 3);
        chiN = FastMath.sqrt(dimension) *
                (1 - 1 / ((double) 4 * dimension) + 1 / ((double) 21 * dimension * dimension));
        // initialize CMA internal values - updated each generation
        xmean = MatrixUtils.createColumnRealMatrix(guess); // objective variables
        diagD = insigma.scalarMultiply(1 / sigma);
        diagC = square(diagD);
        pc = zeros(dimension, 1); // evolution path for C
        ps = zeros(dimension, 1); // evolution path for sigma
        normps = ps.getFrobeniusNorm();

        B = eye(dimension, dimension); // B defines the coordinate system
        D = ones(dimension, 1); // diagonal D defines the scaling
        BD = times(B, repmat(diagD.transpose(), dimension, 1));
        C = B.multiply(diag(square(D)).multiply(B.transpose())); // covariance
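        // History length: 10 + 30 * dimension / lambda generations.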
        historySize = 10 + (int) (3 * 10 * dimension / (double) lambda);
        fitnessHistory = new double[historySize]; // history of fitness values
        for (int i = 0; i < historySize; i++) {
            fitnessHistory[i] = Double.MAX_VALUE;
        }
    }

    /**
     * Update of the evolution paths ps and pc.
     *
     * @param zmean Weighted row matrix of the gaussian random numbers generating
     * the current offspring.
     * @param xold xmean matrix of the previous generation.
     * @return hsig flag indicating a small correction.
     */
    private boolean updateEvolutionPaths(RealMatrix zmean, RealMatrix xold) {
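        // Cumulation for sigma: ps <- (1 - cs) * ps + sqrt(cs * (2 - cs) * mueff) * B * zmean.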
        ps = ps.scalarMultiply(1 - cs).add(
                B.multiply(zmean).scalarMultiply(
                        FastMath.sqrt(cs * (2 - cs) * mueff)));
        normps = ps.getFrobeniusNorm();
        final boolean hsig = normps /
            FastMath.sqrt(1 - FastMath.pow(1 - cs, 2 * iterations)) /
            chiN < 1.4 + 2 / ((double) dimension + 1);
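        // Cumulation for C: pc <- (1 - cc) * pc, plus the displacement term
        // sqrt(cc * (2 - cc) * mueff) * (xmean - xold) / sigma when hsig holds.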
        pc = pc.scalarMultiply(1 - cc);
        if (hsig) {
            pc = pc.add(xmean.subtract(xold).scalarMultiply(FastMath.sqrt(cc * (2 - cc) * mueff) / sigma));
        }
        return hsig;
    }

    /**
     * Update of the covariance matrix C for diagonalOnly > 0.
     *
     * @param hsig Flag indicating a small correction.
     * @param bestArz Fitness-sorted matrix of the gaussian random values of the
     * current offspring.
     */
    private void updateCovarianceDiagonalOnly(boolean hsig,
                                              final RealMatrix bestArz) {
        // minor correction if hsig==false
        double oldFac = hsig ? 0 : ccov1Sep * cc * (2 - cc);
        oldFac += 1 - ccov1Sep - ccovmuSep;
        diagC = diagC.scalarMultiply(oldFac) // regard old matrix
            .add(square(pc).scalarMultiply(ccov1Sep)) // plus rank one update
            .add((times(diagC, square(bestArz).multiply(weights))) // plus rank mu update
                 .scalarMultiply(ccovmuSep));
        diagD = sqrt(diagC); // replaces eig(C)
        if (diagonalOnly > 1 &&
            iterations > diagonalOnly) {
            // full covariance matrix from now on
            diagonalOnly = 0;
            B = eye(dimension, dimension);
            BD = diag(diagD);
            C = diag(diagC);
        }
    }

    /**
     * Update of the covariance matrix C.
     *
     * @param hsig Flag indicating a small correction.
     * @param bestArx Fitness-sorted matrix of the argument vectors producing the
     * current offspring.
     * @param arz Unsorted matrix containing the gaussian random values of the
     * current offspring.
     * @param arindex Indices indicating the fitness-order of the current offspring.
     * @param xold xmean matrix of the previous generation.
     */
    private void updateCovariance(boolean hsig, final RealMatrix bestArx,
                                  final RealMatrix arz, final int[] arindex,
                                  final RealMatrix xold) {
        double negccov = 0;
        if (ccov1 + ccovmu > 0) {
            final RealMatrix arpos = bestArx.subtract(repmat(xold, 1, mu))
                .scalarMultiply(1 / sigma); // mu difference vectors
            final RealMatrix roneu = pc.multiply(pc.transpose())
                .scalarMultiply(ccov1); // rank one update
            // minor correction if hsig==false
            double oldFac = hsig ? 0 : ccov1 * cc * (2 - cc);
            oldFac += 1 - ccov1 - ccovmu;
            if (isActiveCMA) {
                // Adapt covariance matrix C active CMA
                negccov = (1 - ccovmu) * 0.25 * mueff /
                    (FastMath.pow(dimension + 2, 1.5) + 2 * mueff);
                // keep at least 0.66 in all directions, small population sizes
                // are most critical
                final double negminresidualvariance = 0.66;
                // where to make up for the variance loss
                final double negalphaold = 0.5;
                // prepare vectors, compute negative updating matrix Cneg
                final int[] arReverseIndex = reverse(arindex);
                RealMatrix arzneg = selectColumns(arz, Arrays.copyOf(arReverseIndex, mu));
                RealMatrix arnorms = sqrt(sumRows(square(arzneg)));
                final int[] idxnorms = sortedIndices(arnorms.getRow(0));
                final RealMatrix arnormsSorted = selectColumns(arnorms, idxnorms);
                final int[] idxReverse = reverse(idxnorms);
                final RealMatrix arnormsReverse = selectColumns(arnorms, idxReverse);
                arnorms = divide(arnormsReverse, arnormsSorted);
                final int[] idxInv = inverse(idxnorms);
                final RealMatrix arnormsInv = selectColumns(arnorms, idxInv);
                // check and set learning rate negccov
                final double negcovMax = (1 - negminresidualvariance) /
                    square(arnormsInv).multiply(weights).getEntry(0, 0);
                if (negccov > negcovMax) {
                    negccov = negcovMax;
                }
                arzneg = times(arzneg, repmat(arnormsInv, dimension, 1));
                final RealMatrix artmp = BD.multiply(arzneg);
                final RealMatrix Cneg = artmp.multiply(diag(weights)).multiply(artmp.transpose());
                oldFac += negalphaold * negccov;
                C = C.scalarMultiply(oldFac) // regard old matrix
                    .add(roneu) // plus rank one update
                    .add(arpos.scalarMultiply( // plus rank mu update
                                              ccovmu + (1 - negalphaold) * negccov)
                         .multiply(times(repmat(weights, 1, dimension),
                                         arpos.transpose())))
                    .subtract(Cneg.scalarMultiply(negccov));
            } else {
                // Adapt covariance matrix C - nonactive
                C = C.scalarMultiply(oldFac) // regard old matrix
                    .add(roneu) // plus rank one update
                    .add(arpos.scalarMultiply(ccovmu) // plus rank mu update
                         .multiply(times(repmat(weights, 1, dimension),
                                         arpos.transpose())));
            }
        }
        updateBD(negccov);
    }

    /**
     * Update B and D from C.
     *
     * @param negccov Negative covariance factor.
     */
    private void updateBD(double negccov) {
        if (ccov1 + ccovmu + negccov > 0 &&
            (iterations % (1. / (ccov1 + ccovmu + negccov) / dimension / 10.)) < 1) {
            // Perform the eigendecomposition only every
            // 1 / ((ccov1 + ccovmu + negccov) * dimension * 10) iterations,
            // to achieve O(N^2) amortized cost per generation.
            C = triu(C, 0).add(triu(C, 1).transpose()); // enforce symmetry to prevent complex numbers
            final EigenDecomposition eig = new EigenDecomposition(C);
            B = eig.getV(); // eigen decomposition, B==normalized eigenvectors
            D = eig.getD();
            diagD = diag(D);
            if (min(diagD) <= 0) {
                for (int i = 0; i < dimension; i++) {
                    if (diagD.getEntry(i, 0) < 0) {
                        diagD.setEntry(i, 0, 0);
                    }
                }
                final double tfac = max(diagD) / 1e14;
                C = C.add(eye(dimension, dimension).scalarMultiply(tfac));
                diagD = diagD.add(ones(dimension, 1).scalarMultiply(tfac));
            }
            if (max(diagD) > 1e14 * min(diagD)) {
                final double tfac = max(diagD) / 1e14 - min(diagD);
                C = C.add(eye(dimension, dimension).scalarMultiply(tfac));
                diagD = diagD.add(ones(dimension, 1).scalarMultiply(tfac));
            }
            diagC = diag(C);
            diagD = sqrt(diagD); // D contains standard deviations now
            BD = times(B, repmat(diagD.transpose(), dimension, 1)); // O(n^2)
        }
    }

    /**
     * Pushes the current best fitness value in a history queue.
     *
     * @param vals History queue.
     * @param val Current best fitness value.
     */
    private static void push(double[] vals, double val) {
        for (int i = vals.length - 1; i > 0; i--) {
            vals[i] = vals[i - 1];
        }
        vals[0] = val;
    }

    /**
     * Sorts fitness values.
     *
     * @param doubles Array of values to be sorted.
     * @return a sorted array of indices pointing into doubles.
     */
    private int[] sortedIndices(final double[] doubles) {
        final DoubleIndex[] dis = new DoubleIndex[doubles.length];
        for (int i = 0; i < doubles.length; i++) {
            dis[i] = new DoubleIndex(doubles[i], i);
        }
        Arrays.sort(dis);
        final int[] indices = new int[doubles.length];
        for (int i = 0; i < doubles.length; i++) {
            indices[i] = dis[i].index;
        }
        return indices;
    }

    /**
     * Get range of values.
     *
     * @param vpPairs Array of valuePenaltyPairs to get range from.
     * @return a double equal to maximum value minus minimum value.
     */
    private double valueRange(final ValuePenaltyPair[] vpPairs) {
        double max = Double.NEGATIVE_INFINITY;
        double min = Double.POSITIVE_INFINITY;
        for (ValuePenaltyPair vpPair : vpPairs) {
            if (vpPair.value > max) {
                max = vpPair.value;
            }
            if (vpPair.value < min) {
                min = vpPair.value;
            }
        }
        return max - min;
    }

    /**
     * Used to sort fitness values. Sorting is always in lower value first
     * order.
     */
    private static class DoubleIndex implements Comparable<DoubleIndex> {
        /** Value to compare. */
        private final double value;
        /** Index into sorted array. */
        private final int index;

        /**
         * @param value Value to compare.
         * @param index Index into sorted array.
         */
        DoubleIndex(double value, int index) {
            this.value = value;
            this.index = index;
        }

        /** {@inheritDoc} */
        @Override
        public int compareTo(DoubleIndex o) {
            return Double.compare(value, o.value);
        }

        /** {@inheritDoc} */
        @Override
        public boolean equals(Object other) {
            if (this == other) {
                return true;
            }

            if (other instanceof DoubleIndex) {
                return Double.compare(value, ((DoubleIndex) other).value) == 0;
            }

            return false;
        }

        /** {@inheritDoc} */
        @Override
        public int hashCode() {
            long bits = Double.doubleToLongBits(value);
            return (int) ((1438542 ^ (bits >>> 32) ^ bits) & 0xffffffff);
        }
    }

    /**
     * Stores the value and penalty (for repair of out-of-bounds points).
     */
    private static class ValuePenaltyPair {
        /** Objective function value. */
        private final double value;
        /** Penalty value for repair of out-of-bounds points. */
        private final double penalty;

        /**
         * @param value Function value.
         * @param penalty Out-of-bounds penalty.
         */
        ValuePenaltyPair(final double value, final double penalty) {
            this.value   = value;
            this.penalty = penalty;
        }
    }

    /**
     * Computes the objective function value at a given point, adding a
     * penalty if the point had to be repaired to lie within the defined
     * bounds.
     */
    private class FitnessFunction {
        /**
         * Flag indicating whether the objective variables are forced into their
         * bounds if defined.
         */
        private final boolean isRepairMode;

        /** Simple constructor.
         */
        FitnessFunction() {
            isRepairMode = true;
        }

        /**
         * @param point Normalized objective variables.
         * @return the objective value + penalty for violated bounds.
         */
        public ValuePenaltyPair value(final double[] point) {
            double value;
            double penalty = 0.0;
            if (isRepairMode) {
                double[] repaired = repair(point);
                value = CMAESOptimizer.this.computeObjectiveValue(repaired);
                penalty = penalty(point, repaired);
            } else {
                value = CMAESOptimizer.this.computeObjectiveValue(point);
            }
            value = isMinimize ? value : -value;
            penalty = isMinimize ? penalty : -penalty;
            return new ValuePenaltyPair(value, penalty);
        }

        /**
         * @param x Normalized objective variables.
         * @return {@code true} if in bounds.
         */
        public boolean isFeasible(final double[] x) {
            final double[] lB = CMAESOptimizer.this.getLowerBound();
            final double[] uB = CMAESOptimizer.this.getUpperBound();

            for (int i = 0; i < x.length; i++) {
                if (x[i] < lB[i]) {
                    return false;
                }
                if (x[i] > uB[i]) {
                    return false;
                }
            }
            return true;
        }

        /**
         * @param x Normalized objective variables.
         * @return the repaired (i.e. all in bounds) objective variables.
         */
        private double[] repair(final double[] x) {
            final double[] lB = CMAESOptimizer.this.getLowerBound();
            final double[] uB = CMAESOptimizer.this.getUpperBound();

            final double[] repaired = new double[x.length];
            for (int i = 0; i < x.length; i++) {
                if (x[i] < lB[i]) {
                    repaired[i] = lB[i];
                } else if (x[i] > uB[i]) {
                    repaired[i] = uB[i];
                } else {
                    repaired[i] = x[i];
                }
            }
            return repaired;
        }

        /**
         * @param x Normalized objective variables.
         * @param repaired Repaired objective variables.
         * @return Penalty value according to the violation of the bounds.
         */
        private double penalty(final double[] x, final double[] repaired) {
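            // Penalty is the L1 distance between the raw point and its
            // bounds-repaired version.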
            double penalty = 0;
            for (int i = 0; i < x.length; i++) {
                double diff = FastMath.abs(x[i] - repaired[i]);
                penalty += diff;
            }
            return isMinimize ? penalty : -penalty;
        }
    }

    // -----Matrix utility functions similar to the Matlab built-in functions------

    /**
     * @param m Input matrix.
     * @return Matrix representing the element-wise logarithm of m.
     */
    private static RealMatrix log(final RealMatrix m) {
        final double[][] d = new double[m.getRowDimension()][m.getColumnDimension()];
        for (int r = 0; r < m.getRowDimension(); r++) {
            for (int c = 0; c < m.getColumnDimension(); c++) {
                d[r][c] = FastMath.log(m.getEntry(r, c));
            }
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
     * @param m Input matrix.
     * @return Matrix representing the element-wise square root of m.
     */
    private static RealMatrix sqrt(final RealMatrix m) {
        final double[][] d = new double[m.getRowDimension()][m.getColumnDimension()];
        for (int r = 0; r < m.getRowDimension(); r++) {
            for (int c = 0; c < m.getColumnDimension(); c++) {
                d[r][c] = FastMath.sqrt(m.getEntry(r, c));
            }
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
     * @param m Input matrix.
     * @return Matrix representing the element-wise square of m.
     */
    private static RealMatrix square(final RealMatrix m) {
        final double[][] d = new double[m.getRowDimension()][m.getColumnDimension()];
        for (int r = 0; r < m.getRowDimension(); r++) {
            for (int c = 0; c < m.getColumnDimension(); c++) {
                double e = m.getEntry(r, c);
                d[r][c] = e * e;
            }
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
     * @param m Input matrix 1.
     * @param n Input matrix 2.
     * @return the matrix where the elements of m and n are element-wise multiplied.
     */
    private static RealMatrix times(final RealMatrix m, final RealMatrix n) {
        final double[][] d = new double[m.getRowDimension()][m.getColumnDimension()];
        for (int r = 0; r < m.getRowDimension(); r++) {
            for (int c = 0; c < m.getColumnDimension(); c++) {
                d[r][c] = m.getEntry(r, c) * n.getEntry(r, c);
            }
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
     * @param m Input matrix 1.
     * @param n Input matrix 2.
     * @return Matrix where the elements of m and n are element-wise divided.
     */
    private static RealMatrix divide(final RealMatrix m, final RealMatrix n) {
        final double[][] d = new double[m.getRowDimension()][m.getColumnDimension()];
        for (int r = 0; r < m.getRowDimension(); r++) {
            for (int c = 0; c < m.getColumnDimension(); c++) {
                d[r][c] = m.getEntry(r, c) / n.getEntry(r, c);
            }
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
     * @param m Input matrix.
     * @param cols Columns to select.
     * @return Matrix representing the selected columns.
     */
    private static RealMatrix selectColumns(final RealMatrix m, final int[] cols) {
        final double[][] d = new double[m.getRowDimension()][cols.length];
        for (int r = 0; r < m.getRowDimension(); r++) {
            for (int c = 0; c < cols.length; c++) {
                d[r][c] = m.getEntry(r, cols[c]);
            }
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
     * @param m Input matrix.
     * @param k Diagonal position.
     * @return Upper triangular part of matrix.
     */
    private static RealMatrix triu(final RealMatrix m, int k) {
        final double[][] d = new double[m.getRowDimension()][m.getColumnDimension()];
        for (int r = 0; r < m.getRowDimension(); r++) {
            for (int c = 0; c < m.getColumnDimension(); c++) {
                d[r][c] = r <= c - k ? m.getEntry(r, c) : 0;
            }
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
     * @param m Input matrix.
     * @return Row matrix representing the sums of the rows.
     */
    private static RealMatrix sumRows(final RealMatrix m) {
        final double[][] d = new double[1][m.getColumnDimension()];
        for (int c = 0; c < m.getColumnDimension(); c++) {
            double sum = 0;
            for (int r = 0; r < m.getRowDimension(); r++) {
                sum += m.getEntry(r, c);
            }
            d[0][c] = sum;
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
     * @param m Input matrix.
     * @return the diagonal n-by-n matrix if m is a column matrix or the column
     * matrix representing the diagonal if m is an n-by-n matrix.
     */
    private static RealMatrix diag(final RealMatrix m) {
        if (m.getColumnDimension() == 1) {
            final double[][] d = new double[m.getRowDimension()][m.getRowDimension()];
            for (int i = 0; i < m.getRowDimension(); i++) {
                d[i][i] = m.getEntry(i, 0);
            }
            return new Array2DRowRealMatrix(d, false);
        } else {
            final double[][] d = new double[m.getRowDimension()][1];
            for (int i = 0; i < m.getColumnDimension(); i++) {
                d[i][0] = m.getEntry(i, i);
            }
            return new Array2DRowRealMatrix(d, false);
        }
    }

    /**
     * Copies a column from m1 to m2.
     *
     * @param m1 Source matrix.
     * @param col1 Source column.
     * @param m2 Target matrix.
     * @param col2 Target column.
     */
    private static void copyColumn(final RealMatrix m1, int col1,
                                   RealMatrix m2, int col2) {
        for (int i = 0; i < m1.getRowDimension(); i++) {
            m2.setEntry(i, col2, m1.getEntry(i, col1));
        }
    }

    /**
     * @param n Number of rows.
     * @param m Number of columns.
     * @return n-by-m matrix filled with 1.
     */
    private static RealMatrix ones(int n, int m) {
        final double[][] d = new double[n][m];
        for (int r = 0; r < n; r++) {
            Arrays.fill(d[r], 1);
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
     * @param n Number of rows.
     * @param m Number of columns.
     * @return n-by-m matrix of 0 values out of diagonal, and 1 values on
     * the diagonal.
     */
    private static RealMatrix eye(int n, int m) {
        final double[][] d = new double[n][m];
        for (int r = 0; r < n; r++) {
            if (r < m) {
                d[r][r] = 1;
            }
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
     * @param n Number of rows.
     * @param m Number of columns.
     * @return n-by-m matrix of zero values.
     */
    private static RealMatrix zeros(int n, int m) {
        return new Array2DRowRealMatrix(n, m);
    }

    /**
     * @param mat Input matrix.
     * @param n Number of row replicates.
     * @param m Number of column replicates.
     * @return a matrix which replicates the input matrix in both directions.
     */
    private static RealMatrix repmat(final RealMatrix mat, int n, int m) {
        final int rd = mat.getRowDimension();
        final int cd = mat.getColumnDimension();
        final double[][] d = new double[n * rd][m * cd];
        for (int r = 0; r < n * rd; r++) {
            for (int c = 0; c < m * cd; c++) {
                d[r][c] = mat.getEntry(r % rd, c % cd);
            }
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
     * @param start Start value.
     * @param end End value.
     * @param step Step size.
     * @return a sequence as column matrix.
     */
    private static RealMatrix sequence(double start, double end, double step) {
        final int size = (int) ((end - start) / step + 1);
        final double[][] d = new double[size][1];
        double value = start;
        for (int r = 0; r < size; r++) {
            d[r][0] = value;
            value += step;
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
     * @param m Input matrix.
     * @return the maximum of the matrix element values.
     */
    private static double max(final RealMatrix m) {
        double max = -Double.MAX_VALUE;
        for (int r = 0; r < m.getRowDimension(); r++) {
            for (int c = 0; c < m.getColumnDimension(); c++) {
                double e = m.getEntry(r, c);
                if (max < e) {
                    max = e;
                }
            }
        }
        return max;
    }

    /**
     * @param m Input matrix.
     * @return the minimum of the matrix element values.
     */
    private static double min(final RealMatrix m) {
        double min = Double.MAX_VALUE;
        for (int r = 0; r < m.getRowDimension(); r++) {
            for (int c = 0; c < m.getColumnDimension(); c++) {
                double e = m.getEntry(r, c);
                if (min > e) {
                    min = e;
                }
            }
        }
        return min;
    }

    /**
     * @param m Input array.
     * @return the maximum of the array values.
     */
    private static double max(final double[] m) {
        double max = -Double.MAX_VALUE;
        for (int r = 0; r < m.length; r++) {
            if (max < m[r]) {
                max = m[r];
            }
        }
        return max;
    }

    /**
     * @param m Input array.
     * @return the minimum of the array values.
     */
    private static double min(final double[] m) {
        double min = Double.MAX_VALUE;
        for (int r = 0; r < m.length; r++) {
            if (min > m[r]) {
                min = m[r];
            }
        }
        return min;
    }

    /**
     * @param indices Input index array.
     * @return the inverse of the mapping defined by indices.
     */
    private static int[] inverse(final int[] indices) {
        final int[] inverse = new int[indices.length];
        for (int i = 0; i < indices.length; i++) {
            inverse[indices[i]] = i;
        }
        return inverse;
    }

    /**
     * @param indices Input index array.
     * @return the indices in inverse order (last is first).
     */
    private static int[] reverse(final int[] indices) {
        final int[] reverse = new int[indices.length];
        for (int i = 0; i < indices.length; i++) {
            reverse[i] = indices[indices.length - i - 1];
        }
        return reverse;
    }

    /**
     * @param size Length of random array.
     * @return an array of Gaussian random numbers.
     */
    private double[] randn(int size) {
        final double[] randn = new double[size];
        for (int i = 0; i < size; i++) {
            randn[i] = random.sample();
        }
        return randn;
    }

    /**
     * @param size Number of rows.
     * @param popSize Population size.
     * @return a 2-dimensional matrix of Gaussian random numbers.
     */
    private RealMatrix randn1(int size, int popSize) {
        final double[][] d = new double[size][popSize];
        for (int r = 0; r < size; r++) {
            for (int c = 0; c < popSize; c++) {
                d[r][c] = random.sample();
            }
        }
        return new Array2DRowRealMatrix(d, false);
    }
}