/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.commons.math4.legacy.optim.nonlinear.scalar.noderiv;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

import org.apache.commons.math4.legacy.exception.DimensionMismatchException;
import org.apache.commons.math4.legacy.exception.NotStrictlyPositiveException;
import org.apache.commons.math4.legacy.exception.OutOfRangeException;
import org.apache.commons.math4.legacy.exception.TooManyEvaluationsException;
import org.apache.commons.math4.legacy.linear.Array2DRowRealMatrix;
import org.apache.commons.math4.legacy.linear.EigenDecomposition;
import org.apache.commons.math4.legacy.linear.MatrixUtils;
import org.apache.commons.math4.legacy.linear.RealMatrix;
import org.apache.commons.math4.legacy.optim.ConvergenceChecker;
import org.apache.commons.math4.legacy.optim.OptimizationData;
import org.apache.commons.math4.legacy.optim.PointValuePair;
import org.apache.commons.math4.legacy.optim.nonlinear.scalar.GoalType;
import org.apache.commons.math4.legacy.optim.nonlinear.scalar.PopulationSize;
import org.apache.commons.math4.legacy.optim.nonlinear.scalar.Sigma;
import org.apache.commons.math4.legacy.optim.nonlinear.scalar.MultivariateOptimizer;
import org.apache.commons.rng.UniformRandomProvider;
import org.apache.commons.statistics.distribution.ContinuousDistribution;
import org.apache.commons.statistics.distribution.NormalDistribution;
import org.apache.commons.math4.core.jdkmath.JdkMath;

/**
 * An implementation of the active Covariance Matrix Adaptation Evolution Strategy (CMA-ES)
 * for non-linear, non-convex, non-smooth, global function minimization.
 * <p>
 * The CMA-Evolution Strategy (CMA-ES) is a reliable stochastic optimization method
 * which should be applied if derivative-based methods, e.g. quasi-Newton BFGS or
 * conjugate gradient, fail due to a rugged search landscape (e.g. noise, local
 * optima, outliers, etc.) of the objective function. Like a
 * quasi-Newton method, the CMA-ES learns and applies a variable metric
 * on the underlying search space. Unlike a quasi-Newton method, the
 * CMA-ES neither estimates nor uses gradients, making it considerably more
 * reliable in terms of finding a good, or even close to optimal, solution.
 * <p>
 * In general, on smooth objective functions the CMA-ES is roughly ten times
 * slower than BFGS (counting objective function evaluations, no gradients provided).
 * For up to <code>N=10</code> variables, the derivative-free simplex
 * direct search method (Nelder and Mead) can also be faster, but it is
 * far less reliable than CMA-ES.
 * <p>
 * The CMA-ES is particularly well suited for non-separable
 * and/or badly conditioned problems. To observe the advantage of CMA compared
 * to a conventional evolution strategy, it will usually take about
 * <code>30 N</code> function evaluations. On difficult problems the complete
 * optimization (a single run) is expected to take <em>roughly</em> between
 * <code>30 N</code> and <code>300 N<sup>2</sup></code>
 * function evaluations.
 * <p>
 * This implementation is translated and adapted from the Matlab version
 * of the CMA-ES algorithm as implemented in module {@code cmaes.m} version 3.51.
 * <p>
 * For more information, please refer to the following links:
 * <ul>
 *  <li><a href="http://www.lri.fr/~hansen/cmaes.m">Matlab code</a></li>
 *  <li><a href="http://www.lri.fr/~hansen/cmaesintro.html">Introduction to CMA-ES</a></li>
 *  <li><a href="http://en.wikipedia.org/wiki/CMA-ES">Wikipedia</a></li>
 * </ul>
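 * <p>
 * A minimal usage sketch is shown below; the random source, objective
 * function and all numeric settings are illustrative assumptions only, not
 * recommended defaults. {@code MaxEval}, {@code InitialGuess} and
 * {@code SimpleBounds} come from
 * {@code org.apache.commons.math4.legacy.optim}, {@code ObjectiveFunction}
 * and {@code GoalType} from its {@code nonlinear.scalar} subpackage, and
 * {@code RandomSource} from Commons RNG ({@code commons-rng-simple}):
 * <pre>{@code
 * CMAESOptimizer optimizer =
 *     new CMAESOptimizer(30000,                        // max iterations
 *                        0,                            // stopFitness (0: criterion unused)
 *                        true,                         // isActiveCMA
 *                        0,                            // diagonalOnly
 *                        0,                            // checkFeasableCount
 *                        RandomSource.MT_64.create(),  // random generator
 *                        false,                        // generateStatistics
 *                        null);                        // no additional convergence checker
 * PointValuePair result =
 *     optimizer.optimize(new MaxEval(100000),
 *                        new ObjectiveFunction(x -> x[0] * x[0] + x[1] * x[1]),
 *                        GoalType.MINIMIZE,
 *                        new InitialGuess(new double[] { 1, 1 }),
 *                        new SimpleBounds(new double[] { -10, -10 },
 *                                         new double[] { 10, 10 }),
 *                        new Sigma(new double[] { 0.5, 0.5 }),
 *                        new PopulationSize(10));
 * }</pre>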
 *
 * @since 3.0
 */
public class CMAESOptimizer
    extends MultivariateOptimizer {
    // global search parameters
    /**
     * Population size, offspring number. The primary strategy parameter to play
     * with, which can be increased from its default value. Increasing the
     * population size improves global search properties at the expense of speed.
     * Speed decreases, as a rule, at most linearly with increasing population
     * size. It is advisable to begin with the default small population size.
     */
    private int lambda; // population size
    /**
     * Covariance update mechanism, default is active CMA. isActiveCMA = true
     * turns on "active CMA" with a negative update of the covariance matrix and
     * checks for positive definiteness. In the Matlab reference code,
     * OPTS.CMA.active = 2 does not check for positive definiteness and is
     * numerically faster. Active CMA usually speeds up the adaptation.
     */
    private final boolean isActiveCMA;
    /**
     * Determines how often a new random offspring is generated in case it is
     * not feasible / beyond the defined limits, default is 0.
     */
    private final int checkFeasableCount;
    /**
     * @see Sigma
     */
    private double[] inputSigma;
    /** Number of objective variables/problem dimension. */
    private int dimension;
    /**
     * Defines the number of initial iterations, where the covariance matrix
     * remains diagonal and the algorithm has internally linear time complexity.
     * diagonalOnly = 1 means keeping the covariance matrix always diagonal and
     * this setting also exhibits linear space complexity. This can be
     * particularly useful for dimension > 100.
     * @see <a href="http://hal.archives-ouvertes.fr/inria-00287367/en">A Simple Modification in CMA-ES</a>
     */
    private int diagonalOnly;
    /** Indicates whether the goal is minimization (true) or maximization (false). */
    private boolean isMinimize = true;
    /** Indicates whether statistic data is collected. */
    private final boolean generateStatistics;

    // termination criteria
    /** Maximal number of iterations allowed. */
    private final int maxIterations;
    /** Limit for fitness value. */
    private final double stopFitness;
    /** Stop if the x-change is larger than stopTolUpX. */
    private double stopTolUpX;
    /** Stop if the x-change is smaller than stopTolX. */
    private double stopTolX;
    /** Stop if the change in function value is smaller than stopTolFun. */
    private double stopTolFun;
    /** Stop if the change of the best function values over the history window is smaller than stopTolHistFun. */
    private double stopTolHistFun;

    // selection strategy parameters
    /** Number of parents/points for recombination. */
    private int mu;
    /** log(mu + 0.5), stored for efficiency. */
    private double logMu2;
    /** Array for weighted recombination. */
    private RealMatrix weights;
    /** Variance-effectiveness of sum w_i x_i. */
    private double mueff;

    // dynamic strategy parameters and constants
    /** Overall standard deviation - search volume. */
    private double sigma;
    /** Cumulation constant. */
    private double cc;
    /** Cumulation constant for step-size. */
    private double cs;
    /** Damping for step-size. */
    private double damps;
    /** Learning rate for rank-one update. */
    private double ccov1;
    /** Learning rate for rank-mu update. */
    private double ccovmu;
    /** Expectation of ||N(0,I)|| == norm(randn(N,1)). */
    private double chiN;
    /** Learning rate for rank-one update - diagonalOnly. */
    private double ccov1Sep;
    /** Learning rate for rank-mu update - diagonalOnly. */
    private double ccovmuSep;

    // CMA internal values - updated each generation
    /** Objective variables. */
    private RealMatrix xmean;
    /** Evolution path. */
    private RealMatrix pc;
    /** Evolution path for sigma. */
    private RealMatrix ps;
    /** Norm of ps, stored for efficiency. */
    private double normps;
    /** Coordinate system. */
    private RealMatrix B;
    /** Scaling. */
    private RealMatrix D;
    /** B*D, stored for efficiency. */
    private RealMatrix BD;
    /** Diagonal of sqrt(D), stored for efficiency. */
    private RealMatrix diagD;
    /** Covariance matrix. */
    private RealMatrix C;
    /** Diagonal of C, used for diagonalOnly. */
    private RealMatrix diagC;
    /** Number of iterations already performed. */
    private int iterations;

    /** History queue of best values. */
    private double[] fitnessHistory;
    /** Size of history queue of best values. */
    private int historySize;

    /** Gaussian sampler. */
    private final ContinuousDistribution.Sampler random;

    /** History of sigma values. */
    private final List<Double> statisticsSigmaHistory = new ArrayList<>();
    /** History of mean matrix. */
    private final List<RealMatrix> statisticsMeanHistory = new ArrayList<>();
    /** History of fitness values. */
    private final List<Double> statisticsFitnessHistory = new ArrayList<>();
    /** History of D matrix. */
    private final List<RealMatrix> statisticsDHistory = new ArrayList<>();

    /**
     * @param maxIterations Maximal number of iterations.
     * @param stopFitness Stop if the objective function value is smaller than
     * {@code stopFitness} (a value of 0 disables this criterion).
     * @param isActiveCMA Chooses the covariance matrix update method.
     * @param diagonalOnly Number of initial iterations, where the covariance matrix
     * remains diagonal.
     * @param checkFeasableCount Determines how often new random objective variables are
     * generated in case they are out of bounds.
     * @param rng Random generator.
     * @param generateStatistics Whether statistic data is collected.
     * @param checker Convergence checker.
     *
     * @since 3.1
     */
    public CMAESOptimizer(int maxIterations,
                          double stopFitness,
                          boolean isActiveCMA,
                          int diagonalOnly,
                          int checkFeasableCount,
                          UniformRandomProvider rng,
                          boolean generateStatistics,
                          ConvergenceChecker<PointValuePair> checker) {
        super(checker);
        this.maxIterations = maxIterations;
        this.stopFitness = stopFitness;
        this.isActiveCMA = isActiveCMA;
        this.diagonalOnly = diagonalOnly;
        this.checkFeasableCount = Math.max(0, checkFeasableCount);
        this.random = NormalDistribution.of(0, 1).createSampler(rng);
        this.generateStatistics = generateStatistics;
    }

    /**
     * @return History of sigma values.
     */
    public List<Double> getStatisticsSigmaHistory() {
        return statisticsSigmaHistory;
    }

    /**
     * @return History of mean matrix.
     */
    public List<RealMatrix> getStatisticsMeanHistory() {
        return statisticsMeanHistory;
    }

    /**
     * @return History of fitness values.
     */
    public List<Double> getStatisticsFitnessHistory() {
        return statisticsFitnessHistory;
    }

    /**
     * @return History of D matrix.
     */
    public List<RealMatrix> getStatisticsDHistory() {
        return statisticsDHistory;
    }

    /**
     * {@inheritDoc}
     *
     * @param optData Optimization data. In addition to those documented in
     * {@link MultivariateOptimizer#parseOptimizationData(OptimizationData[])
     * MultivariateOptimizer}, this method will register the following data:
     * <ul>
     *  <li>
     *   {@link Sigma} values define the initial coordinate-wise standard
     *   deviations for sampling new search points around the initial guess.
     *   It is suggested to set them to the estimated distance from the
     *   initial to the desired optimum.
     *   Small values induce the search to be more local (and very small
     *   values are more likely to find a local optimum close to the initial
     *   guess).
     *   Too small values might however lead to early termination.
     *  </li>
     *  <li>
     *   {@link PopulationSize} is the number of offspring and the primary
     *   strategy parameter.
     *   In the absence of better clues, a good default could be an integer
     *   close to {@code 4 + 3 ln(n)}, where {@code n} is the number of
     *   optimized parameters (e.g. {@code lambda = 11} for {@code n = 10}).
     *   Increasing the population size improves global search properties at
     *   the expense of speed (which in general decreases at most linearly
     *   with increasing population size).
     *  </li>
     * </ul>
     * @return {@inheritDoc}
     * @throws TooManyEvaluationsException if the maximal number of evaluations
     * is exceeded.
     * @throws DimensionMismatchException if the initial guess, target, and
     * weight arguments have inconsistent dimensions.
     */
    @Override
    public PointValuePair optimize(OptimizationData... optData) {
        // Set up base class and perform computation.
        return super.optimize(optData);
    }

    /** {@inheritDoc} */
    @Override
    protected PointValuePair doOptimize() {
        // -------------------- Initialization --------------------------------
        isMinimize = getGoalType().equals(GoalType.MINIMIZE);
        final FitnessFunction fitfun = new FitnessFunction();
        final double[] guess = getStartPoint();
        // number of objective variables/problem dimension
        dimension = guess.length;
        initializeCMA(guess);
        iterations = 0;
        ValuePenaltyPair valuePenalty = fitfun.value(guess);
        double bestValue = valuePenalty.value + valuePenalty.penalty;
        push(fitnessHistory, bestValue);
        PointValuePair optimum
            = new PointValuePair(getStartPoint(),
                                 isMinimize ? bestValue : -bestValue);
        PointValuePair lastResult = null;

        // -------------------- Generation Loop --------------------------------
        generationLoop:
        for (iterations = 1; iterations <= maxIterations; iterations++) {
            incrementIterationCount();

            // Generate and evaluate lambda offspring
            final RealMatrix arz = randn1(dimension, lambda);
            final RealMatrix arx = zeros(dimension, lambda);
            final double[] fitness = new double[lambda];
            final ValuePenaltyPair[] valuePenaltyPairs = new ValuePenaltyPair[lambda];
            // generate random offspring
            for (int k = 0; k < lambda; k++) {
                RealMatrix arxk = null;
                for (int i = 0; i <= checkFeasableCount; i++) {
                    if (diagonalOnly <= 0) {
                        arxk = xmean.add(BD.multiply(arz.getColumnMatrix(k))
                                         .scalarMultiply(sigma)); // m + sig * Normal(0,C)
                    } else {
                        arxk = xmean.add(times(diagD, arz.getColumnMatrix(k))
                                         .scalarMultiply(sigma));
                    }
                    if (i >= checkFeasableCount ||
                        fitfun.isFeasible(arxk.getColumn(0))) {
                        break;
                    }
                    // regenerate random arguments for row
                    arz.setColumn(k, randn(dimension));
                }
                copyColumn(arxk, 0, arx, k);
                try {
                    valuePenaltyPairs[k] = fitfun.value(arx.getColumn(k)); // compute fitness
                } catch (TooManyEvaluationsException e) {
                    break generationLoop;
                }
            }
            // Compute fitnesses by adding value and penalty after scaling by value range.
            double valueRange = valueRange(valuePenaltyPairs);
            for (int iValue = 0; iValue < valuePenaltyPairs.length; iValue++) {
                fitness[iValue] = valuePenaltyPairs[iValue].value + valuePenaltyPairs[iValue].penalty * valueRange;
            }
            // Sort by fitness and compute weighted mean into xmean
            final int[] arindex = sortedIndices(fitness);
            // Calculate new xmean, this is selection and recombination
            final RealMatrix xold = xmean; // for speed up of Eq. (2) and (3)
            final RealMatrix bestArx = selectColumns(arx, Arrays.copyOf(arindex, mu));
            xmean = bestArx.multiply(weights);
            final RealMatrix bestArz = selectColumns(arz, Arrays.copyOf(arindex, mu));
            final RealMatrix zmean = bestArz.multiply(weights);
            final boolean hsig = updateEvolutionPaths(zmean, xold);
            if (diagonalOnly <= 0) {
                updateCovariance(hsig, bestArx, arz, arindex, xold);
            } else {
                updateCovarianceDiagonalOnly(hsig, bestArz);
            }
            // Adapt step size sigma - Eq. (5)
            sigma *= JdkMath.exp(JdkMath.min(1, (normps / chiN - 1) * cs / damps));
            final double bestFitness = fitness[arindex[0]];
            final double worstFitness = fitness[arindex[arindex.length - 1]];
            if (bestValue > bestFitness) {
                bestValue = bestFitness;
                lastResult = optimum;
                optimum = new PointValuePair(fitfun.repair(bestArx.getColumn(0)),
                                             isMinimize ? bestFitness : -bestFitness);
                if (getConvergenceChecker() != null && lastResult != null &&
                    getConvergenceChecker().converged(iterations, optimum, lastResult)) {
                    break generationLoop;
                }
            }
            // handle termination criteria
            // Break if fitness is good enough
            if (stopFitness != 0 && bestFitness < (isMinimize ? stopFitness : -stopFitness)) {
                break generationLoop;
            }
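            // Break if the step in every coordinate, measured on both the
            // evolution path pc and the distribution spread, is below stopTolX.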
            final double[] sqrtDiagC = sqrt(diagC).getColumn(0);
            final double[] pcCol = pc.getColumn(0);
            for (int i = 0; i < dimension; i++) {
                if (sigma * JdkMath.max(JdkMath.abs(pcCol[i]), sqrtDiagC[i]) > stopTolX) {
                    break;
                }
                if (i >= dimension - 1) {
                    break generationLoop;
                }
            }
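            // Break if the distribution spread in some coordinate has grown
            // beyond stopTolUpX, which usually signals a badly scaled problem
            // or a far too small initial sigma.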
            for (int i = 0; i < dimension; i++) {
                if (sigma * sqrtDiagC[i] > stopTolUpX) {
                    break generationLoop;
                }
            }
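            // Break if the range of all fitness values, over the current
            // generation and the history window, is below stopTolFun.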
            final double historyBest = min(fitnessHistory);
            final double historyWorst = max(fitnessHistory);
            if (iterations > 2 &&
                JdkMath.max(historyWorst, worstFitness) -
                JdkMath.min(historyBest, bestFitness) < stopTolFun) {
                break generationLoop;
            }
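            // Break if the best fitness values in the history window have
            // flattened to within stopTolHistFun.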
            if (iterations > fitnessHistory.length &&
                historyWorst - historyBest < stopTolHistFun) {
                break generationLoop;
            }
            // Break if the condition number of the covariance matrix exceeds 1e14
            // (diagD holds standard deviations, hence the 1e7 threshold).
            if (max(diagD) / min(diagD) > 1e7) {
                break generationLoop;
            }
            // user defined termination
            if (getConvergenceChecker() != null) {
                final PointValuePair current
                    = new PointValuePair(bestArx.getColumn(0),
                                         isMinimize ? bestFitness : -bestFitness);
                if (lastResult != null &&
                    getConvergenceChecker().converged(iterations, current, lastResult)) {
                    break generationLoop;
                }
                lastResult = current;
            }
            // Adjust step size in case of equal function values (flat fitness)
            if (bestValue == fitness[arindex[(int) (0.1 + lambda / 4.)]]) {
                sigma *= JdkMath.exp(0.2 + cs / damps);
            }
            if (iterations > 2 && JdkMath.max(historyWorst, bestFitness) -
                JdkMath.min(historyBest, bestFitness) == 0) {
                sigma *= JdkMath.exp(0.2 + cs / damps);
            }
            // store best in history
            push(fitnessHistory, bestFitness);
            if (generateStatistics) {
                statisticsSigmaHistory.add(sigma);
                statisticsFitnessHistory.add(bestFitness);
                statisticsMeanHistory.add(xmean.transpose());
                statisticsDHistory.add(diagD.transpose().scalarMultiply(1E5));
            }
        }
        return optimum;
    }

    /**
     * Scans the list of (required and optional) optimization data that
     * characterize the problem.
     *
     * @param optData Optimization data. The following data will be looked for:
     * <ul>
     *  <li>{@link Sigma}</li>
     *  <li>{@link PopulationSize}</li>
     * </ul>
     */
    @Override
    protected void parseOptimizationData(OptimizationData... optData) {
        // Allow base class to register its own data.
        super.parseOptimizationData(optData);

        // The existing values (as set by the previous call) are reused if
        // not provided in the argument list.
        for (OptimizationData data : optData) {
            if (data instanceof Sigma) {
                inputSigma = ((Sigma) data).getSigma();
                continue;
            }
            if (data instanceof PopulationSize) {
                lambda = ((PopulationSize) data).getPopulationSize();
                continue;
            }
        }

        checkParameters();
    }

    /**
     * Checks dimensions and values of boundaries and inputSigma if defined.
     */
    private void checkParameters() {
        if (inputSigma != null) {
            final double[] init = getStartPoint();

            if (inputSigma.length != init.length) {
                throw new DimensionMismatchException(inputSigma.length, init.length);
            }

            final double[] lB = getLowerBound();
            final double[] uB = getUpperBound();

            for (int i = 0; i < init.length; i++) {
                if (inputSigma[i] > uB[i] - lB[i]) {
                    throw new OutOfRangeException(inputSigma[i], 0, uB[i] - lB[i]);
                }
            }
        }
    }

    /**
     * Initialization of the dynamic search parameters.
     *
     * @param guess Initial guess for the arguments of the fitness function.
     */
    private void initializeCMA(double[] guess) {
        if (lambda <= 0) {
            throw new NotStrictlyPositiveException(lambda);
        }
        // initialize sigma
        final double[][] sigmaArray = new double[guess.length][1];
        for (int i = 0; i < guess.length; i++) {
            sigmaArray[i][0] = inputSigma[i];
        }
        final RealMatrix insigma = new Array2DRowRealMatrix(sigmaArray, false);
        sigma = max(insigma); // overall standard deviation

        // initialize termination criteria
        stopTolUpX = 1e3 * max(insigma);
        stopTolX = 1e-11 * max(insigma);
        stopTolFun = 1e-12;
        stopTolHistFun = 1e-13;

        // initialize selection strategy parameters
        mu = lambda / 2; // number of parents/points for recombination
        logMu2 = JdkMath.log(mu + 0.5);
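        // Raw recombination weights: w_i = log(mu + 0.5) - log(i) for i = 1..mu;
        // they are normalized below so that they sum to 1.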
        weights = log(sequence(1, mu, 1)).scalarMultiply(-1).scalarAdd(logMu2);
        double sumw = 0;
        double sumwq = 0;
        for (int i = 0; i < mu; i++) {
            double w = weights.getEntry(i, 0);
            sumw += w;
            sumwq += w * w;
        }
        weights = weights.scalarMultiply(1 / sumw);
        mueff = sumw * sumw / sumwq; // variance-effectiveness of sum w_i x_i

        // initialize dynamic strategy parameters and constants
        cc = (4 + mueff / dimension) /
                (dimension + 4 + 2 * mueff / dimension);
        cs = (mueff + 2) / (dimension + mueff + 3.);
        damps = (1 + 2 * JdkMath.max(0, JdkMath.sqrt((mueff - 1) /
                                                       (dimension + 1)) - 1)) *
            JdkMath.max(0.3,
                         1 - dimension / (1e-6 + maxIterations)) + cs; // minor increment
        ccov1 = 2 / ((dimension + 1.3) * (dimension + 1.3) + mueff);
        ccovmu = JdkMath.min(1 - ccov1, 2 * (mueff - 2 + 1 / mueff) /
                              ((dimension + 2) * (dimension + 2) + mueff));
        ccov1Sep = JdkMath.min(1, ccov1 * (dimension + 1.5) / 3);
        ccovmuSep = JdkMath.min(1 - ccov1, ccovmu * (dimension + 1.5) / 3);
        chiN = JdkMath.sqrt(dimension) *
                (1 - 1 / ((double) 4 * dimension) + 1 / ((double) 21 * dimension * dimension));
        // initialize CMA internal values - updated each generation
        xmean = MatrixUtils.createColumnRealMatrix(guess); // objective variables
        diagD = insigma.scalarMultiply(1 / sigma);
        diagC = square(diagD);
        pc = zeros(dimension, 1); // evolution path for C
        ps = zeros(dimension, 1); // evolution path for sigma
        normps = ps.getFrobeniusNorm();

        B = eye(dimension, dimension); // B defines the coordinate system
        D = ones(dimension, 1); // diagonal D defines the scaling
        BD = times(B, repmat(diagD.transpose(), dimension, 1));
        C = B.multiply(diag(square(D)).multiply(B.transpose())); // covariance
        historySize = 10 + (int) (3 * 10 * dimension / (double) lambda);
        fitnessHistory = new double[historySize]; // history of fitness values
        for (int i = 0; i < historySize; i++) {
            fitnessHistory[i] = Double.MAX_VALUE;
        }
    }

    /**
     * Update of the evolution paths ps and pc.
     *
     * @param zmean Weighted row matrix of the gaussian random numbers generating
     * the current offspring.
     * @param xold xmean matrix of the previous generation.
     * @return hsig flag indicating a small correction.
     */
    private boolean updateEvolutionPaths(RealMatrix zmean, RealMatrix xold) {
        ps = ps.scalarMultiply(1 - cs).add(
                B.multiply(zmean).scalarMultiply(
                        JdkMath.sqrt(cs * (2 - cs) * mueff)));
        normps = ps.getFrobeniusNorm();
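        // Stall the pc update (hsig == false) while ps is unusually long: this
        // prevents an overly fast growth of the covariance axes while the
        // step size is still being increased.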
        final boolean hsig = normps /
            JdkMath.sqrt(1 - JdkMath.pow(1 - cs, 2 * iterations)) /
            chiN < 1.4 + 2 / ((double) dimension + 1);
        pc = pc.scalarMultiply(1 - cc);
        if (hsig) {
            pc = pc.add(xmean.subtract(xold).scalarMultiply(JdkMath.sqrt(cc * (2 - cc) * mueff) / sigma));
        }
        return hsig;
    }

    /**
     * Update of the covariance matrix C for diagonalOnly > 0.
     *
     * @param hsig Flag indicating a small correction.
     * @param bestArz Fitness-sorted matrix of the gaussian random values of the
     * current offspring.
     */
    private void updateCovarianceDiagonalOnly(boolean hsig,
                                              final RealMatrix bestArz) {
        // minor correction if hsig==false
        double oldFac = hsig ? 0 : ccov1Sep * cc * (2 - cc);
        oldFac += 1 - ccov1Sep - ccovmuSep;
        diagC = diagC.scalarMultiply(oldFac) // regard old matrix
            .add(square(pc).scalarMultiply(ccov1Sep)) // plus rank one update
            .add((times(diagC, square(bestArz).multiply(weights))) // plus rank mu update
                 .scalarMultiply(ccovmuSep));
        diagD = sqrt(diagC); // replaces eig(C)
        if (diagonalOnly > 1 &&
            iterations > diagonalOnly) {
            // full covariance matrix from now on
            diagonalOnly = 0;
            B = eye(dimension, dimension);
            BD = diag(diagD);
            C = diag(diagC);
        }
    }

    /**
     * Update of the covariance matrix C.
     *
     * @param hsig Flag indicating a small correction.
     * @param bestArx Fitness-sorted matrix of the argument vectors producing the
     * current offspring.
     * @param arz Unsorted matrix containing the gaussian random values of the
     * current offspring.
     * @param arindex Indices indicating the fitness-order of the current offspring.
     * @param xold xmean matrix of the previous generation.
     */
    private void updateCovariance(boolean hsig, final RealMatrix bestArx,
                                  final RealMatrix arz, final int[] arindex,
                                  final RealMatrix xold) {
        double negccov = 0;
        if (ccov1 + ccovmu > 0) {
            final RealMatrix arpos = bestArx.subtract(repmat(xold, 1, mu))
                .scalarMultiply(1 / sigma); // mu difference vectors
            final RealMatrix roneu = pc.multiply(pc.transpose())
                .scalarMultiply(ccov1); // rank one update
            // minor correction if hsig==false
            double oldFac = hsig ? 0 : ccov1 * cc * (2 - cc);
            oldFac += 1 - ccov1 - ccovmu;
            if (isActiveCMA) {
                // Adapt covariance matrix C active CMA
                negccov = (1 - ccovmu) * 0.25 * mueff /
                    (JdkMath.pow(dimension + 2.0, 1.5) + 2 * mueff);
                // keep at least 0.66 in all directions, small popsize are most
                // critical
                final double negminresidualvariance = 0.66;
                // where to make up for the variance loss
                final double negalphaold = 0.5;
                // prepare vectors, compute negative updating matrix Cneg
                final int[] arReverseIndex = reverse(arindex);
                RealMatrix arzneg = selectColumns(arz, Arrays.copyOf(arReverseIndex, mu));
                RealMatrix arnorms = sqrt(sumRows(square(arzneg)));
                final int[] idxnorms = sortedIndices(arnorms.getRow(0));
                final RealMatrix arnormsSorted = selectColumns(arnorms, idxnorms);
                final int[] idxReverse = reverse(idxnorms);
                final RealMatrix arnormsReverse = selectColumns(arnorms, idxReverse);
                arnorms = divide(arnormsReverse, arnormsSorted);
                final int[] idxInv = inverse(idxnorms);
                final RealMatrix arnormsInv = selectColumns(arnorms, idxInv);
                // check and set learning rate negccov
                final double negcovMax = (1 - negminresidualvariance) /
                    square(arnormsInv).multiply(weights).getEntry(0, 0);
                if (negccov > negcovMax) {
                    negccov = negcovMax;
                }
                arzneg = times(arzneg, repmat(arnormsInv, dimension, 1));
                final RealMatrix artmp = BD.multiply(arzneg);
                final RealMatrix cNeg = artmp.multiply(diag(weights)).multiply(artmp.transpose());
                oldFac += negalphaold * negccov;
                C = C.scalarMultiply(oldFac) // regard old matrix
                    .add(roneu) // plus rank one update
                    .add(arpos.scalarMultiply( // plus rank mu update
                                              ccovmu + (1 - negalphaold) * negccov)
                         .multiply(times(repmat(weights, 1, dimension),
                                         arpos.transpose())))
                    .subtract(cNeg.scalarMultiply(negccov)); // minus active negative update
            } else {
                // Adapt covariance matrix C - nonactive
                C = C.scalarMultiply(oldFac) // regard old matrix
                    .add(roneu) // plus rank one update
                    .add(arpos.scalarMultiply(ccovmu) // plus rank mu update
                         .multiply(times(repmat(weights, 1, dimension),
                                         arpos.transpose())));
            }
        }
        updateBD(negccov);
    }

    /**
     * Update B and D from C.
     *
     * @param negccov Negative covariance factor.
     */
    private void updateBD(double negccov) {
        if (ccov1 + ccovmu + negccov > 0 &&
            iterations % (1. / (ccov1 + ccovmu + negccov) / dimension / 10.) < 1) {
            // decompose C only occasionally, to achieve O(N^2) on average
            // enforce symmetry to prevent complex numbers
            C = triu(C, 0).add(triu(C, 1).transpose());
            final EigenDecomposition eig = new EigenDecomposition(C);
            B = eig.getV(); // eigen decomposition, B==normalized eigenvectors
            D = eig.getD();
            diagD = diag(D);
            if (min(diagD) <= 0) {
                for (int i = 0; i < dimension; i++) {
                    if (diagD.getEntry(i, 0) < 0) {
                        diagD.setEntry(i, 0, 0);
                    }
                }
                final double tfac = max(diagD) / 1e14;
                C = C.add(eye(dimension, dimension).scalarMultiply(tfac));
                diagD = diagD.add(ones(dimension, 1).scalarMultiply(tfac));
            }
            if (max(diagD) > 1e14 * min(diagD)) {
                final double tfac = max(diagD) / 1e14 - min(diagD);
                C = C.add(eye(dimension, dimension).scalarMultiply(tfac));
                diagD = diagD.add(ones(dimension, 1).scalarMultiply(tfac));
            }
            diagC = diag(C);
            diagD = sqrt(diagD); // D contains standard deviations now
            BD = times(B, repmat(diagD.transpose(), dimension, 1)); // O(n^2)
        }
    }

    /**
     * Pushes the current best fitness value in a history queue.
     *
     * @param vals History queue.
     * @param val Current best fitness value.
     */
    private static void push(double[] vals, double val) {
        if (vals.length > 1) {
            System.arraycopy(vals, 0, vals, 1, vals.length - 1);
        }
        vals[0] = val;
    }

    /**
     * Sorts fitness values.
     *
     * @param doubles Array of values to be sorted.
     * @return a sorted array of indices pointing into doubles.
     */
    private int[] sortedIndices(final double[] doubles) {
        final DoubleIndex[] dis = new DoubleIndex[doubles.length];
        for (int i = 0; i < doubles.length; i++) {
            dis[i] = new DoubleIndex(doubles[i], i);
        }
        Arrays.sort(dis);
        final int[] indices = new int[doubles.length];
        for (int i = 0; i < doubles.length; i++) {
            indices[i] = dis[i].index;
        }
        return indices;
    }

    /**
     * Gets the range of values.
     *
     * @param vpPairs Array of valuePenaltyPairs to get the range from.
     * @return a double equal to the maximum value minus the minimum value.
     */
    private double valueRange(final ValuePenaltyPair[] vpPairs) {
        double max = Double.NEGATIVE_INFINITY;
        double min = Double.POSITIVE_INFINITY;
        for (ValuePenaltyPair vpPair : vpPairs) {
            if (vpPair.value > max) {
                max = vpPair.value;
            }
            if (vpPair.value < min) {
                min = vpPair.value;
            }
        }
        return max - min;
    }

    /**
     * Used to sort fitness values. Sorting is always in ascending
     * (lower value first) order.
     */
    private static class DoubleIndex implements Comparable<DoubleIndex> {
        /** Value to compare. */
        private final double value;
        /** Index of the value in the original (unsorted) array. */
        private final int index;

        /**
         * @param value Value to compare.
         * @param index Index of the value in the original (unsorted) array.
         */
        DoubleIndex(double value, int index) {
            this.value = value;
            this.index = index;
        }

        /** {@inheritDoc} */
        @Override
        public int compareTo(DoubleIndex o) {
            return Double.compare(value, o.value);
        }

        /** {@inheritDoc} */
        @Override
        public boolean equals(Object other) {
            if (this == other) {
                return true;
            }

            if (other instanceof DoubleIndex) {
                return Double.compare(value, ((DoubleIndex) other).value) == 0;
            }

            return false;
        }

        /** {@inheritDoc} */
        @Override
        public int hashCode() {
            long bits = Double.doubleToLongBits(value);
            return (int) (1438542 ^ (bits >>> 32) ^ bits);
        }
    }

    /**
     * Stores the value and penalty (for repair of an out-of-bounds point).
     */
    private static class ValuePenaltyPair {
        /** Objective function value. */
        private double value;
        /** Penalty value for repair of out-of-bounds points. */
        private double penalty;

        /**
         * @param value Function value.
         * @param penalty Out-of-bounds penalty.
         */
        ValuePenaltyPair(final double value, final double penalty) {
            this.value   = value;
            this.penalty = penalty;
        }
    }

    /**
     * Wraps the objective function, repairing out-of-bounds points (when in
     * repair mode) and computing a penalty according to the violation of the
     * bounds. Objective values are negated for maximization, so that the
     * optimizer can always minimize internally.
     */
    private class FitnessFunction {
        /**
         * Flag indicating whether the objective variables are forced into their
         * bounds if defined.
         */
        private final boolean isRepairMode;

        /** Simple constructor.
         */
        FitnessFunction() {
            isRepairMode = true;
        }

        /**
         * @param point Normalized objective variables.
         * @return the objective value + penalty for violated bounds.
         */
        public ValuePenaltyPair value(final double[] point) {
            double value;
            double penalty = 0.0;
            if (isRepairMode) {
                double[] repaired = repair(point);
                value = CMAESOptimizer.this.computeObjectiveValue(repaired);
                penalty = penalty(point, repaired);
            } else {
                value = CMAESOptimizer.this.computeObjectiveValue(point);
            }
            // negate for maximization; the penalty is non-negative and always
            // worsens the (minimized) fitness, so it is never negated
            value = isMinimize ? value : -value;
            return new ValuePenaltyPair(value, penalty);
        }

        /**
         * @param x Normalized objective variables.
         * @return {@code true} if in bounds.
         */
        public boolean isFeasible(final double[] x) {
            final double[] lB = CMAESOptimizer.this.getLowerBound();
            final double[] uB = CMAESOptimizer.this.getUpperBound();

            for (int i = 0; i < x.length; i++) {
                if (x[i] < lB[i]) {
                    return false;
                }
                if (x[i] > uB[i]) {
                    return false;
                }
            }
            return true;
        }

        /**
         * @param x Normalized objective variables.
         * @return the repaired (i.e. all in bounds) objective variables.
         */
        private double[] repair(final double[] x) {
            final double[] lB = CMAESOptimizer.this.getLowerBound();
            final double[] uB = CMAESOptimizer.this.getUpperBound();

            final double[] repaired = new double[x.length];
            for (int i = 0; i < x.length; i++) {
                if (x[i] < lB[i]) {
                    repaired[i] = lB[i];
                } else if (x[i] > uB[i]) {
                    repaired[i] = uB[i];
                } else {
                    repaired[i] = x[i];
                }
            }
            return repaired;
        }

        /**
         * @param x Normalized objective variables.
         * @param repaired Repaired objective variables.
         * @return Penalty value according to the violation of the bounds
         * (non-negative: the sum of the coordinate-wise repair distances).
         */
        private double penalty(final double[] x, final double[] repaired) {
            double penalty = 0;
            for (int i = 0; i < x.length; i++) {
                double diff = JdkMath.abs(x[i] - repaired[i]);
                penalty += diff;
            }
            return penalty;
        }
    }

    // ----- Matrix utility functions similar to the Matlab built-in functions -----

    /**
     * @param m Input matrix.
     * @return Matrix representing the element-wise logarithm of m.
     */
    private static RealMatrix log(final RealMatrix m) {
        final double[][] d = new double[m.getRowDimension()][m.getColumnDimension()];
        for (int r = 0; r < m.getRowDimension(); r++) {
            for (int c = 0; c < m.getColumnDimension(); c++) {
                d[r][c] = JdkMath.log(m.getEntry(r, c));
            }
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
     * @param m Input matrix.
     * @return Matrix representing the element-wise square root of m.
     */
    private static RealMatrix sqrt(final RealMatrix m) {
        final double[][] d = new double[m.getRowDimension()][m.getColumnDimension()];
        for (int r = 0; r < m.getRowDimension(); r++) {
            for (int c = 0; c < m.getColumnDimension(); c++) {
                d[r][c] = JdkMath.sqrt(m.getEntry(r, c));
            }
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
     * @param m Input matrix.
     * @return Matrix representing the element-wise square of m.
     */
    private static RealMatrix square(final RealMatrix m) {
        final double[][] d = new double[m.getRowDimension()][m.getColumnDimension()];
        for (int r = 0; r < m.getRowDimension(); r++) {
            for (int c = 0; c < m.getColumnDimension(); c++) {
                double e = m.getEntry(r, c);
                d[r][c] = e * e;
            }
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
     * @param m Input matrix 1.
     * @param n Input matrix 2.
     * @return the matrix where the elements of m and n are element-wise multiplied.
     */
    private static RealMatrix times(final RealMatrix m, final RealMatrix n) {
        final double[][] d = new double[m.getRowDimension()][m.getColumnDimension()];
        for (int r = 0; r < m.getRowDimension(); r++) {
            for (int c = 0; c < m.getColumnDimension(); c++) {
                d[r][c] = m.getEntry(r, c) * n.getEntry(r, c);
            }
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
     * @param m Input matrix 1.
     * @param n Input matrix 2.
     * @return Matrix where the elements of m and n are element-wise divided.
     */
    private static RealMatrix divide(final RealMatrix m, final RealMatrix n) {
        final double[][] d = new double[m.getRowDimension()][m.getColumnDimension()];
        for (int r = 0; r < m.getRowDimension(); r++) {
            for (int c = 0; c < m.getColumnDimension(); c++) {
                d[r][c] = m.getEntry(r, c) / n.getEntry(r, c);
            }
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
     * @param m Input matrix.
     * @param cols Columns to select.
     * @return Matrix representing the selected columns.
     */
    private static RealMatrix selectColumns(final RealMatrix m, final int[] cols) {
        final double[][] d = new double[m.getRowDimension()][cols.length];
        for (int r = 0; r < m.getRowDimension(); r++) {
            for (int c = 0; c < cols.length; c++) {
                d[r][c] = m.getEntry(r, cols[c]);
            }
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
     * @param m Input matrix.
     * @param k Diagonal position.
     * @return Upper triangular part of matrix.
     */
    private static RealMatrix triu(final RealMatrix m, int k) {
        final double[][] d = new double[m.getRowDimension()][m.getColumnDimension()];
        for (int r = 0; r < m.getRowDimension(); r++) {
            for (int c = 0; c < m.getColumnDimension(); c++) {
                d[r][c] = r <= c - k ? m.getEntry(r, c) : 0;
            }
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
     * @param m Input matrix.
     * @return Row matrix representing the sums of the rows.
     */
    private static RealMatrix sumRows(final RealMatrix m) {
        final double[][] d = new double[1][m.getColumnDimension()];
        for (int c = 0; c < m.getColumnDimension(); c++) {
            double sum = 0;
            for (int r = 0; r < m.getRowDimension(); r++) {
                sum += m.getEntry(r, c);
            }
            d[0][c] = sum;
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
     * @param m Input matrix.
     * @return the n-by-n diagonal matrix if m is a column matrix, or the column
     * matrix representing the diagonal if m is an n-by-n matrix.
     */
    private static RealMatrix diag(final RealMatrix m) {
        if (m.getColumnDimension() == 1) {
            final double[][] d = new double[m.getRowDimension()][m.getRowDimension()];
            for (int i = 0; i < m.getRowDimension(); i++) {
                d[i][i] = m.getEntry(i, 0);
            }
            return new Array2DRowRealMatrix(d, false);
        } else {
            final double[][] d = new double[m.getRowDimension()][1];
            for (int i = 0; i < m.getColumnDimension(); i++) {
                d[i][0] = m.getEntry(i, i);
            }
            return new Array2DRowRealMatrix(d, false);
        }
    }

    /**
     * Copies a column from m1 to m2.
     *
     * @param m1 Source matrix.
     * @param col1 Source column.
     * @param m2 Target matrix.
     * @param col2 Target column.
     */
    private static void copyColumn(final RealMatrix m1, int col1,
                                   RealMatrix m2, int col2) {
        for (int i = 0; i < m1.getRowDimension(); i++) {
            m2.setEntry(i, col2, m1.getEntry(i, col1));
        }
    }

    /**
     * @param n Number of rows.
     * @param m Number of columns.
     * @return n-by-m matrix filled with 1.
     */
    private static RealMatrix ones(int n, int m) {
        final double[][] d = new double[n][m];
        for (int r = 0; r < n; r++) {
            Arrays.fill(d[r], 1);
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
     * @param n Number of rows.
     * @param m Number of columns.
     * @return n-by-m matrix of 0 values out of diagonal, and 1 values on
     * the diagonal.
     */
    private static RealMatrix eye(int n, int m) {
        final double[][] d = new double[n][m];
        for (int r = 0; r < n; r++) {
            if (r < m) {
                d[r][r] = 1;
            }
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
     * @param n Number of rows.
     * @param m Number of columns.
     * @return n-by-m matrix of zero values.
     */
    private static RealMatrix zeros(int n, int m) {
        return new Array2DRowRealMatrix(n, m);
    }

    /**
     * @param mat Input matrix.
     * @param n Number of row replicates.
     * @param m Number of column replicates.
     * @return a matrix which replicates the input matrix in both directions.
     */
    private static RealMatrix repmat(final RealMatrix mat, int n, int m) {
        final int rd = mat.getRowDimension();
        final int cd = mat.getColumnDimension();
        final double[][] d = new double[n * rd][m * cd];
        for (int r = 0; r < n * rd; r++) {
            for (int c = 0; c < m * cd; c++) {
                d[r][c] = mat.getEntry(r % rd, c % cd);
            }
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
     * @param start Start value.
     * @param end End value.
     * @param step Step size.
     * @return a sequence as column matrix.
     */
    private static RealMatrix sequence(double start, double end, double step) {
        final int size = (int) ((end - start) / step + 1);
        final double[][] d = new double[size][1];
        double value = start;
        for (int r = 0; r < size; r++) {
            d[r][0] = value;
            value += step;
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
     * @param m Input matrix.
     * @return the maximum of the matrix element values.
     */
    private static double max(final RealMatrix m) {
        double max = -Double.MAX_VALUE;
        for (int r = 0; r < m.getRowDimension(); r++) {
            for (int c = 0; c < m.getColumnDimension(); c++) {
                double e = m.getEntry(r, c);
                if (max < e) {
                    max = e;
                }
            }
        }
        return max;
    }

    /**
     * @param m Input matrix.
     * @return the minimum of the matrix element values.
     */
    private static double min(final RealMatrix m) {
        double min = Double.MAX_VALUE;
        for (int r = 0; r < m.getRowDimension(); r++) {
            for (int c = 0; c < m.getColumnDimension(); c++) {
                double e = m.getEntry(r, c);
                if (min > e) {
                    min = e;
                }
            }
        }
        return min;
    }

    /**
     * @param m Input array.
     * @return the maximum of the array values.
     */
    private static double max(final double[] m) {
        double max = -Double.MAX_VALUE;
        for (int r = 0; r < m.length; r++) {
            if (max < m[r]) {
                max = m[r];
            }
        }
        return max;
    }

    /**
     * @param m Input array.
     * @return the minimum of the array values.
     */
    private static double min(final double[] m) {
        double min = Double.MAX_VALUE;
        for (int r = 0; r < m.length; r++) {
            if (min > m[r]) {
                min = m[r];
            }
        }
        return min;
    }

    /**
     * @param indices Input index array.
     * @return the inverse of the mapping defined by indices.
     */
    private static int[] inverse(final int[] indices) {
        final int[] inverse = new int[indices.length];
        for (int i = 0; i < indices.length; i++) {
            inverse[indices[i]] = i;
        }
        return inverse;
    }

    /**
     * @param indices Input index array.
     * @return the indices in inverse order (last is first).
     */
    private static int[] reverse(final int[] indices) {
        final int[] reverse = new int[indices.length];
        for (int i = 0; i < indices.length; i++) {
            reverse[i] = indices[indices.length - i - 1];
        }
        return reverse;
    }

    /**
     * @param size Length of random array.
     * @return an array of Gaussian random numbers.
     */
    private double[] randn(int size) {
        final double[] randn = new double[size];
        for (int i = 0; i < size; i++) {
            randn[i] = random.sample();
        }
        return randn;
    }

    /**
     * @param size Number of rows.
     * @param popSize Population size.
     * @return a 2-dimensional matrix of Gaussian random numbers.
     */
    private RealMatrix randn1(int size, int popSize) {
        final double[][] d = new double[size][popSize];
        for (int r = 0; r < size; r++) {
            for (int c = 0; c < popSize; c++) {
                d[r][c] = random.sample();
            }
        }
        return new Array2DRowRealMatrix(d, false);
    }
}