/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.commons.math3.optim.nonlinear.scalar.noderiv;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.apache.commons.math3.exception.DimensionMismatchException;
import org.apache.commons.math3.exception.NotPositiveException;
import org.apache.commons.math3.exception.NotStrictlyPositiveException;
import org.apache.commons.math3.exception.OutOfRangeException;
import org.apache.commons.math3.exception.TooManyEvaluationsException;
import org.apache.commons.math3.linear.Array2DRowRealMatrix;
import org.apache.commons.math3.linear.EigenDecomposition;
import org.apache.commons.math3.linear.MatrixUtils;
import org.apache.commons.math3.linear.RealMatrix;
import org.apache.commons.math3.optim.ConvergenceChecker;
import org.apache.commons.math3.optim.OptimizationData;
import org.apache.commons.math3.optim.nonlinear.scalar.GoalType;
import org.apache.commons.math3.optim.PointValuePair;
import org.apache.commons.math3.optim.nonlinear.scalar.MultivariateOptimizer;
import org.apache.commons.math3.random.RandomGenerator;
import org.apache.commons.math3.util.MathArrays;

/**
 * <p>An implementation of the active Covariance Matrix Adaptation Evolution Strategy (CMA-ES)
 * for non-linear, non-convex, non-smooth, global function minimization.
 * The CMA-Evolution Strategy (CMA-ES) is a reliable stochastic optimization method
 * which should be applied if derivative-based methods, e.g. quasi-Newton BFGS or
 * conjugate gradient, fail due to a rugged search landscape (e.g. noise, local
 * optima, outliers, etc.) of the objective function. Like a
 * quasi-Newton method, the CMA-ES learns and applies a variable metric
 * on the underlying search space. Unlike a quasi-Newton method, the
 * CMA-ES neither estimates nor uses gradients, making it considerably more
 * reliable in terms of finding a good, or even close to optimal, solution.</p>
 *
 * <p>In general, on smooth objective functions the CMA-ES is roughly ten times
 * slower than BFGS (counting objective function evaluations, no gradients provided).
 * For up to <math>N=10</math> variables, the derivative-free simplex
 * direct search method (Nelder and Mead) can also be faster, but it is
 * far less reliable than CMA-ES.</p>
 *
 * <p>The CMA-ES is particularly well suited for non-separable
 * and/or badly conditioned problems. To observe the advantage of CMA compared
 * to a conventional evolution strategy, it will usually take about
 * <math>30 N</math> function evaluations. On difficult problems the complete
 * optimization (a single run) is expected to take <em>roughly</em> between
 * <math>30 N</math> and <math>300 N<sup>2</sup></math>
 * function evaluations.</p>
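 *
 * <p>A typical invocation could look as follows. This is an illustrative
 * sketch only: {@code myFunction} stands for a user-supplied
 * {@code MultivariateFunction} of dimension 2, and the remaining optimization
 * data are the standard classes of the {@code org.apache.commons.math3.optim}
 * packages.</p>
 * <pre>
 * CMAESOptimizer optimizer
 *     = new CMAESOptimizer(30000, 0, true, 0, 10,
 *                          new MersenneTwister(), false, null);
 * PointValuePair result
 *     = optimizer.optimize(new MaxEval(100000),
 *                          new ObjectiveFunction(myFunction),
 *                          GoalType.MINIMIZE,
 *                          new InitialGuess(new double[] { 1, 1 }),
 *                          new SimpleBounds(new double[] { -10, -10 },
 *                                           new double[] { 10, 10 }),
 *                          new CMAESOptimizer.Sigma(new double[] { 0.5, 0.5 }),
 *                          new CMAESOptimizer.PopulationSize(10));
 * </pre>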
 *
 * <p>This implementation is translated and adapted from the Matlab version
 * of the CMA-ES algorithm as implemented in module {@code cmaes.m} version 3.51.</p>
 *
 * For more information, please refer to the following links:
 * <ul>
 *  <li><a href="http://www.lri.fr/~hansen/cmaes.m">Matlab code</a></li>
 *  <li><a href="http://www.lri.fr/~hansen/cmaesintro.html">Introduction to CMA-ES</a></li>
 *  <li><a href="http://en.wikipedia.org/wiki/CMA-ES">Wikipedia</a></li>
 * </ul>
 *
 * @version $Id: CMAESOptimizer.java 1462503 2013-03-29 15:48:27Z luc $
 * @since 3.0
 */
public class CMAESOptimizer
    extends MultivariateOptimizer {
    // global search parameters
    /**
     * Population size, offspring number. The primary strategy parameter to play
     * with, which can be increased from its default value. Increasing the
     * population size improves global search properties at the expense of speed.
     * Speed decreases, as a rule, at most linearly with increasing population
     * size. It is advisable to begin with the default small population size.
     */
    private int lambda; // population size
    /**
     * Covariance update mechanism, default is active CMA. isActiveCMA = true
     * turns on "active CMA" with a negative update of the covariance matrix and
     * checks for positive definiteness. OPTS.CMA.active = 2 does not check for
     * pos. def. and is numerically faster. Active CMA usually speeds up the
     * adaptation.
     */
    private final boolean isActiveCMA;
    /**
     * Determines how often a new random offspring is generated in case it is
     * not feasible / beyond the defined limits, default is 0.
     */
    private final int checkFeasableCount;
    /**
     * @see Sigma
     */
    private double[] inputSigma;
    /** Number of objective variables/problem dimension. */
    private int dimension;
    /**
     * Defines the number of initial iterations, where the covariance matrix
     * remains diagonal and the algorithm has internally linear time complexity.
     * diagonalOnly = 1 means keeping the covariance matrix always diagonal and
     * this setting also exhibits linear space complexity. This can be
     * particularly useful for dimension > 100.
     * @see <a href="http://hal.archives-ouvertes.fr/inria-00287367/en">A Simple Modification in CMA-ES</a>
     */
    private int diagonalOnly;
    /** Indicates whether the goal is to minimize (true) or maximize. */
    private boolean isMinimize = true;
    /** Indicates whether statistic data is collected. */
    private final boolean generateStatistics;

    // termination criteria
    /** Maximal number of iterations allowed. */
    private final int maxIterations;
    /** Limit for fitness value. */
    private final double stopFitness;
    /** Stop if the x-change is larger than stopTolUpX. */
    private double stopTolUpX;
    /** Stop if the x-change is smaller than stopTolX. */
    private double stopTolX;
    /** Stop if the change in function value is smaller than stopTolFun. */
    private double stopTolFun;
    /** Stop if the range of recorded function values is smaller than stopTolHistFun. */
    private double stopTolHistFun;

    // selection strategy parameters
    /** Number of parents/points for recombination. */
    private int mu;
    /** log(mu + 0.5), stored for efficiency. */
    private double logMu2;
    /** Array for weighted recombination. */
    private RealMatrix weights;
    /** Variance-effectiveness of sum w_i x_i. */
    private double mueff;

    // dynamic strategy parameters and constants
    /** Overall standard deviation - search volume. */
    private double sigma;
    /** Cumulation constant. */
    private double cc;
    /** Cumulation constant for step-size. */
    private double cs;
    /** Damping for step-size. */
    private double damps;
    /** Learning rate for rank-one update. */
    private double ccov1;
    /** Learning rate for rank-mu update. */
    private double ccovmu;
    /** Expectation of ||N(0,I)|| == norm(randn(N,1)). */
    private double chiN;
    /** Learning rate for rank-one update - diagonalOnly. */
    private double ccov1Sep;
    /** Learning rate for rank-mu update - diagonalOnly. */
    private double ccovmuSep;

    // CMA internal values - updated each generation
    /** Objective variables. */
    private RealMatrix xmean;
    /** Evolution path. */
    private RealMatrix pc;
    /** Evolution path for sigma. */
    private RealMatrix ps;
    /** Norm of ps, stored for efficiency. */
    private double normps;
    /** Coordinate system. */
    private RealMatrix B;
    /** Scaling. */
    private RealMatrix D;
    /** B*D, stored for efficiency. */
    private RealMatrix BD;
    /** Diagonal of sqrt(D), stored for efficiency. */
    private RealMatrix diagD;
    /** Covariance matrix. */
    private RealMatrix C;
    /** Diagonal of C, used for diagonalOnly. */
    private RealMatrix diagC;
    /** Number of iterations already performed. */
    private int iterations;

    /** History queue of best values. */
    private double[] fitnessHistory;
    /** Size of history queue of best values. */
    private int historySize;

    /** Random generator. */
    private final RandomGenerator random;

    /** History of sigma values. */
    private final List<Double> statisticsSigmaHistory = new ArrayList<Double>();
    /** History of mean matrix. */
    private final List<RealMatrix> statisticsMeanHistory = new ArrayList<RealMatrix>();
    /** History of fitness values. */
    private final List<Double> statisticsFitnessHistory = new ArrayList<Double>();
    /** History of D matrix. */
    private final List<RealMatrix> statisticsDHistory = new ArrayList<RealMatrix>();

    /**
     * @param maxIterations Maximal number of iterations.
     * @param stopFitness Stop the optimization if the objective function value
     * is smaller than {@code stopFitness}.
     * @param isActiveCMA Chooses the covariance matrix update method.
     * @param diagonalOnly Number of initial iterations, where the covariance matrix
     * remains diagonal.
     * @param checkFeasableCount Determines how often new random objective variables are
     * generated in case they are out of bounds.
     * @param random Random generator.
     * @param generateStatistics Whether statistic data is collected.
     * @param checker Convergence checker.
     *
     * @since 3.1
     */
    public CMAESOptimizer(int maxIterations,
                          double stopFitness,
                          boolean isActiveCMA,
                          int diagonalOnly,
                          int checkFeasableCount,
                          RandomGenerator random,
                          boolean generateStatistics,
                          ConvergenceChecker<PointValuePair> checker) {
        super(checker);
        this.maxIterations = maxIterations;
        this.stopFitness = stopFitness;
        this.isActiveCMA = isActiveCMA;
        this.diagonalOnly = diagonalOnly;
        this.checkFeasableCount = checkFeasableCount;
        this.random = random;
        this.generateStatistics = generateStatistics;
    }

    /**
     * @return History of sigma values.
     */
    public List<Double> getStatisticsSigmaHistory() {
        return statisticsSigmaHistory;
    }

    /**
     * @return History of mean matrix.
     */
    public List<RealMatrix> getStatisticsMeanHistory() {
        return statisticsMeanHistory;
    }

    /**
     * @return History of fitness values.
     */
    public List<Double> getStatisticsFitnessHistory() {
        return statisticsFitnessHistory;
    }

    /**
     * @return History of D matrix.
     */
    public List<RealMatrix> getStatisticsDHistory() {
        return statisticsDHistory;
    }

    /**
     * Input sigma values.
     * They define the initial coordinate-wise standard deviations for
     * sampling new search points around the initial guess.
     * It is suggested to set them to the estimated distance from the
     * initial to the desired optimum.
     * Small values induce the search to be more local (and very small
     * values are more likely to find a local optimum close to the initial
     * guess).
     * Too small values might however lead to early termination.
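     * <p>For instance, an illustrative sketch for a 2-dimensional problem
     * whose optimum is expected about one unit away from the initial guess
     * in each coordinate:</p>
     * <pre>
     * OptimizationData sigma = new CMAESOptimizer.Sigma(new double[] { 1, 1 });
     * </pre>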
     */
    public static class Sigma implements OptimizationData {
        /** Sigma values. */
        private final double[] sigma;

        /**
         * @param s Sigma values.
         * @throws NotPositiveException if any of the array entries is smaller
         * than zero.
         */
        public Sigma(double[] s)
            throws NotPositiveException {
            for (int i = 0; i < s.length; i++) {
                if (s[i] < 0) {
                    throw new NotPositiveException(s[i]);
                }
            }

            sigma = s.clone();
        }

        /**
         * @return the sigma values.
         */
        public double[] getSigma() {
            return sigma.clone();
        }
    }

    /**
     * Population size.
     * The number of offspring is the primary strategy parameter.
     * In the absence of better clues, a good default could be an
     * integer close to {@code 4 + 3 ln(n)}, where {@code n} is the
     * number of optimized parameters.
     * Increasing the population size improves global search properties
     * at the expense of speed (which in general decreases at most
     * linearly with increasing population size).
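     * <p>For instance, an illustrative sketch of the suggested default for a
     * hypothetical problem dimension {@code n}:</p>
     * <pre>
     * int n = 10;
     * OptimizationData popSize
     *     = new CMAESOptimizer.PopulationSize((int) (4 + 3 * Math.log(n)));
     * </pre>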
     */
    public static class PopulationSize implements OptimizationData {
        /** Population size. */
        private final int lambda;

        /**
         * @param size Population size.
         * @throws NotStrictlyPositiveException if {@code size <= 0}.
         */
        public PopulationSize(int size)
            throws NotStrictlyPositiveException {
            if (size <= 0) {
                throw new NotStrictlyPositiveException(size);
            }
            lambda = size;
        }

        /**
         * @return the population size.
         */
        public int getPopulationSize() {
            return lambda;
        }
    }

    /**
     * {@inheritDoc}
     *
     * @param optData Optimization data. In addition to those documented in
     * {@link MultivariateOptimizer#parseOptimizationData(OptimizationData[])
     * MultivariateOptimizer}, this method will register the following data:
     * <ul>
     *  <li>{@link Sigma}</li>
     *  <li>{@link PopulationSize}</li>
     * </ul>
     * @return {@inheritDoc}
     * @throws TooManyEvaluationsException if the maximal number of
     * evaluations is exceeded.
     * @throws DimensionMismatchException if the initial guess, target, and weight
     * arguments have inconsistent dimensions.
     */
    @Override
    public PointValuePair optimize(OptimizationData... optData)
        throws TooManyEvaluationsException,
               DimensionMismatchException {
        // Set up base class and perform computation.
        return super.optimize(optData);
    }

    /** {@inheritDoc} */
    @Override
    protected PointValuePair doOptimize() {
        // -------------------- Initialization --------------------------------
        isMinimize = getGoalType().equals(GoalType.MINIMIZE);
        final FitnessFunction fitfun = new FitnessFunction();
        final double[] guess = getStartPoint();
        // number of objective variables/problem dimension
        dimension = guess.length;
        initializeCMA(guess);
        iterations = 0;
        double bestValue = fitfun.value(guess);
        push(fitnessHistory, bestValue);
        PointValuePair optimum
            = new PointValuePair(getStartPoint(),
                                 isMinimize ? bestValue : -bestValue);
        PointValuePair lastResult = null;

        // -------------------- Generation Loop --------------------------------

        generationLoop:
        for (iterations = 1; iterations <= maxIterations; iterations++) {
            incrementIterationCount();

            // Generate and evaluate lambda offspring
            final RealMatrix arz = randn1(dimension, lambda);
            final RealMatrix arx = zeros(dimension, lambda);
            final double[] fitness = new double[lambda];
            // generate random offspring
            for (int k = 0; k < lambda; k++) {
                RealMatrix arxk = null;
                for (int i = 0; i < checkFeasableCount + 1; i++) {
                    if (diagonalOnly <= 0) {
                        arxk = xmean.add(BD.multiply(arz.getColumnMatrix(k))
                                         .scalarMultiply(sigma)); // m + sig * Normal(0,C)
                    } else {
                        arxk = xmean.add(times(diagD, arz.getColumnMatrix(k))
                                         .scalarMultiply(sigma));
                    }
                    if (i >= checkFeasableCount ||
                        fitfun.isFeasible(arxk.getColumn(0))) {
                        break;
                    }
                    // regenerate random arguments for row
                    arz.setColumn(k, randn(dimension));
                }
                copyColumn(arxk, 0, arx, k);
                try {
                    fitness[k] = fitfun.value(arx.getColumn(k)); // compute fitness
                } catch (TooManyEvaluationsException e) {
                    break generationLoop;
                }
            }
            // Sort by fitness and compute weighted mean into xmean
            final int[] arindex = sortedIndices(fitness);
            // Calculate new xmean, this is selection and recombination
            final RealMatrix xold = xmean; // for speed up of Eq. (2) and (3)
            final RealMatrix bestArx = selectColumns(arx, MathArrays.copyOf(arindex, mu));
            xmean = bestArx.multiply(weights);
            final RealMatrix bestArz = selectColumns(arz, MathArrays.copyOf(arindex, mu));
            final RealMatrix zmean = bestArz.multiply(weights);
            final boolean hsig = updateEvolutionPaths(zmean, xold);
            if (diagonalOnly <= 0) {
                updateCovariance(hsig, bestArx, arz, arindex, xold);
            } else {
                updateCovarianceDiagonalOnly(hsig, bestArz);
            }
            // Adapt step size sigma - Eq. (5)
            sigma *= Math.exp(Math.min(1, (normps / chiN - 1) * cs / damps));
            final double bestFitness = fitness[arindex[0]];
            final double worstFitness = fitness[arindex[arindex.length - 1]];
            if (bestValue > bestFitness) {
                bestValue = bestFitness;
                lastResult = optimum;
                optimum = new PointValuePair(fitfun.repair(bestArx.getColumn(0)),
                                             isMinimize ? bestFitness : -bestFitness);
                if (getConvergenceChecker() != null && lastResult != null &&
                    getConvergenceChecker().converged(iterations, optimum, lastResult)) {
                    break generationLoop;
                }
            }
            // handle termination criteria
            // Break, if fitness is good enough
            if (stopFitness != 0 && bestFitness < (isMinimize ? stopFitness : -stopFitness)) {
                break generationLoop;
            }
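            // The next check stops only when sigma * max(|pc[i]|, sqrt(C[i][i]))
            // has fallen below stopTolX in every coordinate: the inner break
            // abandons the check as soon as one coordinate still moves, while
            // reaching the last index triggers the labeled break.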
            final double[] sqrtDiagC = sqrt(diagC).getColumn(0);
            final double[] pcCol = pc.getColumn(0);
            for (int i = 0; i < dimension; i++) {
                if (sigma * Math.max(Math.abs(pcCol[i]), sqrtDiagC[i]) > stopTolX) {
                    break;
                }
                if (i >= dimension - 1) {
                    break generationLoop;
                }
            }
            for (int i = 0; i < dimension; i++) {
                if (sigma * sqrtDiagC[i] > stopTolUpX) {
                    break generationLoop;
                }
            }
            final double historyBest = min(fitnessHistory);
            final double historyWorst = max(fitnessHistory);
            if (iterations > 2 &&
                Math.max(historyWorst, worstFitness) -
                Math.min(historyBest, bestFitness) < stopTolFun) {
                break generationLoop;
            }
            if (iterations > fitnessHistory.length &&
                historyWorst - historyBest < stopTolHistFun) {
                break generationLoop;
            }
            // condition number of the covariance matrix exceeds 1e14
            // (i.e. the axis ratio max(diagD) / min(diagD) of the standard
            // deviations exceeds 1e7)
            if (max(diagD) / min(diagD) > 1e7) {
                break generationLoop;
            }
            // user defined termination
            if (getConvergenceChecker() != null) {
                final PointValuePair current
                    = new PointValuePair(bestArx.getColumn(0),
                                         isMinimize ? bestFitness : -bestFitness);
                if (lastResult != null &&
                    getConvergenceChecker().converged(iterations, current, lastResult)) {
                    break generationLoop;
                }
                lastResult = current;
            }
            // Adjust step size in case of equal function values (flat fitness)
            if (bestValue == fitness[arindex[(int) (0.1 + lambda / 4.)]]) {
                sigma = sigma * Math.exp(0.2 + cs / damps);
            }
            if (iterations > 2 && Math.max(historyWorst, bestFitness) -
                Math.min(historyBest, bestFitness) == 0) {
                sigma = sigma * Math.exp(0.2 + cs / damps);
            }
            // store best in history
            push(fitnessHistory, bestFitness);
            fitfun.setValueRange(worstFitness - bestFitness);
            if (generateStatistics) {
                statisticsSigmaHistory.add(sigma);
                statisticsFitnessHistory.add(bestFitness);
                statisticsMeanHistory.add(xmean.transpose());
                statisticsDHistory.add(diagD.transpose().scalarMultiply(1E5));
            }
        }
        return optimum;
    }

    /**
     * Scans the list of (required and optional) optimization data that
     * characterize the problem.
     *
     * @param optData Optimization data. The following data will be looked for:
     * <ul>
     *  <li>{@link Sigma}</li>
     *  <li>{@link PopulationSize}</li>
     * </ul>
     */
    @Override
    protected void parseOptimizationData(OptimizationData... optData) {
        // Allow base class to register its own data.
        super.parseOptimizationData(optData);

        // The existing values (as set by the previous call) are reused if
        // not provided in the argument list.
        for (OptimizationData data : optData) {
            if (data instanceof Sigma) {
                inputSigma = ((Sigma) data).getSigma();
                continue;
            }
            if (data instanceof PopulationSize) {
                lambda = ((PopulationSize) data).getPopulationSize();
                continue;
            }
        }

        checkParameters();
    }

    /**
     * Checks dimensions and values of boundaries and inputSigma if defined.
     */
    private void checkParameters() {
        final double[] init = getStartPoint();
        final double[] lB = getLowerBound();
        final double[] uB = getUpperBound();

        if (inputSigma != null) {
            if (inputSigma.length != init.length) {
                throw new DimensionMismatchException(inputSigma.length, init.length);
            }
            for (int i = 0; i < init.length; i++) {
                if (inputSigma[i] > uB[i] - lB[i]) {
                    throw new OutOfRangeException(inputSigma[i], 0, uB[i] - lB[i]);
                }
            }
        }
    }

    /**
     * Initialization of the dynamic search parameters.
     *
     * @param guess Initial guess for the arguments of the fitness function.
     */
    private void initializeCMA(double[] guess) {
        if (lambda <= 0) {
            throw new NotStrictlyPositiveException(lambda);
        }
        // initialize sigma
        final double[][] sigmaArray = new double[guess.length][1];
        for (int i = 0; i < guess.length; i++) {
            sigmaArray[i][0] = inputSigma[i];
        }
        final RealMatrix insigma = new Array2DRowRealMatrix(sigmaArray, false);
        sigma = max(insigma); // overall standard deviation

        // initialize termination criteria
        stopTolUpX = 1e3 * max(insigma);
        stopTolX = 1e-11 * max(insigma);
        stopTolFun = 1e-12;
        stopTolHistFun = 1e-13;

        // initialize selection strategy parameters
        mu = lambda / 2; // number of parents/points for recombination
        logMu2 = Math.log(mu + 0.5);
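        // Recombination weights w_i = log(mu + 0.5) - log(i) for i = 1..mu,
        // normalized below so that they sum to 1.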
        weights = log(sequence(1, mu, 1)).scalarMultiply(-1).scalarAdd(logMu2);
        double sumw = 0;
        double sumwq = 0;
        for (int i = 0; i < mu; i++) {
            double w = weights.getEntry(i, 0);
            sumw += w;
            sumwq += w * w;
        }
        weights = weights.scalarMultiply(1 / sumw);
        mueff = sumw * sumw / sumwq; // variance-effectiveness of sum w_i x_i

        // initialize dynamic strategy parameters and constants
        cc = (4 + mueff / dimension) /
            (dimension + 4 + 2 * mueff / dimension);
        cs = (mueff + 2) / (dimension + mueff + 3.);
        damps = (1 + 2 * Math.max(0, Math.sqrt((mueff - 1) /
                                               (dimension + 1)) - 1)) *
            Math.max(0.3,
                     1 - dimension / (1e-6 + maxIterations)) + cs; // minor increment
        ccov1 = 2 / ((dimension + 1.3) * (dimension + 1.3) + mueff);
        ccovmu = Math.min(1 - ccov1, 2 * (mueff - 2 + 1 / mueff) /
                          ((dimension + 2) * (dimension + 2) + mueff));
        ccov1Sep = Math.min(1, ccov1 * (dimension + 1.5) / 3);
        ccovmuSep = Math.min(1 - ccov1, ccovmu * (dimension + 1.5) / 3);
        chiN = Math.sqrt(dimension) *
            (1 - 1 / ((double) 4 * dimension) + 1 / ((double) 21 * dimension * dimension));
        // initialize CMA internal values - updated each generation
        xmean = MatrixUtils.createColumnRealMatrix(guess); // objective variables
        diagD = insigma.scalarMultiply(1 / sigma);
        diagC = square(diagD);
        pc = zeros(dimension, 1); // evolution paths for C and sigma
        ps = zeros(dimension, 1); // B defines the coordinate system
        normps = ps.getFrobeniusNorm();

        B = eye(dimension, dimension);
        D = ones(dimension, 1); // diagonal D defines the scaling
        BD = times(B, repmat(diagD.transpose(), dimension, 1));
        C = B.multiply(diag(square(D)).multiply(B.transpose())); // covariance
        historySize = 10 + (int) (3 * 10 * dimension / (double) lambda);
        fitnessHistory = new double[historySize]; // history of fitness values
        for (int i = 0; i < historySize; i++) {
            fitnessHistory[i] = Double.MAX_VALUE;
        }
    }

    /**
     * Update of the evolution paths ps and pc.
     *
     * @param zmean Weighted column matrix of the gaussian random numbers generating
     * the current offspring.
     * @param xold xmean matrix of the previous generation.
     * @return hsig flag indicating a small correction.
     */
    private boolean updateEvolutionPaths(RealMatrix zmean, RealMatrix xold) {
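        // Cumulate the step-size evolution path:
        //   ps = (1 - cs) * ps + sqrt(cs * (2 - cs) * mueff) * B * zmean
        // The hsig flag computed below suppresses the pc update when ||ps||
        // is unexpectedly large.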
        ps = ps.scalarMultiply(1 - cs).add(
                B.multiply(zmean).scalarMultiply(
                        Math.sqrt(cs * (2 - cs) * mueff)));
        normps = ps.getFrobeniusNorm();
        final boolean hsig = normps /
            Math.sqrt(1 - Math.pow(1 - cs, 2 * iterations)) /
            chiN < 1.4 + 2 / ((double) dimension + 1);
        pc = pc.scalarMultiply(1 - cc);
        if (hsig) {
            pc = pc.add(xmean.subtract(xold).scalarMultiply(Math.sqrt(cc * (2 - cc) * mueff) / sigma));
        }
        return hsig;
    }

    /**
     * Update of the covariance matrix C for diagonalOnly > 0.
     *
     * @param hsig Flag indicating a small correction.
     * @param bestArz Fitness-sorted matrix of the gaussian random values of the
     * current offspring.
     */
    private void updateCovarianceDiagonalOnly(boolean hsig,
                                              final RealMatrix bestArz) {
        // minor correction if hsig==false
        double oldFac = hsig ? 0 : ccov1Sep * cc * (2 - cc);
        oldFac += 1 - ccov1Sep - ccovmuSep;
        diagC = diagC.scalarMultiply(oldFac) // regard old matrix
            .add(square(pc).scalarMultiply(ccov1Sep)) // plus rank one update
            .add((times(diagC, square(bestArz).multiply(weights))) // plus rank mu update
                 .scalarMultiply(ccovmuSep));
        diagD = sqrt(diagC); // replaces eig(C)
        if (diagonalOnly > 1 &&
            iterations > diagonalOnly) {
            // full covariance matrix from now on
            diagonalOnly = 0;
            B = eye(dimension, dimension);
            BD = diag(diagD);
            C = diag(diagC);
        }
    }

    /**
     * Update of the covariance matrix C.
     *
     * @param hsig Flag indicating a small correction.
     * @param bestArx Fitness-sorted matrix of the argument vectors producing the
     * current offspring.
     * @param arz Unsorted matrix containing the gaussian random values of the
     * current offspring.
     * @param arindex Indices indicating the fitness-order of the current offspring.
     * @param xold xmean matrix of the previous generation.
     */
    private void updateCovariance(boolean hsig, final RealMatrix bestArx,
                                  final RealMatrix arz, final int[] arindex,
                                  final RealMatrix xold) {
        double negccov = 0;
        if (ccov1 + ccovmu > 0) {
            final RealMatrix arpos = bestArx.subtract(repmat(xold, 1, mu))
                .scalarMultiply(1 / sigma); // mu difference vectors
            final RealMatrix roneu = pc.multiply(pc.transpose())
                .scalarMultiply(ccov1); // rank one update
            // minor correction if hsig==false
            double oldFac = hsig ? 0 : ccov1 * cc * (2 - cc);
            oldFac += 1 - ccov1 - ccovmu;
            if (isActiveCMA) {
                // Adapt covariance matrix C active CMA
                negccov = (1 - ccovmu) * 0.25 * mueff /
                    (Math.pow(dimension + 2, 1.5) + 2 * mueff);
                // keep at least 0.66 in all directions, small popsize are most
                // critical
                final double negminresidualvariance = 0.66;
                // where to make up for the variance loss
                final double negalphaold = 0.5;
                // prepare vectors, compute negative updating matrix Cneg
                final int[] arReverseIndex = reverse(arindex);
                RealMatrix arzneg = selectColumns(arz, MathArrays.copyOf(arReverseIndex, mu));
                RealMatrix arnorms = sqrt(sumRows(square(arzneg)));
                final int[] idxnorms = sortedIndices(arnorms.getRow(0));
                final RealMatrix arnormsSorted = selectColumns(arnorms, idxnorms);
                final int[] idxReverse = reverse(idxnorms);
                final RealMatrix arnormsReverse = selectColumns(arnorms, idxReverse);
                arnorms = divide(arnormsReverse, arnormsSorted);
                final int[] idxInv = inverse(idxnorms);
                final RealMatrix arnormsInv = selectColumns(arnorms, idxInv);
                // check and set learning rate negccov
                final double negcovMax = (1 - negminresidualvariance) /
                    square(arnormsInv).multiply(weights).getEntry(0, 0);
                if (negccov > negcovMax) {
                    negccov = negcovMax;
                }
                arzneg = times(arzneg, repmat(arnormsInv, dimension, 1));
                final RealMatrix artmp = BD.multiply(arzneg);
                final RealMatrix Cneg = artmp.multiply(diag(weights)).multiply(artmp.transpose());
                oldFac += negalphaold * negccov;
                C = C.scalarMultiply(oldFac) // regard old matrix
                    .add(roneu) // plus rank one update
                    .add(arpos.scalarMultiply( // plus rank mu update
                             ccovmu + (1 - negalphaold) * negccov)
                         .multiply(times(repmat(weights, 1, dimension),
                                         arpos.transpose())))
                    .subtract(Cneg.scalarMultiply(negccov));
            } else {
                // Adapt covariance matrix C - nonactive
                C = C.scalarMultiply(oldFac) // regard old matrix
                    .add(roneu) // plus rank one update
                    .add(arpos.scalarMultiply(ccovmu) // plus rank mu update
                         .multiply(times(repmat(weights, 1, dimension),
                                         arpos.transpose())));
            }
        }
        updateBD(negccov);
    }

    /**
     * Update B and D from C.
     *
     * @param negccov Negative covariance factor.
     */
    private void updateBD(double negccov) {
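        // Perform the O(N^3) eigendecomposition only about once every
        // 1 / ((ccov1 + ccovmu + negccov) * dimension * 10) iterations; C
        // changes slowly, so this keeps the amortized cost per generation
        // at O(N^2).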
        if (ccov1 + ccovmu + negccov > 0 &&
            (iterations % (1. / (ccov1 + ccovmu + negccov) / dimension / 10.)) < 1) {
            // to achieve O(N^2)
            C = triu(C, 0).add(triu(C, 1).transpose());
            // enforce symmetry to prevent complex numbers
            final EigenDecomposition eig = new EigenDecomposition(C);
            B = eig.getV(); // eigen decomposition, B==normalized eigenvectors
            D = eig.getD();
            diagD = diag(D);
            if (min(diagD) <= 0) {
                for (int i = 0; i < dimension; i++) {
                    if (diagD.getEntry(i, 0) < 0) {
                        diagD.setEntry(i, 0, 0);
                    }
                }
                final double tfac = max(diagD) / 1e14;
                C = C.add(eye(dimension, dimension).scalarMultiply(tfac));
                diagD = diagD.add(ones(dimension, 1).scalarMultiply(tfac));
            }
            if (max(diagD) > 1e14 * min(diagD)) {
                final double tfac = max(diagD) / 1e14 - min(diagD);
                C = C.add(eye(dimension, dimension).scalarMultiply(tfac));
                diagD = diagD.add(ones(dimension, 1).scalarMultiply(tfac));
            }
            diagC = diag(C);
            diagD = sqrt(diagD); // D contains standard deviations now
            BD = times(B, repmat(diagD.transpose(), dimension, 1)); // O(n^2)
        }
    }

    /**
     * Pushes the current best fitness value in a history queue.
     *
     * @param vals History queue.
     * @param val Current best fitness value.
     */
    private static void push(double[] vals, double val) {
        for (int i = vals.length - 1; i > 0; i--) {
            vals[i] = vals[i - 1];
        }
        vals[0] = val;
    }

    /**
     * Sorts fitness values.
     *
     * @param doubles Array of values to be sorted.
     * @return a sorted array of indices pointing into doubles.
     */
    private int[] sortedIndices(final double[] doubles) {
        final DoubleIndex[] dis = new DoubleIndex[doubles.length];
        for (int i = 0; i < doubles.length; i++) {
            dis[i] = new DoubleIndex(doubles[i], i);
        }
        Arrays.sort(dis);
        final int[] indices = new int[doubles.length];
        for (int i = 0; i < doubles.length; i++) {
            indices[i] = dis[i].index;
        }
        return indices;
    }

    /**
     * Used to sort fitness values. Sorting is always in lower value first
     * order.
     */
    private static class DoubleIndex implements Comparable<DoubleIndex> {
        /** Value to compare. */
        private final double value;
        /** Index into sorted array. */
        private final int index;

        /**
         * @param value Value to compare.
         * @param index Index into sorted array.
         */
        DoubleIndex(double value, int index) {
            this.value = value;
            this.index = index;
        }

        /** {@inheritDoc} */
        public int compareTo(DoubleIndex o) {
            return Double.compare(value, o.value);
        }

        /** {@inheritDoc} */
        @Override
        public boolean equals(Object other) {

            if (this == other) {
                return true;
            }

            if (other instanceof DoubleIndex) {
                return Double.compare(value, ((DoubleIndex) other).value) == 0;
            }

            return false;
        }

        /** {@inheritDoc} */
        @Override
        public int hashCode() {
            long bits = Double.doubleToLongBits(value);
            return (int) ((1438542 ^ (bits >>> 32) ^ bits) & 0xffffffff);
        }
    }

    /**
     * Wraps the objective function, adding a penalty to the fitness value if
     * the point lies outside of the defined bounds. The penalty is adjusted
     * by calling setValueRange().
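     * <p>For an out-of-bounds point the penalty grows linearly with the
     * distance to the feasible box:
     * {@code penalty = valueRange * sum(|x[i] - repaired[i]|)}
     * (see {@link #penalty(double[], double[]) penalty}).</p>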
     */
    private class FitnessFunction {
        /** Determines the penalty for boundary violations. */
        private double valueRange;
        /**
         * Flag indicating whether the objective variables are forced into their
         * bounds if defined.
         */
        private final boolean isRepairMode;

        /** Simple constructor. */
        public FitnessFunction() {
            valueRange = 1;
            isRepairMode = true;
        }

        /**
         * @param point Normalized objective variables.
         * @return the objective value + penalty for violated bounds.
         */
        public double value(final double[] point) {
            double value;
            if (isRepairMode) {
                double[] repaired = repair(point);
                value = CMAESOptimizer.this.computeObjectiveValue(repaired) +
                    penalty(point, repaired);
            } else {
                value = CMAESOptimizer.this.computeObjectiveValue(point);
            }
            return isMinimize ? value : -value;
        }

        /**
         * @param x Normalized objective variables.
         * @return {@code true} if in bounds.
         */
        public boolean isFeasible(final double[] x) {
            final double[] lB = CMAESOptimizer.this.getLowerBound();
            final double[] uB = CMAESOptimizer.this.getUpperBound();

            for (int i = 0; i < x.length; i++) {
                if (x[i] < lB[i]) {
                    return false;
                }
                if (x[i] > uB[i]) {
                    return false;
                }
            }
            return true;
        }

        /**
         * @param valueRange Adjusts the penalty computation.
         */
        public void setValueRange(double valueRange) {
            this.valueRange = valueRange;
        }

        /**
         * @param x Normalized objective variables.
         * @return the repaired (i.e. all in bounds) objective variables.
         */
        private double[] repair(final double[] x) {
            final double[] lB = CMAESOptimizer.this.getLowerBound();
            final double[] uB = CMAESOptimizer.this.getUpperBound();

            final double[] repaired = new double[x.length];
            for (int i = 0; i < x.length; i++) {
                if (x[i] < lB[i]) {
                    repaired[i] = lB[i];
                } else if (x[i] > uB[i]) {
                    repaired[i] = uB[i];
                } else {
                    repaired[i] = x[i];
                }
            }
            return repaired;
        }

        /**
         * @param x Normalized objective variables.
         * @param repaired Repaired objective variables.
         * @return Penalty value according to the violation of the bounds.
         */
        private double penalty(final double[] x, final double[] repaired) {
            double penalty = 0;
            for (int i = 0; i < x.length; i++) {
                double diff = Math.abs(x[i] - repaired[i]);
                penalty += diff * valueRange;
            }
            return isMinimize ? penalty : -penalty;
        }
    }

    // -----Matrix utility functions similar to the Matlab built-in functions------
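    // For example, the symmetry enforcement in updateBD above,
    // "triu(C, 0).add(triu(C, 1).transpose())", mirrors the Matlab idiom
    // "C = triu(C,0) + triu(C,1)'" from cmaes.m.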

    /**
     * @param m Input matrix.
     * @return Matrix representing the element-wise logarithm of m.
     */
    private static RealMatrix log(final RealMatrix m) {
        final double[][] d = new double[m.getRowDimension()][m.getColumnDimension()];
        for (int r = 0; r < m.getRowDimension(); r++) {
            for (int c = 0; c < m.getColumnDimension(); c++) {
                d[r][c] = Math.log(m.getEntry(r, c));
            }
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
     * @param m Input matrix.
     * @return Matrix representing the element-wise square root of m.
     */
    private static RealMatrix sqrt(final RealMatrix m) {
        final double[][] d = new double[m.getRowDimension()][m.getColumnDimension()];
        for (int r = 0; r < m.getRowDimension(); r++) {
            for (int c = 0; c < m.getColumnDimension(); c++) {
                d[r][c] = Math.sqrt(m.getEntry(r, c));
            }
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
     * @param m Input matrix.
     * @return Matrix representing the element-wise square of m.
     */
    private static RealMatrix square(final RealMatrix m) {
        final double[][] d = new double[m.getRowDimension()][m.getColumnDimension()];
        for (int r = 0; r < m.getRowDimension(); r++) {
            for (int c = 0; c < m.getColumnDimension(); c++) {
                double e = m.getEntry(r, c);
                d[r][c] = e * e;
            }
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
     * @param m Input matrix 1.
     * @param n Input matrix 2.
     * @return the matrix where the elements of m and n are element-wise multiplied.
     */
    private static RealMatrix times(final RealMatrix m, final RealMatrix n) {
        final double[][] d = new double[m.getRowDimension()][m.getColumnDimension()];
        for (int r = 0; r < m.getRowDimension(); r++) {
            for (int c = 0; c < m.getColumnDimension(); c++) {
                d[r][c] = m.getEntry(r, c) * n.getEntry(r, c);
            }
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
     * @param m Input matrix 1.
     * @param n Input matrix 2.
     * @return Matrix where the elements of m and n are element-wise divided.
     */
    private static RealMatrix divide(final RealMatrix m, final RealMatrix n) {
        final double[][] d = new double[m.getRowDimension()][m.getColumnDimension()];
        for (int r = 0; r < m.getRowDimension(); r++) {
            for (int c = 0; c < m.getColumnDimension(); c++) {
                d[r][c] = m.getEntry(r, c) / n.getEntry(r, c);
            }
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
     * @param m Input matrix.
     * @param cols Columns to select.
     * @return Matrix representing the selected columns.
     */
    private static RealMatrix selectColumns(final RealMatrix m, final int[] cols) {
        final double[][] d = new double[m.getRowDimension()][cols.length];
        for (int r = 0; r < m.getRowDimension(); r++) {
            for (int c = 0; c < cols.length; c++) {
                d[r][c] = m.getEntry(r, cols[c]);
            }
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
     * @param m Input matrix.
     * @param k Diagonal position.
     * @return Upper triangular part of matrix.
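     * ({@code k = 0} keeps the main diagonal and everything above it,
     * {@code k = 1} only the part strictly above the diagonal; all other
     * entries are zeroed.)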
     */
    private static RealMatrix triu(final RealMatrix m, int k) {
        final double[][] d = new double[m.getRowDimension()][m.getColumnDimension()];
        for (int r = 0; r < m.getRowDimension(); r++) {
            for (int c = 0; c < m.getColumnDimension(); c++) {
                d[r][c] = r <= c - k ? m.getEntry(r, c) : 0;
            }
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
     * @param m Input matrix.
     * @return Row matrix representing the sums of the rows.
     */
    private static RealMatrix sumRows(final RealMatrix m) {
        final double[][] d = new double[1][m.getColumnDimension()];
        for (int c = 0; c < m.getColumnDimension(); c++) {
            double sum = 0;
            for (int r = 0; r < m.getRowDimension(); r++) {
                sum += m.getEntry(r, c);
            }
            d[0][c] = sum;
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
     * @param m Input matrix.
     * @return the diagonal n-by-n matrix if m is a column matrix or the column
     * matrix representing the diagonal if m is a n-by-n matrix.
     */
    private static RealMatrix diag(final RealMatrix m) {
        if (m.getColumnDimension() == 1) {
            final double[][] d = new double[m.getRowDimension()][m.getRowDimension()];
            for (int i = 0; i < m.getRowDimension(); i++) {
                d[i][i] = m.getEntry(i, 0);
            }
            return new Array2DRowRealMatrix(d, false);
        } else {
            final double[][] d = new double[m.getRowDimension()][1];
            for (int i = 0; i < m.getColumnDimension(); i++) {
                d[i][0] = m.getEntry(i, i);
            }
            return new Array2DRowRealMatrix(d, false);
        }
    }

    /**
     * Copies a column from m1 to m2.
     *
     * @param m1 Source matrix.
     * @param col1 Source column.
     * @param m2 Target matrix.
     * @param col2 Target column.
     */
    private static void copyColumn(final RealMatrix m1, int col1,
                                   RealMatrix m2, int col2) {
        for (int i = 0; i < m1.getRowDimension(); i++) {
            m2.setEntry(i, col2, m1.getEntry(i, col1));
        }
    }

    /**
     * @param n Number of rows.
     * @param m Number of columns.
     * @return n-by-m matrix filled with 1.
     */
    private static RealMatrix ones(int n, int m) {
        final double[][] d = new double[n][m];
        for (int r = 0; r < n; r++) {
            Arrays.fill(d[r], 1);
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
     * @param n Number of rows.
     * @param m Number of columns.
     * @return n-by-m matrix of 0 values out of diagonal, and 1 values on
     * the diagonal.
     */
    private static RealMatrix eye(int n, int m) {
        final double[][] d = new double[n][m];
        for (int r = 0; r < n; r++) {
            if (r < m) {
                d[r][r] = 1;
            }
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
     * @param n Number of rows.
     * @param m Number of columns.
     * @return n-by-m matrix of zero values.
     */
    private static RealMatrix zeros(int n, int m) {
        return new Array2DRowRealMatrix(n, m);
    }

    /**
     * @param mat Input matrix.
     * @param n Number of row replicates.
     * @param m Number of column replicates.
     * @return a matrix which replicates the input matrix in both directions.
     */
    private static RealMatrix repmat(final RealMatrix mat, int n, int m) {
        final int rd = mat.getRowDimension();
        final int cd = mat.getColumnDimension();
        final double[][] d = new double[n * rd][m * cd];
        for (int r = 0; r < n * rd; r++) {
            for (int c = 0; c < m * cd; c++) {
                d[r][c] = mat.getEntry(r % rd, c % cd);
            }
        }
        return new Array2DRowRealMatrix(d, false);
    }
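    // Note (illustrative): sequence(1, mu, 1) below yields the column vector
    // (1, 2, ..., mu), from which the recombination weights are computed in
    // initializeCMA.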
    /**
     * @param start Start value.
     * @param end End value.
     * @param step Step size.
     * @return a sequence as column matrix.
     */
    private static RealMatrix sequence(double start, double end, double step) {
        final int size = (int) ((end - start) / step + 1);
        final double[][] d = new double[size][1];
        double value = start;
        for (int r = 0; r < size; r++) {
            d[r][0] = value;
            value += step;
        }
        return new Array2DRowRealMatrix(d, false);
    }

    /**
     * @param m Input matrix.
     * @return the maximum of the matrix element values.
     */
    private static double max(final RealMatrix m) {
        double max = -Double.MAX_VALUE;
        for (int r = 0; r < m.getRowDimension(); r++) {
            for (int c = 0; c < m.getColumnDimension(); c++) {
                double e = m.getEntry(r, c);
                if (max < e) {
                    max = e;
                }
            }
        }
        return max;
    }

    /**
     * @param m Input matrix.
     * @return the minimum of the matrix element values.
     */
    private static double min(final RealMatrix m) {
        double min = Double.MAX_VALUE;
        for (int r = 0; r < m.getRowDimension(); r++) {
            for (int c = 0; c < m.getColumnDimension(); c++) {
                double e = m.getEntry(r, c);
                if (min > e) {
                    min = e;
                }
            }
        }
        return min;
    }

    /**
     * @param m Input array.
     * @return the maximum of the array values.
     */
    private static double max(final double[] m) {
        double max = -Double.MAX_VALUE;
        for (int r = 0; r < m.length; r++) {
            if (max < m[r]) {
                max = m[r];
            }
        }
        return max;
    }

    /**
     * @param m Input array.
     * @return the minimum of the array values.
     */
    private static double min(final double[] m) {
        double min = Double.MAX_VALUE;
        for (int r = 0; r < m.length; r++) {
            if (min > m[r]) {
                min = m[r];
            }
        }
        return min;
    }

    /**
     * @param indices Input index array.
     * @return the inverse of the mapping defined by indices.
     */
    private static int[] inverse(final int[] indices) {
        final int[] inverse = new int[indices.length];
        for (int i = 0; i < indices.length; i++) {
            inverse[indices[i]] = i;
        }
        return inverse;
    }

    /**
     * @param indices Input index array.
     * @return the indices in inverse order (last is first).
     */
    private static int[] reverse(final int[] indices) {
        final int[] reverse = new int[indices.length];
        for (int i = 0; i < indices.length; i++) {
            reverse[i] = indices[indices.length - i - 1];
        }
        return reverse;
    }

    /**
     * @param size Length of random array.
     * @return an array of Gaussian random numbers.
     */
    private double[] randn(int size) {
        final double[] randn = new double[size];
        for (int i = 0; i < size; i++) {
            randn[i] = random.nextGaussian();
        }
        return randn;
    }

    /**
     * @param size Number of rows.
     * @param popSize Population size.
     * @return a 2-dimensional matrix of Gaussian random numbers.
     */
    private RealMatrix randn1(int size, int popSize) {
        final double[][] d = new double[size][popSize];
        for (int r = 0; r < size; r++) {
            for (int c = 0; c < popSize; c++) {
                d[r][c] = random.nextGaussian();
            }
        }
        return new Array2DRowRealMatrix(d, false);
    }
}