LevenbergMarquardtOptimizer.java

/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.commons.math4.legacy.fitting.leastsquares;

import java.util.Arrays;

import org.apache.commons.math4.legacy.exception.ConvergenceException;
import org.apache.commons.math4.legacy.exception.util.LocalizedFormats;
import org.apache.commons.math4.legacy.fitting.leastsquares.LeastSquaresProblem.Evaluation;
import org.apache.commons.math4.legacy.linear.ArrayRealVector;
import org.apache.commons.math4.legacy.linear.RealMatrix;
import org.apache.commons.math4.legacy.optim.ConvergenceChecker;
import org.apache.commons.math4.core.jdkmath.JdkMath;
import org.apache.commons.math4.legacy.core.IntegerSequence;
import org.apache.commons.numbers.core.Precision;


/**
 * This class solves a least-squares problem using the Levenberg-Marquardt
 * algorithm.
 *
 * <p>This implementation <em>should</em> work even for over-determined systems
 * (i.e. systems having more observations than unknown parameters). Over-determined
 * systems are solved by ignoring the points that have the smallest impact according
 * to their Jacobian column norm. Only the rank of the matrix and some loop bounds
 * are changed to implement this.</p>
 *
 * <p>The resolution engine is a simple translation of the MINPACK <a
 * href="http://www.netlib.org/minpack/lmder.f">lmder</a> routine with minor
 * changes. The changes include the over-determined resolution, the use of the
 * inherited convergence checker and the Q.R. decomposition, which has been
 * rewritten following the algorithm described in the
 * P. Lascaux and R. Theodor book <i>Analyse num&eacute;rique matricielle
 * appliqu&eacute;e &agrave; l'art de l'ing&eacute;nieur</i>, Masson 1986.</p>
 * <p>The authors of the original Fortran version are:
 * <ul>
 * <li>Argonne National Laboratory. MINPACK project. March 1980</li>
 * <li>Burton S. Garbow</li>
 * <li>Kenneth E. Hillstrom</li>
 * <li>Jorge J. More</li>
 * </ul>
 * The redistribution policy for MINPACK is available <a
 * href="http://www.netlib.org/minpack/disclaimer">here</a>; for convenience, it
 * is reproduced below.
 * <table style="text-align: center; background-color: #E0E0E0" border="">
 * <caption>MINPACK redistribution policy</caption>
 * <tr><td>
 *    Minpack Copyright Notice (1999) University of Chicago.
 *    All rights reserved
 * </td></tr>
 * <tr><td>
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * <ol>
 *  <li>Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.</li>
 * <li>Redistributions in binary form must reproduce the above
 *     copyright notice, this list of conditions and the following
 *     disclaimer in the documentation and/or other materials provided
 *     with the distribution.</li>
 * <li>The end-user documentation included with the redistribution, if any,
 *     must include the following acknowledgment:
 *     <code>This product includes software developed by the University of
 *           Chicago, as Operator of Argonne National Laboratory.</code>
 *     Alternately, this acknowledgment may appear in the software itself,
 *     if and wherever such third-party acknowledgments normally appear.</li>
 * <li><strong>WARRANTY DISCLAIMER. THE SOFTWARE IS SUPPLIED "AS IS"
 *     WITHOUT WARRANTY OF ANY KIND. THE COPYRIGHT HOLDER, THE
 *     UNITED STATES, THE UNITED STATES DEPARTMENT OF ENERGY, AND
 *     THEIR EMPLOYEES: (1) DISCLAIM ANY WARRANTIES, EXPRESS OR
 *     IMPLIED, INCLUDING BUT NOT LIMITED TO ANY IMPLIED WARRANTIES
 *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE
 *     OR NON-INFRINGEMENT, (2) DO NOT ASSUME ANY LEGAL LIABILITY
 *     OR RESPONSIBILITY FOR THE ACCURACY, COMPLETENESS, OR
 *     USEFULNESS OF THE SOFTWARE, (3) DO NOT REPRESENT THAT USE OF
 *     THE SOFTWARE WOULD NOT INFRINGE PRIVATELY OWNED RIGHTS, (4)
 *     DO NOT WARRANT THAT THE SOFTWARE WILL FUNCTION
 *     UNINTERRUPTED, THAT IT IS ERROR-FREE OR THAT ANY ERRORS WILL
 *     BE CORRECTED.</strong></li>
 * <li><strong>LIMITATION OF LIABILITY. IN NO EVENT WILL THE COPYRIGHT
 *     HOLDER, THE UNITED STATES, THE UNITED STATES DEPARTMENT OF
 *     ENERGY, OR THEIR EMPLOYEES: BE LIABLE FOR ANY INDIRECT,
 *     INCIDENTAL, CONSEQUENTIAL, SPECIAL OR PUNITIVE DAMAGES OF
 *     ANY KIND OR NATURE, INCLUDING BUT NOT LIMITED TO LOSS OF
 *     PROFITS OR LOSS OF DATA, FOR ANY REASON WHATSOEVER, WHETHER
 *     SUCH LIABILITY IS ASSERTED ON THE BASIS OF CONTRACT, TORT
 *     (INCLUDING NEGLIGENCE OR STRICT LIABILITY), OR OTHERWISE,
 *     EVEN IF ANY OF SAID PARTIES HAS BEEN WARNED OF THE
 *     POSSIBILITY OF SUCH LOSS OR DAMAGES.</strong></li>
 * </ol></td></tr>
 * </table>
 *
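 * <p>Typical usage builds a {@link LeastSquaresProblem} (for instance with a
 * {@link LeastSquaresBuilder}) and passes it to {@link #optimize(LeastSquaresProblem)}.
 * The snippet below is only an illustrative sketch: the model function, observed data
 * and start point are hypothetical placeholders, and the tolerance shown is an example
 * value rather than a recommendation.</p>
 * <pre>{@code
 * LeastSquaresProblem problem = new LeastSquaresBuilder()
 *     .model(modelFunction)     // hypothetical MultivariateJacobianFunction
 *     .target(observedValues)   // hypothetical observed data
 *     .start(initialGuess)      // hypothetical starting parameters
 *     .maxEvaluations(1000)
 *     .maxIterations(100)
 *     .build();
 *
 * LeastSquaresOptimizer.Optimum optimum = new LevenbergMarquardtOptimizer()
 *     .withCostRelativeTolerance(1e-12)
 *     .optimize(problem);
 *
 * double[] solution = optimum.getPoint().toArray();
 * double rms = optimum.getRMS();
 * }</pre>
 *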
 * @since 3.3
 */
public class LevenbergMarquardtOptimizer implements LeastSquaresOptimizer {

    /** Twice the machine epsilon. */
    private static final double TWO_EPS = 2 * Precision.EPSILON;

    /* Configuration parameters. */
    /** Positive input variable used in determining the initial step bound. */
    private final double initialStepBoundFactor;
    /** Desired relative error in the sum of squares. */
    private final double costRelativeTolerance;
    /** Desired relative error in the approximate solution parameters. */
    private final double parRelativeTolerance;
    /** Desired max cosine on the orthogonality between the function vector
     * and the columns of the Jacobian. */
    private final double orthoTolerance;
    /** Threshold for QR ranking. */
    private final double qrRankingThreshold;

    /** Default constructor.
     * <p>
     * The default values for the algorithm settings are:
     * <ul>
     *  <li>Initial step bound factor: 100</li>
     *  <li>Cost relative tolerance: 1e-10</li>
     *  <li>Parameters relative tolerance: 1e-10</li>
     *  <li>Orthogonality tolerance: 1e-10</li>
     *  <li>QR ranking threshold: {@link Precision#SAFE_MIN}</li>
     * </ul>
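     * <p>
     * Each setting can be changed through the {@code withXxx} methods, which return
     * a new, separately configured instance (instances are immutable). A minimal
     * sketch, using arbitrary example values rather than recommended ones:
     * </p>
     * <pre>{@code
     * LevenbergMarquardtOptimizer optimizer = new LevenbergMarquardtOptimizer()
     *     .withCostRelativeTolerance(1e-12)
     *     .withParameterRelativeTolerance(1e-12)
     *     .withOrthoTolerance(1e-12);
     * }</pre>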
     **/
    public LevenbergMarquardtOptimizer() {
        this(100, 1e-10, 1e-10, 1e-10, Precision.SAFE_MIN);
    }

    /**
     * Construct an instance with all parameters specified.
     *
     * @param initialStepBoundFactor initial step bound factor
     * @param costRelativeTolerance  cost relative tolerance
     * @param parRelativeTolerance   parameters relative tolerance
     * @param orthoTolerance         orthogonality tolerance
     * @param qrRankingThreshold     threshold in the QR decomposition. Columns with a
     *                               2-norm less than this threshold are considered to be
     *                               all 0s.
     */
    public LevenbergMarquardtOptimizer(
            final double initialStepBoundFactor,
            final double costRelativeTolerance,
            final double parRelativeTolerance,
            final double orthoTolerance,
            final double qrRankingThreshold) {
        this.initialStepBoundFactor = initialStepBoundFactor;
        this.costRelativeTolerance = costRelativeTolerance;
        this.parRelativeTolerance = parRelativeTolerance;
        this.orthoTolerance = orthoTolerance;
        this.qrRankingThreshold = qrRankingThreshold;
    }

    /**
     * @param newInitialStepBoundFactor Positive input variable used in
     * determining the initial step bound. This bound is set to the
     * product of initialStepBoundFactor and the euclidean norm of
     * {@code diag * x} if non-zero, or else to {@code newInitialStepBoundFactor}
     * itself. In most cases, the factor should lie in the interval
     * {@code (0.1, 100.0)}. {@code 100} is a generally recommended value.
     * @return a new instance.
     */
    public LevenbergMarquardtOptimizer withInitialStepBoundFactor(double newInitialStepBoundFactor) {
        return new LevenbergMarquardtOptimizer(
                newInitialStepBoundFactor,
                costRelativeTolerance,
                parRelativeTolerance,
                orthoTolerance,
                qrRankingThreshold);
    }

    /**
     * @param newCostRelativeTolerance Desired relative error in the sum of squares.
     * @return a new instance.
     */
    public LevenbergMarquardtOptimizer withCostRelativeTolerance(double newCostRelativeTolerance) {
        return new LevenbergMarquardtOptimizer(
                initialStepBoundFactor,
                newCostRelativeTolerance,
                parRelativeTolerance,
                orthoTolerance,
                qrRankingThreshold);
    }

    /**
     * @param newParRelativeTolerance Desired relative error in the approximate solution
     * parameters.
     * @return a new instance.
     */
    public LevenbergMarquardtOptimizer withParameterRelativeTolerance(double newParRelativeTolerance) {
        return new LevenbergMarquardtOptimizer(
                initialStepBoundFactor,
                costRelativeTolerance,
                newParRelativeTolerance,
                orthoTolerance,
                qrRankingThreshold);
    }

    /**
     * Creates a new instance with the given orthogonality tolerance.
     *
     * @param newOrthoTolerance Desired max cosine on the orthogonality between
     * the function vector and the columns of the Jacobian.
     * @return a new instance.
     */
    public LevenbergMarquardtOptimizer withOrthoTolerance(double newOrthoTolerance) {
        return new LevenbergMarquardtOptimizer(
                initialStepBoundFactor,
                costRelativeTolerance,
                parRelativeTolerance,
                newOrthoTolerance,
                qrRankingThreshold);
    }

    /**
     * @param newQRRankingThreshold Desired threshold for QR ranking.
     * If the squared norm of a column vector is smaller than or equal to this
     * threshold during QR decomposition, it is considered to be a zero vector
     * and hence the rank of the matrix is reduced.
     * @return a new instance.
     */
    public LevenbergMarquardtOptimizer withRankingThreshold(double newQRRankingThreshold) {
        return new LevenbergMarquardtOptimizer(
                initialStepBoundFactor,
                costRelativeTolerance,
                parRelativeTolerance,
                orthoTolerance,
                newQRRankingThreshold);
    }

    /**
     * Gets the value of a tuning parameter.
     * @see #withInitialStepBoundFactor(double)
     *
     * @return the parameter's value.
     */
    public double getInitialStepBoundFactor() {
        return initialStepBoundFactor;
    }

    /**
     * Gets the value of a tuning parameter.
     * @see #withCostRelativeTolerance(double)
     *
     * @return the parameter's value.
     */
    public double getCostRelativeTolerance() {
        return costRelativeTolerance;
    }

    /**
     * Gets the value of a tuning parameter.
     * @see #withParameterRelativeTolerance(double)
     *
     * @return the parameter's value.
     */
    public double getParameterRelativeTolerance() {
        return parRelativeTolerance;
    }

    /**
     * Gets the value of a tuning parameter.
     * @see #withOrthoTolerance(double)
     *
     * @return the parameter's value.
     */
    public double getOrthoTolerance() {
        return orthoTolerance;
    }

    /**
     * Gets the value of a tuning parameter.
     * @see #withRankingThreshold(double)
     *
     * @return the parameter's value.
     */
    public double getRankingThreshold() {
        return qrRankingThreshold;
    }

    /** {@inheritDoc} */
    @Override
    public Optimum optimize(final LeastSquaresProblem problem) {
        // Pull in relevant data from the problem as locals.
        final int nR = problem.getObservationSize(); // Number of observed data.
        final int nC = problem.getParameterSize(); // Number of parameters.
        // Counters.
        final IntegerSequence.Incrementor iterationCounter = problem.getIterationCounter();
        final IntegerSequence.Incrementor evaluationCounter = problem.getEvaluationCounter();
        // Convergence criterion.
        final ConvergenceChecker<Evaluation> checker = problem.getConvergenceChecker();

        // arrays shared with the other private methods
        final int solvedCols  = JdkMath.min(nR, nC);
        /* Parameters evolution direction associated with lmPar. */
        double[] lmDir = new double[nC];
        /* Levenberg-Marquardt parameter. */
        double lmPar = 0;

        // local point
        double   delta   = 0;
        double   xNorm   = 0;
        double[] diag    = new double[nC];
        double[] oldX    = new double[nC];
        double[] oldRes  = new double[nR];
        double[] qtf     = new double[nR];
        double[] work1   = new double[nC];
        double[] work2   = new double[nC];
        double[] work3   = new double[nC];


        // Evaluate the function at the starting point and calculate its norm.
        evaluationCounter.increment();
        // Value will be reassigned in the loop.
        Evaluation current = problem.evaluate(problem.getStart());
        double[] currentResiduals = current.getResiduals().toArray();
        double currentCost = current.getCost();
        double[] currentPoint = current.getPoint().toArray();

        // Outer loop.
        boolean firstIteration = true;
        while (true) {
            iterationCounter.increment();

            final Evaluation previous = current;

            // QR decomposition of the jacobian matrix
            final InternalData internalData
                    = qrDecomposition(current.getJacobian(), solvedCols);
            final double[][] weightedJacobian = internalData.weightedJacobian;
            final int[] permutation = internalData.permutation;
            final double[] diagR = internalData.diagR;
            final double[] jacNorm = internalData.jacNorm;

            // residuals already have weights applied
            double[] weightedResidual = currentResiduals;
            System.arraycopy(weightedResidual, 0, qtf, 0, nR);

            // compute Qt.res
            qTy(qtf, internalData);

            // now we don't need Q anymore,
            // so let jacobian contain the R matrix with its diagonal elements
            for (int k = 0; k < solvedCols; ++k) {
                int pk = permutation[k];
                weightedJacobian[k][pk] = diagR[pk];
            }

            if (firstIteration) {
                // scale the point according to the norms of the columns
                // of the initial jacobian
                xNorm = 0;
                for (int k = 0; k < nC; ++k) {
                    double dk = jacNorm[k];
                    if (dk == 0) {
                        dk = 1.0;
                    }
                    double xk = dk * currentPoint[k];
                    xNorm  += xk * xk;
                    diag[k] = dk;
                }
                xNorm = JdkMath.sqrt(xNorm);

                // initialize the step bound delta
                delta = (xNorm == 0) ? initialStepBoundFactor : (initialStepBoundFactor * xNorm);
            }

            // check orthogonality between function vector and jacobian columns
            double maxCosine = 0;
            if (currentCost != 0) {
                for (int j = 0; j < solvedCols; ++j) {
                    int    pj = permutation[j];
                    double s  = jacNorm[pj];
                    if (s != 0) {
                        double sum = 0;
                        for (int i = 0; i <= j; ++i) {
                            sum += weightedJacobian[i][pj] * qtf[i];
                        }
                        maxCosine = JdkMath.max(maxCosine, JdkMath.abs(sum) / (s * currentCost));
                    }
                }
            }
            if (maxCosine <= orthoTolerance) {
                // Convergence has been reached.
                return new OptimumImpl(
                        current,
                        evaluationCounter.getCount(),
                        iterationCounter.getCount());
            }

            // rescale if necessary
            for (int j = 0; j < nC; ++j) {
                diag[j] = JdkMath.max(diag[j], jacNorm[j]);
            }

            // Inner loop.
            for (double ratio = 0; ratio < 1.0e-4;) {

                // save the state
                for (int j = 0; j < solvedCols; ++j) {
                    int pj = permutation[j];
                    oldX[pj] = currentPoint[pj];
                }
                final double previousCost = currentCost;
                double[] tmpVec = weightedResidual;
                weightedResidual = oldRes;
                oldRes    = tmpVec;

                // determine the Levenberg-Marquardt parameter
                lmPar = determineLMParameter(qtf, delta, diag,
                                     internalData, solvedCols,
                                     work1, work2, work3, lmDir, lmPar);

                // compute the new point and the norm of the evolution direction
                double lmNorm = 0;
                for (int j = 0; j < solvedCols; ++j) {
                    int pj = permutation[j];
                    lmDir[pj] = -lmDir[pj];
                    currentPoint[pj] = oldX[pj] + lmDir[pj];
                    double s = diag[pj] * lmDir[pj];
                    lmNorm  += s * s;
                }
                lmNorm = JdkMath.sqrt(lmNorm);
                // on the first iteration, adjust the initial step bound.
                if (firstIteration) {
                    delta = JdkMath.min(delta, lmNorm);
                }

                // Evaluate the function at x + p and calculate its norm.
                evaluationCounter.increment();
                current = problem.evaluate(new ArrayRealVector(currentPoint));
                currentResiduals = current.getResiduals().toArray();
                currentCost = current.getCost();
                currentPoint = current.getPoint().toArray();

                // compute the scaled actual reduction
                double actRed = -1.0;
                if (0.1 * currentCost < previousCost) {
                    double r = currentCost / previousCost;
                    actRed = 1.0 - r * r;
                }

                // compute the scaled predicted reduction
                // and the scaled directional derivative
                for (int j = 0; j < solvedCols; ++j) {
                    int pj = permutation[j];
                    double dirJ = lmDir[pj];
                    work1[j] = 0;
                    for (int i = 0; i <= j; ++i) {
                        work1[i] += weightedJacobian[i][pj] * dirJ;
                    }
                }
                double coeff1 = 0;
                for (int j = 0; j < solvedCols; ++j) {
                    coeff1 += work1[j] * work1[j];
                }
                double pc2 = previousCost * previousCost;
                coeff1 /= pc2;
                double coeff2 = lmPar * lmNorm * lmNorm / pc2;
                double preRed = coeff1 + 2 * coeff2;
                double dirDer = -(coeff1 + coeff2);

                // ratio of the actual to the predicted reduction
                ratio = (preRed == 0) ? 0 : (actRed / preRed);

                // update the step bound
                if (ratio <= 0.25) {
                    double tmp =
                        (actRed < 0) ? (0.5 * dirDer / (dirDer + 0.5 * actRed)) : 0.5;
                    if (0.1 * currentCost >= previousCost || tmp < 0.1) {
                        tmp = 0.1;
                    }
                    delta = tmp * JdkMath.min(delta, 10.0 * lmNorm);
                    lmPar /= tmp;
                } else if (lmPar == 0 || ratio >= 0.75) {
                    delta = 2 * lmNorm;
                    lmPar *= 0.5;
                }

                // test for successful iteration.
                if (ratio >= 1.0e-4) {
                    // successful iteration, update the norm
                    firstIteration = false;
                    xNorm = 0;
                    for (int k = 0; k < nC; ++k) {
                        double xK = diag[k] * currentPoint[k];
                        xNorm += xK * xK;
                    }
                    xNorm = JdkMath.sqrt(xNorm);

                    // tests for convergence.
                    if (checker != null && checker.converged(iterationCounter.getCount(), previous, current)) {
                        return new OptimumImpl(current, evaluationCounter.getCount(), iterationCounter.getCount());
                    }
                } else {
                    // failed iteration, reset the previous values
                    currentCost = previousCost;
                    for (int j = 0; j < solvedCols; ++j) {
                        int pj = permutation[j];
                        currentPoint[pj] = oldX[pj];
                    }
                    tmpVec    = weightedResidual;
                    weightedResidual = oldRes;
                    oldRes    = tmpVec;
                    // Reset "current" to previous values.
                    current = previous;
                }

                // Default convergence criteria.
                if ((JdkMath.abs(actRed) <= costRelativeTolerance &&
                     preRed <= costRelativeTolerance &&
                     ratio <= 2.0) ||
                    delta <= parRelativeTolerance * xNorm) {
                    return new OptimumImpl(current, evaluationCounter.getCount(), iterationCounter.getCount());
                }

                // tests for termination and stringent tolerances
                if (JdkMath.abs(actRed) <= TWO_EPS &&
                    preRed <= TWO_EPS &&
                    ratio <= 2.0) {
                    throw new ConvergenceException(LocalizedFormats.TOO_SMALL_COST_RELATIVE_TOLERANCE,
                                                   costRelativeTolerance);
                } else if (delta <= TWO_EPS * xNorm) {
                    throw new ConvergenceException(LocalizedFormats.TOO_SMALL_PARAMETERS_RELATIVE_TOLERANCE,
                                                   parRelativeTolerance);
                } else if (maxCosine <= TWO_EPS) {
                    throw new ConvergenceException(LocalizedFormats.TOO_SMALL_ORTHOGONALITY_TOLERANCE,
                                                   orthoTolerance);
                }
            }
        }
    }

    /**
     * Holds internal data.
     * This structure was created so that all optimizer fields can be "final".
     * Code should be further refactored in order to not pass around arguments
     * that will be modified in-place (cf. "work" arrays).
     */
    private static final class InternalData {
        /** Weighted Jacobian. */
        private final double[][] weightedJacobian;
        /** Columns permutation array. */
        private final int[] permutation;
        /** Rank of the Jacobian matrix. */
        private final int rank;
        /** Diagonal elements of the R matrix in the QR decomposition. */
        private final double[] diagR;
        /** Norms of the columns of the jacobian matrix. */
        private final double[] jacNorm;
        /** Coefficients of the Householder transforms vectors. */
        private final double[] beta;

        /**
         * @param weightedJacobian Weighted Jacobian.
         * @param permutation Columns permutation array.
         * @param rank Rank of the Jacobian matrix.
         * @param diagR Diagonal elements of the R matrix in the QR decomposition.
         * @param jacNorm Norms of the columns of the jacobian matrix.
         * @param beta Coefficients of the Householder transforms vectors.
         */
        InternalData(double[][] weightedJacobian,
                     int[] permutation,
                     int rank,
                     double[] diagR,
                     double[] jacNorm,
                     double[] beta) {
            this.weightedJacobian = weightedJacobian;
            this.permutation = permutation;
            this.rank = rank;
            this.diagR = diagR;
            this.jacNorm = jacNorm;
            this.beta = beta;
        }
    }

    /**
     * Determines the Levenberg-Marquardt parameter.
     *
     * <p>This implementation is a translation in Java of the MINPACK
     * <a href="http://www.netlib.org/minpack/lmpar.f">lmpar</a>
     * routine.</p>
     * <p>This method fills the {@code lmDir} array and returns the updated LM parameter.</p>
     * <p>The authors of the original Fortran function are:</p>
     * <ul>
     *   <li>Argonne National Laboratory. MINPACK project. March 1980</li>
     *   <li>Burton  S. Garbow</li>
     *   <li>Kenneth E. Hillstrom</li>
     *   <li>Jorge   J. More</li>
     * </ul>
     * <p>Luc Maisonobe did the Java translation.</p>
     *
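     * <p>In MINPACK terms (this is a paraphrase of the {@code lmpar} prologue, adapted
     * to the variable names used here): given the weighted Jacobian {@code A}, the
     * diagonal scaling matrix {@code D} and a positive step bound {@code delta}, the
     * routine determines a parameter value {@code par} such that if {@code x} solves,
     * in the least squares sense, the system</p>
     * <pre>
     *   A.x = b,    sqrt(par).D.x = 0
     * </pre>
     * <p>and {@code dxNorm} is the euclidean norm of {@code D.x}, then either {@code par}
     * is zero and {@code dxNorm - delta <= 0.1 * delta}, or {@code par} is positive and
     * {@code abs(dxNorm - delta) <= 0.1 * delta}.</p>
     *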
     * @param qy Array containing qTy.
     * @param delta Upper bound on the euclidean norm of diagR * lmDir.
     * @param diag Diagonal matrix.
     * @param internalData Data (modified in-place in this method).
     * @param solvedCols Number of solved columns.
     * @param work1 work array
     * @param work2 work array
     * @param work3 work array
     * @param lmDir the "returned" LM direction will be stored in this array.
     * @param lmPar the value of the LM parameter from the previous iteration.
     * @return the new LM parameter
     */
    private double determineLMParameter(double[] qy, double delta, double[] diag,
                                      InternalData internalData, int solvedCols,
                                      double[] work1, double[] work2, double[] work3,
                                      double[] lmDir, double lmPar) {
        final double[][] weightedJacobian = internalData.weightedJacobian;
        final int[] permutation = internalData.permutation;
        final int rank = internalData.rank;
        final double[] diagR = internalData.diagR;

        final int nC = weightedJacobian[0].length;

        // compute and store in x the gauss-newton direction, if the
        // jacobian is rank-deficient, obtain a least squares solution
        for (int j = 0; j < rank; ++j) {
            lmDir[permutation[j]] = qy[j];
        }
        for (int j = rank; j < nC; ++j) {
            lmDir[permutation[j]] = 0;
        }
        for (int k = rank - 1; k >= 0; --k) {
            int pk = permutation[k];
            double ypk = lmDir[pk] / diagR[pk];
            for (int i = 0; i < k; ++i) {
                lmDir[permutation[i]] -= ypk * weightedJacobian[i][pk];
            }
            lmDir[pk] = ypk;
        }

        // evaluate the function at the origin, and test
        // for acceptance of the Gauss-Newton direction
        double dxNorm = 0;
        for (int j = 0; j < solvedCols; ++j) {
            int pj = permutation[j];
            double s = diag[pj] * lmDir[pj];
            work1[pj] = s;
            dxNorm += s * s;
        }
        dxNorm = JdkMath.sqrt(dxNorm);
        double fp = dxNorm - delta;
        if (fp <= 0.1 * delta) {
            lmPar = 0;
            return lmPar;
        }

        // if the jacobian is not rank deficient, the Newton step provides
        // a lower bound, parl, for the zero of the function,
        // otherwise set this bound to zero
        double sum2;
        double parl = 0;
        if (rank == solvedCols) {
            for (int j = 0; j < solvedCols; ++j) {
                int pj = permutation[j];
                work1[pj] *= diag[pj] / dxNorm;
            }
            sum2 = 0;
            for (int j = 0; j < solvedCols; ++j) {
                int pj = permutation[j];
                double sum = 0;
                for (int i = 0; i < j; ++i) {
                    sum += weightedJacobian[i][pj] * work1[permutation[i]];
                }
                double s = (work1[pj] - sum) / diagR[pj];
                work1[pj] = s;
                sum2 += s * s;
            }
            parl = fp / (delta * sum2);
        }

        // calculate an upper bound, paru, for the zero of the function
        sum2 = 0;
        for (int j = 0; j < solvedCols; ++j) {
            int pj = permutation[j];
            double sum = 0;
            for (int i = 0; i <= j; ++i) {
                sum += weightedJacobian[i][pj] * qy[i];
            }
            sum /= diag[pj];
            sum2 += sum * sum;
        }
        double gNorm = JdkMath.sqrt(sum2);
        double paru = gNorm / delta;
        if (paru == 0) {
            paru = Precision.SAFE_MIN / JdkMath.min(delta, 0.1);
        }

        // if the input par lies outside of the interval (parl,paru),
        // set par to the closer endpoint
        lmPar = JdkMath.min(paru, JdkMath.max(lmPar, parl));
        if (lmPar == 0) {
            lmPar = gNorm / dxNorm;
        }

        for (int countdown = 10; countdown >= 0; --countdown) {

            // evaluate the function at the current value of lmPar
            if (lmPar == 0) {
                lmPar = JdkMath.max(Precision.SAFE_MIN, 0.001 * paru);
            }
            double sPar = JdkMath.sqrt(lmPar);
            for (int j = 0; j < solvedCols; ++j) {
                int pj = permutation[j];
                work1[pj] = sPar * diag[pj];
            }
            determineLMDirection(qy, work1, work2, internalData, solvedCols, work3, lmDir);

            dxNorm = 0;
            for (int j = 0; j < solvedCols; ++j) {
                int pj = permutation[j];
                double s = diag[pj] * lmDir[pj];
                work3[pj] = s;
                dxNorm += s * s;
            }
            dxNorm = JdkMath.sqrt(dxNorm);
            double previousFP = fp;
            fp = dxNorm - delta;

            // if the function is small enough, accept the current value
            // of lmPar, also test for the exceptional cases where parl is zero
            if (JdkMath.abs(fp) <= 0.1 * delta ||
                (parl == 0 &&
                 fp <= previousFP &&
                 previousFP < 0)) {
                return lmPar;
            }

            // compute the Newton correction
            for (int j = 0; j < solvedCols; ++j) {
                int pj = permutation[j];
                work1[pj] = work3[pj] * diag[pj] / dxNorm;
            }
            for (int j = 0; j < solvedCols; ++j) {
                int pj = permutation[j];
                work1[pj] /= work2[j];
                double tmp = work1[pj];
                for (int i = j + 1; i < solvedCols; ++i) {
                    work1[permutation[i]] -= weightedJacobian[i][pj] * tmp;
                }
            }
            sum2 = 0;
            for (int j = 0; j < solvedCols; ++j) {
                double s = work1[permutation[j]];
                sum2 += s * s;
            }
            double correction = fp / (delta * sum2);

            // depending on the sign of the function, update parl or paru.
            if (fp > 0) {
                parl = JdkMath.max(parl, lmPar);
            } else if (fp < 0) {
                paru = JdkMath.min(paru, lmPar);
            }

            // compute an improved estimate for lmPar
            lmPar = JdkMath.max(parl, lmPar + correction);
        }

        return lmPar;
    }

    /**
     * Solve a*x = b and d*x = 0 in the least squares sense.
     * <p>This implementation is a translation in Java of the MINPACK
     * <a href="http://www.netlib.org/minpack/qrsolv.f">qrsolv</a>
     * routine.</p>
     * <p>This method fills the {@code lmDir} and {@code lmDiag} arrays.</p>
     * <p>The authors of the original Fortran function are:</p>
     * <ul>
     *   <li>Argonne National Laboratory. MINPACK project. March 1980</li>
     *   <li>Burton  S. Garbow</li>
     *   <li>Kenneth E. Hillstrom</li>
     *   <li>Jorge   J. More</li>
     * </ul>
     * <p>Luc Maisonobe did the Java translation.</p>
     *
     * @param qy array containing qTy
     * @param diag diagonal matrix
     * @param lmDiag diagonal elements associated with lmDir
     * @param internalData Data (modified in-place in this method).
     * @param solvedCols Number of solved columns.
     * @param work work array
     * @param lmDir the "returned" LM direction is stored in this array
     */
    private void determineLMDirection(double[] qy, double[] diag,
                                      double[] lmDiag,
                                      InternalData internalData,
                                      int solvedCols,
                                      double[] work,
                                      double[] lmDir) {
        final int[] permutation = internalData.permutation;
        final double[][] weightedJacobian = internalData.weightedJacobian;
        final double[] diagR = internalData.diagR;

        // copy R and Qty to preserve input and initialize s
        // in particular, save the diagonal elements of R in lmDir
        for (int j = 0; j < solvedCols; ++j) {
            int pj = permutation[j];
            for (int i = j + 1; i < solvedCols; ++i) {
                weightedJacobian[i][pj] = weightedJacobian[j][permutation[i]];
            }
            lmDir[j] = diagR[pj];
            work[j]  = qy[j];
        }

        // eliminate the diagonal matrix d using a Givens rotation
        for (int j = 0; j < solvedCols; ++j) {

            // prepare the row of d to be eliminated, locating the
            // diagonal element using p from the Q.R. factorization
            int pj = permutation[j];
            double dpj = diag[pj];
            if (dpj != 0) {
                Arrays.fill(lmDiag, j + 1, lmDiag.length, 0);
            }
            lmDiag[j] = dpj;

            // the transformations to eliminate the row of d
            // modify only a single element of Qty
            // beyond the first n, which is initially zero.
            double qtbpj = 0;
            for (int k = j; k < solvedCols; ++k) {
                int pk = permutation[k];

                // determine a Givens rotation which eliminates the
                // appropriate element in the current row of d
                if (lmDiag[k] != 0) {

                    final double sin;
                    final double cos;
                    double rkk = weightedJacobian[k][pk];
                    if (JdkMath.abs(rkk) < JdkMath.abs(lmDiag[k])) {
                        final double cotan = rkk / lmDiag[k];
                        sin   = 1.0 / JdkMath.sqrt(1.0 + cotan * cotan);
                        cos   = sin * cotan;
                    } else {
                        final double tan = lmDiag[k] / rkk;
                        cos = 1.0 / JdkMath.sqrt(1.0 + tan * tan);
                        sin = cos * tan;
                    }

                    // compute the modified diagonal element of R and
                    // the modified element of (Qty,0)
                    weightedJacobian[k][pk] = cos * rkk + sin * lmDiag[k];
                    final double temp = cos * work[k] + sin * qtbpj;
                    qtbpj = -sin * work[k] + cos * qtbpj;
                    work[k] = temp;

                    // accumulate the transformation in the row of s
                    for (int i = k + 1; i < solvedCols; ++i) {
                        double rik = weightedJacobian[i][pk];
                        final double temp2 = cos * rik + sin * lmDiag[i];
                        lmDiag[i] = -sin * rik + cos * lmDiag[i];
                        weightedJacobian[i][pk] = temp2;
                    }
                }
            }

            // store the diagonal element of s and restore
            // the corresponding diagonal element of R
            lmDiag[j] = weightedJacobian[j][permutation[j]];
            weightedJacobian[j][permutation[j]] = lmDir[j];
        }

        // solve the triangular system for z, if the system is
        // singular, then obtain a least squares solution
        int nSing = solvedCols;
        for (int j = 0; j < solvedCols; ++j) {
            if (lmDiag[j] == 0 && nSing == solvedCols) {
                nSing = j;
            }
            if (nSing < solvedCols) {
                work[j] = 0;
            }
        }
        if (nSing > 0) {
            for (int j = nSing - 1; j >= 0; --j) {
                int pj = permutation[j];
                double sum = 0;
                for (int i = j + 1; i < nSing; ++i) {
                    sum += weightedJacobian[i][pj] * work[i];
                }
                work[j] = (work[j] - sum) / lmDiag[j];
            }
        }

        // permute the components of z back to components of lmDir
        for (int j = 0; j < lmDir.length; ++j) {
            lmDir[permutation[j]] = work[j];
        }
    }

    /**
     * Decompose a matrix A as A.P = Q.R using Householder transforms.
     * <p>As suggested in the P. Lascaux and R. Theodor book
     * <i>Analyse num&eacute;rique matricielle appliqu&eacute;e &agrave;
     * l'art de l'ing&eacute;nieur</i> (Masson, 1986), instead of representing
     * the Householder transforms with u<sub>k</sub> unit vectors such that:
     * <pre>
     * H<sub>k</sub> = I - 2u<sub>k</sub>.u<sub>k</sub><sup>t</sup>
     * </pre>
     * we use v<sub>k</sub> non-unit vectors such that:
     * <pre>
     * H<sub>k</sub> = I - beta<sub>k</sub>v<sub>k</sub>.v<sub>k</sub><sup>t</sup>
     * </pre>
     * where v<sub>k</sub> = a<sub>k</sub> - alpha<sub>k</sub> e<sub>k</sub>.
     * The beta<sub>k</sub> coefficients are provided upon exit as recomputing
     * them from the v<sub>k</sub> vectors would be costly.</p>
     * <p>This decomposition handles rank deficient cases since the transformations
     * are performed in non-increasing column norm order thanks to column
     * pivoting. The diagonal elements of the R matrix are therefore also in
     * non-increasing absolute value order.</p>
     *
     * @param jacobian Weighted Jacobian matrix at the current point.
     * @param solvedCols Number of solved columns.
     * @return data used in other methods of this class.
     * @throws ConvergenceException if the decomposition cannot be performed.
     */
    private InternalData qrDecomposition(RealMatrix jacobian,
                                         int solvedCols) throws ConvergenceException {
        // Code in this class assumes that the weighted Jacobian is -(W^(1/2) J),
        // hence the multiplication by -1.
        final double[][] weightedJacobian = jacobian.scalarMultiply(-1).getData();

        final int nR = weightedJacobian.length;
        final int nC = weightedJacobian[0].length;

        final int[] permutation = new int[nC];
        final double[] diagR = new double[nC];
        final double[] jacNorm = new double[nC];
        final double[] beta = new double[nC];

        // initializations
        for (int k = 0; k < nC; ++k) {
            permutation[k] = k;
            double norm2 = 0;
            for (int i = 0; i < nR; ++i) {
                double akk = weightedJacobian[i][k];
                norm2 += akk * akk;
            }
            jacNorm[k] = JdkMath.sqrt(norm2);
        }

        // transform the matrix column after column
        for (int k = 0; k < nC; ++k) {

            // select the column with the greatest norm on active components
            int nextColumn = -1;
            double ak2 = Double.NEGATIVE_INFINITY;
            for (int i = k; i < nC; ++i) {
                double norm2 = 0;
                for (int j = k; j < nR; ++j) {
                    double aki = weightedJacobian[j][permutation[i]];
                    norm2 += aki * aki;
                }
                if (Double.isInfinite(norm2) || Double.isNaN(norm2)) {
                    throw new ConvergenceException(LocalizedFormats.UNABLE_TO_PERFORM_QR_DECOMPOSITION_ON_JACOBIAN,
                                                   nR, nC);
                }
                if (norm2 > ak2) {
                    nextColumn = i;
                    ak2        = norm2;
                }
            }
            if (ak2 <= qrRankingThreshold) {
                return new InternalData(weightedJacobian, permutation, k, diagR, jacNorm, beta);
            }
            int pk = permutation[nextColumn];
            permutation[nextColumn] = permutation[k];
            permutation[k] = pk;

            // choose alpha such that Hk.u = alpha ek
            double akk = weightedJacobian[k][pk];
            double alpha = (akk > 0) ? -JdkMath.sqrt(ak2) : JdkMath.sqrt(ak2);
            double betak = 1.0 / (ak2 - akk * alpha);
            beta[pk] = betak;

            // transform the current column
            diagR[pk] = alpha;
            weightedJacobian[k][pk] -= alpha;

            // transform the remaining columns
            for (int dk = nC - 1 - k; dk > 0; --dk) {
                double gamma = 0;
                for (int j = k; j < nR; ++j) {
                    gamma += weightedJacobian[j][pk] * weightedJacobian[j][permutation[k + dk]];
                }
                gamma *= betak;
                for (int j = k; j < nR; ++j) {
                    weightedJacobian[j][permutation[k + dk]] -= gamma * weightedJacobian[j][pk];
                }
            }
        }

        return new InternalData(weightedJacobian, permutation, solvedCols, diagR, jacNorm, beta);
    }

    /**
     * Compute the product Qt.y for some Q.R. decomposition.
     *
     * @param y vector to multiply (will be overwritten with the result)
     * @param internalData Data.
     */
    private void qTy(double[] y,
                     InternalData internalData) {
        final double[][] weightedJacobian = internalData.weightedJacobian;
        final int[] permutation = internalData.permutation;
        final double[] beta = internalData.beta;

        final int nR = weightedJacobian.length;
        final int nC = weightedJacobian[0].length;

        for (int k = 0; k < nC; ++k) {
            int pk = permutation[k];
            double gamma = 0;
            for (int i = k; i < nR; ++i) {
                gamma += weightedJacobian[i][pk] * y[i];
            }
            gamma *= beta[pk];
            for (int i = k; i < nR; ++i) {
                y[i] -= gamma * weightedJacobian[i][pk];
            }
        }
    }
}