/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.commons.math4.legacy.ode.nonstiff;

import org.apache.commons.math4.legacy.exception.DimensionMismatchException;
import org.apache.commons.math4.legacy.exception.MaxCountExceededException;
import org.apache.commons.math4.legacy.exception.NoBracketingException;
import org.apache.commons.math4.legacy.exception.NumberIsTooSmallException;
import org.apache.commons.math4.legacy.linear.Array2DRowRealMatrix;
import org.apache.commons.math4.legacy.linear.RealMatrix;
import org.apache.commons.math4.legacy.ode.EquationsMapper;
import org.apache.commons.math4.legacy.ode.ExpandableStatefulODE;
import org.apache.commons.math4.legacy.ode.sampling.NordsieckStepInterpolator;
import org.apache.commons.math4.core.jdkmath.JdkMath;


/**
 * This class implements explicit Adams-Bashforth integrators for Ordinary
 * Differential Equations.
 *
 * <p>Adams-Bashforth methods (in fact due to Adams alone) are explicit
 * multistep ODE solvers. This implementation is a variation of the classical
 * one: it uses an adaptive stepsize to implement error control, whereas
 * classical implementations use a fixed step size. The value of the state
 * vector at step n+1 is a simple combination of the value at step n and of
 * the derivatives at steps n, n-1, n-2 ... Depending on the number k of
 * previous steps one wants to use for computing the next value, different
 * formulas are available:</p>
 * <ul>
 *   <li>k = 1: y<sub>n+1</sub> = y<sub>n</sub> + h y'<sub>n</sub></li>
 *   <li>k = 2: y<sub>n+1</sub> = y<sub>n</sub> + h (3y'<sub>n</sub>-y'<sub>n-1</sub>)/2</li>
 *   <li>k = 3: y<sub>n+1</sub> = y<sub>n</sub> + h (23y'<sub>n</sub>-16y'<sub>n-1</sub>+5y'<sub>n-2</sub>)/12</li>
 *   <li>k = 4: y<sub>n+1</sub> = y<sub>n</sub> + h (55y'<sub>n</sub>-59y'<sub>n-1</sub>+37y'<sub>n-2</sub>-9y'<sub>n-3</sub>)/24</li>
 *   <li>...</li>
 * </ul>
 *
 * <p>A k-step Adams-Bashforth method is of order k.</p>
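 *
 * <p>As an illustration of the k = 2 formula above, here is a minimal
 * stand-alone sketch (hypothetical code, not using this class) advancing
 * y' = -y with a fixed step h; this integrator itself adapts the stepsize
 * and bootstraps with a separate starter integrator:
 * <pre>{@code
 * // hypothetical fixed-step two-step Adams-Bashforth, for illustration only
 * double h = 0.1;                      // fixed step size
 * double y = 1.0;                      // y(0)
 * double fPrev = -y;                   // y'(t0), here f(t, y) = -y
 * y += h * fPrev;                      // bootstrap with one Euler (k = 1) step
 * for (int n = 1; n < 10; n++) {
 *     double f = -y;                   // y'(tn)
 *     y += h * (3 * f - fPrev) / 2;    // two-step Adams-Bashforth update
 *     fPrev = f;
 * }
 * // y now approximates exp(-1) at t = 1
 * }</pre>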
 *
 * <p><b>Implementation details</b></p>
 *
 * <p>We define scaled derivatives s<sub>i</sub>(n) at step n as:
 * <div style="white-space: pre"><code>
 * s<sub>1</sub>(n) = h y'<sub>n</sub> for first derivative
 * s<sub>2</sub>(n) = h<sup>2</sup>/2 y''<sub>n</sub> for second derivative
 * s<sub>3</sub>(n) = h<sup>3</sup>/6 y'''<sub>n</sub> for third derivative
 * ...
 * s<sub>k</sub>(n) = h<sup>k</sup>/k! y<sup>(k)</sup><sub>n</sub> for k<sup>th</sup> derivative
 * </code></div>
 *
 * <p>The definitions above use the classical representation with several previous first
 * derivatives. Let us define
 * <div style="white-space: pre"><code>
 *   q<sub>n</sub> = [ s<sub>1</sub>(n-1) s<sub>1</sub>(n-2) ... s<sub>1</sub>(n-(k-1)) ]<sup>T</sup>
 * </code></div>
 * (we omit the k index in the notation for clarity). With these definitions,
 * Adams-Bashforth methods can be written:
 * <ul>
 *   <li>k = 1: y<sub>n+1</sub> = y<sub>n</sub> + s<sub>1</sub>(n)</li>
 *   <li>k = 2: y<sub>n+1</sub> = y<sub>n</sub> + 3/2 s<sub>1</sub>(n) + [ -1/2 ] q<sub>n</sub></li>
 *   <li>k = 3: y<sub>n+1</sub> = y<sub>n</sub> + 23/12 s<sub>1</sub>(n) + [ -16/12 5/12 ] q<sub>n</sub></li>
 *   <li>k = 4: y<sub>n+1</sub> = y<sub>n</sub> + 55/24 s<sub>1</sub>(n) + [ -59/24 37/24 -9/24 ] q<sub>n</sub></li>
 *   <li>...</li>
 * </ul>
 *
 * <p>Instead of using the classical representation with first derivatives only (y<sub>n</sub>,
 * s<sub>1</sub>(n) and q<sub>n</sub>), our implementation uses the Nordsieck vector with
 * higher degree scaled derivatives all taken at the same step (y<sub>n</sub>, s<sub>1</sub>(n)
 * and r<sub>n</sub>) where r<sub>n</sub> is defined as:
 * <div style="white-space: pre"><code>
 * r<sub>n</sub> = [ s<sub>2</sub>(n), s<sub>3</sub>(n) ... s<sub>k</sub>(n) ]<sup>T</sup>
 * </code></div>
 * (here again we omit the k index in the notation for clarity)
 *
 * <p>Taylor series formulas show that for any index offset i, s<sub>1</sub>(n-i) can be
 * computed from s<sub>1</sub>(n), s<sub>2</sub>(n) ... s<sub>k</sub>(n), the formula being exact
 * for degree k polynomials.
 * <div style="white-space: pre"><code>
 * s<sub>1</sub>(n-i) = s<sub>1</sub>(n) + &sum;<sub>j&gt;0</sub> (j+1) (-i)<sup>j</sup> s<sub>j+1</sub>(n)
 * </code></div>
 * The previous formula can be used with several values for i to compute the transform between
 * the classical representation and the Nordsieck vector. The transform between r<sub>n</sub>
 * and q<sub>n</sub> resulting from the Taylor series formulas above is:
 * <div style="white-space: pre"><code>
 * q<sub>n</sub> = s<sub>1</sub>(n) u + P r<sub>n</sub>
 * </code></div>
 * where u is the [ 1 1 ... 1 ]<sup>T</sup> vector and P is the (k-1)&times;(k-1) matrix built
 * with the (j+1) (-i)<sup>j</sup> terms, with i being the row number starting from 1 and j being
 * the column number starting from 1:
 * <pre>
 *        [  -2   3   -4    5  ... ]
 *        [  -4  12  -32   80  ... ]
 *   P =  [  -6  27 -108  405  ... ]
 *        [  -8  48 -256 1280  ... ]
 *        [          ...           ]
 * </pre>
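 *
 * <p>For concreteness, a minimal sketch (hypothetical helper, not part of
 * this class) building P from the (j+1) (-i)<sup>j</sup> rule; its first
 * row reproduces [ -2 3 -4 5 ... ]:
 * <pre>{@code
 * // hypothetical helper, for illustration only
 * static double[][] buildP(int k) {
 *     double[][] p = new double[k - 1][k - 1];
 *     for (int i = 1; i <= k - 1; i++) {      // row number, starting from 1
 *         for (int j = 1; j <= k - 1; j++) {  // column number, starting from 1
 *             p[i - 1][j - 1] = (j + 1) * Math.pow(-i, j);
 *         }
 *     }
 *     return p;
 * }
 * }</pre>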
 *
 * <p>Using the Nordsieck vector has several advantages:
 * <ul>
 *   <li>it greatly simplifies step interpolation as the interpolator mainly applies
 *   Taylor series formulas,</li>
 *   <li>it simplifies step changes that occur when discrete events that truncate
 *   the step are triggered,</li>
 *   <li>it allows the methods to be extended to support adaptive stepsize.</li>
 * </ul>
 *
 * <p>The Nordsieck vector at step n+1 is computed from the Nordsieck vector at step n as follows:
 * <ul>
 *   <li>y<sub>n+1</sub> = y<sub>n</sub> + s<sub>1</sub>(n) + u<sup>T</sup> r<sub>n</sub></li>
 *   <li>s<sub>1</sub>(n+1) = h f(t<sub>n+1</sub>, y<sub>n+1</sub>)</li>
 *   <li>r<sub>n+1</sub> = (s<sub>1</sub>(n) - s<sub>1</sub>(n+1)) P<sup>-1</sup> u + P<sup>-1</sup> A P r<sub>n</sub></li>
 * </ul>
 * where A is a row-shifting matrix (the lower left part is an identity matrix):
 * <pre>
 *        [ 0 0   ...  0 0 | 0 ]
 *        [ ---------------+---]
 *        [ 1 0   ...  0 0 | 0 ]
 *    A = [ 0 1   ...  0 0 | 0 ]
 *        [       ...      | 0 ]
 *        [ 0 0   ...  1 0 | 0 ]
 *        [ 0 0   ...  0 1 | 0 ]
 * </pre>
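 *
 * <p>The action of A on a vector is a simple downward shift of its components
 * (the first component becomes 0 and the last one is dropped); as a sketch
 * with plain arrays (hypothetical code, not this class's actual fields):
 * <pre>{@code
 * double[] v = { 1.0, 2.0, 3.0, 4.0 };   // some (k-1)-dimensional vector
 * double[] shifted = new double[v.length];
 * System.arraycopy(v, 0, shifted, 1, v.length - 1);
 * // shifted is now { 0.0, 1.0, 2.0, 3.0 }, i.e. A v
 * }</pre>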
 *
 * <p>The P<sup>-1</sup>u vector and the P<sup>-1</sup> A P matrix do not depend on the state,
 * they only depend on k and therefore are precomputed once and for all.</p>
 *
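 * <p>Typical use, as a sketch (the problem y' = -y, the order, and the
 * tolerance values are arbitrary illustrations, using the
 * FirstOrderDifferentialEquations and FirstOrderIntegrator interfaces from
 * the parent ode package):
 * <pre>{@code
 * FirstOrderDifferentialEquations ode = new FirstOrderDifferentialEquations() {
 *     public int getDimension() { return 1; }
 *     public void computeDerivatives(double t, double[] y, double[] yDot) {
 *         yDot[0] = -y[0];
 *     }
 * };
 * FirstOrderIntegrator integ = new AdamsBashforthIntegrator(4, 1e-6, 1.0, 1e-10, 1e-10);
 * double[] y0 = { 1.0 };            // initial state at t = 0
 * double[] y1 = new double[1];      // state at t = 1
 * integ.integrate(ode, 0.0, y0, 1.0, y1);
 * }</pre>
 *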
 * @since 2.0
 */
public class AdamsBashforthIntegrator extends AdamsIntegrator {

    /** Integrator method name. */
    private static final String METHOD_NAME = "Adams-Bashforth";

    /**
     * Build an Adams-Bashforth integrator with the given order and step control parameters.
     * @param nSteps number of steps of the method excluding the one being computed
     * @param minStep minimal step (sign is irrelevant, regardless of
     * integration direction, forward or backward), the last step can
     * be smaller than this
     * @param maxStep maximal step (sign is irrelevant, regardless of
     * integration direction, forward or backward), the last step can
     * be smaller than this
     * @param scalAbsoluteTolerance allowed absolute error
     * @param scalRelativeTolerance allowed relative error
     * @exception NumberIsTooSmallException if order is 1 or less
     */
    public AdamsBashforthIntegrator(final int nSteps,
                                    final double minStep, final double maxStep,
                                    final double scalAbsoluteTolerance,
                                    final double scalRelativeTolerance)
        throws NumberIsTooSmallException {
        super(METHOD_NAME, nSteps, nSteps, minStep, maxStep,
              scalAbsoluteTolerance, scalRelativeTolerance);
    }

    /**
     * Build an Adams-Bashforth integrator with the given order and step control parameters.
     * @param nSteps number of steps of the method excluding the one being computed
     * @param minStep minimal step (sign is irrelevant, regardless of
     * integration direction, forward or backward), the last step can
     * be smaller than this
     * @param maxStep maximal step (sign is irrelevant, regardless of
     * integration direction, forward or backward), the last step can
     * be smaller than this
     * @param vecAbsoluteTolerance allowed absolute error
     * @param vecRelativeTolerance allowed relative error
     * @exception IllegalArgumentException if order is 1 or less
     */
    public AdamsBashforthIntegrator(final int nSteps,
                                    final double minStep, final double maxStep,
                                    final double[] vecAbsoluteTolerance,
                                    final double[] vecRelativeTolerance)
        throws IllegalArgumentException {
        super(METHOD_NAME, nSteps, nSteps, minStep, maxStep,
              vecAbsoluteTolerance, vecRelativeTolerance);
    }

    /** Estimate error.
     * <p>
     * Error is estimated by interpolating back to the previous state using
     * the state Taylor expansion and comparing it to the actual previous state.
     * </p>
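     * <p>Concretely (a sketch of the quantity computed below, with m the
     * main set dimension and tol<sub>i</sub> the scalar or vectorial
     * tolerance for component i):
     * <div style="white-space: pre"><code>
     * error = sqrt( (1/m) &sum;<sub>i</sub> ((y<sub>back,i</sub> - y<sub>n,i</sub>) / tol<sub>i</sub>)<sup>2</sup> )
     * </code></div>
     * where y<sub>back</sub> is the predicted state Taylor-expanded back to
     * step start.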
     * @param previousState state vector at step start
     * @param predictedState predicted state vector at step end
     * @param predictedScaled predicted value of the scaled derivatives at step end
     * @param predictedNordsieck predicted value of the Nordsieck vector at step end
     * @return estimated normalized local discretization error
     */
    private double errorEstimation(final double[] previousState,
                                   final double[] predictedState,
                                   final double[] predictedScaled,
                                   final RealMatrix predictedNordsieck) {

        double error = 0;
        for (int i = 0; i < mainSetDimension; ++i) {
            final double yScale = JdkMath.abs(predictedState[i]);
            final double tol = (vecAbsoluteTolerance == null) ?
                               (scalAbsoluteTolerance + scalRelativeTolerance * yScale) :
                               (vecAbsoluteTolerance[i] + vecRelativeTolerance[i] * yScale);

            // apply Taylor formula from high order to low order,
            // for the sake of numerical accuracy
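            // (variation accumulates s2 - s3 + s4 - ..., minus s1 below, so that
            // predictedState[i] + variation is the predicted state expanded back
            // to step start; the ratio then compares it to the actual previous state)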
            double variation = 0;
            int sign = (predictedNordsieck.getRowDimension() & 1) == 0 ? -1 : 1;
            for (int k = predictedNordsieck.getRowDimension() - 1; k >= 0; --k) {
                variation += sign * predictedNordsieck.getEntry(k, i);
                sign       = -sign;
            }
            variation -= predictedScaled[i];

            final double ratio  = (predictedState[i] - previousState[i] + variation) / tol;
            error              += ratio * ratio;
        }

        return JdkMath.sqrt(error / mainSetDimension);
    }

    /** {@inheritDoc} */
    @Override
    public void integrate(final ExpandableStatefulODE equations, final double t)
        throws NumberIsTooSmallException, DimensionMismatchException,
               MaxCountExceededException, NoBracketingException {

        sanityChecks(equations, t);
        setEquations(equations);
        final boolean forward = t > equations.getTime();

        // initialize working arrays
        final double[] y    = equations.getCompleteState();
        final double[] yDot = new double[y.length];

        // set up an interpolator sharing the integrator arrays
        final NordsieckStepInterpolator interpolator = new NordsieckStepInterpolator();
        interpolator.reinitialize(y, forward,
                                  equations.getPrimaryMapper(), equations.getSecondaryMappers());

        // set up integration control objects
        initIntegration(equations.getTime(), y, t);

        // compute the initial Nordsieck vector using the configured starter integrator
        start(equations.getTime(), y, t);
        interpolator.reinitialize(stepStart, stepSize, scaled, nordsieck);
        interpolator.storeTime(stepStart);

        // reuse the step that was chosen by the starter integrator
        double hNew = stepSize;
        interpolator.rescale(hNew);

        // main integration loop
        isLastStep = false;
        do {

            interpolator.shift();
            final double[] predictedY      = new double[y.length];
            final double[] predictedScaled = new double[y.length];
            Array2DRowRealMatrix predictedNordsieck = null;
            double error = 10;
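            // the error estimate is normalized by the tolerances, so 1.0 is the
            // acceptance threshold; start above it to enter the loop at least once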
            while (error >= 1.0) {

                // predict a first estimate of the state at step end
                final double stepEnd = stepStart + hNew;
                interpolator.storeTime(stepEnd);
                final ExpandableStatefulODE expandable = getExpandable();
                final EquationsMapper primary = expandable.getPrimaryMapper();
                primary.insertEquationData(interpolator.getInterpolatedState(), predictedY);
                int index = 0;
                for (final EquationsMapper secondary : expandable.getSecondaryMappers()) {
                    secondary.insertEquationData(interpolator.getInterpolatedSecondaryState(index), predictedY);
                    ++index;
                }

                // evaluate the derivative
                computeDerivatives(stepEnd, predictedY, yDot);

                // predict Nordsieck vector at step end
                for (int j = 0; j < predictedScaled.length; ++j) {
                    predictedScaled[j] = hNew * yDot[j];
                }
                predictedNordsieck = updateHighOrderDerivativesPhase1(nordsieck);
                updateHighOrderDerivativesPhase2(scaled, predictedScaled, predictedNordsieck);

                // evaluate error
                error = errorEstimation(y, predictedY, predictedScaled, predictedNordsieck);

                if (error >= 1.0) {
                    // reject the step and attempt to reduce error by stepsize control
                    final double factor = computeStepGrowShrinkFactor(error);
                    hNew = filterStep(hNew * factor, forward, false);
                    interpolator.rescale(hNew);
                }
            }

            stepSize = hNew;
            final double stepEnd = stepStart + stepSize;
            interpolator.reinitialize(stepEnd, stepSize, predictedScaled, predictedNordsieck);

            // discrete events handling
            interpolator.storeTime(stepEnd);
            System.arraycopy(predictedY, 0, y, 0, y.length);
            stepStart = acceptStep(interpolator, y, yDot, t);
            scaled    = predictedScaled;
            nordsieck = predictedNordsieck;
            interpolator.reinitialize(stepEnd, stepSize, scaled, nordsieck);

            if (!isLastStep) {

                // prepare next step
                interpolator.storeTime(stepStart);

                if (resetOccurred) {
                    // some event handler has triggered changes that
                    // invalidate the derivatives, we need to restart from scratch
                    start(stepStart, y, t);
                    interpolator.reinitialize(stepStart, stepSize, scaled, nordsieck);
                }

                // stepsize control for next step
                final double  factor     = computeStepGrowShrinkFactor(error);
                final double  scaledH    = stepSize * factor;
                final double  nextT      = stepStart + scaledH;
                final boolean nextIsLast = forward ? (nextT >= t) : (nextT <= t);
                hNew = filterStep(scaledH, forward, nextIsLast);

                final double  filteredNextT      = stepStart + hNew;
                final boolean filteredNextIsLast = forward ? (filteredNextT >= t) : (filteredNextT <= t);
                if (filteredNextIsLast) {
                    hNew = t - stepStart;
                }

                interpolator.rescale(hNew);
            }
        } while (!isLastStep);

        // dispatch results
        equations.setTime(stepStart);
        equations.setCompleteState(y);

        resetInternalState();
    }
}