/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.commons.math4.legacy.ode.nonstiff;

import org.apache.commons.math4.legacy.core.Field;
import org.apache.commons.math4.legacy.core.RealFieldElement;
import org.apache.commons.math4.legacy.exception.DimensionMismatchException;
import org.apache.commons.math4.legacy.exception.MaxCountExceededException;
import org.apache.commons.math4.legacy.exception.NoBracketingException;
import org.apache.commons.math4.legacy.exception.NumberIsTooSmallException;
import org.apache.commons.math4.legacy.linear.Array2DRowFieldMatrix;
import org.apache.commons.math4.legacy.linear.FieldMatrix;
import org.apache.commons.math4.legacy.ode.FieldExpandableODE;
import org.apache.commons.math4.legacy.ode.FieldODEState;
import org.apache.commons.math4.legacy.ode.FieldODEStateAndDerivative;
import org.apache.commons.math4.legacy.core.MathArrays;


/**
 * This class implements explicit Adams-Bashforth integrators for Ordinary
 * Differential Equations.
 *
 * <p>Adams-Bashforth methods (in fact due to Adams alone) are explicit
 * multistep ODE solvers. This implementation is a variation of the classical
 * one: it uses an adaptive stepsize to implement error control, whereas
 * classical implementations use a fixed step size. The value of the state
 * vector at step n+1 is a simple combination of the value at step n and of
 * the derivatives at steps n, n-1, n-2 ... Depending on the number k of
 * previous steps one wants to use for computing the next value, different
 * formulas are available:</p>
 * <ul>
 *   <li>k = 1: y<sub>n+1</sub> = y<sub>n</sub> + h y'<sub>n</sub></li>
 *   <li>k = 2: y<sub>n+1</sub> = y<sub>n</sub> + h (3y'<sub>n</sub>-y'<sub>n-1</sub>)/2</li>
 *   <li>k = 3: y<sub>n+1</sub> = y<sub>n</sub> + h (23y'<sub>n</sub>-16y'<sub>n-1</sub>+5y'<sub>n-2</sub>)/12</li>
 *   <li>k = 4: y<sub>n+1</sub> = y<sub>n</sub> + h (55y'<sub>n</sub>-59y'<sub>n-1</sub>+37y'<sub>n-2</sub>-9y'<sub>n-3</sub>)/24</li>
 *   <li>...</li>
 * </ul>
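 *
 * <p>For example, the k = 2 formula above can be applied directly to the scalar ODE
 * y' = -y with a fixed step. The sketch below is an illustration only and is not part
 * of this class; a single Euler step plays the role of the starter integrator:</p>
 * <pre>{@code
 * // fixed-step k = 2 sketch for the scalar ODE y' = -y (illustration only)
 * double h        = 0.1;
 * double yPrev    = 1.0;                                // y at step n-1
 * double y        = yPrev + h * -yPrev;                 // y at step n (one Euler start step)
 * double yDotPrev = -yPrev;                             // y'(n-1)
 * double yDot     = -y;                                 // y'(n)
 * double yNext    = y + h * (3 * yDot - yDotPrev) / 2;  // y(n+1)
 * }</pre>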
 *
 * <p>A k-step Adams-Bashforth method is of order k.</p>
 *
 * <p><b>Implementation details</b></p>
 *
 * <p>We define scaled derivatives s<sub>i</sub>(n) at step n as:
 * <div style="white-space: pre"><code>
 * s<sub>1</sub>(n) = h y'<sub>n</sub> for first derivative
 * s<sub>2</sub>(n) = h<sup>2</sup>/2 y''<sub>n</sub> for second derivative
 * s<sub>3</sub>(n) = h<sup>3</sup>/6 y'''<sub>n</sub> for third derivative
 * ...
 * s<sub>k</sub>(n) = h<sup>k</sup>/k! y<sup>(k)</sup><sub>n</sub> for k<sup>th</sup> derivative
 * </code></div>
 *
 * <p>The definitions above use the classical representation with several previous first
 * derivatives. Let us define
 * <div style="white-space: pre"><code>
 *   q<sub>n</sub> = [ s<sub>1</sub>(n-1) s<sub>1</sub>(n-2) ... s<sub>1</sub>(n-(k-1)) ]<sup>T</sup>
 * </code></div>
 * (we omit the k index in the notation for clarity). With these definitions,
 * Adams-Bashforth methods can be written:
 * <ul>
 *   <li>k = 1: y<sub>n+1</sub> = y<sub>n</sub> + s<sub>1</sub>(n)</li>
 *   <li>k = 2: y<sub>n+1</sub> = y<sub>n</sub> + 3/2 s<sub>1</sub>(n) + [ -1/2 ] q<sub>n</sub></li>
 *   <li>k = 3: y<sub>n+1</sub> = y<sub>n</sub> + 23/12 s<sub>1</sub>(n) + [ -16/12 5/12 ] q<sub>n</sub></li>
 *   <li>k = 4: y<sub>n+1</sub> = y<sub>n</sub> + 55/24 s<sub>1</sub>(n) + [ -59/24 37/24 -9/24 ] q<sub>n</sub></li>
 *   <li>...</li>
 * </ul>
 *
 * <p>Instead of using the classical representation with first derivatives only (y<sub>n</sub>,
 * s<sub>1</sub>(n) and q<sub>n</sub>), our implementation uses the Nordsieck vector with
 * higher-degree scaled derivatives all taken at the same step (y<sub>n</sub>, s<sub>1</sub>(n)
 * and r<sub>n</sub>) where r<sub>n</sub> is defined as:
 * <div style="white-space: pre"><code>
 * r<sub>n</sub> = [ s<sub>2</sub>(n), s<sub>3</sub>(n) ... s<sub>k</sub>(n) ]<sup>T</sup>
 * </code></div>
 * (here again we omit the k index in the notation for clarity)
 *
 * <p>Taylor series formulas show that for any index offset i, s<sub>1</sub>(n-i) can be
 * computed from s<sub>1</sub>(n), s<sub>2</sub>(n) ... s<sub>k</sub>(n), the formula being exact
 * for degree k polynomials.
 * <div style="white-space: pre"><code>
 * s<sub>1</sub>(n-i) = s<sub>1</sub>(n) + &sum;<sub>j&gt;0</sub> (j+1) (-i)<sup>j</sup> s<sub>j+1</sub>(n)
 * </code></div>
 * The previous formula can be used with several values for i to compute the transform between
 * the classical representation and the Nordsieck vector. The transform between r<sub>n</sub>
 * and q<sub>n</sub> resulting from the Taylor series formulas above is:
 * <div style="white-space: pre"><code>
 * q<sub>n</sub> = s<sub>1</sub>(n) u + P r<sub>n</sub>
 * </code></div>
 * where u is the [ 1 1 ... 1 ]<sup>T</sup> vector and P is the (k-1)&times;(k-1) matrix built
 * from the (j+1) (-i)<sup>j</sup> terms, where i is the row number and j the column number,
 * both starting from 1:
 * <pre>
 *        [  -2   3   -4    5  ... ]
 *        [  -4  12  -32   80  ... ]
 *   P =  [  -6  27 -108  405  ... ]
 *        [  -8  48 -256 1280  ... ]
 *        [          ...           ]
 * </pre>
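 *
 * <p>As a sanity check (a sketch only, not part of this class), the rows displayed above
 * can be regenerated directly from the (j+1) (-i)<sup>j</sup> definition:</p>
 * <pre>{@code
 * int k = 5;                                  // number of steps, so P is 4 x 4 here
 * double[][] p = new double[k - 1][k - 1];
 * for (int i = 1; i <= k - 1; ++i) {          // row number
 *     for (int j = 1; j <= k - 1; ++j) {      // column number
 *         p[i - 1][j - 1] = (j + 1) * Math.pow(-i, j);
 *     }
 * }
 * // p[0] = { -2, 3, -4, 5 }, p[1] = { -4, 12, -32, 80 }, ... as shown above
 * }</pre>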
 *
 * <p>Using the Nordsieck vector has several advantages:
 * <ul>
 *   <li>it greatly simplifies step interpolation as the interpolator mainly applies
 *   Taylor series formulas,</li>
 *   <li>it simplifies step changes that occur when discrete events that truncate
 *   the step are triggered,</li>
 *   <li>it allows extending the methods to support adaptive stepsize.</li>
 * </ul>
 *
 * <p>The Nordsieck vector at step n+1 is computed from the Nordsieck vector at step n as follows:
 * <ul>
 *   <li>y<sub>n+1</sub> = y<sub>n</sub> + s<sub>1</sub>(n) + u<sup>T</sup> r<sub>n</sub></li>
 *   <li>s<sub>1</sub>(n+1) = h f(t<sub>n+1</sub>, y<sub>n+1</sub>)</li>
 *   <li>r<sub>n+1</sub> = (s<sub>1</sub>(n) - s<sub>1</sub>(n+1)) P<sup>-1</sup> u + P<sup>-1</sup> A P r<sub>n</sub></li>
 * </ul>
 * where A is a row-shifting matrix (its lower-left part is an identity matrix):
 * <pre>
 *        [ 0 0   ...  0 0 | 0 ]
 *        [ ---------------+---]
 *        [ 1 0   ...  0 0 | 0 ]
 *    A = [ 0 1   ...  0 0 | 0 ]
 *        [       ...      | 0 ]
 *        [ 0 0   ...  1 0 | 0 ]
 *        [ 0 0   ...  0 1 | 0 ]
 * </pre>
 *
 * <p>The P<sup>-1</sup>u vector and the P<sup>-1</sup> A P matrix do not depend on the state;
 * they depend only on k and are therefore precomputed once and for all.</p>
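 *
 * <p>As a concrete rendering of the recurrence above (a sketch only, not part of this class,
 * written for a scalar state with plain {@code double} arithmetic; the {@code pInvU} and
 * {@code pInvAP} parameters stand for the precomputed P<sup>-1</sup> u and P<sup>-1</sup> A P,
 * and {@code f} for the ODE right-hand side):</p>
 * <pre>{@code
 * static double[] advance(final double t, final double h, final double y,
 *                         final double s1, final double[] r,
 *                         final double[] pInvU, final double[][] pInvAP,
 *                         final java.util.function.DoubleBinaryOperator f) {
 *     double yNext = y + s1;                                      // y(n+1) = y(n) + s1(n) + u^T r(n)
 *     for (int i = 0; i < r.length; ++i) {
 *         yNext += r[i];
 *     }
 *     final double s1Next = h * f.applyAsDouble(t + h, yNext);    // s1(n+1) = h f(t(n+1), y(n+1))
 *     final double[] next = new double[2 + r.length];             // { y(n+1), s1(n+1), r(n+1) ... }
 *     next[0] = yNext;
 *     next[1] = s1Next;
 *     for (int i = 0; i < r.length; ++i) {   // r(n+1) = (s1(n) - s1(n+1)) P^-1 u + P^-1 A P r(n)
 *         next[2 + i] = (s1 - s1Next) * pInvU[i];
 *         for (int j = 0; j < r.length; ++j) {
 *             next[2 + i] += pInvAP[i][j] * r[j];
 *         }
 *     }
 *     return next;
 * }
 * }</pre>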
 *
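 * <p>A typical use could look as follows (a sketch with a generic field type {@code T};
 * the {@code field}, {@code problem} (an ODE implementation accepted by
 * {@link FieldExpandableODE}), {@code t0}, {@code y0} and {@code finalTime} values are
 * assumed to be created elsewhere):</p>
 * <pre>{@code
 * AdamsBashforthFieldIntegrator<T> integrator =
 *     new AdamsBashforthFieldIntegrator<>(field, 4, 1.0e-6, 100.0, 1.0e-10, 1.0e-10);
 * FieldODEState<T> start = new FieldODEState<>(t0, y0);           // t0 : T, y0 : T[]
 * FieldODEStateAndDerivative<T> end =
 *     integrator.integrate(new FieldExpandableODE<>(problem), start, finalTime);
 * }</pre>
 *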
 * @param <T> the type of the field elements
 * @since 3.6
 */
public class AdamsBashforthFieldIntegrator<T extends RealFieldElement<T>> extends AdamsFieldIntegrator<T> {

    /** Integrator method name. */
    private static final String METHOD_NAME = "Adams-Bashforth";

    /**
     * Build an Adams-Bashforth integrator with the given order and step control parameters.
     * @param field field to which the time and state vector elements belong
     * @param nSteps number of steps of the method excluding the one being computed
     * @param minStep minimal step (sign is irrelevant, regardless of
     * integration direction, forward or backward), the last step can
     * be smaller than this
     * @param maxStep maximal step (sign is irrelevant, regardless of
     * integration direction, forward or backward), the last step can
     * be smaller than this
     * @param scalAbsoluteTolerance allowed absolute error
     * @param scalRelativeTolerance allowed relative error
     * @exception NumberIsTooSmallException if order is 1 or less
     */
    public AdamsBashforthFieldIntegrator(final Field<T> field, final int nSteps,
                                         final double minStep, final double maxStep,
                                         final double scalAbsoluteTolerance,
                                         final double scalRelativeTolerance)
        throws NumberIsTooSmallException {
        super(field, METHOD_NAME, nSteps, nSteps, minStep, maxStep,
              scalAbsoluteTolerance, scalRelativeTolerance);
    }

    /**
     * Build an Adams-Bashforth integrator with the given order and step control parameters.
     * @param field field to which the time and state vector elements belong
     * @param nSteps number of steps of the method excluding the one being computed
     * @param minStep minimal step (sign is irrelevant, regardless of
     * integration direction, forward or backward), the last step can
     * be smaller than this
     * @param maxStep maximal step (sign is irrelevant, regardless of
     * integration direction, forward or backward), the last step can
     * be smaller than this
     * @param vecAbsoluteTolerance allowed absolute error
     * @param vecRelativeTolerance allowed relative error
     * @exception IllegalArgumentException if order is 1 or less
     */
    public AdamsBashforthFieldIntegrator(final Field<T> field, final int nSteps,
                                         final double minStep, final double maxStep,
                                         final double[] vecAbsoluteTolerance,
                                         final double[] vecRelativeTolerance)
        throws IllegalArgumentException {
        super(field, METHOD_NAME, nSteps, nSteps, minStep, maxStep,
              vecAbsoluteTolerance, vecRelativeTolerance);
    }

    /** Estimate error.
     * <p>
     * The error is estimated by interpolating back to the previous state using
     * the state Taylor expansion and comparing to the real previous state.
     * </p>
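     * <p>
     * In other words, for each component i of the main state set (a summary of the code
     * below, with m the main set dimension and tol<sub>i</sub> the componentwise tolerance):
     * </p>
     * <div style="white-space: pre"><code>
     * back<sub>i</sub> = y<sub>n+1,i</sub> - s<sub>1,i</sub>(n+1) + s<sub>2,i</sub>(n+1) - s<sub>3,i</sub>(n+1) + ...
     * error = sqrt( 1/m &sum;<sub>i</sub> ((back<sub>i</sub> - y<sub>n,i</sub>) / tol<sub>i</sub>)<sup>2</sup> )
     * </code></div>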
     * @param previousState state vector at step start
     * @param predictedState predicted state vector at step end
     * @param predictedScaled predicted value of the scaled derivatives at step end
     * @param predictedNordsieck predicted value of the Nordsieck vector at step end
     * @return estimated normalized local discretization error
     */
    private T errorEstimation(final T[] previousState,
                              final T[] predictedState,
                              final T[] predictedScaled,
                              final FieldMatrix<T> predictedNordsieck) {

        T error = getField().getZero();
        for (int i = 0; i < mainSetDimension; ++i) {
            final T yScale = predictedState[i].abs();
            final T tol = (vecAbsoluteTolerance == null) ?
                          yScale.multiply(scalRelativeTolerance).add(scalAbsoluteTolerance) :
                          yScale.multiply(vecRelativeTolerance[i]).add(vecAbsoluteTolerance[i]);

            // apply Taylor formula from high order to low order,
            // for the sake of numerical accuracy
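            // (stepping back by -h contributes +s2 - s3 + s4 - ... from the Nordsieck rows,
            // the -s1 term being subtracted separately below, hence the alternating sign
            // initialized here for the highest stored derivative)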
            T variation = getField().getZero();
            int sign = (predictedNordsieck.getRowDimension() & 1) == 0 ? -1 : 1;
            for (int k = predictedNordsieck.getRowDimension() - 1; k >= 0; --k) {
                variation = variation.add(predictedNordsieck.getEntry(k, i).multiply(sign));
                sign      = -sign;
            }
            variation = variation.subtract(predictedScaled[i]);

            final T ratio  = predictedState[i].subtract(previousState[i]).add(variation).divide(tol);
            error = error.add(ratio.multiply(ratio));
        }

        return error.divide(mainSetDimension).sqrt();
    }

    /** {@inheritDoc} */
    @Override
    public FieldODEStateAndDerivative<T> integrate(final FieldExpandableODE<T> equations,
                                                   final FieldODEState<T> initialState,
                                                   final T finalTime)
        throws NumberIsTooSmallException, DimensionMismatchException,
               MaxCountExceededException, NoBracketingException {

        sanityChecks(initialState, finalTime);
        final T   t0 = initialState.getTime();
        final T[] y  = equations.getMapper().mapState(initialState);
        setStepStart(initIntegration(equations, t0, y, finalTime));
        final boolean forward = finalTime.subtract(initialState.getTime()).getReal() > 0;

        // compute the initial Nordsieck vector using the configured starter integrator
        start(equations, getStepStart(), finalTime);

        // reuse the step that was chosen by the starter integrator
        FieldODEStateAndDerivative<T> stepStart = getStepStart();
        FieldODEStateAndDerivative<T> stepEnd   =
                        AdamsFieldStepInterpolator.taylor(stepStart,
                                                          stepStart.getTime().add(getStepSize()),
                                                          getStepSize(), scaled, nordsieck);

        // main integration loop
        setIsLastStep(false);
        do {

            T[] predictedY = null;
            final T[] predictedScaled = MathArrays.buildArray(getField(), y.length);
            Array2DRowFieldMatrix<T> predictedNordsieck = null;
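            // dummy error value larger than 1 so that the correction loop below runs at least once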
            T error = getField().getZero().add(10);
            while (error.subtract(1.0).getReal() >= 0.0) {

                // predict a first estimate of the state at step end
                predictedY = stepEnd.getState();

                // evaluate the derivative
                final T[] yDot = computeDerivatives(stepEnd.getTime(), predictedY);

                // predict Nordsieck vector at step end
                for (int j = 0; j < predictedScaled.length; ++j) {
                    predictedScaled[j] = getStepSize().multiply(yDot[j]);
                }
                predictedNordsieck = updateHighOrderDerivativesPhase1(nordsieck);
                updateHighOrderDerivativesPhase2(scaled, predictedScaled, predictedNordsieck);

                // evaluate error
                error = errorEstimation(y, predictedY, predictedScaled, predictedNordsieck);

                if (error.subtract(1.0).getReal() >= 0.0) {
                    // reject the step and attempt to reduce error by stepsize control
                    final T factor = computeStepGrowShrinkFactor(error);
                    rescale(filterStep(getStepSize().multiply(factor), forward, false));
                    stepEnd = AdamsFieldStepInterpolator.taylor(getStepStart(),
                                                                getStepStart().getTime().add(getStepSize()),
                                                                getStepSize(),
                                                                scaled,
                                                                nordsieck);
                }
            }

            // discrete events handling
            setStepStart(acceptStep(new AdamsFieldStepInterpolator<>(getStepSize(), stepEnd,
                                                                     predictedScaled, predictedNordsieck, forward,
                                                                     getStepStart(), stepEnd,
                                                                     equations.getMapper()),
                                    finalTime));
            scaled    = predictedScaled;
            nordsieck = predictedNordsieck;

            if (!isLastStep()) {

                System.arraycopy(predictedY, 0, y, 0, y.length);

                if (resetOccurred()) {
                    // some event handler has triggered changes that invalidate
                    // the derivatives, so we need to restart from scratch
                    start(equations, getStepStart(), finalTime);
                }

                // stepsize control for next step
                final T       factor     = computeStepGrowShrinkFactor(error);
                final T       scaledH    = getStepSize().multiply(factor);
                final T       nextT      = getStepStart().getTime().add(scaledH);
                final boolean nextIsLast = forward ?
                                           nextT.subtract(finalTime).getReal() >= 0 :
                                           nextT.subtract(finalTime).getReal() <= 0;
                T hNew = filterStep(scaledH, forward, nextIsLast);

                final T       filteredNextT      = getStepStart().getTime().add(hNew);
                final boolean filteredNextIsLast = forward ?
                                                   filteredNextT.subtract(finalTime).getReal() >= 0 :
                                                   filteredNextT.subtract(finalTime).getReal() <= 0;
                if (filteredNextIsLast) {
                    hNew = finalTime.subtract(getStepStart().getTime());
                }

                rescale(hNew);
                stepEnd = AdamsFieldStepInterpolator.taylor(getStepStart(), getStepStart().getTime().add(getStepSize()),
                                                            getStepSize(), scaled, nordsieck);
            }
        } while (!isLastStep());

        final FieldODEStateAndDerivative<T> finalState = getStepStart();
        setStepStart(null);
        setStepSize(null);
        return finalState;
    }
}