/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.commons.math4.legacy.ode.nonstiff;

import java.util.Arrays;

import org.apache.commons.math4.legacy.core.Field;
import org.apache.commons.math4.legacy.core.RealFieldElement;
import org.apache.commons.math4.legacy.exception.DimensionMismatchException;
import org.apache.commons.math4.legacy.exception.MaxCountExceededException;
import org.apache.commons.math4.legacy.exception.NoBracketingException;
import org.apache.commons.math4.legacy.exception.NumberIsTooSmallException;
import org.apache.commons.math4.legacy.linear.Array2DRowFieldMatrix;
import org.apache.commons.math4.legacy.linear.FieldMatrixPreservingVisitor;
import org.apache.commons.math4.legacy.ode.FieldExpandableODE;
import org.apache.commons.math4.legacy.ode.FieldODEState;
import org.apache.commons.math4.legacy.ode.FieldODEStateAndDerivative;
import org.apache.commons.math4.legacy.core.MathArrays;


/**
 * This class implements implicit Adams-Moulton integrators for Ordinary
 * Differential Equations.
 *
 * <p>Adams-Moulton methods (in fact due to Adams alone) are implicit
 * multistep ODE solvers. This implementation is a variation of the classical
 * one: it uses adaptive stepsize to implement error control, whereas
 * classical implementations use a fixed stepsize. The value of the state vector
 * at step n+1 is a simple combination of the value at step n and of the
 * derivatives at steps n+1, n, n-1 ... Since y'<sub>n+1</sub> is needed to
 * compute y<sub>n+1</sub>, another method must be used to compute a first
 * estimate of y<sub>n+1</sub>, then compute y'<sub>n+1</sub>, then compute
 * a final estimate of y<sub>n+1</sub> using the following formulas. Depending
 * on the number k of previous steps one wants to use for computing the next
 * value, different formulas are available for the final estimate:</p>
 * <ul>
 * <li>k = 1: y<sub>n+1</sub> = y<sub>n</sub> + h y'<sub>n+1</sub></li>
 * <li>k = 2: y<sub>n+1</sub> = y<sub>n</sub> + h (y'<sub>n+1</sub>+y'<sub>n</sub>)/2</li>
 * <li>k = 3: y<sub>n+1</sub> = y<sub>n</sub> + h (5y'<sub>n+1</sub>+8y'<sub>n</sub>-y'<sub>n-1</sub>)/12</li>
 * <li>k = 4: y<sub>n+1</sub> = y<sub>n</sub> + h (9y'<sub>n+1</sub>+19y'<sub>n</sub>-5y'<sub>n-1</sub>+y'<sub>n-2</sub>)/24</li>
 * <li>...</li>
 * </ul>
 *
 * <p>A k-step Adams-Moulton method is of order k+1.</p>
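 *
 * <p>As an illustration only (plain {@code double} arithmetic, independent of this class and its
 * field-based API), a single predictor-corrector pass of the k = 2 formula above, which is the
 * implicit trapezoidal rule, applied to the hypothetical problem y' = y with y(0) = 1 could read:</p>
 * <pre>{@code
 * double h       = 0.1;                              // step size
 * double yN      = 1.0;                              // y_n
 * double yDotN   = yN;                               // y'_n = f(t_n, y_n), here f(t, y) = y
 * double yPred   = yN + h * yDotN;                   // predictor: a first (explicit Euler) estimate of y_{n+1}
 * double yDotNp1 = yPred;                            // first evaluation: y'_{n+1} computed from that estimate
 * double yNp1    = yN + h * (yDotNp1 + yDotN) / 2;   // corrector: y_{n+1} = y_n + h (y'_{n+1} + y'_n)/2
 * }</pre>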
 *
 * <p><b>Implementation details</b></p>
 *
 * <p>We define scaled derivatives s<sub>i</sub>(n) at step n as:
 * <div style="white-space: pre"><code>
 * s<sub>1</sub>(n) = h y'<sub>n</sub> for first derivative
 * s<sub>2</sub>(n) = h<sup>2</sup>/2 y''<sub>n</sub> for second derivative
 * s<sub>3</sub>(n) = h<sup>3</sup>/6 y'''<sub>n</sub> for third derivative
 * ...
 * s<sub>k</sub>(n) = h<sup>k</sup>/k! y<sup>(k)</sup><sub>n</sub> for k<sup>th</sup> derivative
 * </code></div>
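 *
 * <p>For instance, a small stand-alone sketch (plain {@code double} arithmetic, not this class's API)
 * computing those scaled derivatives for the hypothetical solution y(t) = e<sup>t</sup> at t = 0,
 * where every derivative y<sup>(i)</sup>(0) equals 1, could be:</p>
 * <pre>{@code
 * double   h = 0.1;
 * double[] s = new double[5];            // s[i] will hold s_i(n) for i = 1..4
 * double   factorial = 1.0;
 * for (int i = 1; i < s.length; ++i) {
 *     factorial *= i;
 *     s[i] = Math.pow(h, i) / factorial; // s_i(n) = h^i / i! * y^(i)(t_n), with y^(i)(0) = 1 here
 * }
 * }</pre>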
 *
 * <p>The definitions above use the classical representation with several previous first
 * derivatives. Let us define
 * <div style="white-space: pre"><code>
 * q<sub>n</sub> = [ s<sub>1</sub>(n-1) s<sub>1</sub>(n-2) ... s<sub>1</sub>(n-(k-1)) ]<sup>T</sup>
 * </code></div>
 * (we omit the k index in the notation for clarity). With these definitions,
 * Adams-Moulton methods can be written:
 * <ul>
 * <li>k = 1: y<sub>n+1</sub> = y<sub>n</sub> + s<sub>1</sub>(n+1)</li>
 * <li>k = 2: y<sub>n+1</sub> = y<sub>n</sub> + 1/2 s<sub>1</sub>(n+1) + [ 1/2 ] q<sub>n+1</sub></li>
 * <li>k = 3: y<sub>n+1</sub> = y<sub>n</sub> + 5/12 s<sub>1</sub>(n+1) + [ 8/12 -1/12 ] q<sub>n+1</sub></li>
 * <li>k = 4: y<sub>n+1</sub> = y<sub>n</sub> + 9/24 s<sub>1</sub>(n+1) + [ 19/24 -5/24 1/24 ] q<sub>n+1</sub></li>
 * <li>...</li>
 * </ul>
 *
 * <p>Instead of using the classical representation with first derivatives only (y<sub>n</sub>,
 * s<sub>1</sub>(n+1) and q<sub>n+1</sub>), our implementation uses the Nordsieck vector with
 * higher degree scaled derivatives all taken at the same step (y<sub>n</sub>, s<sub>1</sub>(n)
 * and r<sub>n</sub>) where r<sub>n</sub> is defined as:
 * <div style="white-space: pre"><code>
 * r<sub>n</sub> = [ s<sub>2</sub>(n), s<sub>3</sub>(n) ... s<sub>k</sub>(n) ]<sup>T</sup>
 * </code></div>
 * (here again we omit the k index in the notation for clarity)
 *
 * <p>Taylor series formulas show that for any index offset i, s<sub>1</sub>(n-i) can be
 * computed from s<sub>1</sub>(n), s<sub>2</sub>(n) ... s<sub>k</sub>(n), the formula being exact
 * for degree k polynomials.
 * <div style="white-space: pre"><code>
 * s<sub>1</sub>(n-i) = s<sub>1</sub>(n) + ∑<sub>j>0</sub> (j+1) (-i)<sup>j</sup> s<sub>j+1</sub>(n)
 * </code></div>
 * The previous formula can be used with several values for i to compute the transform between
 * classical representation and Nordsieck vector. The transform between r<sub>n</sub>
 * and q<sub>n</sub> resulting from the Taylor series formulas above is:
 * <div style="white-space: pre"><code>
 * q<sub>n</sub> = s<sub>1</sub>(n) u + P r<sub>n</sub>
 * </code></div>
 * where u is the [ 1 1 ... 1 ]<sup>T</sup> vector and P is the (k-1)×(k-1) matrix built
 * with the (j+1) (-i)<sup>j</sup> terms with i being the row number starting from 1 and j being
 * the column number starting from 1:
 * <pre>
 *        [  -2   3   -4    5  ... ]
 *        [  -4  12  -32   80  ... ]
 *   P =  [  -6  27 -108  405  ... ]
 *        [  -8  48 -256 1280  ... ]
 *        [            ...         ]
 * </pre>
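 *
 * <p>As a stand-alone check (plain {@code double} arithmetic, not this class's internal API), the
 * matrix P can be rebuilt directly from the (j+1) (-i)<sup>j</sup> rule:</p>
 * <pre>{@code
 * int k = 5;                                       // number of steps, giving a (k-1)x(k-1) matrix
 * double[][] p = new double[k - 1][k - 1];
 * for (int i = 1; i <= k - 1; ++i) {               // row index, starting from 1
 *     for (int j = 1; j <= k - 1; ++j) {           // column index, starting from 1
 *         p[i - 1][j - 1] = (j + 1) * Math.pow(-i, j);
 *     }
 * }
 * // the first row of p is now [ -2, 3, -4, 5 ], matching the matrix displayed above
 * }</pre>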
 *
 * <p>Using the Nordsieck vector has several advantages:
 * <ul>
 * <li>it greatly simplifies step interpolation as the interpolator mainly applies
 * Taylor series formulas,</li>
 * <li>it simplifies step changes that occur when discrete events that truncate
 * the step are triggered,</li>
 * <li>it makes it possible to extend the methods in order to support adaptive stepsize.</li>
 * </ul>
 *
 * <p>The predicted Nordsieck vector at step n+1 is computed from the Nordsieck vector at step
 * n as follows:
 * <ul>
 * <li>Y<sub>n+1</sub> = y<sub>n</sub> + s<sub>1</sub>(n) + u<sup>T</sup> r<sub>n</sub></li>
 * <li>S<sub>1</sub>(n+1) = h f(t<sub>n+1</sub>, Y<sub>n+1</sub>)</li>
 * <li>R<sub>n+1</sub> = (s<sub>1</sub>(n) - S<sub>1</sub>(n+1)) P<sup>-1</sup> u + P<sup>-1</sup> A P r<sub>n</sub></li>
 * </ul>
 * where A is a row-shifting matrix (its lower-left part is an identity matrix):
 * <pre>
 *        [ 0 0   ...  0 0 | 0 ]
 *        [ ---------------+---]
 *        [ 1 0   ...  0 0 | 0 ]
 *   A =  [ 0 1   ...  0 0 | 0 ]
 *        [       ...      | 0 ]
 *        [ 0 0   ...  1 0 | 0 ]
 *        [ 0 0   ...  0 1 | 0 ]
 * </pre>
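 *
 * <p>(In plain {@code double} terms, and purely as an illustration, multiplying by A simply pushes
 * every component of a vector down one row and zeroes the first one:)</p>
 * <pre>{@code
 * double[] r       = { 2.0, 3.0, 4.0, 5.0 };
 * double[] shifted = new double[r.length];
 * for (int i = 1; i < r.length; ++i) {
 *     shifted[i] = r[i - 1];              // the lower-left identity block of A
 * }
 * // shifted is now [ 0.0, 2.0, 3.0, 4.0 ]
 * }</pre>
 *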
 * From this predicted vector, the corrected vector is computed as follows:
 * <ul>
 * <li>y<sub>n+1</sub> = y<sub>n</sub> + S<sub>1</sub>(n+1) + [ -1 +1 -1 +1 ... ±1 ] r<sub>n+1</sub></li>
 * <li>s<sub>1</sub>(n+1) = h f(t<sub>n+1</sub>, y<sub>n+1</sub>)</li>
 * <li>r<sub>n+1</sub> = R<sub>n+1</sub> + (s<sub>1</sub>(n+1) - S<sub>1</sub>(n+1)) P<sup>-1</sup> u</li>
 * </ul>
 * where the upper case Y<sub>n+1</sub>, S<sub>1</sub>(n+1) and R<sub>n+1</sub> represent the
 * predicted states whereas the lower case y<sub>n+1</sub>, s<sub>1</sub>(n+1) and r<sub>n+1</sub>
 * represent the corrected states.
 *
 * <p>The P<sup>-1</sup>u vector and the P<sup>-1</sup> A P matrix do not depend on the state;
 * they depend only on k and are therefore precomputed once and for all.</p>
 *
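 * <p>A schematic usage sketch is shown below; the field {@code field}, the problem wrapped in a
 * {@code FieldExpandableODE} instance {@code ode}, the initial {@code FieldODEState} {@code start}
 * and the target time {@code finalTime} are hypothetical and assumed to be built elsewhere:</p>
 * <pre>{@code
 * AdamsMoultonFieldIntegrator<T> integrator =
 *     new AdamsMoultonFieldIntegrator<>(field, 4,          // 4-step method (order 5)
 *                                       1.0e-8, 100.0,     // minimal and maximal step sizes
 *                                       1.0e-10, 1.0e-10); // absolute and relative tolerances
 * FieldODEStateAndDerivative<T> finalState = integrator.integrate(ode, start, finalTime);
 * }</pre>
 *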
 * @param <T> the type of the field elements
 * @since 3.6
 */
public class AdamsMoultonFieldIntegrator<T extends RealFieldElement<T>> extends AdamsFieldIntegrator<T> {

    /** Integrator method name. */
    private static final String METHOD_NAME = "Adams-Moulton";

    /**
     * Build an Adams-Moulton integrator with the given order and error control parameters.
     * @param field field to which the time and state vector elements belong
     * @param nSteps number of steps of the method excluding the one being computed
     * @param minStep minimal step (sign is irrelevant, regardless of
     * integration direction, forward or backward), the last step can
     * be smaller than this
     * @param maxStep maximal step (sign is irrelevant, regardless of
     * integration direction, forward or backward), the last step can
     * be smaller than this
     * @param scalAbsoluteTolerance allowed absolute error
     * @param scalRelativeTolerance allowed relative error
     * @exception NumberIsTooSmallException if order is 1 or less
     */
    public AdamsMoultonFieldIntegrator(final Field<T> field, final int nSteps,
                                       final double minStep, final double maxStep,
                                       final double scalAbsoluteTolerance,
                                       final double scalRelativeTolerance)
        throws NumberIsTooSmallException {
        super(field, METHOD_NAME, nSteps, nSteps + 1, minStep, maxStep,
              scalAbsoluteTolerance, scalRelativeTolerance);
    }

    /**
     * Build an Adams-Moulton integrator with the given order and error control parameters.
     * @param field field to which the time and state vector elements belong
     * @param nSteps number of steps of the method excluding the one being computed
     * @param minStep minimal step (sign is irrelevant, regardless of
     * integration direction, forward or backward), the last step can
     * be smaller than this
     * @param maxStep maximal step (sign is irrelevant, regardless of
     * integration direction, forward or backward), the last step can
     * be smaller than this
     * @param vecAbsoluteTolerance allowed absolute error
     * @param vecRelativeTolerance allowed relative error
     * @exception IllegalArgumentException if order is 1 or less
     */
    public AdamsMoultonFieldIntegrator(final Field<T> field, final int nSteps,
                                       final double minStep, final double maxStep,
                                       final double[] vecAbsoluteTolerance,
                                       final double[] vecRelativeTolerance)
        throws IllegalArgumentException {
        super(field, METHOD_NAME, nSteps, nSteps + 1, minStep, maxStep,
              vecAbsoluteTolerance, vecRelativeTolerance);
    }

    /** {@inheritDoc} */
    @Override
    public FieldODEStateAndDerivative<T> integrate(final FieldExpandableODE<T> equations,
                                                   final FieldODEState<T> initialState,
                                                   final T finalTime)
        throws NumberIsTooSmallException, DimensionMismatchException,
               MaxCountExceededException, NoBracketingException {

        sanityChecks(initialState, finalTime);
        final T   t0 = initialState.getTime();
        final T[] y  = equations.getMapper().mapState(initialState);
        setStepStart(initIntegration(equations, t0, y, finalTime));
        final boolean forward = finalTime.subtract(initialState.getTime()).getReal() > 0;

        // compute the initial Nordsieck vector using the configured starter integrator
        start(equations, getStepStart(), finalTime);

        // reuse the step that was chosen by the starter integrator
        FieldODEStateAndDerivative<T> stepStart = getStepStart();
        FieldODEStateAndDerivative<T> stepEnd =
                        AdamsFieldStepInterpolator.taylor(stepStart,
                                                          stepStart.getTime().add(getStepSize()),
                                                          getStepSize(), scaled, nordsieck);

        // main integration loop
        setIsLastStep(false);
        do {

            T[] predictedY = null;
            final T[] predictedScaled = MathArrays.buildArray(getField(), y.length);
            Array2DRowFieldMatrix<T> predictedNordsieck = null;
            T error = getField().getZero().add(10);
            while (error.subtract(1.0).getReal() >= 0.0) {

                // predict a first estimate of the state at step end (P in the PECE sequence)
                predictedY = stepEnd.getState();

                // evaluate a first estimate of the derivative (first E in the PECE sequence)
                final T[] yDot = computeDerivatives(stepEnd.getTime(), predictedY);

                // update Nordsieck vector
                for (int j = 0; j < predictedScaled.length; ++j) {
                    predictedScaled[j] = getStepSize().multiply(yDot[j]);
                }
                predictedNordsieck = updateHighOrderDerivativesPhase1(nordsieck);
                updateHighOrderDerivativesPhase2(scaled, predictedScaled, predictedNordsieck);

                // apply correction (C in the PECE sequence)
                error = predictedNordsieck.walkInOptimizedOrder(new Corrector(y, predictedScaled, predictedY));

                if (error.subtract(1.0).getReal() >= 0.0) {
                    // reject the step and attempt to reduce error by stepsize control
                    final T factor = computeStepGrowShrinkFactor(error);
                    rescale(filterStep(getStepSize().multiply(factor), forward, false));
                    stepEnd = AdamsFieldStepInterpolator.taylor(getStepStart(),
                                                                getStepStart().getTime().add(getStepSize()),
                                                                getStepSize(),
                                                                scaled,
                                                                nordsieck);
                }
            }

            // evaluate a final estimate of the derivative (second E in the PECE sequence)
            final T[] correctedYDot = computeDerivatives(stepEnd.getTime(), predictedY);

            // update Nordsieck vector
            final T[] correctedScaled = MathArrays.buildArray(getField(), y.length);
            for (int j = 0; j < correctedScaled.length; ++j) {
                correctedScaled[j] = getStepSize().multiply(correctedYDot[j]);
            }
            updateHighOrderDerivativesPhase2(predictedScaled, correctedScaled, predictedNordsieck);

            // discrete events handling
            stepEnd = new FieldODEStateAndDerivative<>(stepEnd.getTime(), predictedY, correctedYDot);
            setStepStart(acceptStep(new AdamsFieldStepInterpolator<>(getStepSize(), stepEnd,
                                                                     correctedScaled, predictedNordsieck, forward,
                                                                     getStepStart(), stepEnd,
                                                                     equations.getMapper()),
                                    finalTime));
            scaled    = correctedScaled;
            nordsieck = predictedNordsieck;

            if (!isLastStep()) {

                System.arraycopy(predictedY, 0, y, 0, y.length);

                if (resetOccurred()) {
                    // some events handler has triggered changes that
                    // invalidate the derivatives, we need to restart from scratch
                    start(equations, getStepStart(), finalTime);
                }

                // stepsize control for next step
                final T       factor     = computeStepGrowShrinkFactor(error);
                final T       scaledH    = getStepSize().multiply(factor);
                final T       nextT      = getStepStart().getTime().add(scaledH);
                final boolean nextIsLast = forward ?
                                           nextT.subtract(finalTime).getReal() >= 0 :
                                           nextT.subtract(finalTime).getReal() <= 0;
                T hNew = filterStep(scaledH, forward, nextIsLast);

                final T       filteredNextT      = getStepStart().getTime().add(hNew);
                final boolean filteredNextIsLast = forward ?
                                                   filteredNextT.subtract(finalTime).getReal() >= 0 :
                                                   filteredNextT.subtract(finalTime).getReal() <= 0;
                if (filteredNextIsLast) {
                    hNew = finalTime.subtract(getStepStart().getTime());
                }

                rescale(hNew);
                stepEnd = AdamsFieldStepInterpolator.taylor(getStepStart(), getStepStart().getTime().add(getStepSize()),
                                                            getStepSize(), scaled, nordsieck);
            }
        } while (!isLastStep());

        final FieldODEStateAndDerivative<T> finalState = getStepStart();
        setStepStart(null);
        setStepSize(null);
        return finalState;
    }

    /** Corrector for current state in Adams-Moulton method.
     * <p>
     * This visitor implements the Taylor series formula:
     * <pre>
     * Y<sub>n+1</sub> = y<sub>n</sub> + s<sub>1</sub>(n+1) + [ -1 +1 -1 +1 ... ±1 ] r<sub>n+1</sub>
     * </pre>
     * </p>
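     * <p>As an illustration only (plain {@code double} arithmetic on a single component, whereas the
     * visitor below works column by column on the full Nordsieck matrix), the alternating-sign sum
     * applied to hypothetical rows of r<sub>n+1</sub> could be computed as:</p>
     * <pre>{@code
     * double[] r = { 0.5, -0.2, 0.05 };               // rows of r_{n+1} for one component
     * double correction = 0.0;
     * for (int i = 0; i < r.length; ++i) {
     *     correction += (i % 2 == 0) ? -r[i] : r[i];  // the [ -1 +1 -1 ... ] coefficients
     * }
     * // Y_{n+1} = y_n + s_1(n+1) + correction
     * }</pre>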
     */
    private final class Corrector implements FieldMatrixPreservingVisitor<T> {

        /** Previous state. */
        private final T[] previous;

        /** Current scaled first derivative. */
        private final T[] scaled;

        /** Current state before correction. */
        private final T[] before;

        /** Current state after correction. */
        private final T[] after;

        /** Simple constructor.
         * @param previous previous state
         * @param scaled current scaled first derivative
         * @param state state to correct (will be overwritten after visit)
         */
        Corrector(final T[] previous, final T[] scaled, final T[] state) {
            this.previous = previous;
            this.scaled   = scaled;
            this.after    = state;
            this.before   = state.clone();
        }

        /** {@inheritDoc} */
        @Override
        public void start(int rows, int columns,
                          int startRow, int endRow, int startColumn, int endColumn) {
            Arrays.fill(after, getField().getZero());
        }

        /** {@inheritDoc} */
        @Override
        public void visit(int row, int column, T value) {
            if ((row & 0x1) == 0) {
                after[column] = after[column].subtract(value);
            } else {
                after[column] = after[column].add(value);
            }
        }

        /**
         * End visiting the Nordsieck vector.
         * <p>The correction is used to control the stepsize, so its amplitude is
         * considered to be an error which must be normalized according to the
         * error control settings. If the normalized value is greater than 1,
         * the correction was too large and the step must be rejected.</p>
         * @return the normalized correction; if greater than 1, the step
         * must be rejected
         */
        @Override
        public T end() {

            T error = getField().getZero();
            for (int i = 0; i < after.length; ++i) {
                after[i] = after[i].add(previous[i].add(scaled[i]));
                if (i < mainSetDimension) {
                    final T yScale = RealFieldElement.max(previous[i].abs(), after[i].abs());
                    final T tol    = (vecAbsoluteTolerance == null) ?
                                     yScale.multiply(scalRelativeTolerance).add(scalAbsoluteTolerance) :
                                     yScale.multiply(vecRelativeTolerance[i]).add(vecAbsoluteTolerance[i]);
                    final T ratio  = after[i].subtract(before[i]).divide(tol); // (corrected - predicted) / tol
                    error = error.add(ratio.multiply(ratio));
                }
            }

            return error.divide(mainSetDimension).sqrt();
        }
    }
}