1 /*
2 * Licensed to the Apache Software Foundation (ASF) under one or more
3 * contributor license agreements. See the NOTICE file distributed with
4 * this work for additional information regarding copyright ownership.
5 * The ASF licenses this file to You under the Apache License, Version 2.0
6 * (the "License"); you may not use this file except in compliance with
7 * the License. You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17 package org.apache.commons.math4.legacy.linear;
18
19 import org.apache.commons.math4.legacy.exception.DimensionMismatchException;
20 import org.apache.commons.math4.legacy.exception.MaxCountExceededException;
21 import org.apache.commons.math4.legacy.exception.NullArgumentException;
22 import org.apache.commons.math4.legacy.exception.util.ExceptionContext;
23 import org.apache.commons.math4.core.jdkmath.JdkMath;
24
25 /**
26 * <p>
27 * Implementation of the SYMMLQ iterative linear solver proposed by <a
28 * href="#PAIG1975">Paige and Saunders (1975)</a>. This implementation is
29 * largely based on the FORTRAN code by Prof. Michael A. Saunders, available <a
30 * href="http://www.stanford.edu/group/SOL/software/symmlq/f77/">here</a>.
31 * </p>
32 * <p>
33 * SYMMLQ is designed to solve the system of linear equations A · x = b
34 * where A is an n × n self-adjoint linear operator (defined as a
35 * {@link RealLinearOperator}), and b is a given vector. The operator A is not
36 * required to be positive definite. If A is known to be positive definite, the method of
37 * conjugate gradients might be preferred, since it will require about the same
38 * number of iterations as SYMMLQ but slightly less work per iteration.
39 * </p>
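 * <p>
 * The following is a minimal usage sketch; {@code a} and {@code b} are assumed
 * to be an n × n self-adjoint {@link RealLinearOperator} and an n-dimensional
 * {@link RealVector}, respectively (they are placeholders, not part of this API):
 * </p>
 * <pre>{@code
 * // at most 100 iterations, tolerance delta = 1e-10, self-adjointness checks enabled
 * SymmLQ solver = new SymmLQ(100, 1.0e-10, true);
 * RealVector x = solver.solve(a, b); // estimate of the solution of A . x = b
 * }</pre>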
40 * <p>
41 * SYMMLQ is designed to solve the system (A - shift · I) · x = b,
42 * where shift is a specified scalar value. If shift and b are suitably chosen,
43 * the computed vector x may approximate an (unnormalized) eigenvector of A, as
44 * in the methods of inverse iteration and/or Rayleigh-quotient iteration.
45 * Again, the linear operator (A - shift · I) need not be positive
46 * definite (but <em>must</em> be self-adjoint). The work per iteration is very
47 * slightly less if shift = 0.
48 * </p>
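 * <p>
 * As an illustration, the following sketch (with {@code a} and {@code b} as in
 * the sketch above) uses the Rayleigh quotient of b as the shift, in the spirit
 * of Rayleigh-quotient iteration:
 * </p>
 * <pre>{@code
 * SymmLQ solver = new SymmLQ(100, 1.0e-10, false);
 * double shift = b.dotProduct(a.operate(b)) / b.dotProduct(b);
 * // goodb = true, since x is expected to contain a large multiple of b
 * RealVector x = solver.solve(a, b, true, shift);
 * }</pre>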
49 * <p><b>Preconditioning</b></p>
50 * <p>
51 * Preconditioning may reduce the number of iterations required. The solver may
52 * be provided with a positive definite preconditioner
53 * M = P<sup>T</sup> · P
54 * that is known to approximate
55 * (A - shift · I)<sup>-1</sup> in some sense, where matrix-vector
56 * products of the form M · y can be computed efficiently. Then
57 * SYMMLQ will implicitly solve the system of equations
58 * P · (A - shift · I) · P<sup>T</sup> ·
59 * x<sub>hat</sub> = P · b, i.e.
60 * A<sub>hat</sub> · x<sub>hat</sub> = b<sub>hat</sub>,
61 * where
62 * A<sub>hat</sub> = P · (A - shift · I) · P<sup>T</sup>,
63 * b<sub>hat</sub> = P · b,
64 * and return the solution
65 * x = P<sup>T</sup> · x<sub>hat</sub>.
66 * The associated residual is
67 * r<sub>hat</sub> = b<sub>hat</sub> - A<sub>hat</sub> · x<sub>hat</sub>
68 * = P · [b - (A - shift · I) · x]
69 * = P · r.
70 * </p>
71 * <p>
72 * In the case of preconditioning, the {@link IterativeLinearSolverEvent}s that
73 * this solver fires are such that
74 * {@link IterativeLinearSolverEvent#getNormOfResidual()} returns the norm of
75 * the <em>preconditioned</em>, updated residual, ||P · r||, not the norm
76 * of the <em>true</em> residual ||r||.
77 * </p>
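 * <p>
 * A sketch of a preconditioned solve, assuming {@code m} is a positive definite
 * {@link RealLinearOperator} approximating
 * (A - shift · I)<sup>-1</sup> in the above sense:
 * </p>
 * <pre>{@code
 * SymmLQ solver = new SymmLQ(100, 1.0e-10, true);
 * RealVector x = solver.solve(a, m, b);
 * // events fired during this call report ||P . r||, not ||r||
 * }</pre>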
78 * <p><b><a id="stopcrit">Default stopping criterion</a></b></p>
79 * <p>
80 * A default stopping criterion is implemented. The iterations stop when || rhat
81 * || ≤ δ || Ahat || || xhat ||, where xhat is the current estimate of
82 * the solution of the transformed system, rhat the current estimate of the
83 * corresponding residual, and δ a user-specified tolerance.
84 * </p>
85 * <p><b>Iteration count</b></p>
86 * <p>
87 * In the present context, an iteration should be understood as one evaluation
88 * of the matrix-vector product A · x. The initialization phase therefore
89 * counts as one iteration. If the user requires checks on the symmetry of A,
90 * this entails one further matrix-vector product in the initial phase. This
91 * further product is <em>not</em> accounted for in the iteration count. In
92 * other words, the number of iterations required to reach convergence will be
93 * identical, whether checks have been required or not.
94 * </p>
95 * <p>
96 * The present definition of the iteration count differs from that adopted in
97 * the original FORTRAN code, where the initialization phase was <em>not</em>
98 * taken into account.
99 * </p>
100 * <p><b><a id="initguess">Initial guess of the solution</a></b></p>
101 * <p>
102 * The {@code x} parameter in
103 * <ul>
104 * <li>{@link #solve(RealLinearOperator, RealVector, RealVector)},</li>
105 * <li>{@link #solve(RealLinearOperator, RealLinearOperator, RealVector, RealVector)},</li>
106 * <li>{@link #solveInPlace(RealLinearOperator, RealVector, RealVector)},</li>
107 * <li>{@link #solveInPlace(RealLinearOperator, RealLinearOperator, RealVector, RealVector)},</li>
108 * <li>{@link #solveInPlace(RealLinearOperator, RealLinearOperator, RealVector, RealVector, boolean, double)},</li>
109 * </ul>
110 * should not be considered as an initial guess, as it is set to zero in the
111 * initial phase. If x<sub>0</sub> is known to be a good approximation to x, one
112 * should compute r<sub>0</sub> = b - A · x<sub>0</sub>, solve A · dx = r<sub>0</sub>,
113 * and set x = x<sub>0</sub> + dx, as sketched below.
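 * <p>
 * A sketch of this workaround, assuming {@code x0} holds a known approximation
 * of the solution:
 * </p>
 * <pre>{@code
 * SymmLQ solver = new SymmLQ(100, 1.0e-10, false);
 * RealVector r0 = b.subtract(a.operate(x0)); // residual of the initial guess
 * RealVector dx = solver.solve(a, r0);       // correction: A . dx = r0
 * RealVector x = x0.add(dx);                 // refined estimate
 * }</pre>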
114 *
115 * <p><b><a id="context">Exception context</a></b></p>
116 * <p>
117 * Besides standard {@link DimensionMismatchException}, this class might throw
118 * {@link NonSelfAdjointOperatorException} if the linear operator or the
119 * preconditioner are not symmetric. In this case, the {@link ExceptionContext}
120 * provides more information:
121 * <ul>
122 * <li>key {@code "operator"} points to the offending linear operator, say L,</li>
123 * <li>key {@code "vector1"} points to the first offending vector, say x,</li>
124 * <li>key {@code "vector2"} points to the second offending vector, say y, such
125 * that x<sup>T</sup> · L · y ≠ y<sup>T</sup> · L
126 * · x (within a certain accuracy).</li>
127 * </ul>
128 *
129 * <p>
130 * {@link NonPositiveDefiniteOperatorException} might also be thrown in case the
131 * preconditioner is not positive definite. The relevant keys to the
132 * {@link ExceptionContext} are
133 * <ul>
134 * <li>key {@code "operator"}, which points to the offending linear operator,
135 * say L,</li>
136 * <li>key {@code "vector"}, which points to the offending vector, say x, such
137 * that x<sup>T</sup> · L · x < 0.</li>
138 * </ul>
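 * <p>
 * The sketch below shows one way this context might be inspected; it reuses the
 * {@code solver}, {@code a}, {@code m} and {@code b} placeholders of the
 * previous sketches:
 * </p>
 * <pre>{@code
 * try {
 *     RealVector x = solver.solve(a, m, b);
 * } catch (NonSelfAdjointOperatorException e) {
 *     ExceptionContext context = e.getContext();
 *     RealLinearOperator l = (RealLinearOperator) context.getValue("operator");
 *     RealVector v1 = (RealVector) context.getValue("vector1");
 *     RealVector v2 = (RealVector) context.getValue("vector2");
 *     // report l, v1 and v2; a NonPositiveDefiniteOperatorException is
 *     // handled similarly, with keys "operator" and "vector"
 * }
 * }</pre>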
139 *
140 * <p><b>References</b></p>
141 * <dl>
142 * <dt><a id="PAIG1975">Paige and Saunders (1975)</a></dt>
143 * <dd>C. C. Paige and M. A. Saunders, <a
144 * href="http://www.stanford.edu/group/SOL/software/symmlq/PS75.pdf"><em>
145 * Solution of Sparse Indefinite Systems of Linear Equations</em></a>, SIAM
146 * Journal on Numerical Analysis 12(4): 617-629, 1975</dd>
147 * </dl>
148 *
149 * @since 3.0
150 */
151 public class SymmLQ
152 extends PreconditionedIterativeLinearSolver {
153
154 /*
155 * IMPLEMENTATION NOTES
156 * --------------------
157 * The implementation follows as closely as possible the notations of Paige
158 * and Saunders (1975). Attention must be paid to the fact that some
159 * quantities which are relevant to iteration k can only be computed in
160 * iteration (k+1). Therefore, minute attention must be paid to the index of
161 * each state variable of this algorithm.
162 *
163 * 1. Preconditioning
164 * ---------------
165 * The Lanczos iterations associated with Ahat and bhat read
166 * beta[1] = ||P * b||
167 * v[1] = P * b / beta[1]
168 * beta[k+1] * v[k+1] = Ahat * v[k] - alpha[k] * v[k] - beta[k] * v[k-1]
169 * = P * (A - shift * I) * P' * v[k] - alpha[k] * v[k]
170 * - beta[k] * v[k-1]
171 * Multiplying both sides by P', we get
172 * beta[k+1] * (P' * v)[k+1] = M * (A - shift * I) * (P' * v)[k]
173 * - alpha[k] * (P' * v)[k]
174 * - beta[k] * (P' * v[k-1]),
175 * and
176 * alpha[k+1] = v[k+1]' * Ahat * v[k+1]
177 * = v[k+1]' * P * (A - shift * I) * P' * v[k+1]
178 * = (P' * v)[k+1]' * (A - shift * I) * (P' * v)[k+1].
179 *
180 * In other words, the Lanczos iterations are unchanged, except for the fact
181 * that we really compute (P' * v) instead of v. It can easily be checked
182 * that all other formulas are unchanged. It must be noted that P is never
183 * explicitly used, only matrix-vector products involving M = P' * P are invoked.
184 *
185 * 2. Accounting for the shift parameter
186 * ----------------------------------
187 * This is trivial: each time A.operate(x) is invoked, shift * x must be
188 * subtracted from the result.
189 *
190 * 3. Accounting for the goodb flag
191 * -----------------------------
192 * When goodb is set to true, the component of xL along b is computed
193 * separately. From Paige and Saunders (1975), equation (5.9), we have
194 * wbar[k+1] = s[k] * wbar[k] - c[k] * v[k+1],
195 * wbar[1] = v[1].
196 * Introducing wbar2[k] = wbar[k] - s[1] * ... * s[k-1] * v[1], it can
197 * easily be verified by induction that wbar2 follows the same recursive
198 * relation
199 * wbar2[k+1] = s[k] * wbar2[k] - c[k] * v[k+1],
200 * wbar2[1] = 0,
201 * and we then have
202 * w[k] = c[k] * wbar2[k] + s[k] * v[k+1]
203 * + s[1] * ... * s[k-1] * c[k] * v[1].
204 * Introducing w2[k] = w[k] - s[1] * ... * s[k-1] * c[k] * v[1], we find,
205 * from (5.10)
206 * xL[k] = zeta[1] * w[1] + ... + zeta[k] * w[k]
207 * = zeta[1] * w2[1] + ... + zeta[k] * w2[k]
208 * + (s[1] * c[2] * zeta[2] + ...
209 * + s[1] * ... * s[k-1] * c[k] * zeta[k]) * v[1]
210 * = xL2[k] + bstep[k] * v[1],
211 * where xL2[k] is defined by
212 * xL2[0] = 0,
213 * xL2[k+1] = xL2[k] + zeta[k+1] * w2[k+1],
214 * and bstep is defined by
215 * bstep[1] = 0,
216 * bstep[k] = bstep[k-1] + s[1] * ... * s[k-1] * c[k] * zeta[k].
217 * We also have, from (5.11)
218 * xC[k] = xL[k-1] + zbar[k] * wbar[k]
219 * = xL2[k-1] + zbar[k] * wbar2[k]
220 * + (bstep[k-1] + s[1] * ... * s[k-1] * zbar[k]) * v[1].
221 */
222
223 /**
224 * <p>
225 * A simple container holding the non-final variables used in the
226 * iterations. Making the current state of the solver visible from the
227 * outside is necessary, because during the iterations, {@code x} does not
228 * <em>exactly</em> hold the current estimate of the solution. Indeed,
229 * {@code x} needs in general to be moved from the LQ point to the CG point.
230 * Besides, additional updates must be carried out in case {@code goodb} is
231 * set to {@code true}.
232 * </p>
233 * <p>
234 * In all subsequent comments, the descriptions of the state variables refer
235 * to their values after a call to {@link #update()}. In these comments, k is
236 * the current number of evaluations of matrix-vector products.
237 * </p>
238 */
239 private static final class State {
240 /** The cube root of {@link #MACH_PREC}. */
241 static final double CBRT_MACH_PREC;
242
243 /** The machine precision. */
244 static final double MACH_PREC;
245
246 /** Reference to the linear operator. */
247 private final RealLinearOperator a;
248
249 /** Reference to the right-hand side vector. */
250 private final RealVector b;
251
252 /** {@code true} if symmetry of matrix and preconditioner must be checked. */
253 private final boolean check;
254
255 /**
256 * The value of the custom tolerance δ for the default stopping
257 * criterion.
258 */
259 private final double delta;
260
261 /** The value of beta[k+1]. */
262 private double beta;
263
264 /** The value of beta[1]. */
265 private double beta1;
266
267 /** The value of bstep[k-1]. */
268 private double bstep;
269
270 /** The estimate of the norm of P * rC[k]. */
271 private double cgnorm;
272
273 /** The value of dbar[k+1] = -beta[k+1] * c[k-1]. */
274 private double dbar;
275
276 /**
277 * The value of gamma[k] * zeta[k]. Was called {@code rhs1} in the
278 * initial code.
279 */
280 private double gammaZeta;
281
282 /** The value of gbar[k]. */
283 private double gbar;
284
285 /** The value of max(|alpha[1]|, gamma[1], ..., gamma[k-1]). */
286 private double gmax;
287
288 /** The value of min(|alpha[1]|, gamma[1], ..., gamma[k-1]). */
289 private double gmin;
290
291 /** Copy of the {@code goodb} parameter. */
292 private final boolean goodb;
293
294 /** {@code true} if the default convergence criterion is verified. */
295 private boolean hasConverged;
296
297 /** The estimate of the norm of P * rL[k-1]. */
298 private double lqnorm;
299
300 /** Reference to the preconditioner, M. */
301 private final RealLinearOperator m;
302
303 /**
304 * The value of (-eps[k+1] * zeta[k-1]). Was called {@code rhs2} in the
305 * initial code.
306 */
307 private double minusEpsZeta;
308
309 /** The value of M * b. */
310 private final RealVector mb;
311
312 /** The value of beta[k]. */
313 private double oldb;
314
315 /** The value of beta[k] * M^(-1) * P' * v[k]. */
316 private RealVector r1;
317
318 /** The value of beta[k+1] * M^(-1) * P' * v[k+1]. */
319 private RealVector r2;
320
321 /**
322 * The value of the updated, preconditioned residual P * r. This value is
323 * given by {@code min(}{@link #cgnorm}{@code , }{@link #lqnorm}{@code )}.
324 */
325 private double rnorm;
326
327 /** Copy of the {@code shift} parameter. */
328 private final double shift;
329
330 /** The value of s[1] * ... * s[k-1]. */
331 private double snprod;
332
333 /**
334 * An estimate of the square of the norm of A * V[k], based on Paige and
335 * Saunders (1975), equation (3.3).
336 */
337 private double tnorm;
338
339 /**
340 * The value of P' * wbar[k] or P' * (wbar[k] - s[1] * ... * s[k-1] *
341 * v[1]) if {@code goodb} is {@code true}. Was called {@code w} in the
342 * initial code.
343 */
344 private RealVector wbar;
345
346 /**
347 * A reference to the vector to be updated with the solution. Contains
348 * the value of xL[k-1] if {@code goodb} is {@code false}, (xL[k-1] -
349 * bstep[k-1] * v[1]) otherwise.
350 */
351 private final RealVector xL;
352
353 /** The value of beta[k+1] * P' * v[k+1]. */
354 private RealVector y;
355
356 /** The value of zeta[1]^2 + ... + zeta[k-1]^2. */
357 private double ynorm2;
358
359 /** The value of {@code b == 0} (exact floating-point equality). */
360 private boolean bIsNull;
361
362 static {
363 MACH_PREC = JdkMath.ulp(1.);
364 CBRT_MACH_PREC = JdkMath.cbrt(MACH_PREC);
365 }
366
367 /**
368 * Creates a new instance of this class, initialized to k = 1.
369 *
370 * @param a the linear operator A of the system
371 * @param m the preconditioner, M (can be {@code null})
372 * @param b the right-hand side vector
373 * @param goodb usually {@code false}, except if {@code x} is expected
374 * to contain a large multiple of {@code b}
375 * @param shift the amount to be subtracted from all diagonal elements of
376 * A
377 * @param delta the δ parameter for the default stopping criterion
378 * @param check {@code true} if self-adjointness of both matrix and
379 * preconditioner should be checked
380 */
381 State(final RealLinearOperator a,
382 final RealLinearOperator m,
383 final RealVector b,
384 final boolean goodb,
385 final double shift,
386 final double delta,
387 final boolean check) {
388 this.a = a;
389 this.m = m;
390 this.b = b;
391 this.xL = new ArrayRealVector(b.getDimension());
392 this.goodb = goodb;
393 this.shift = shift;
394 this.mb = m == null ? b : m.operate(b);
395 this.hasConverged = false;
396 this.check = check;
397 this.delta = delta;
398 }
399
400 /**
401 * Performs a symmetry check on the specified linear operator, and throws an
402 * exception in case this check fails. Given a linear operator L, and a
403 * vector x, this method checks that
404 * x' · L · y = y' · L · x
405 * (within a given accuracy), where y = L · x.
406 *
407 * @param l the linear operator L
408 * @param x the candidate vector x
409 * @param y the candidate vector y = L · x
410 * @param z the vector z = L · y
411 * @throws NonSelfAdjointOperatorException when the test fails
412 */
413 private static void checkSymmetry(final RealLinearOperator l,
414 final RealVector x, final RealVector y, final RealVector z)
415 throws NonSelfAdjointOperatorException {
416 final double s = y.dotProduct(y);
417 final double t = x.dotProduct(z);
418 final double epsa = (s + MACH_PREC) * CBRT_MACH_PREC;
419 if (JdkMath.abs(s - t) > epsa) {
420 final NonSelfAdjointOperatorException e;
421 e = new NonSelfAdjointOperatorException();
422 final ExceptionContext context = e.getContext();
423 context.setValue(SymmLQ.OPERATOR, l);
424 context.setValue(SymmLQ.VECTOR1, x);
425 context.setValue(SymmLQ.VECTOR2, y);
426 context.setValue(SymmLQ.THRESHOLD, Double.valueOf(epsa));
427 throw e;
428 }
429 }
430
431 /**
432 * Throws a new {@link NonPositiveDefiniteOperatorException} with
433 * appropriate context.
434 *
435 * @param l the offending linear operator
436 * @param v the offending vector
437 * @throws NonPositiveDefiniteOperatorException in any circumstances
438 */
439 private static void throwNPDLOException(final RealLinearOperator l,
440 final RealVector v) throws NonPositiveDefiniteOperatorException {
441 final NonPositiveDefiniteOperatorException e;
442 e = new NonPositiveDefiniteOperatorException();
443 final ExceptionContext context = e.getContext();
444 context.setValue(OPERATOR, l);
445 context.setValue(VECTOR, v);
446 throw e;
447 }
448
449 /**
450 * A clone of the BLAS {@code DAXPY} function, which carries out the
451 * operation y ← a · x + y. This is for internal use only: no
452 * dimension checks are provided.
453 *
454 * @param a the scalar by which {@code x} is to be multiplied
455 * @param x the vector to be added to {@code y}
456 * @param y the vector to be incremented
457 */
458 private static void daxpy(final double a, final RealVector x,
459 final RealVector y) {
460 final int n = x.getDimension();
461 for (int i = 0; i < n; i++) {
462 y.setEntry(i, a * x.getEntry(i) + y.getEntry(i));
463 }
464 }
465
466 /**
467 * A BLAS-like function, for the operation z ← a · x + b
468 * · y + z. This is for internal use only: no dimension checks are
469 * provided.
470 *
471 * @param a the scalar by which {@code x} is to be multiplied
472 * @param x the first vector to be added to {@code z}
473 * @param b the scalar by which {@code y} is to be multiplied
474 * @param y the second vector to be added to {@code z}
475 * @param z the vector to be incremented
476 */
477 private static void daxpbypz(final double a, final RealVector x,
478 final double b, final RealVector y, final RealVector z) {
479 final int n = z.getDimension();
480 for (int i = 0; i < n; i++) {
481 final double zi;
482 zi = a * x.getEntry(i) + b * y.getEntry(i) + z.getEntry(i);
483 z.setEntry(i, zi);
484 }
485 }
486
487 /**
488 * <p>
489 * Move to the CG point if it seems better. In this version of SYMMLQ,
490 * the convergence tests involve only cgnorm, so we're unlikely to stop
491 * at an LQ point, except if the iteration limit interferes.
492 * </p>
493 * <p>
494 * Additional updates are also carried out in case {@code goodb} is set
495 * to {@code true}.
496 * </p>
497 *
498 * @param x the vector to be updated with the refined value of xL
499 */
500 void refineSolution(final RealVector x) {
501 final int n = this.xL.getDimension();
502 if (lqnorm < cgnorm) {
503 if (!goodb) {
504 x.setSubVector(0, this.xL);
505 } else {
506 final double step = bstep / beta1;
507 for (int i = 0; i < n; i++) {
508 final double bi = mb.getEntry(i);
509 final double xi = this.xL.getEntry(i);
510 x.setEntry(i, xi + step * bi);
511 }
512 }
513 } else {
514 final double anorm = JdkMath.sqrt(tnorm);
515 final double diag = gbar == 0. ? anorm * MACH_PREC : gbar;
516 final double zbar = gammaZeta / diag;
517 final double step = (bstep + snprod * zbar) / beta1;
518 // ynorm = JdkMath.sqrt(ynorm2 + zbar * zbar);
519 if (!goodb) {
520 for (int i = 0; i < n; i++) {
521 final double xi = this.xL.getEntry(i);
522 final double wi = wbar.getEntry(i);
523 x.setEntry(i, xi + zbar * wi);
524 }
525 } else {
526 for (int i = 0; i < n; i++) {
527 final double xi = this.xL.getEntry(i);
528 final double wi = wbar.getEntry(i);
529 final double bi = mb.getEntry(i);
530 x.setEntry(i, xi + zbar * wi + step * bi);
531 }
532 }
533 }
534 }
535
536 /**
537 * Performs the initial phase of the SYMMLQ algorithm. On return, the
538 * values of the state variables of {@code this} object correspond to k =
539 * 1.
540 */
541 void init() {
542 this.xL.set(0.);
543 /*
544 * Set up y for the first Lanczos vector. y and beta1 will be zero
545 * if b = 0.
546 */
547 this.r1 = this.b.copy();
548 this.y = this.m == null ? this.b.copy() : this.m.operate(this.r1);
549 if (this.m != null && this.check) {
550 checkSymmetry(this.m, this.r1, this.y, this.m.operate(this.y));
551 }
552
553 this.beta1 = this.r1.dotProduct(this.y);
554 if (this.beta1 < 0.) {
555 throwNPDLOException(this.m, this.y);
556 }
557 if (this.beta1 == 0.) {
558 /* If b = 0 exactly, stop with x = 0. */
559 this.bIsNull = true;
560 return;
561 }
562 this.bIsNull = false;
563 this.beta1 = JdkMath.sqrt(this.beta1);
564 /* At this point
565 * r1 = b,
566 * y = M * b,
567 * beta1 = beta[1].
568 */
569 final RealVector v = this.y.mapMultiply(1. / this.beta1);
570 this.y = this.a.operate(v);
571 if (this.check) {
572 checkSymmetry(this.a, v, this.y, this.a.operate(this.y));
573 }
574 /*
575 * Set up y for the second Lanczos vector. y and beta will be zero
576 * or very small if b is an eigenvector.
577 */
578 daxpy(-this.shift, v, this.y);
579 final double alpha = v.dotProduct(this.y);
580 daxpy(-alpha / this.beta1, this.r1, this.y);
581 /*
582 * At this point
583 * alpha = alpha[1]
584 * y = beta[2] * M^(-1) * P' * v[2]
585 */
586 /* Make sure r2 will be orthogonal to the first v. */
587 final double vty = v.dotProduct(this.y);
588 final double vtv = v.dotProduct(v);
589 daxpy(-vty / vtv, v, this.y);
590 this.r2 = this.y.copy();
591 if (this.m != null) {
592 this.y = this.m.operate(this.r2);
593 }
594 this.oldb = this.beta1;
595 this.beta = this.r2.dotProduct(this.y);
596 if (this.beta < 0.) {
597 throwNPDLOException(this.m, this.y);
598 }
599 this.beta = JdkMath.sqrt(this.beta);
600 /*
601 * At this point
602 * oldb = beta[1]
603 * beta = beta[2]
604 * y = beta[2] * P' * v[2]
605 * r2 = beta[2] * M^(-1) * P' * v[2]
606 */
607 this.cgnorm = this.beta1;
608 this.gbar = alpha;
609 this.dbar = this.beta;
610 this.gammaZeta = this.beta1;
611 this.minusEpsZeta = 0.;
612 this.bstep = 0.;
613 this.snprod = 1.;
614 this.tnorm = alpha * alpha + this.beta * this.beta;
615 this.ynorm2 = 0.;
616 this.gmax = JdkMath.abs(alpha) + MACH_PREC;
617 this.gmin = this.gmax;
618
619 if (this.goodb) {
620 this.wbar = new ArrayRealVector(this.a.getRowDimension());
621 this.wbar.set(0.);
622 } else {
623 this.wbar = v;
624 }
625 updateNorms();
626 }
627
628 /**
629 * Performs the next iteration of the algorithm. The iteration count
630 * should be incremented prior to calling this method. On return, the
631 * values of the state variables of {@code this} object correspond to the
632 * current iteration count {@code k}.
633 */
634 void update() {
635 final RealVector v = y.mapMultiply(1. / beta);
636 y = a.operate(v);
637 daxpbypz(-shift, v, -beta / oldb, r1, y);
638 final double alpha = v.dotProduct(y);
639 /*
640 * At this point
641 * v = P' * v[k],
642 * y = (A - shift * I) * P' * v[k] - beta[k] * M^(-1) * P' * v[k-1],
643 * alpha = v'[k] * P * (A - shift * I) * P' * v[k]
644 * - beta[k] * v[k]' * P * M^(-1) * P' * v[k-1]
645 * = v'[k] * P * (A - shift * I) * P' * v[k]
646 * - beta[k] * v[k]' * v[k-1]
647 * = alpha[k].
648 */
649 daxpy(-alpha / beta, r2, y);
650 /*
651 * At this point
652 * y = (A - shift * I) * P' * v[k] - alpha[k] * M^(-1) * P' * v[k]
653 * - beta[k] * M^(-1) * P' * v[k-1]
654 * = M^(-1) * P' * (P * (A - shift * I) * P' * v[k] -alpha[k] * v[k]
655 * - beta[k] * v[k-1])
656 * = beta[k+1] * M^(-1) * P' * v[k+1],
657 * from Paige and Saunders (1975), equation (3.2).
658 *
659 * WATCH-IT: the following two lines work only because y is no longer
660 * updated until the end of the present iteration, and is
661 * reinitialized at the beginning of the next iteration.
662 */
663 r1 = r2;
664 r2 = y;
665 if (m != null) {
666 y = m.operate(r2);
667 }
668 oldb = beta;
669 beta = r2.dotProduct(y);
670 if (beta < 0.) {
671 throwNPDLOException(m, y);
672 }
673 beta = JdkMath.sqrt(beta);
674 /*
675 * At this point
676 * r1 = beta[k] * M^(-1) * P' * v[k],
677 * r2 = beta[k+1] * M^(-1) * P' * v[k+1],
678 * y = beta[k+1] * P' * v[k+1],
679 * oldb = beta[k],
680 * beta = beta[k+1].
681 */
682 tnorm += alpha * alpha + oldb * oldb + beta * beta;
683 /*
684 * Compute the next plane rotation for Q. See Paige and Saunders
685 * (1975), equation (5.6), with
686 * gamma = gamma[k-1],
687 * c = c[k-1],
688 * s = s[k-1].
689 */
690 final double gamma = JdkMath.sqrt(gbar * gbar + oldb * oldb);
691 final double c = gbar / gamma;
692 final double s = oldb / gamma;
693 /*
694 * The relations
695 * gbar[k] = s[k-1] * (-c[k-2] * beta[k]) - c[k-1] * alpha[k]
696 * = s[k-1] * dbar[k] - c[k-1] * alpha[k],
697 * delta[k] = c[k-1] * dbar[k] + s[k-1] * alpha[k],
698 * are not stated in Paige and Saunders (1975), but can be retrieved
699 * by expanding the (k, k-1) and (k, k) coefficients of the matrix in
700 * equation (5.5).
701 */
702 final double deltak = c * dbar + s * alpha;
703 gbar = s * dbar - c * alpha;
704 final double eps = s * beta;
705 dbar = -c * beta;
706 final double zeta = gammaZeta / gamma;
707 /*
708 * At this point
709 * gbar = gbar[k]
710 * deltak = delta[k]
711 * eps = eps[k+1]
712 * dbar = dbar[k+1]
713 * zeta = zeta[k-1]
714 */
715 final double zetaC = zeta * c;
716 final double zetaS = zeta * s;
717 final int n = xL.getDimension();
718 for (int i = 0; i < n; i++) {
719 final double xi = xL.getEntry(i);
720 final double vi = v.getEntry(i);
721 final double wi = wbar.getEntry(i);
722 xL.setEntry(i, xi + wi * zetaC + vi * zetaS);
723 wbar.setEntry(i, wi * s - vi * c);
724 }
725 /*
726 * At this point
727 * x = xL[k-1],
728 * ptwbar = P' wbar[k],
729 * see Paige and Saunders (1975), equations (5.9) and (5.10).
730 */
731 bstep += snprod * c * zeta;
732 snprod *= s;
733 gmax = JdkMath.max(gmax, gamma);
734 gmin = JdkMath.min(gmin, gamma);
735 ynorm2 += zeta * zeta;
736 gammaZeta = minusEpsZeta - deltak * zeta;
737 minusEpsZeta = -eps * zeta;
738 /*
739 * At this point
740 * snprod = s[1] * ... * s[k-1],
741 * gmax = max(|alpha[1]|, gamma[1], ..., gamma[k-1]),
742 * gmin = min(|alpha[1]|, gamma[1], ..., gamma[k-1]),
743 * ynorm2 = zeta[1]^2 + ... + zeta[k-1]^2,
744 * gammaZeta = gamma[k] * zeta[k],
745 * minusEpsZeta = -eps[k+1] * zeta[k-1].
746 * The relation for gammaZeta can be retrieved from Paige and
747 * Saunders (1975), equation (5.4a), last line of the vector
748 * gbar[k] * zbar[k] = -eps[k] * zeta[k-2] - delta[k] * zeta[k-1].
749 */
750 updateNorms();
751 }
752
753 /**
754 * Computes the norms of the residuals, and checks for convergence.
755 * Updates {@link #lqnorm} and {@link #cgnorm}.
756 */
757 private void updateNorms() {
758 final double anorm = JdkMath.sqrt(tnorm);
759 final double ynorm = JdkMath.sqrt(ynorm2);
760 final double epsa = anorm * MACH_PREC;
761 final double epsx = anorm * ynorm * MACH_PREC;
762 final double epsr = anorm * ynorm * delta;
763 final double diag = gbar == 0. ? epsa : gbar;
764 lqnorm = JdkMath.sqrt(gammaZeta * gammaZeta +
765 minusEpsZeta * minusEpsZeta);
766 final double qrnorm = snprod * beta1;
767 cgnorm = qrnorm * beta / JdkMath.abs(diag);
768
769 /*
770 * Estimate cond(A). In this version we look at the diagonals of L
771 * in the factorization of the tridiagonal matrix, T = L * Q.
772 * Sometimes, T[k] can be misleadingly ill-conditioned when T[k+1]
773 * is not, so we must be careful not to overestimate acond.
774 */
775 final double acond;
776 if (lqnorm <= cgnorm) {
777 acond = gmax / gmin;
778 } else {
779 acond = gmax / JdkMath.min(gmin, JdkMath.abs(diag));
780 }
781 if (acond * MACH_PREC >= 0.1) {
782 throw new IllConditionedOperatorException(acond);
783 }
784 if (beta1 <= epsx) {
785 /*
786 * x has converged to an eigenvector of A corresponding to the
787 * eigenvalue shift.
788 */
789 throw new SingularOperatorException();
790 }
791 rnorm = JdkMath.min(cgnorm, lqnorm);
792 hasConverged = cgnorm <= epsx || cgnorm <= epsr;
793 }
794
795 /**
796 * Returns {@code true} if the default stopping criterion is fulfilled.
797 *
798 * @return {@code true} if convergence of the iterations has occurred
799 */
800 boolean hasConverged() {
801 return hasConverged;
802 }
803
804 /**
805 * Returns {@code true} if the right-hand side vector is zero exactly.
806 *
807 * @return the boolean value of {@code b == 0}
808 */
809 boolean bEqualsNullVector() {
810 return bIsNull;
811 }
812
813 /**
814 * Returns {@code true} if {@code beta} is essentially zero. This method
815 * is used to check for early stop of the iterations.
816 *
817 * @return {@code true} if {@code beta < }{@link #MACH_PREC}
818 */
819 boolean betaEqualsZero() {
820 return beta < MACH_PREC;
821 }
822
823 /**
824 * Returns the norm of the updated, preconditioned residual.
825 *
826 * @return the norm of the residual, ||P * r||
827 */
828 double getNormOfResidual() {
829 return rnorm;
830 }
831 }
832
833 /** Key for the exception context. */
834 private static final String OPERATOR = "operator";
835
836 /** Key for the exception context. */
837 private static final String THRESHOLD = "threshold";
838
839 /** Key for the exception context. */
840 private static final String VECTOR = "vector";
841
842 /** Key for the exception context. */
843 private static final String VECTOR1 = "vector1";
844
845 /** Key for the exception context. */
846 private static final String VECTOR2 = "vector2";
847
848 /** {@code true} if symmetry of matrix and preconditioner must be checked. */
849 private final boolean check;
850
851 /**
852 * The value of the custom tolerance δ for the default stopping
853 * criterion.
854 */
855 private final double delta;
856
857 /**
858 * Creates a new instance of this class, with <a href="#stopcrit">default
859 * stopping criterion</a>. Note that setting {@code check} to {@code true}
860 * entails an extra matrix-vector product in the initial phase.
861 *
862 * @param maxIterations the maximum number of iterations
863 * @param delta the δ parameter for the default stopping criterion
864 * @param check {@code true} if self-adjointness of both matrix and
865 * preconditioner should be checked
866 */
867 public SymmLQ(final int maxIterations, final double delta,
868 final boolean check) {
869 super(maxIterations);
870 this.delta = delta;
871 this.check = check;
872 }
873
874 /**
875 * Creates a new instance of this class, with <a href="#stopcrit">default
876 * stopping criterion</a> and custom iteration manager. Note that setting
877 * {@code check} to {@code true} entails an extra matrix-vector product in
878 * the initial phase.
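 * <p>
 * A sketch of this variant; it assumes that {@link IterationManager} offers a
 * constructor taking the maximum number of iterations:
 * </p>
 * <pre>{@code
 * IterationManager manager = new IterationManager(250);
 * // listeners may be registered on the manager to monitor the events
 * // fired by this solver
 * SymmLQ solver = new SymmLQ(manager, 1.0e-10, false);
 * }</pre>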
879 *
880 * @param manager the custom iteration manager
881 * @param delta the δ parameter for the default stopping criterion
882 * @param check {@code true} if self-adjointness of both matrix and
883 * preconditioner should be checked
884 */
885 public SymmLQ(final IterationManager manager, final double delta,
886 final boolean check) {
887 super(manager);
888 this.delta = delta;
889 this.check = check;
890 }
891
892 /**
893 * Returns {@code true} if symmetry of the matrix, as well as symmetry and
894 * positive definiteness of the preconditioner, should be checked.
895 *
896 * @return {@code true} if the tests are to be performed
897 */
898 public final boolean getCheck() {
899 return check;
900 }
901
902 /**
903 * {@inheritDoc}
904 *
905 * @throws NonSelfAdjointOperatorException if {@link #getCheck()} is
906 * {@code true}, and {@code a} or {@code m} is not self-adjoint
907 * @throws NonPositiveDefiniteOperatorException if {@code m} is not
908 * positive definite
909 * @throws IllConditionedOperatorException if {@code a} is ill-conditioned
910 */
911 @Override
912 public RealVector solve(final RealLinearOperator a,
913 final RealLinearOperator m, final RealVector b) throws
914 NullArgumentException, NonSquareOperatorException,
915 DimensionMismatchException, MaxCountExceededException,
916 NonSelfAdjointOperatorException, NonPositiveDefiniteOperatorException,
917 IllConditionedOperatorException {
918 NullArgumentException.check(a);
919 final RealVector x = new ArrayRealVector(a.getColumnDimension());
920 return solveInPlace(a, m, b, x, false, 0.);
921 }
922
923 /**
924 * Returns an estimate of the solution to the linear system (A - shift
925 * · I) · x = b.
926 * <p>
927 * If the solution x is expected to contain a large multiple of {@code b}
928 * (as in Rayleigh-quotient iteration), then better precision may be
929 * achieved with {@code goodb} set to {@code true}; this however requires an
930 * extra call to the preconditioner.
931 * </p>
932 * <p>
933 * {@code shift} should be zero if the system A · x = b is to be
934 * solved. Otherwise, it could be an approximation to an eigenvalue of A,
935 * such as the Rayleigh quotient b<sup>T</sup> · A · b /
936 * (b<sup>T</sup> · b) corresponding to the vector b. If b is
937 * sufficiently like an eigenvector corresponding to an eigenvalue near
938 * shift, then the computed x may have very large components. When
939 * normalized, x may be closer to an eigenvector than b.
940 * </p>
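 * <p>
 * A sketch of such a call, with {@code a}, {@code m} and {@code b} as in the
 * class-level sketches and the Rayleigh quotient of b as the shift:
 * </p>
 * <pre>{@code
 * double shift = b.dotProduct(a.operate(b)) / b.dotProduct(b);
 * RealVector x = solver.solve(a, m, b, true, shift); // goodb = true
 * }</pre>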
941 *
942 * @param a the linear operator A of the system
943 * @param m the preconditioner, M (can be {@code null})
944 * @param b the right-hand side vector
945 * @param goodb usually {@code false}, except if {@code x} is expected to
946 * contain a large multiple of {@code b}
947 * @param shift the amount to be subtracted from all diagonal elements of A
948 * @return a reference to {@code x} (shallow copy)
949 * @throws NullArgumentException if one of the parameters is {@code null}
950 * @throws NonSquareOperatorException if {@code a} or {@code m} is not square
951 * @throws DimensionMismatchException if {@code m} or {@code b} have dimensions
952 * inconsistent with {@code a}
953 * @throws MaxCountExceededException at exhaustion of the iteration count,
954 * unless a custom
955 * {@link org.apache.commons.math4.legacy.core.IntegerSequence.Incrementor.MaxCountExceededCallback callback}
956 * has been set at construction of the {@link IterationManager}
957 * @throws NonSelfAdjointOperatorException if {@link #getCheck()} is
958 * {@code true}, and {@code a} or {@code m} is not self-adjoint
959 * @throws NonPositiveDefiniteOperatorException if {@code m} is not
960 * positive definite
961 * @throws IllConditionedOperatorException if {@code a} is ill-conditioned
962 */
963 public RealVector solve(final RealLinearOperator a,
964 final RealLinearOperator m, final RealVector b, final boolean goodb,
965 final double shift) throws NullArgumentException,
966 NonSquareOperatorException, DimensionMismatchException,
967 MaxCountExceededException, NonSelfAdjointOperatorException,
968 NonPositiveDefiniteOperatorException, IllConditionedOperatorException {
969 NullArgumentException.check(a);
970 final RealVector x = new ArrayRealVector(a.getColumnDimension());
971 return solveInPlace(a, m, b, x, goodb, shift);
972 }
973
974 /**
975 * {@inheritDoc}
976 *
977 * @param x not meaningful in this implementation; should not be considered
978 * as an initial guess (<a href="#initguess">more</a>)
979 * @throws NonSelfAdjointOperatorException if {@link #getCheck()} is
980 * {@code true}, and {@code a} or {@code m} is not self-adjoint
981 * @throws NonPositiveDefiniteOperatorException if {@code m} is not positive
982 * definite
983 * @throws IllConditionedOperatorException if {@code a} is ill-conditioned
984 */
985 @Override
986 public RealVector solve(final RealLinearOperator a,
987 final RealLinearOperator m, final RealVector b, final RealVector x)
988 throws NullArgumentException, NonSquareOperatorException,
989 DimensionMismatchException, NonSelfAdjointOperatorException,
990 NonPositiveDefiniteOperatorException, IllConditionedOperatorException,
991 MaxCountExceededException {
992 NullArgumentException.check(x);
993 return solveInPlace(a, m, b, x.copy(), false, 0.);
994 }
995
996 /**
997 * {@inheritDoc}
998 *
999 * @throws NonSelfAdjointOperatorException if {@link #getCheck()} is
1000 * {@code true}, and {@code a} is not self-adjoint
1001 * @throws IllConditionedOperatorException if {@code a} is ill-conditioned
1002 */
1003 @Override
1004 public RealVector solve(final RealLinearOperator a, final RealVector b)
1005 throws NullArgumentException, NonSquareOperatorException,
1006 DimensionMismatchException, NonSelfAdjointOperatorException,
1007 IllConditionedOperatorException, MaxCountExceededException {
1008 NullArgumentException.check(a);
1009 final RealVector x = new ArrayRealVector(a.getColumnDimension());
1010 x.set(0.);
1011 return solveInPlace(a, null, b, x, false, 0.);
1012 }
1013
1014 /**
1015 * Returns the solution to the system (A - shift · I) · x = b.
1016 * <p>
1017 * If the solution x is expected to contain a large multiple of {@code b}
1018 * (as in Rayleigh-quotient iteration), then better precision may be
1019 * achieved with {@code goodb} set to {@code true}.
1020 * </p>
1021 * <p>
1022 * {@code shift} should be zero if the system A · x = b is to be
1023 * solved. Otherwise, it could be an approximation to an eigenvalue of A,
1024 * such as the Rayleigh quotient b<sup>T</sup> · A · b /
1025 * (b<sup>T</sup> · b) corresponding to the vector b. If b is
1026 * sufficiently like an eigenvector corresponding to an eigenvalue near
1027 * shift, then the computed x may have very large components. When
1028 * normalized, x may be closer to an eigenvector than b.
1029 * </p>
1030 *
1031 * @param a the linear operator A of the system
1032 * @param b the right-hand side vector
1033 * @param goodb usually {@code false}, except if {@code x} is expected to
1034 * contain a large multiple of {@code b}
1035 * @param shift the amount to be subtracted from all diagonal elements of A
1036 * @return a reference to {@code x}
1037 * @throws NullArgumentException if one of the parameters is {@code null}
1038 * @throws NonSquareOperatorException if {@code a} is not square
1039 * @throws DimensionMismatchException if {@code b} has dimensions
1040 * inconsistent with {@code a}
1041 * @throws MaxCountExceededException at exhaustion of the iteration count,
1042 * unless a custom
1043 * {@link org.apache.commons.math4.legacy.core.IntegerSequence.Incrementor.MaxCountExceededCallback callback}
1044 * has been set at construction of the {@link IterationManager}
1045 * @throws NonSelfAdjointOperatorException if {@link #getCheck()} is
1046 * {@code true}, and {@code a} is not self-adjoint
1047 * @throws IllConditionedOperatorException if {@code a} is ill-conditioned
1048 */
1049 public RealVector solve(final RealLinearOperator a, final RealVector b,
1050 final boolean goodb, final double shift) throws NullArgumentException,
1051 NonSquareOperatorException, DimensionMismatchException,
1052 NonSelfAdjointOperatorException, IllConditionedOperatorException,
1053 MaxCountExceededException {
1054 NullArgumentException.check(a);
1055 final RealVector x = new ArrayRealVector(a.getColumnDimension());
1056 return solveInPlace(a, null, b, x, goodb, shift);
1057 }
1058
1059 /**
1060 * {@inheritDoc}
1061 *
1062 * @param x not meaningful in this implementation; should not be considered
1063 * as an initial guess (<a href="#initguess">more</a>)
1064 * @throws NonSelfAdjointOperatorException if {@link #getCheck()} is
1065 * {@code true}, and {@code a} is not self-adjoint
1066 * @throws IllConditionedOperatorException if {@code a} is ill-conditioned
1067 */
1068 @Override
1069 public RealVector solve(final RealLinearOperator a, final RealVector b,
1070 final RealVector x) throws NullArgumentException,
1071 NonSquareOperatorException, DimensionMismatchException,
1072 NonSelfAdjointOperatorException, IllConditionedOperatorException,
1073 MaxCountExceededException {
1074 NullArgumentException.check(x);
1075 return solveInPlace(a, null, b, x.copy(), false, 0.);
1076 }
1077
1078 /**
1079 * {@inheritDoc}
1080 *
1081 * @param x the vector to be updated with the solution; {@code x} should
1082 * not be considered as an initial guess (<a href="#initguess">more</a>)
1083 * @throws NonSelfAdjointOperatorException if {@link #getCheck()} is
1084 * {@code true}, and {@code a} or {@code m} is not self-adjoint
1085 * @throws NonPositiveDefiniteOperatorException if {@code m} is not
1086 * positive definite
1087 * @throws IllConditionedOperatorException if {@code a} is ill-conditioned
1088 */
1089 @Override
1090 public RealVector solveInPlace(final RealLinearOperator a,
1091 final RealLinearOperator m, final RealVector b, final RealVector x)
1092 throws NullArgumentException, NonSquareOperatorException,
1093 DimensionMismatchException, NonSelfAdjointOperatorException,
1094 NonPositiveDefiniteOperatorException, IllConditionedOperatorException,
1095 MaxCountExceededException {
1096 return solveInPlace(a, m, b, x, false, 0.);
1097 }
1098
1099 /**
1100 * Returns an estimate of the solution to the linear system (A - shift
1101 * · I) · x = b. The solution is computed in-place.
1102 * <p>
1103 * If the solution x is expected to contain a large multiple of {@code b}
1104 * (as in Rayleigh-quotient iteration), then better precision may be
1105 * achieved with {@code goodb} set to {@code true}; this however requires an
1106 * extra call to the preconditioner.
1107 * </p>
1108 * <p>
1109 * {@code shift} should be zero if the system A · x = b is to be
1110 * solved. Otherwise, it could be an approximation to an eigenvalue of A,
1111 * such as the Rayleigh quotient b<sup>T</sup> · A · b /
1112 * (b<sup>T</sup> · b) corresponding to the vector b. If b is
1113 * sufficiently like an eigenvector corresponding to an eigenvalue near
1114 * shift, then the computed x may have very large components. When
1115 * normalized, x may be closer to an eigenvector than b.
1116 * </p>
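 * <p>
 * A sketch of an in-place call reusing a preallocated vector (remember that no
 * initial guess is read from {@code x}):
 * </p>
 * <pre>{@code
 * RealVector x = new ArrayRealVector(a.getColumnDimension());
 * solver.solveInPlace(a, m, b, x, false, 0.);
 * // x now holds the estimated solution of A . x = b
 * }</pre>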
1117 *
1118 * @param a the linear operator A of the system
1119 * @param m the preconditioner, M (can be {@code null})
1120 * @param b the right-hand side vector
1121 * @param x the vector to be updated with the solution; {@code x} should
1122 * not be considered as an initial guess (<a href="#initguess">more</a>)
1123 * @param goodb usually {@code false}, except if {@code x} is expected to
1124 * contain a large multiple of {@code b}
1125 * @param shift the amount to be subtracted from all diagonal elements of A
1126 * @return a reference to {@code x} (shallow copy).
1127 * @throws NullArgumentException if one of the parameters is {@code null}
1128 * @throws NonSquareOperatorException if {@code a} or {@code m} is not square
1129 * @throws DimensionMismatchException if {@code m}, {@code b} or {@code x}
1130 * have dimensions inconsistent with {@code a}.
1131 * @throws MaxCountExceededException at exhaustion of the iteration count,
1132 * unless a custom
1133 * {@link org.apache.commons.math4.legacy.core.IntegerSequence.Incrementor.MaxCountExceededCallback callback}
1134 * has been set at construction of the {@link IterationManager}
1135 * @throws NonSelfAdjointOperatorException if {@link #getCheck()} is
1136 * {@code true}, and {@code a} or {@code m} is not self-adjoint
1137 * @throws NonPositiveDefiniteOperatorException if {@code m} is not positive
1138 * definite
1139 * @throws IllConditionedOperatorException if {@code a} is ill-conditioned
1140 */
1141 public RealVector solveInPlace(final RealLinearOperator a,
1142 final RealLinearOperator m, final RealVector b,
1143 final RealVector x, final boolean goodb, final double shift)
1144 throws NullArgumentException, NonSquareOperatorException,
1145 DimensionMismatchException, NonSelfAdjointOperatorException,
1146 NonPositiveDefiniteOperatorException, IllConditionedOperatorException,
1147 MaxCountExceededException {
1148 checkParameters(a, m, b, x);
1149
1150 final IterationManager manager = getIterationManager();
1151 /* Initialization counts as an iteration. */
1152 manager.resetIterationCount();
1153 manager.incrementIterationCount();
1154
1155 final State state;
1156 state = new State(a, m, b, goodb, shift, delta, check);
1157 state.init();
1158 state.refineSolution(x);
1159 IterativeLinearSolverEvent event;
1160 event = new DefaultIterativeLinearSolverEvent(this,
1161 manager.getIterations(),
1162 x,
1163 b,
1164 state.getNormOfResidual());
1165 if (state.bEqualsNullVector()) {
1166 /* If b = 0 exactly, stop with x = 0. */
1167 manager.fireTerminationEvent(event);
1168 return x;
1169 }
1170 /* Cause termination if beta is essentially zero. */
1171 final boolean earlyStop;
1172 earlyStop = state.betaEqualsZero() || state.hasConverged();
1173 manager.fireInitializationEvent(event);
1174 if (!earlyStop) {
1175 do {
1176 manager.incrementIterationCount();
1177 event = new DefaultIterativeLinearSolverEvent(this,
1178 manager.getIterations(),
1179 x,
1180 b,
1181 state.getNormOfResidual());
1182 manager.fireIterationStartedEvent(event);
1183 state.update();
1184 state.refineSolution(x);
1185 event = new DefaultIterativeLinearSolverEvent(this,
1186 manager.getIterations(),
1187 x,
1188 b,
1189 state.getNormOfResidual());
1190 manager.fireIterationPerformedEvent(event);
1191 } while (!state.hasConverged());
1192 }
1193 event = new DefaultIterativeLinearSolverEvent(this,
1194 manager.getIterations(),
1195 x,
1196 b,
1197 state.getNormOfResidual());
1198 manager.fireTerminationEvent(event);
1199 return x;
1200 }
1201
1202 /**
1203 * {@inheritDoc}
1204 *
1205 * @param x the vector to be updated with the solution; {@code x} should
1206 * not be considered as an initial guess (<a href="#initguess">more</a>)
1207 * @throws NonSelfAdjointOperatorException if {@link #getCheck()} is
1208 * {@code true}, and {@code a} is not self-adjoint
1209 * @throws IllConditionedOperatorException if {@code a} is ill-conditioned
1210 */
1211 @Override
1212 public RealVector solveInPlace(final RealLinearOperator a,
1213 final RealVector b, final RealVector x) throws NullArgumentException,
1214 NonSquareOperatorException, DimensionMismatchException,
1215 NonSelfAdjointOperatorException, IllConditionedOperatorException,
1216 MaxCountExceededException {
1217 return solveInPlace(a, null, b, x, false, 0.);
1218 }
1219 }