Coverage for /home/martinb/.local/share/virtualenvs/camcops/lib/python3.6/site-packages/scipy/sparse/linalg/matfuncs.py : 17%

1"""
2Sparse matrix functions
3"""
5#
6# Authors: Travis Oliphant, March 2002
7# Anthony Scopatz, August 2012 (Sparse Updates)
8# Jake Vanderplas, August 2012 (Sparse Updates)
9#
11__all__ = ['expm', 'inv']
13import math
15import numpy as np
17import scipy.special
18from scipy.linalg.basic import solve, solve_triangular
20from scipy.sparse.base import isspmatrix
21from scipy.sparse.linalg import spsolve
22from scipy.sparse.sputils import is_pydata_spmatrix
24import scipy.sparse
25import scipy.sparse.linalg
26from scipy.sparse.linalg.interface import LinearOperator
28from ._expm_multiply import _ident_like, _exact_1_norm as _onenorm
31UPPER_TRIANGULAR = 'upper_triangular'


def inv(A):
    """
    Compute the inverse of a sparse matrix

    Parameters
    ----------
    A : (M,M) ndarray or sparse matrix
        square matrix to be inverted

    Returns
    -------
    Ainv : (M,M) ndarray or sparse matrix
        inverse of `A`

    Notes
    -----
    This computes the sparse inverse of `A`. If the inverse of `A` is expected
    to be non-sparse, it will likely be faster to convert `A` to dense and use
    scipy.linalg.inv.

    Examples
    --------
    >>> from scipy.sparse import csc_matrix
    >>> from scipy.sparse.linalg import inv
    >>> A = csc_matrix([[1., 0.], [1., 2.]])
    >>> Ainv = inv(A)
    >>> Ainv
    <2x2 sparse matrix of type '<class 'numpy.float64'>'
        with 3 stored elements in Compressed Sparse Column format>
    >>> A.dot(Ainv)
    <2x2 sparse matrix of type '<class 'numpy.float64'>'
        with 2 stored elements in Compressed Sparse Column format>
    >>> A.dot(Ainv).todense()
    matrix([[ 1.,  0.],
            [ 0.,  1.]])

    .. versionadded:: 0.12.0

    """
    # Check input.
    if not (scipy.sparse.isspmatrix(A) or is_pydata_spmatrix(A)):
        raise TypeError('Input must be a sparse matrix')

    # Invert by solving A @ Ainv = I, which keeps the result sparse.
    I = _ident_like(A)
    Ainv = spsolve(A, I)
    return Ainv


def _onenorm_matrix_power_nnm(A, p):
    """
    Compute the 1-norm of a non-negative integer power of a non-negative matrix.

    Parameters
    ----------
    A : a square ndarray or matrix or sparse matrix
        Input matrix with non-negative entries.
    p : non-negative integer
        The power to which the matrix is to be raised.

    Returns
    -------
    out : float
        The 1-norm of the matrix power p of A.

    """
    # check input
    if int(p) != p or p < 0:
        raise ValueError('expected non-negative integer p')
    p = int(p)
    if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
        raise ValueError('expected A to be like a square matrix')

    # Explicitly make a column vector so that this works when A is a
    # numpy matrix (in addition to ndarray and sparse matrix).
    v = np.ones((A.shape[0], 1), dtype=float)
    M = A.T
    for i in range(p):
        v = M.dot(v)
    return np.max(v)
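
# Worked example (a minimal sketch, not part of the scipy API): for a
# non-negative matrix the 1-norm of A**p is the largest column sum of A**p,
# so the vector iteration above agrees with forming the power explicitly.
#
#     >>> import numpy as np
#     >>> B = np.array([[1., 2.], [0., 3.]])
#     >>> _onenorm_matrix_power_nnm(B, 3)
#     53.0
#     >>> np.linalg.norm(np.linalg.matrix_power(B, 3), 1)
#     53.0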


def _is_upper_triangular(A):
    # This function could possibly be of wider interest.
    if isspmatrix(A):
        lower_part = scipy.sparse.tril(A, -1)
        # Check structural upper triangularity,
        # then coincidental upper triangularity if needed.
        return lower_part.nnz == 0 or lower_part.count_nonzero() == 0
    elif is_pydata_spmatrix(A):
        import sparse
        lower_part = sparse.tril(A, -1)
        return lower_part.nnz == 0
    else:
        return not np.tril(A, -1).any()
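
# Illustration of "structural" vs. "coincidental" upper triangularity (a
# sketch, not part of the original module): a sparse matrix may store explicit
# zeros below the diagonal, in which case nnz counts stored entries while
# count_nonzero() counts entries that are actually nonzero.
#
#     >>> import numpy as np
#     >>> import scipy.sparse
#     >>> data = np.array([1., 2., 3., 0.])
#     >>> rows = np.array([0, 0, 1, 1])
#     >>> cols = np.array([0, 1, 1, 0])
#     >>> M = scipy.sparse.csr_matrix((data, (rows, cols)), shape=(2, 2))
#     >>> low = scipy.sparse.tril(M, -1)
#     >>> int(low.nnz), int(low.count_nonzero())
#     (1, 0)
#     >>> _is_upper_triangular(M)
#     True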


def _smart_matrix_product(A, B, alpha=None, structure=None):
    """
    A matrix product that knows about sparse and structured matrices.

    Parameters
    ----------
    A : 2d ndarray
        First matrix.
    B : 2d ndarray
        Second matrix.
    alpha : float
        The matrix product will be scaled by this constant.
    structure : str, optional
        A string describing the structure of both matrices `A` and `B`.
        Only `upper_triangular` is currently supported.

    Returns
    -------
    M : 2d ndarray
        Matrix product of A and B.

    """
    if len(A.shape) != 2:
        raise ValueError('expected A to be a rectangular matrix')
    if len(B.shape) != 2:
        raise ValueError('expected B to be a rectangular matrix')
    f = None
    if structure == UPPER_TRIANGULAR:
        if (not isspmatrix(A) and not isspmatrix(B)
                and not is_pydata_spmatrix(A) and not is_pydata_spmatrix(B)):
            f, = scipy.linalg.get_blas_funcs(('trmm',), (A, B))
    if f is not None:
        if alpha is None:
            alpha = 1.
        out = f(alpha, A, B)
    else:
        if alpha is None:
            out = A.dot(B)
        else:
            out = alpha * A.dot(B)
    return out
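
# Usage sketch (illustrative only, not part of the original module): for dense
# upper triangular operands the product is routed through the BLAS ``trmm``
# routine, but the result matches a plain (optionally scaled) dot product.
#
#     >>> import numpy as np
#     >>> A = np.triu(np.arange(1., 10.).reshape(3, 3))
#     >>> B = np.triu(np.ones((3, 3)))
#     >>> lhs = _smart_matrix_product(A, B, alpha=2., structure=UPPER_TRIANGULAR)
#     >>> np.allclose(lhs, 2. * A.dot(B))
#     True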


class MatrixPowerOperator(LinearOperator):

    def __init__(self, A, p, structure=None):
        if A.ndim != 2 or A.shape[0] != A.shape[1]:
            raise ValueError('expected A to be like a square matrix')
        if p < 0:
            raise ValueError('expected p to be a non-negative integer')
        self._A = A
        self._p = p
        self._structure = structure
        self.dtype = A.dtype
        self.ndim = A.ndim
        self.shape = A.shape

    def _matvec(self, x):
        for i in range(self._p):
            x = self._A.dot(x)
        return x

    def _rmatvec(self, x):
        A_T = self._A.T
        x = x.ravel()
        for i in range(self._p):
            x = A_T.dot(x)
        return x

    def _matmat(self, X):
        for i in range(self._p):
            X = _smart_matrix_product(self._A, X, structure=self._structure)
        return X

    @property
    def T(self):
        return MatrixPowerOperator(self._A.T, self._p)
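
# Usage sketch (illustrative only, not part of the original module): the
# operator applies A p times without ever forming A**p, which is what lets
# onenormest estimate ||A**p||_1 cheaply.
#
#     >>> import numpy as np
#     >>> A = np.array([[1., 2.], [3., 4.]])
#     >>> op = MatrixPowerOperator(A, 2)
#     >>> np.allclose(op.matvec(np.ones(2)), A.dot(A).dot(np.ones(2)))
#     True
#     >>> est = scipy.sparse.linalg.onenormest(op)  # estimate of ||A**2||_1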


class ProductOperator(LinearOperator):
    """
    For now, this is limited to products of multiple square matrices.
    """

    def __init__(self, *args, **kwargs):
        self._structure = kwargs.get('structure', None)
        for A in args:
            if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
                raise ValueError(
                        'For now, the ProductOperator implementation is '
                        'limited to the product of multiple square matrices.')
        if args:
            n = args[0].shape[0]
            for A in args:
                for d in A.shape:
                    if d != n:
                        raise ValueError(
                                'The square matrices of the ProductOperator '
                                'must all have the same shape.')
            self.shape = (n, n)
            self.ndim = len(self.shape)
        self.dtype = np.find_common_type([x.dtype for x in args], [])
        self._operator_sequence = args

    def _matvec(self, x):
        for A in reversed(self._operator_sequence):
            x = A.dot(x)
        return x

    def _rmatvec(self, x):
        x = x.ravel()
        for A in self._operator_sequence:
            x = A.T.dot(x)
        return x

    def _matmat(self, X):
        for A in reversed(self._operator_sequence):
            X = _smart_matrix_product(A, X, structure=self._structure)
        return X

    @property
    def T(self):
        T_args = [A.T for A in reversed(self._operator_sequence)]
        return ProductOperator(*T_args)
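
# Usage sketch (illustrative only, not part of the original module): matvec
# applies the rightmost factor first, so ProductOperator(A, B) behaves like
# the product A @ B acting on a vector.
#
#     >>> import numpy as np
#     >>> A = np.array([[1., 2.], [0., 1.]])
#     >>> B = np.array([[0., 1.], [1., 0.]])
#     >>> op = ProductOperator(A, B)
#     >>> v = np.array([1., 2.])
#     >>> np.allclose(op.matvec(v), A.dot(B).dot(v))
#     True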


def _onenormest_matrix_power(A, p,
        t=2, itmax=5, compute_v=False, compute_w=False, structure=None):
    """
    Efficiently estimate the 1-norm of A^p.

    Parameters
    ----------
    A : ndarray
        Matrix whose 1-norm of a power is to be computed.
    p : int
        Non-negative integer power.
    t : int, optional
        A positive parameter controlling the tradeoff between
        accuracy versus time and memory usage.
        Larger values take longer and use more memory
        but give more accurate output.
    itmax : int, optional
        Use at most this many iterations.
    compute_v : bool, optional
        Request a norm-maximizing linear operator input vector if True.
    compute_w : bool, optional
        Request a norm-maximizing linear operator output vector if True.

    Returns
    -------
    est : float
        An underestimate of the 1-norm of the sparse matrix.
    v : ndarray, optional
        The vector such that ||Av||_1 == est*||v||_1.
        It can be thought of as an input to the linear operator
        that gives an output with particularly large norm.
    w : ndarray, optional
        The vector Av which has relatively large 1-norm.
        It can be thought of as an output of the linear operator
        that is relatively large in norm compared to the input.

    """
    return scipy.sparse.linalg.onenormest(
            MatrixPowerOperator(A, p, structure=structure))


def _onenormest_product(operator_seq,
        t=2, itmax=5, compute_v=False, compute_w=False, structure=None):
    """
    Efficiently estimate the 1-norm of the matrix product of the args.

    Parameters
    ----------
    operator_seq : linear operator sequence
        Matrices whose 1-norm of product is to be computed.
    t : int, optional
        A positive parameter controlling the tradeoff between
        accuracy versus time and memory usage.
        Larger values take longer and use more memory
        but give more accurate output.
    itmax : int, optional
        Use at most this many iterations.
    compute_v : bool, optional
        Request a norm-maximizing linear operator input vector if True.
    compute_w : bool, optional
        Request a norm-maximizing linear operator output vector if True.
    structure : str, optional
        A string describing the structure of all operators.
        Only `upper_triangular` is currently supported.

    Returns
    -------
    est : float
        An underestimate of the 1-norm of the sparse matrix.
    v : ndarray, optional
        The vector such that ||Av||_1 == est*||v||_1.
        It can be thought of as an input to the linear operator
        that gives an output with particularly large norm.
    w : ndarray, optional
        The vector Av which has relatively large 1-norm.
        It can be thought of as an output of the linear operator
        that is relatively large in norm compared to the input.

    """
    return scipy.sparse.linalg.onenormest(
            ProductOperator(*operator_seq, structure=structure))


class _ExpmPadeHelper(object):
    """
    Help lazily evaluate a matrix exponential.

    The idea is to not do more work than we need for high expm precision,
    so we lazily compute matrix powers and store or precompute
    other properties of the matrix.

    """

    def __init__(self, A, structure=None, use_exact_onenorm=False):
        """
        Initialize the object.

        Parameters
        ----------
        A : a dense or sparse square numpy matrix or ndarray
            The matrix to be exponentiated.
        structure : str, optional
            A string describing the structure of matrix `A`.
            Only `upper_triangular` is currently supported.
        use_exact_onenorm : bool, optional
            If True then only the exact one-norm of matrix powers and products
            will be used. Otherwise, the one-norm of powers and products
            may initially be estimated.
        """
        self.A = A
        self._A2 = None
        self._A4 = None
        self._A6 = None
        self._A8 = None
        self._A10 = None
        self._d4_exact = None
        self._d6_exact = None
        self._d8_exact = None
        self._d10_exact = None
        self._d4_approx = None
        self._d6_approx = None
        self._d8_approx = None
        self._d10_approx = None
        self.ident = _ident_like(A)
        self.structure = structure
        self.use_exact_onenorm = use_exact_onenorm

    @property
    def A2(self):
        if self._A2 is None:
            self._A2 = _smart_matrix_product(
                    self.A, self.A, structure=self.structure)
        return self._A2

    @property
    def A4(self):
        if self._A4 is None:
            self._A4 = _smart_matrix_product(
                    self.A2, self.A2, structure=self.structure)
        return self._A4

    @property
    def A6(self):
        if self._A6 is None:
            self._A6 = _smart_matrix_product(
                    self.A4, self.A2, structure=self.structure)
        return self._A6

    @property
    def A8(self):
        if self._A8 is None:
            self._A8 = _smart_matrix_product(
                    self.A6, self.A2, structure=self.structure)
        return self._A8

    @property
    def A10(self):
        if self._A10 is None:
            self._A10 = _smart_matrix_product(
                    self.A4, self.A6, structure=self.structure)
        return self._A10

    @property
    def d4_tight(self):
        if self._d4_exact is None:
            self._d4_exact = _onenorm(self.A4)**(1/4.)
        return self._d4_exact

    @property
    def d6_tight(self):
        if self._d6_exact is None:
            self._d6_exact = _onenorm(self.A6)**(1/6.)
        return self._d6_exact

    @property
    def d8_tight(self):
        if self._d8_exact is None:
            self._d8_exact = _onenorm(self.A8)**(1/8.)
        return self._d8_exact

    @property
    def d10_tight(self):
        if self._d10_exact is None:
            self._d10_exact = _onenorm(self.A10)**(1/10.)
        return self._d10_exact

    @property
    def d4_loose(self):
        if self.use_exact_onenorm:
            return self.d4_tight
        if self._d4_exact is not None:
            return self._d4_exact
        else:
            if self._d4_approx is None:
                self._d4_approx = _onenormest_matrix_power(self.A2, 2,
                        structure=self.structure)**(1/4.)
            return self._d4_approx

    @property
    def d6_loose(self):
        if self.use_exact_onenorm:
            return self.d6_tight
        if self._d6_exact is not None:
            return self._d6_exact
        else:
            if self._d6_approx is None:
                self._d6_approx = _onenormest_matrix_power(self.A2, 3,
                        structure=self.structure)**(1/6.)
            return self._d6_approx

    @property
    def d8_loose(self):
        if self.use_exact_onenorm:
            return self.d8_tight
        if self._d8_exact is not None:
            return self._d8_exact
        else:
            if self._d8_approx is None:
                self._d8_approx = _onenormest_matrix_power(self.A4, 2,
                        structure=self.structure)**(1/8.)
            return self._d8_approx

    @property
    def d10_loose(self):
        if self.use_exact_onenorm:
            return self.d10_tight
        if self._d10_exact is not None:
            return self._d10_exact
        else:
            if self._d10_approx is None:
                self._d10_approx = _onenormest_product((self.A4, self.A6),
                        structure=self.structure)**(1/10.)
            return self._d10_approx

    def pade3(self):
        b = (120., 60., 12., 1.)
        U = _smart_matrix_product(self.A,
                b[3]*self.A2 + b[1]*self.ident,
                structure=self.structure)
        V = b[2]*self.A2 + b[0]*self.ident
        return U, V

    def pade5(self):
        b = (30240., 15120., 3360., 420., 30., 1.)
        U = _smart_matrix_product(self.A,
                b[5]*self.A4 + b[3]*self.A2 + b[1]*self.ident,
                structure=self.structure)
        V = b[4]*self.A4 + b[2]*self.A2 + b[0]*self.ident
        return U, V

    def pade7(self):
        b = (17297280., 8648640., 1995840., 277200., 25200., 1512., 56., 1.)
        U = _smart_matrix_product(self.A,
                b[7]*self.A6 + b[5]*self.A4 + b[3]*self.A2 + b[1]*self.ident,
                structure=self.structure)
        V = b[6]*self.A6 + b[4]*self.A4 + b[2]*self.A2 + b[0]*self.ident
        return U, V

    def pade9(self):
        b = (17643225600., 8821612800., 2075673600., 302702400., 30270240.,
                2162160., 110880., 3960., 90., 1.)
        U = _smart_matrix_product(self.A,
                (b[9]*self.A8 + b[7]*self.A6 + b[5]*self.A4 +
                    b[3]*self.A2 + b[1]*self.ident),
                structure=self.structure)
        V = (b[8]*self.A8 + b[6]*self.A6 + b[4]*self.A4 +
                b[2]*self.A2 + b[0]*self.ident)
        return U, V

    def pade13_scaled(self, s):
        b = (64764752532480000., 32382376266240000., 7771770303897600.,
                1187353796428800., 129060195264000., 10559470521600.,
                670442572800., 33522128640., 1323241920., 40840800., 960960.,
                16380., 182., 1.)
        B = self.A * 2**-s
        B2 = self.A2 * 2**(-2*s)
        B4 = self.A4 * 2**(-4*s)
        B6 = self.A6 * 2**(-6*s)
        U2 = _smart_matrix_product(B6,
                b[13]*B6 + b[11]*B4 + b[9]*B2,
                structure=self.structure)
        U = _smart_matrix_product(B,
                (U2 + b[7]*B6 + b[5]*B4 +
                    b[3]*B2 + b[1]*self.ident),
                structure=self.structure)
        V2 = _smart_matrix_product(B6,
                b[12]*B6 + b[10]*B4 + b[8]*B2,
                structure=self.structure)
        V = V2 + b[6]*B6 + b[4]*B4 + b[2]*B2 + b[0]*self.ident
        return U, V
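
# Where the b tuples come from (a sketch, not part of the original module):
# they are the numerator coefficients of the [m/m] Pade approximant to exp(x),
# b[j] = (2m - j)! / ((m - j)! * j!); the denominator uses the same
# coefficients with alternating signs, which is why each pade* method only
# needs to assemble U (odd-power terms) and V (even-power terms).
#
#     >>> from math import factorial
#     >>> m = 3
#     >>> tuple(float(factorial(2*m - j) // (factorial(m - j) * factorial(j)))
#     ...       for j in range(m + 1))
#     (120.0, 60.0, 12.0, 1.0)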


def expm(A):
    """
    Compute the matrix exponential using Pade approximation.

    Parameters
    ----------
    A : (M,M) array_like or sparse matrix
        2D Array or Matrix (sparse or dense) to be exponentiated

    Returns
    -------
    expA : (M,M) ndarray
        Matrix exponential of `A`

    Notes
    -----
    This is algorithm (6.1) of [1]_, which is a simplification of
    algorithm (5.1).

    .. versionadded:: 0.12.0

    References
    ----------
    .. [1] Awad H. Al-Mohy and Nicholas J. Higham (2009)
           "A New Scaling and Squaring Algorithm for the Matrix Exponential."
           SIAM Journal on Matrix Analysis and Applications.
           31 (3). pp. 970-989. ISSN 1095-7162

    Examples
    --------
    >>> from scipy.sparse import csc_matrix
    >>> from scipy.sparse.linalg import expm
    >>> A = csc_matrix([[1, 0, 0], [0, 2, 0], [0, 0, 3]])
    >>> A.todense()
    matrix([[1, 0, 0],
            [0, 2, 0],
            [0, 0, 3]], dtype=int64)
    >>> Aexp = expm(A)
    >>> Aexp
    <3x3 sparse matrix of type '<class 'numpy.float64'>'
        with 3 stored elements in Compressed Sparse Column format>
    >>> Aexp.todense()
    matrix([[  2.71828183,   0.        ,   0.        ],
            [  0.        ,   7.3890561 ,   0.        ],
            [  0.        ,   0.        ,  20.08553692]])
    """
    return _expm(A, use_exact_onenorm='auto')


def _expm(A, use_exact_onenorm):
    # Core of expm, separated to allow testing exact and approximate
    # algorithms.

    # Avoid indiscriminate asarray() to allow sparse or other strange arrays.
    if isinstance(A, (list, tuple, np.matrix)):
        A = np.asarray(A)
    if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
        raise ValueError('expected a square matrix')

    # Gracefully handle size-0 input, taking care to preserve the sparse class.
    if A.shape == (0, 0):
        out = np.zeros([0, 0], dtype=A.dtype)
        if isspmatrix(A) or is_pydata_spmatrix(A):
            return A.__class__(out)
        return out

    # Trivial case
    if A.shape == (1, 1):
        out = [[np.exp(A[0, 0])]]

        # Avoid indiscriminate casting to ndarray to
        # allow for sparse or other strange arrays
        if isspmatrix(A) or is_pydata_spmatrix(A):
            return A.__class__(out)

        return np.array(out)

    # Ensure input is of float type, to avoid integer overflows etc.
    if ((isinstance(A, np.ndarray) or isspmatrix(A) or is_pydata_spmatrix(A))
            and not np.issubdtype(A.dtype, np.inexact)):
        A = A.astype(float)

    # Detect upper triangularity.
    structure = UPPER_TRIANGULAR if _is_upper_triangular(A) else None

    if use_exact_onenorm == "auto":
        # Hardcode a matrix order threshold for exact vs. estimated one-norms.
        use_exact_onenorm = A.shape[0] < 200

    # Track functions of A to help compute the matrix exponential.
    h = _ExpmPadeHelper(
            A, structure=structure, use_exact_onenorm=use_exact_onenorm)

    # Try Pade order 3.
    eta_1 = max(h.d4_loose, h.d6_loose)
    if eta_1 < 1.495585217958292e-002 and _ell(h.A, 3) == 0:
        U, V = h.pade3()
        return _solve_P_Q(U, V, structure=structure)

    # Try Pade order 5.
    eta_2 = max(h.d4_tight, h.d6_loose)
    if eta_2 < 2.539398330063230e-001 and _ell(h.A, 5) == 0:
        U, V = h.pade5()
        return _solve_P_Q(U, V, structure=structure)

    # Try Pade orders 7 and 9.
    eta_3 = max(h.d6_tight, h.d8_loose)
    if eta_3 < 9.504178996162932e-001 and _ell(h.A, 7) == 0:
        U, V = h.pade7()
        return _solve_P_Q(U, V, structure=structure)
    if eta_3 < 2.097847961257068e+000 and _ell(h.A, 9) == 0:
        U, V = h.pade9()
        return _solve_P_Q(U, V, structure=structure)

    # Use Pade order 13.
    eta_4 = max(h.d8_loose, h.d10_loose)
    eta_5 = min(eta_3, eta_4)
    theta_13 = 4.25

    # Choose smallest s>=0 such that 2**(-s) eta_5 <= theta_13
    if eta_5 == 0:
        # Nilpotent special case
        s = 0
    else:
        s = max(int(np.ceil(np.log2(eta_5 / theta_13))), 0)
    s = s + _ell(2**-s * h.A, 13)
    U, V = h.pade13_scaled(s)
    X = _solve_P_Q(U, V, structure=structure)
    if structure == UPPER_TRIANGULAR:
        # Invoke Code Fragment 2.1.
        X = _fragment_2_1(X, h.A, s)
    else:
        # X = r_13(A)^(2^s) by repeated squaring.
        for i in range(s):
            X = X.dot(X)
    return X
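
# Worked example of the scaling step (a sketch, not part of the original
# module): with theta_13 = 4.25, a matrix whose eta_5 estimate is 100 needs
# s = ceil(log2(100 / 4.25)) = 5 halvings, since 2**-5 * 100 = 3.125 <= 4.25;
# the Pade result is then squared s times (plus any correction from _ell).
#
#     >>> import numpy as np
#     >>> eta_5, theta_13 = 100.0, 4.25
#     >>> s = max(int(np.ceil(np.log2(eta_5 / theta_13))), 0)
#     >>> s, eta_5 * 2.0**-s
#     (5, 3.125)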


def _solve_P_Q(U, V, structure=None):
    """
    A helper function for expm_2009.

    Parameters
    ----------
    U : ndarray
        Pade numerator.
    V : ndarray
        Pade denominator.
    structure : str, optional
        A string describing the structure of both matrices `U` and `V`.
        Only `upper_triangular` is currently supported.

    Notes
    -----
    The `structure` argument is inspired by similar args
    for theano and cvxopt functions.

    """
    P = U + V
    Q = -U + V
    if isspmatrix(U) or is_pydata_spmatrix(U):
        return spsolve(Q, P)
    elif structure is None:
        return solve(Q, P)
    elif structure == UPPER_TRIANGULAR:
        return solve_triangular(Q, P)
    else:
        raise ValueError('unsupported matrix structure: ' + str(structure))
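
# Relationship to the Pade approximant (a sketch, not part of the original
# module): the pade* helpers return U as the odd-power terms and V as the
# even-power terms, so the numerator is p_m(A) = U + V, the denominator is
# q_m(A) = p_m(-A) = -U + V, and the solve above yields X = r_m(A) ~= exp(A).
#
#     >>> import numpy as np
#     >>> from scipy.linalg import expm as dense_expm
#     >>> A = np.diag([0.01, 0.02])
#     >>> h = _ExpmPadeHelper(A)
#     >>> U, V = h.pade3()
#     >>> np.allclose(_solve_P_Q(U, V), dense_expm(A), atol=1e-12)
#     True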


def _exp_sinch(a, x):
    """
    Stably evaluate exp(a)*sinh(x)/x

    Notes
    -----
    The strategy of falling back to a sixth order Taylor expansion
    was suggested by the Spallation Neutron Source docs
    which were found on the internet by google search.
    http://www.ornl.gov/~t6p/resources/xal/javadoc/gov/sns/tools/math/ElementaryFunction.html
    The details of the cutoff point and the Horner-like evaluation
    were picked without reference to anything in particular.

    Note that sinch is not currently implemented in scipy.special,
    whereas the "engineer's" definition of sinc is implemented.
    The implementation of sinc involves a scaling factor of pi
    that distinguishes it from the "mathematician's" version of sinc.

    """

    # If x is small then use sixth order Taylor expansion.
    # How small is small? I am using the point where the relative error
    # of the approximation is less than 1e-14.
    # If x is large then directly evaluate sinh(x) / x.
    if abs(x) < 0.0135:
        x2 = x*x
        return np.exp(a) * (1 + (x2/6.)*(1 + (x2/20.)*(1 + (x2/42.))))
    else:
        return (np.exp(a + x) - np.exp(a - x)) / (2*x)
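
# Sanity check (a sketch, not part of the original module): even below the
# 0.0135 cutoff the truncated Taylor series and the direct formula agree to
# roughly machine precision, which is why the branch point is safe.
#
#     >>> import numpy as np
#     >>> a, x = 0.3, 1e-3
#     >>> taylor = np.exp(a) * (1 + (x*x/6.)*(1 + (x*x/20.)*(1 + (x*x/42.))))
#     >>> direct = (np.exp(a + x) - np.exp(a - x)) / (2*x)
#     >>> abs(taylor - direct) < 1e-12
#     True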


def _eq_10_42(lam_1, lam_2, t_12):
    """
    Equation (10.42) of Functions of Matrices: Theory and Computation.

    Notes
    -----
    This is a helper function for _fragment_2_1 of expm_2009.
    Equation (10.42) is on page 251 in the section on Schur algorithms.
    In particular, section 10.4.3 explains the Schur-Parlett algorithm.

    expm([[lam_1, t_12], [0, lam_2]])
    =
    [[exp(lam_1), t_12*exp((lam_1 + lam_2)/2)*sinch((lam_1 - lam_2)/2)],
     [0, exp(lam_2)]]

    """

    # The plain formula t_12 * (exp(lam_2) - exp(lam_1)) / (lam_2 - lam_1)
    # apparently suffers from cancellation, according to Higham's textbook.
    # A nice implementation of sinch, defined as sinh(x)/x,
    # will apparently work around the cancellation.
    a = 0.5 * (lam_1 + lam_2)
    b = 0.5 * (lam_1 - lam_2)
    return t_12 * _exp_sinch(a, b)
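
# Verification sketch (not part of the original module): for an upper
# triangular 2x2 block, the (1, 2) entry of the matrix exponential matches
# the closed-form superdiagonal expression used here.
#
#     >>> import numpy as np
#     >>> from scipy.linalg import expm as dense_expm
#     >>> lam_1, lam_2, t_12 = 0.3, 0.5, 2.0
#     >>> T = np.array([[lam_1, t_12], [0.0, lam_2]])
#     >>> np.allclose(dense_expm(T)[0, 1], _eq_10_42(lam_1, lam_2, t_12))
#     True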


def _fragment_2_1(X, T, s):
    """
    A helper function for expm_2009.

    Notes
    -----
    The argument X is modified in-place, but this modification is not the same
    as the returned value of the function.
    This function also takes pains to do things in ways that are compatible
    with sparse matrices, for example by avoiding fancy indexing
    and by using methods of the matrices whenever possible instead of
    using functions of the numpy or scipy libraries themselves.

    """
    # Form X = r_m(2^-s T)
    # Replace diag(X) by exp(2^-s diag(T)).
    n = X.shape[0]
    diag_T = np.ravel(T.diagonal().copy())

    # Replace diag(X) by exp(2^-s diag(T)).
    scale = 2 ** -s
    exp_diag = np.exp(scale * diag_T)
    for k in range(n):
        X[k, k] = exp_diag[k]

    for i in range(s-1, -1, -1):
        X = X.dot(X)

        # Replace diag(X) by exp(2^-i diag(T)).
        scale = 2 ** -i
        exp_diag = np.exp(scale * diag_T)
        for k in range(n):
            X[k, k] = exp_diag[k]

        # Replace (first) superdiagonal of X by explicit formula
        # for superdiagonal of exp(2^-i T) from Eq (10.42) of
        # the author's 2008 textbook
        # Functions of Matrices: Theory and Computation.
        for k in range(n-1):
            lam_1 = scale * diag_T[k]
            lam_2 = scale * diag_T[k+1]
            t_12 = scale * T[k, k+1]
            value = _eq_10_42(lam_1, lam_2, t_12)
            X[k, k+1] = value

    # Return the updated X matrix.
    return X


def _ell(A, m):
    """
    A helper function for expm_2009.

    Parameters
    ----------
    A : linear operator
        A linear operator whose norm of power we care about.
    m : int
        The power of the linear operator

    Returns
    -------
    value : int
        A value related to a bound.

    """
    if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
        raise ValueError('expected A to be like a square matrix')

    # The c_i are explained in (2.2) and (2.6) of the 2005 expm paper.
    # They are coefficients of terms of a generating function series expansion.
    choose_2m_m = scipy.special.comb(2*m, m, exact=True)
    abs_c_recip = float(choose_2m_m * math.factorial(2*m + 1))

    # This is explained after Eq. (1.2) of the 2009 expm paper.
    # It is the "unit roundoff" of IEEE double precision arithmetic.
    u = 2**-53

    # Compute the one-norm of matrix power p of abs(A).
    A_abs_onenorm = _onenorm_matrix_power_nnm(abs(A), 2*m + 1)

    # Treat zero norm as a special case.
    if not A_abs_onenorm:
        return 0

    alpha = A_abs_onenorm / (_onenorm(A) * abs_c_recip)
    log2_alpha_div_u = np.log2(alpha/u)
    value = int(np.ceil(log2_alpha_div_u / (2 * m)))
    return max(value, 0)
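
# Worked values (a sketch, not part of the original module): for m = 3 the
# reciprocal coefficient magnitude is C(6, 3) * 7! = 20 * 5040 = 100800, and
# the returned value is roughly the number of extra scaling steps needed so
# that the truncation-error bound alpha drops below the unit roundoff u.
#
#     >>> import math, scipy.special
#     >>> scipy.special.comb(6, 3, exact=True) * math.factorial(7)
#     100800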