1"""Routines for numerical differentiation."""
3import numpy as np
4from numpy.linalg import norm
6from scipy.sparse.linalg import LinearOperator
7from ..sparse import issparse, csc_matrix, csr_matrix, coo_matrix, find
8from ._group_columns import group_dense, group_sparse
10EPS = np.finfo(np.float64).eps


def _adjust_scheme_to_bounds(x0, h, num_steps, scheme, lb, ub):
    """Adjust final difference scheme to the presence of bounds.

    Parameters
    ----------
    x0 : ndarray, shape (n,)
        Point at which we wish to estimate derivative.
    h : ndarray, shape (n,)
        Desired absolute finite difference steps.
    num_steps : int
        Number of `h` steps in one direction required to implement finite
        difference scheme. For example, 2 means that we need to evaluate
        f(x0 + 2 * h) or f(x0 - 2 * h).
    scheme : {'1-sided', '2-sided'}
        Whether steps in one or both directions are required. In other
        words, '1-sided' applies to forward and backward schemes, while
        '2-sided' applies to center schemes.
    lb : ndarray, shape (n,)
        Lower bounds on independent variables.
    ub : ndarray, shape (n,)
        Upper bounds on independent variables.

    Returns
    -------
    h_adjusted : ndarray, shape (n,)
        Adjusted absolute step sizes. Step size decreases only if a sign
        flip or switching to a one-sided scheme doesn't allow taking a
        full step.
    use_one_sided : ndarray of bool, shape (n,)
        Whether to switch to a one-sided scheme. Informative only for
        ``scheme='2-sided'``.
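
    Examples
    --------
    A minimal sketch (array formatting may vary across NumPy versions):
    with the first variable sitting on its lower bound and the second
    close to its upper bound, a full central step does not fit, so both
    entries switch to one-sided steps and the second step flips sign.

    >>> import numpy as np
    >>> x0 = np.array([0.0, 0.9])
    >>> h = np.array([0.5, 0.5])
    >>> _adjust_scheme_to_bounds(x0, h, 1, '2-sided', np.zeros(2), np.ones(2))
    (array([ 0.5 , -0.45]), array([ True,  True]))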
43 """
44 if scheme == '1-sided':
45 use_one_sided = np.ones_like(h, dtype=bool)
46 elif scheme == '2-sided':
47 h = np.abs(h)
48 use_one_sided = np.zeros_like(h, dtype=bool)
49 else:
50 raise ValueError("`scheme` must be '1-sided' or '2-sided'.")
52 if np.all((lb == -np.inf) & (ub == np.inf)):
53 return h, use_one_sided
55 h_total = h * num_steps
56 h_adjusted = h.copy()
58 lower_dist = x0 - lb
59 upper_dist = ub - x0
61 if scheme == '1-sided':
62 x = x0 + h_total
63 violated = (x < lb) | (x > ub)
64 fitting = np.abs(h_total) <= np.maximum(lower_dist, upper_dist)
65 h_adjusted[violated & fitting] *= -1
67 forward = (upper_dist >= lower_dist) & ~fitting
68 h_adjusted[forward] = upper_dist[forward] / num_steps
69 backward = (upper_dist < lower_dist) & ~fitting
70 h_adjusted[backward] = -lower_dist[backward] / num_steps
71 elif scheme == '2-sided':
72 central = (lower_dist >= h_total) & (upper_dist >= h_total)
74 forward = (upper_dist >= lower_dist) & ~central
75 h_adjusted[forward] = np.minimum(
76 h[forward], 0.5 * upper_dist[forward] / num_steps)
77 use_one_sided[forward] = True
79 backward = (upper_dist < lower_dist) & ~central
80 h_adjusted[backward] = -np.minimum(
81 h[backward], 0.5 * lower_dist[backward] / num_steps)
82 use_one_sided[backward] = True
84 min_dist = np.minimum(upper_dist, lower_dist) / num_steps
85 adjusted_central = (~central & (np.abs(h_adjusted) <= min_dist))
86 h_adjusted[adjusted_central] = min_dist[adjusted_central]
87 use_one_sided[adjusted_central] = False
89 return h_adjusted, use_one_sided


relative_step = {"2-point": EPS**0.5,
                 "3-point": EPS**(1/3),
                 "cs": EPS**0.5}


def _compute_absolute_step(rel_step, x0, method):
    """
    Compute an absolute step from a relative step for a finite difference
    calculation.

    Parameters
    ----------
    rel_step : None or array_like
        Relative step for the finite difference calculation.
    x0 : np.ndarray
        Parameter vector.
    method : {'2-point', '3-point', 'cs'}
        Finite difference method.
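
    Examples
    --------
    A minimal sketch (float formatting may vary across NumPy versions):
    the step is relative for components larger than 1 in magnitude,
    effectively constant for small ones, and carries the sign of ``x0``.

    >>> import numpy as np
    >>> _compute_absolute_step(None, np.array([1.0, -1e4]), '2-point')
    array([ 1.49011612e-08, -1.49011612e-04])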
109 """
110 if rel_step is None:
111 rel_step = relative_step[method]
112 sign_x0 = (x0 >= 0).astype(float) * 2 - 1
113 return rel_step * sign_x0 * np.maximum(1.0, np.abs(x0))


def _prepare_bounds(bounds, x0):
    """
    Prepare new-style bounds from a two-tuple specifying the lower and
    upper limits for values in x0. If a value is not bounded then the
    lower/upper bound is expected to be -np.inf/np.inf.

    Examples
    --------
    >>> _prepare_bounds([(0, 1, 2), (1, 2, np.inf)], [0.5, 1.5, 2.5])
    (array([0., 1., 2.]), array([ 1.,  2., inf]))
    """
    lb, ub = [np.asarray(b, dtype=float) for b in bounds]
    if lb.ndim == 0:
        lb = np.resize(lb, x0.shape)

    if ub.ndim == 0:
        ub = np.resize(ub, x0.shape)

    return lb, ub


def group_columns(A, order=0):
    """Group columns of a 2-D matrix for sparse finite differencing [1]_.

    Two columns are in the same group if in each row at least one of them
    has zero. A greedy sequential algorithm is used to construct groups.

    Parameters
    ----------
    A : array_like or sparse matrix, shape (m, n)
        Matrix of which to group columns.
    order : int, iterable of int with shape (n,) or None
        Permutation array which defines the order of columns enumeration.
        If int or None, a random permutation is used, with `order` used as
        a random seed. Default is 0, that is, use a random permutation but
        guarantee repeatability.

    Returns
    -------
    groups : ndarray of int, shape (n,)
        Contains values from 0 to n_groups-1, where n_groups is the number
        of found groups. Each value ``groups[i]`` is the index of the group
        to which the ith column is assigned. The procedure is helpful only
        if n_groups is significantly less than n.

    References
    ----------
    .. [1] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of
           sparse Jacobian matrices", Journal of the Institute of Mathematics
           and its Applications, 13 (1974), pp. 117-120.
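
    Examples
    --------
    A minimal sketch with a fixed enumeration order (``order=np.arange(n)``)
    so that the grouping is deterministic: columns 0 and 1 share no row, so
    they fall into one group, while column 2 conflicts with column 0 in
    row 0 and gets its own group.

    >>> import numpy as np
    >>> A = np.array([[1, 0, 1],
    ...               [0, 1, 0],
    ...               [0, 0, 1]])
    >>> group_columns(A, order=np.arange(3)).tolist()
    [0, 0, 1]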
166 """
167 if issparse(A):
168 A = csc_matrix(A)
169 else:
170 A = np.atleast_2d(A)
171 A = (A != 0).astype(np.int32)
173 if A.ndim != 2:
174 raise ValueError("`A` must be 2-dimensional.")
176 m, n = A.shape
178 if order is None or np.isscalar(order):
179 rng = np.random.RandomState(order)
180 order = rng.permutation(n)
181 else:
182 order = np.asarray(order)
183 if order.shape != (n,):
184 raise ValueError("`order` has incorrect shape.")
186 A = A[:, order]
188 if issparse(A):
189 groups = group_sparse(m, n, A.indices, A.indptr)
190 else:
191 groups = group_dense(m, n, A)
193 groups[order] = groups.copy()
195 return groups


def approx_derivative(fun, x0, method='3-point', rel_step=None, abs_step=None,
                      f0=None, bounds=(-np.inf, np.inf), sparsity=None,
                      as_linear_operator=False, args=(), kwargs={}):
    """Compute finite difference approximation of the derivatives of a
    vector-valued function.

    If a function maps from R^n to R^m, its derivatives form an m-by-n
    matrix called the Jacobian, where element (i, j) is the partial
    derivative of f[i] with respect to x[j].

    Parameters
    ----------
    fun : callable
        Function of which to estimate the derivatives. The argument x
        passed to this function is an ndarray of shape (n,) (never a scalar
        even if n=1). It must return a 1-D array_like of shape (m,) or a
        scalar.
    x0 : array_like of shape (n,) or float
        Point at which to estimate the derivatives. A float will be
        converted to a 1-D array.
    method : {'3-point', '2-point', 'cs'}, optional
        Finite difference method to use:
            - '2-point' - use the first order accuracy forward or backward
                          difference.
            - '3-point' - use central difference in interior points and the
                          second order accuracy forward or backward difference
                          near the boundary.
            - 'cs' - use a complex-step finite difference scheme. This assumes
                     that the user function is real-valued and can be
                     analytically continued to the complex plane. Otherwise,
                     produces bogus results.
    rel_step : None or array_like, optional
        Relative step size to use. The absolute step size is computed as
        ``h = rel_step * sign(x0) * max(1, abs(x0))``, possibly adjusted to
        fit into the bounds. For ``method='3-point'`` the sign of `h` is
        ignored. If None (default) then the step is selected automatically,
        see Notes.
    abs_step : array_like, optional
        Absolute step size to use, possibly adjusted to fit into the bounds.
        For ``method='3-point'`` the sign of `abs_step` is ignored. Relative
        steps are used by default; absolute steps are used only if
        ``abs_step is not None``.
    f0 : None or array_like, optional
        If not None it is assumed to be equal to ``fun(x0)``, in which case
        ``fun(x0)`` is not called. Default is None.
    bounds : tuple of array_like, optional
        Lower and upper bounds on independent variables. Defaults to no
        bounds. Each bound must match the size of `x0` or be a scalar; in
        the latter case the bound will be the same for all variables. Use
        it to limit the range of function evaluation. Bounds checking is
        not implemented when `as_linear_operator` is True.
    sparsity : {None, array_like, sparse matrix, 2-tuple}, optional
        Defines a sparsity structure of the Jacobian matrix. If the Jacobian
        matrix is known to have only few non-zero elements in each row, then
        it's possible to estimate several of its columns by a single function
        evaluation [2]_. To perform such economical computations two
        ingredients are required:

        * structure : array_like or sparse matrix of shape (m, n). A zero
          element means that the corresponding element of the Jacobian
          is identically zero.
        * groups : array_like of shape (n,). A column grouping for a given
          sparsity structure, use `group_columns` to obtain it.

        A single array or a sparse matrix is interpreted as a sparsity
        structure, and groups are computed inside the function. A tuple is
        interpreted as (structure, groups). If None (default), a standard
        dense differencing will be used.

        Note that sparse differencing makes sense only for large Jacobian
        matrices where each row contains few non-zero elements.
    as_linear_operator : bool, optional
        When True the function returns a `scipy.sparse.linalg.LinearOperator`.
        Otherwise it returns a dense array or a sparse matrix depending on
        `sparsity`. The linear operator provides an efficient way of computing
        ``J.dot(p)`` for any vector ``p`` of shape (n,), but does not allow
        direct access to individual elements of the matrix. By default
        `as_linear_operator` is False.
    args, kwargs : tuple and dict, optional
        Additional arguments passed to `fun`. Both empty by default.
        The calling signature is ``fun(x, *args, **kwargs)``.

    Returns
    -------
    J : {ndarray, sparse matrix, LinearOperator}
        Finite difference approximation of the Jacobian matrix.
        If `as_linear_operator` is True returns a LinearOperator
        with shape (m, n). Otherwise it returns a dense array or sparse
        matrix depending on how `sparsity` is defined. If `sparsity`
        is None then an ndarray with shape (m, n) is returned. If
        `sparsity` is not None a csr_matrix with shape (m, n) is returned.
        For sparse matrices and linear operators the result is always
        returned as a 2-D structure; for ndarrays, if m=1 it is returned
        as a 1-D gradient array with shape (n,).

    See Also
    --------
    check_derivative : Check correctness of a function computing derivatives.

    Notes
    -----
    If `rel_step` is not provided, it is assigned ``EPS**(1/s)``, where EPS
    is machine epsilon for float64 numbers, s=2 for the '2-point' method
    and s=3 for the '3-point' method. Such a relative step approximately
    minimizes the sum of truncation and round-off errors, see [1]_.
    Relative steps are used by default. However, absolute steps are used
    when ``abs_step is not None``. If any of the absolute steps produces an
    indistinguishable difference from the original `x0`,
    ``(x0 + abs_step) - x0 == 0``, then a relative step is substituted for
    that particular entry.

    A finite difference scheme for the '3-point' method is selected
    automatically. The well-known central difference scheme is used for
    points sufficiently far from the boundary, and a 3-point forward or
    backward scheme is used for points near the boundary. Both schemes are
    second-order accurate in terms of Taylor expansion. Refer to [3]_ for
    the formulas of 3-point forward and backward difference schemes.

    For dense differencing, when m=1 the Jacobian is returned with shape
    (n,); on the other hand, when n=1 the Jacobian is returned with shape
    (m, 1). The motivation is the following: a) it handles the case of
    gradient computation (m=1) in a conventional way; b) it clearly
    separates these two different cases; c) in all cases ``np.atleast_2d``
    can be called to get a 2-D Jacobian with correct dimensions.

    References
    ----------
    .. [1] W. H. Press et. al. "Numerical Recipes. The Art of Scientific
           Computing. 3rd edition", sec. 5.7.

    .. [2] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of
           sparse Jacobian matrices", Journal of the Institute of Mathematics
           and its Applications, 13 (1974), pp. 117-120.

    .. [3] B. Fornberg, "Generation of Finite Difference Formulas on
           Arbitrarily Spaced Grids", Mathematics of Computation 51, 1988.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.optimize import approx_derivative
    >>>
    >>> def f(x, c1, c2):
    ...     return np.array([x[0] * np.sin(c1 * x[1]),
    ...                      x[0] * np.cos(c2 * x[1])])
    ...
    >>> x0 = np.array([1.0, 0.5 * np.pi])
    >>> approx_derivative(f, x0, args=(1, 2))
    array([[ 1.,  0.],
           [-1.,  0.]])

    Bounds can be used to limit the region of function evaluation.
    In the example below we compute the left and right derivatives at
    the point 1.0.

    >>> def g(x):
    ...     return x**2 if x >= 1 else x
    ...
    >>> x0 = 1.0
    >>> approx_derivative(g, x0, bounds=(-np.inf, 1.0))
    array([ 1.])
    >>> approx_derivative(g, x0, bounds=(1.0, np.inf))
    array([ 2.])
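
    Sparse differencing can reuse a single evaluation for several columns.
    A minimal sketch (``f_sq`` is a made-up elementwise function, so its
    Jacobian is diagonal and all columns fit into one group; note that
    `group_columns` is imported from this private module):

    >>> from scipy.optimize._numdiff import group_columns
    >>> def f_sq(x):
    ...     return x**2
    ...
    >>> structure = np.eye(4)
    >>> groups = group_columns(structure)
    >>> J = approx_derivative(f_sq, np.arange(4.0),
    ...                       sparsity=(structure, groups))
    >>> np.allclose(J.toarray(), np.diag(2 * np.arange(4.0)))
    True

    With ``as_linear_operator=True`` only matrix-vector products are
    available (a sketch; individual elements of J cannot be accessed):

    >>> Jop = approx_derivative(f_sq, np.arange(4.0),
    ...                         as_linear_operator=True)
    >>> np.allclose(Jop.dot(np.ones(4)), 2 * np.arange(4.0))
    True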
358 """
359 if method not in ['2-point', '3-point', 'cs']:
360 raise ValueError("Unknown method '%s'. " % method)
362 x0 = np.atleast_1d(x0)
363 if x0.ndim > 1:
364 raise ValueError("`x0` must have at most 1 dimension.")
366 lb, ub = _prepare_bounds(bounds, x0)
368 if lb.shape != x0.shape or ub.shape != x0.shape:
369 raise ValueError("Inconsistent shapes between bounds and `x0`.")
371 if as_linear_operator and not (np.all(np.isinf(lb))
372 and np.all(np.isinf(ub))):
373 raise ValueError("Bounds not supported when "
374 "`as_linear_operator` is True.")
376 def fun_wrapped(x):
377 f = np.atleast_1d(fun(x, *args, **kwargs))
378 if f.ndim > 1:
379 raise RuntimeError("`fun` return value has "
380 "more than 1 dimension.")
381 return f
383 if f0 is None:
384 f0 = fun_wrapped(x0)
385 else:
386 f0 = np.atleast_1d(f0)
387 if f0.ndim > 1:
388 raise ValueError("`f0` passed has more than 1 dimension.")
390 if np.any((x0 < lb) | (x0 > ub)):
391 raise ValueError("`x0` violates bound constraints.")
393 if as_linear_operator:
394 if rel_step is None:
395 rel_step = relative_step[method]
397 return _linear_operator_difference(fun_wrapped, x0,
398 f0, rel_step, method)
399 else:
400 # by default we use rel_step
401 if abs_step is None:
402 h = _compute_absolute_step(rel_step, x0, method)
403 else:
404 # user specifies an absolute step
405 sign_x0 = (x0 >= 0).astype(float) * 2 - 1
406 h = abs_step * sign_x0
408 # cannot have a zero step. This might happen if x0 is very large
409 # or small. In which case fall back to relative step.
410 dx = ((x0 + h) - x0)
411 h = np.where(dx == 0,
412 relative_step[method] * sign_x0 *
413 np.maximum(1.0, np.abs(x0)),
414 h)
416 if method == '2-point':
417 h, use_one_sided = _adjust_scheme_to_bounds(
418 x0, h, 1, '1-sided', lb, ub)
419 elif method == '3-point':
420 h, use_one_sided = _adjust_scheme_to_bounds(
421 x0, h, 1, '2-sided', lb, ub)
422 elif method == 'cs':
423 use_one_sided = False
425 if sparsity is None:
426 return _dense_difference(fun_wrapped, x0, f0, h,
427 use_one_sided, method)
428 else:
429 if not issparse(sparsity) and len(sparsity) == 2:
430 structure, groups = sparsity
431 else:
432 structure = sparsity
433 groups = group_columns(sparsity)
435 if issparse(structure):
436 structure = csc_matrix(structure)
437 else:
438 structure = np.atleast_2d(structure)
440 groups = np.atleast_1d(groups)
441 return _sparse_difference(fun_wrapped, x0, f0, h,
442 use_one_sided, structure,
443 groups, method)


def _linear_operator_difference(fun, x0, f0, h, method):
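    # Each matvec estimates the directional derivative J.dot(p) from a
    # single finite difference along p; the scalar step `h` (a relative
    # step here) is divided by norm(p) so that the perturbation of x0 has
    # magnitude on the order of h.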
    m = f0.size
    n = x0.size

    if method == '2-point':
        def matvec(p):
            if np.array_equal(p, np.zeros_like(p)):
                return np.zeros(m)
            dx = h / norm(p)
            x = x0 + dx*p
            df = fun(x) - f0
            return df / dx

    elif method == '3-point':
        def matvec(p):
            if np.array_equal(p, np.zeros_like(p)):
                return np.zeros(m)
            dx = 2*h / norm(p)
            x1 = x0 - (dx/2)*p
            x2 = x0 + (dx/2)*p
            f1 = fun(x1)
            f2 = fun(x2)
            df = f2 - f1
            return df / dx

    elif method == 'cs':
        def matvec(p):
            if np.array_equal(p, np.zeros_like(p)):
                return np.zeros(m)
            dx = h / norm(p)
            x = x0 + dx*p*1.j
            f1 = fun(x)
            df = f1.imag
            return df / dx

    else:
        raise RuntimeError("Should never be here.")

    return LinearOperator((m, n), matvec)


def _dense_difference(fun, x0, f0, h, use_one_sided, method):
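    # Estimate the Jacobian one column at a time: perturb a single
    # variable, evaluate `fun`, and divide the change in output by the
    # exactly representable step dx actually taken.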
    m = f0.size
    n = x0.size
    J_transposed = np.empty((n, m))
    h_vecs = np.diag(h)

    for i in range(h.size):
        if method == '2-point':
            x = x0 + h_vecs[i]
            dx = x[i] - x0[i]  # Recompute dx as an exactly representable number.
            df = fun(x) - f0
        elif method == '3-point' and use_one_sided[i]:
            x1 = x0 + h_vecs[i]
            x2 = x0 + 2 * h_vecs[i]
            dx = x2[i] - x0[i]
            f1 = fun(x1)
            f2 = fun(x2)
            df = -3.0 * f0 + 4 * f1 - f2
        elif method == '3-point' and not use_one_sided[i]:
            x1 = x0 - h_vecs[i]
            x2 = x0 + h_vecs[i]
            dx = x2[i] - x1[i]
            f1 = fun(x1)
            f2 = fun(x2)
            df = f2 - f1
        elif method == 'cs':
            f1 = fun(x0 + h_vecs[i]*1.j)
            df = f1.imag
            dx = h_vecs[i, i]
        else:
            raise RuntimeError("Should never be here.")

        J_transposed[i] = df / dx

    if m == 1:
        J_transposed = np.ravel(J_transposed)

    return J_transposed.T


def _sparse_difference(fun, x0, f0, h, use_one_sided,
                       structure, groups, method):
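    # Columns within one group have no structurally non-zero row in
    # common, so all of them can be perturbed at once and every non-zero
    # Jacobian entry in those columns recovered from a single evaluation
    # (the Curtis-Powell-Reid approach, see `group_columns`).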
    m = f0.size
    n = x0.size
    row_indices = []
    col_indices = []
    fractions = []

    n_groups = np.max(groups) + 1
    for group in range(n_groups):
        # Perturb variables which are in the same group simultaneously.
        e = np.equal(group, groups)
        h_vec = h * e
        if method == '2-point':
            x = x0 + h_vec
            dx = x - x0
            df = fun(x) - f0
            # The result is written to columns which correspond to
            # perturbed variables.
            cols, = np.nonzero(e)
            # Find all non-zero elements in selected columns of Jacobian.
            i, j, _ = find(structure[:, cols])
            # Restore column indices in the full array.
            j = cols[j]
        elif method == '3-point':
            # Here we do conceptually the same but separate one-sided
            # and two-sided schemes.
            x1 = x0.copy()
            x2 = x0.copy()

            mask_1 = use_one_sided & e
            x1[mask_1] += h_vec[mask_1]
            x2[mask_1] += 2 * h_vec[mask_1]

            mask_2 = ~use_one_sided & e
            x1[mask_2] -= h_vec[mask_2]
            x2[mask_2] += h_vec[mask_2]

            dx = np.zeros(n)
            dx[mask_1] = x2[mask_1] - x0[mask_1]
            dx[mask_2] = x2[mask_2] - x1[mask_2]

            f1 = fun(x1)
            f2 = fun(x2)

            cols, = np.nonzero(e)
            i, j, _ = find(structure[:, cols])
            j = cols[j]

            mask = use_one_sided[j]
            df = np.empty(m)

            rows = i[mask]
            df[rows] = -3 * f0[rows] + 4 * f1[rows] - f2[rows]

            rows = i[~mask]
            df[rows] = f2[rows] - f1[rows]
        elif method == 'cs':
            f1 = fun(x0 + h_vec*1.j)
            df = f1.imag
            dx = h_vec
            cols, = np.nonzero(e)
            i, j, _ = find(structure[:, cols])
            j = cols[j]
        else:
            raise RuntimeError("Should never be here.")

        # All that's left is to compute the fraction. We store i, j and
        # fractions as separate arrays and later construct a coo_matrix.
        row_indices.append(i)
        col_indices.append(j)
        fractions.append(df[i] / dx[j])

    row_indices = np.hstack(row_indices)
    col_indices = np.hstack(col_indices)
    fractions = np.hstack(fractions)
    J = coo_matrix((fractions, (row_indices, col_indices)), shape=(m, n))
    return csr_matrix(J)


def check_derivative(fun, jac, x0, bounds=(-np.inf, np.inf), args=(),
                     kwargs={}):
    """Check correctness of a function computing derivatives (Jacobian or
    gradient) by comparison with a finite difference approximation.

    Parameters
    ----------
    fun : callable
        Function of which to estimate the derivatives. The argument x
        passed to this function is an ndarray of shape (n,) (never a scalar
        even if n=1). It must return a 1-D array_like of shape (m,) or a
        scalar.
    jac : callable
        Function which computes the Jacobian matrix of `fun`. It must work
        with argument x the same way as `fun`. The return value must be
        array_like or sparse matrix with an appropriate shape.
    x0 : array_like of shape (n,) or float
        Point at which to estimate the derivatives. A float will be
        converted to a 1-D array.
    bounds : 2-tuple of array_like, optional
        Lower and upper bounds on independent variables. Defaults to no
        bounds. Each bound must match the size of `x0` or be a scalar; in
        the latter case the bound will be the same for all variables. Use
        it to limit the range of function evaluation.
    args, kwargs : tuple and dict, optional
        Additional arguments passed to `fun` and `jac`. Both empty by
        default. The calling signature is ``fun(x, *args, **kwargs)`` and
        the same for `jac`.

    Returns
    -------
    accuracy : float
        The maximum among all relative errors for elements with absolute
        values higher than 1 and absolute errors for elements with absolute
        values less than or equal to 1. If `accuracy` is on the order of
        1e-6 or lower, then it is likely that your `jac` implementation is
        correct.

    See Also
    --------
    approx_derivative : Compute finite difference approximation of derivative.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.optimize import check_derivative
    >>>
    >>> def f(x, c1, c2):
    ...     return np.array([x[0] * np.sin(c1 * x[1]),
    ...                      x[0] * np.cos(c2 * x[1])])
    ...
    >>> def jac(x, c1, c2):
    ...     return np.array([
    ...         [np.sin(c1 * x[1]), c1 * x[0] * np.cos(c1 * x[1])],
    ...         [np.cos(c2 * x[1]), -c2 * x[0] * np.sin(c2 * x[1])]
    ...     ])
    ...
    >>> x0 = np.array([1.0, 0.5 * np.pi])
    >>> check_derivative(f, jac, x0, args=(1, 2))
    2.4492935982947064e-16
    """
    J_to_test = jac(x0, *args, **kwargs)
    if issparse(J_to_test):
        J_diff = approx_derivative(fun, x0, bounds=bounds, sparsity=J_to_test,
                                   args=args, kwargs=kwargs)
        J_to_test = csr_matrix(J_to_test)
        abs_err = J_to_test - J_diff
        i, j, abs_err_data = find(abs_err)
        J_diff_data = np.asarray(J_diff[i, j]).ravel()
        return np.max(np.abs(abs_err_data) /
                      np.maximum(1, np.abs(J_diff_data)))
    else:
        J_diff = approx_derivative(fun, x0, bounds=bounds,
                                   args=args, kwargs=kwargs)
        abs_err = np.abs(J_to_test - J_diff)
        return np.max(abs_err / np.maximum(1, np.abs(J_diff)))