Hide keyboard shortcuts

Hot-keys on this page

r m x p   toggle line displays

j k   next/prev highlighted chunk

0   (zero) top of page

1   (one) first highlighted chunk

1import collections.abc 

2import functools 

3import re 

4import sys 

5import warnings 

6 

7import numpy as np 

8import numpy.core.numeric as _nx 

9from numpy.core import transpose 

10from numpy.core.numeric import ( 

11 ones, zeros, arange, concatenate, array, asarray, asanyarray, empty, 

12 ndarray, around, floor, ceil, take, dot, where, intp, 

13 integer, isscalar, absolute 

14 ) 

15from numpy.core.umath import ( 

16 pi, add, arctan2, frompyfunc, cos, less_equal, sqrt, sin, 

17 mod, exp, not_equal, subtract 

18 ) 

19from numpy.core.fromnumeric import ( 

20 ravel, nonzero, partition, mean, any, sum 

21 ) 

22from numpy.core.numerictypes import typecodes 

23from numpy.core.overrides import set_module 

24from numpy.core import overrides 

25from numpy.core.function_base import add_newdoc 

26from numpy.lib.twodim_base import diag 

27from numpy.core.multiarray import ( 

28 _insert, add_docstring, bincount, normalize_axis_index, _monotonicity, 

29 interp as compiled_interp, interp_complex as compiled_interp_complex 

30 ) 

31from numpy.core.umath import _add_newdoc_ufunc as add_newdoc_ufunc 

32 

33import builtins 

34 

35# needed in this module for compatibility 

36from numpy.lib.histograms import histogram, histogramdd 

37 

38 

# Dispatcher factory pre-bound so that every public function in this module
# registers itself under ``module='numpy'`` for __array_function__ dispatch.
array_function_dispatch = functools.partial(
    overrides.array_function_dispatch, module='numpy')

41 

42 

# Public API of this module, re-exported by ``numpy.lib`` (and ultimately
# ``numpy``) via ``from ... import *``.
__all__ = [
    'select', 'piecewise', 'trim_zeros', 'copy', 'iterable', 'percentile',
    'diff', 'gradient', 'angle', 'unwrap', 'sort_complex', 'disp', 'flip',
    'rot90', 'extract', 'place', 'vectorize', 'asarray_chkfinite', 'average',
    'bincount', 'digitize', 'cov', 'corrcoef',
    'msort', 'median', 'sinc', 'hamming', 'hanning', 'bartlett',
    'blackman', 'kaiser', 'trapz', 'i0', 'add_newdoc', 'add_docstring',
    'meshgrid', 'delete', 'insert', 'append', 'interp', 'add_newdoc_ufunc',
    'quantile'
    ]

53 

54 

55def _rot90_dispatcher(m, k=None, axes=None): 

56 return (m,) 

57 

58 

@array_function_dispatch(_rot90_dispatcher)
def rot90(m, k=1, axes=(0, 1)):
    """
    Rotate an array by 90 degrees in the plane specified by axes.

    Rotation direction is from the first towards the second axis.

    Parameters
    ----------
    m : array_like
        Array of two or more dimensions.
    k : integer
        Number of times the array is rotated by 90 degrees.
    axes : (2,) array_like
        The array is rotated in the plane defined by the axes.
        Axes must be different.

        .. versionadded:: 1.12.0

    Returns
    -------
    y : ndarray
        A rotated view of `m`.

    See Also
    --------
    flip : Reverse the order of elements in an array along the given axis.
    fliplr : Flip an array horizontally.
    flipud : Flip an array vertically.

    Notes
    -----
    ``rot90(m, k=1, axes=(1,0))`` is the reverse of
    ``rot90(m, k=1, axes=(0,1))`` and is equivalent to
    ``rot90(m, k=-1, axes=(0,1))``.

    Examples
    --------
    >>> m = np.array([[1,2],[3,4]], int)
    >>> np.rot90(m)
    array([[2, 4],
           [1, 3]])
    >>> np.rot90(m, 2)
    array([[4, 3],
           [2, 1]])
    """
    axes = tuple(axes)
    if len(axes) != 2:
        raise ValueError("len(axes) must be 2.")

    m = asanyarray(m)
    ndim = m.ndim

    # Same axis twice (also via a positive/negative alias pair) is invalid.
    if axes[0] == axes[1] or absolute(axes[0] - axes[1]) == ndim:
        raise ValueError("Axes must be different.")

    if not (-ndim <= axes[0] < ndim and -ndim <= axes[1] < ndim):
        raise ValueError("Axes={} out of range for array of ndim={}."
                         .format(axes, ndim))

    k %= 4

    if k == 0:
        # No rotation: return a full-slice view of the input.
        return m[:]
    if k == 2:
        # Half turn: reverse both axes of the rotation plane.
        return flip(flip(m, axes[0]), axes[1])

    # Quarter turns: build the axis permutation that swaps the two plane axes.
    perm = list(range(ndim))
    perm[axes[0]], perm[axes[1]] = perm[axes[1]], perm[axes[0]]

    if k == 1:
        return transpose(flip(m, axes[1]), perm)
    # k == 3
    return flip(transpose(m, perm), axes[1])

144 

145 

146def _flip_dispatcher(m, axis=None): 

147 return (m,) 

148 

149 

@array_function_dispatch(_flip_dispatcher)
def flip(m, axis=None):
    """
    Reverse the order of elements in an array along the given axis.

    The shape of the array is preserved, but the elements are reordered.

    .. versionadded:: 1.12.0

    Parameters
    ----------
    m : array_like
        Input array.
    axis : None or int or tuple of ints, optional
        Axis or axes along which to flip over.  The default, ``axis=None``,
        flips over all of the axes of the input array.  If axis is negative
        it counts from the last to the first axis.  A tuple of ints flips
        each of the named axes.

        .. versionchanged:: 1.15.0
           None and tuples of axes are supported

    Returns
    -------
    out : array_like
        A view of `m` with the entries of axis reversed.  Since a view is
        returned, this operation is done in constant time.

    See Also
    --------
    flipud : Flip an array vertically (axis=0).
    fliplr : Flip an array horizontally (axis=1).

    Notes
    -----
    ``flip(m, n)`` corresponds to ``m[...,::-1,...]`` with ``::-1`` at
    position n; ``flip(m)`` reverses every axis.

    Examples
    --------
    >>> A = np.arange(4).reshape((2, 2))
    >>> np.flip(A, 0)
    array([[2, 3],
           [0, 1]])
    """
    # Anything without an `ndim` attribute is coerced to an ndarray first.
    if not hasattr(m, 'ndim'):
        m = asarray(m)
    reverse = slice(None, None, -1)
    if axis is None:
        # Reverse every axis.
        indexer = (reverse,) * m.ndim
    else:
        axis = _nx.normalize_axis_tuple(axis, m.ndim)
        # Reverse only the requested axes; keep the rest as full slices.
        indexer = tuple(reverse if ax in axis else slice(None)
                        for ax in range(m.ndim))
    return m[indexer]

242 

243 

@set_module('numpy')
def iterable(y):
    """
    Check whether or not an object can be iterated over.

    Parameters
    ----------
    y : object
        Input object.

    Returns
    -------
    b : bool
        Return ``True`` if the object has an iterator method or is a
        sequence and ``False`` otherwise.

    Examples
    --------
    >>> np.iterable([1, 2, 3])
    True
    >>> np.iterable(2)
    False

    """
    # EAFP: asking for an iterator is the authoritative test.
    try:
        iter(y)
        return True
    except TypeError:
        return False

274 

275 

276def _average_dispatcher(a, axis=None, weights=None, returned=None): 

277 return (a, weights) 

278 

279 

@array_function_dispatch(_average_dispatcher)
def average(a, axis=None, weights=None, returned=False):
    """
    Compute the weighted average along the specified axis.

    Parameters
    ----------
    a : array_like
        Array containing data to be averaged. If `a` is not an array, a
        conversion is attempted.
    axis : None or int or tuple of ints, optional
        Axis or axes along which to average `a`.  The default,
        axis=None, will average over all of the elements of the input array.
        If axis is negative it counts from the last to the first axis.

        .. versionadded:: 1.7.0

        If axis is a tuple of ints, averaging is performed on all of the axes
        specified in the tuple instead of a single axis or all the axes as
        before.
    weights : array_like, optional
        An array of weights associated with the values in `a`. Each value in
        `a` contributes to the average according to its associated weight.
        The weights array can either be 1-D (in which case its length must be
        the size of `a` along the given axis) or of the same shape as `a`.
        If `weights=None`, then all data in `a` are assumed to have a
        weight equal to one.  The 1-D calculation is::

            avg = sum(a * weights) / sum(weights)

        The only constraint on `weights` is that `sum(weights)` must not be 0.
    returned : bool, optional
        Default is `False`. If `True`, the tuple (`average`, `sum_of_weights`)
        is returned, otherwise only the average is returned.
        If `weights=None`, `sum_of_weights` is equivalent to the number of
        elements over which the average is taken.

    Returns
    -------
    retval, [sum_of_weights] : array_type or double
        Return the average along the specified axis. When `returned` is
        `True`, return a tuple with the average as the first element and the
        sum of the weights as the second element. `sum_of_weights` is of the
        same type as `retval`. The result dtype follows a general pattern.
        If `weights` is None, the result dtype will be that of `a` , or
        ``float64`` if `a` is integral. Otherwise, if `weights` is not None
        and `a` is non-integral, the result type will be the type of lowest
        precision capable of representing values of both `a` and `weights`.
        If `a` happens to be integral, the previous rules still apply but the
        result dtype will at least be ``float64``.

    Raises
    ------
    ZeroDivisionError
        When all weights along axis are zero. See `numpy.ma.average` for a
        version robust to this type of error.
    TypeError
        When the length of 1D `weights` is not the same as the shape of `a`
        along axis.

    See Also
    --------
    mean

    ma.average : average for masked arrays -- useful if your data contains
                 "missing" values
    numpy.result_type : Returns the type that results from applying the
                        numpy type promotion rules to the arguments.

    Examples
    --------
    >>> data = np.arange(1, 5)
    >>> np.average(data)
    2.5
    >>> np.average(np.arange(1, 11), weights=np.arange(10, 0, -1))
    4.0

    >>> data = np.arange(6).reshape((3,2))
    >>> np.average(data, axis=1, weights=[1./4, 3./4])
    array([0.75, 2.75, 4.75])
    >>> np.average(data, weights=[1./4, 3./4])
    Traceback (most recent call last):
        ...
    TypeError: Axis must be specified when shapes of a and weights differ.

    >>> a = np.ones(5, dtype=np.float128)
    >>> w = np.ones(5, dtype=np.complex64)
    >>> avg = np.average(a, weights=w)
    >>> print(avg.dtype)
    complex256
    """
    a = np.asanyarray(a)

    if weights is None:
        avg = a.mean(axis)
        # Unweighted case: the "sum of weights" is simply the number of
        # elements averaged per output element, in the result's dtype.
        scl = avg.dtype.type(a.size/avg.size)
    else:
        wgt = np.asanyarray(weights)

        # Integer and boolean inputs are promoted to at least float64 so
        # the division below is true division, not integer division.
        if issubclass(a.dtype.type, (np.integer, np.bool_)):
            result_dtype = np.result_type(a.dtype, wgt.dtype, 'f8')
        else:
            result_dtype = np.result_type(a.dtype, wgt.dtype)

        # Sanity checks
        if a.shape != wgt.shape:
            if axis is None:
                raise TypeError(
                    "Axis must be specified when shapes of a and weights "
                    "differ.")
            if wgt.ndim != 1:
                raise TypeError(
                    "1D weights expected when shapes of a and weights differ.")
            if wgt.shape[0] != a.shape[axis]:
                raise ValueError(
                    "Length of weights not compatible with specified axis.")

            # setup wgt to broadcast along axis: prepend length-1 dims, then
            # move the real weights dimension into position `axis`.
            wgt = np.broadcast_to(wgt, (a.ndim-1)*(1,) + wgt.shape)
            wgt = wgt.swapaxes(-1, axis)

        scl = wgt.sum(axis=axis, dtype=result_dtype)
        if np.any(scl == 0.0):
            raise ZeroDivisionError(
                "Weights sum to zero, can't be normalized")

        avg = np.multiply(a, wgt, dtype=result_dtype).sum(axis)/scl

    if returned:
        # Promise: scl has the same shape as avg (copy so the caller gets
        # a writeable array rather than a broadcast view).
        if scl.shape != avg.shape:
            scl = np.broadcast_to(scl, avg.shape).copy()
        return avg, scl
    else:
        return avg

420 

421 

@set_module('numpy')
def asarray_chkfinite(a, dtype=None, order=None):
    """Convert the input to an array, checking for NaNs or Infs.

    Parameters
    ----------
    a : array_like
        Input data, in any form that can be converted to an array.  This
        includes lists, lists of tuples, tuples, tuples of tuples, tuples
        of lists and ndarrays.  Success requires no NaNs or Infs.
    dtype : data-type, optional
        By default, the data-type is inferred from the input data.
    order : {'C', 'F'}, optional
        Whether to use row-major (C-style) or column-major (Fortran-style)
        memory representation.  Defaults to 'C'.

    Returns
    -------
    out : ndarray
        Array interpretation of `a`.  No copy is performed if the input
        is already an ndarray.  If `a` is a subclass of ndarray, a base
        class ndarray is returned.

    Raises
    ------
    ValueError
        Raises ValueError if `a` contains NaN (Not a Number) or Inf
        (Infinity).

    See Also
    --------
    asarray : Create an array.
    asanyarray : Similar function which passes through subclasses.
    ascontiguousarray : Convert input to a contiguous array.
    asfarray : Convert input to a floating point ndarray.
    asfortranarray : Convert input to an ndarray with column-major
                     memory order.

    Examples
    --------
    >>> np.asarray_chkfinite([1, 2], dtype=float)
    array([1., 2.])

    """
    result = asarray(a, dtype=dtype, order=order)
    # Non-float dtypes (integers, bools, strings, objects) cannot hold
    # NaN/Inf, so the finiteness scan is only needed for float kinds.
    is_float_kind = result.dtype.char in typecodes['AllFloat']
    if is_float_kind and not np.isfinite(result).all():
        raise ValueError(
            "array must not contain infs or NaNs")
    return result

488 

489 

490def _piecewise_dispatcher(x, condlist, funclist, *args, **kw): 

491 yield x 

492 # support the undocumented behavior of allowing scalars 

493 if np.iterable(condlist): 

494 yield from condlist 

495 

496 

@array_function_dispatch(_piecewise_dispatcher)
def piecewise(x, condlist, funclist, *args, **kw):
    """
    Evaluate a piecewise-defined function.

    Given a set of conditions and corresponding functions, evaluate each
    function on the input data wherever its condition is true.

    Parameters
    ----------
    x : ndarray or scalar
        The input domain.
    condlist : list of bool arrays or bool scalars
        Each boolean array corresponds to a function in `funclist`.  Wherever
        `condlist[i]` is True, `funclist[i](x)` is used as the output value.

        Each boolean array in `condlist` selects a piece of `x`,
        and should therefore be of the same shape as `x`.

        The length of `condlist` must correspond to that of `funclist`.
        If one extra function is given, i.e. if
        ``len(funclist) == len(condlist) + 1``, then that extra function
        is the default value, used wherever all conditions are false.
    funclist : list of callables, f(x,*args,**kw), or scalars
        Each function is evaluated over `x` wherever its corresponding
        condition is True.  It should take a 1d array as input and give an 1d
        array or a scalar value as output.  If, instead of a callable,
        a scalar is provided then a constant function (``lambda x: scalar``)
        is assumed.
    args : tuple, optional
        Any further arguments given to `piecewise` are passed to the
        functions upon execution, i.e., if called
        ``piecewise(..., ..., 1, 'a')``, then each function is called as
        ``f(x, 1, 'a')``.
    kw : dict, optional
        Keyword arguments used in calling `piecewise` are passed to the
        functions upon execution, i.e., if called
        ``piecewise(..., ..., alpha=1)``, then each function is called as
        ``f(x, alpha=1)``.

    Returns
    -------
    out : ndarray
        The output is the same shape and type as x and is found by
        calling the functions in `funclist` on the appropriate portions of
        `x`, as defined by the boolean arrays in `condlist`.  Portions not
        covered by any condition have a default value of 0.

    See Also
    --------
    choose, select, where

    Notes
    -----
    This is similar to choose or select, except that functions are
    evaluated on elements of `x` that satisfy the corresponding condition
    from `condlist`.

    Examples
    --------
    Define the sigma function, which is -1 for ``x < 0`` and +1 for
    ``x >= 0``.

    >>> x = np.linspace(-2.5, 2.5, 6)
    >>> np.piecewise(x, [x < 0, x >= 0], [-1, 1])
    array([-1., -1., -1.,  1.,  1.,  1.])

    Define the absolute value, which is ``-x`` for ``x <0`` and ``x`` for
    ``x >= 0``.

    >>> np.piecewise(x, [x < 0, x >= 0], [lambda x: -x, lambda x: x])
    array([2.5,  1.5,  0.5,  0.5,  1.5,  2.5])

    Apply the same function to a scalar value.

    >>> y = -2
    >>> np.piecewise(y, [y < 0, y >= 0], [lambda x: -x, lambda x: x])
    array(2)

    """
    x = asanyarray(x)
    n2 = len(funclist)

    # undocumented: single condition is promoted to a list of one condition
    if isscalar(condlist) or (
            not isinstance(condlist[0], (list, ndarray)) and x.ndim != 0):
        condlist = [condlist]

    condlist = array(condlist, dtype=bool)
    n = len(condlist)

    if n == n2 - 1:  # compute the "otherwise" condition.
        # True wherever no explicit condition matched; keepdims so it can
        # be concatenated with the existing condition stack.
        condelse = ~np.any(condlist, axis=0, keepdims=True)
        condlist = np.concatenate([condlist, condelse], axis=0)
        n += 1
    elif n != n2:
        raise ValueError(
            "with {} condition(s), either {} or {} functions are expected"
            .format(n, n, n+1)
        )

    # Output starts as zeros, which gives uncovered elements their
    # documented default value of 0.
    y = zeros(x.shape, x.dtype)
    for k in range(n):
        item = funclist[k]
        if not isinstance(item, collections.abc.Callable):
            # Scalar entry: acts as a constant function.
            y[condlist[k]] = item
        else:
            vals = x[condlist[k]]
            # Only call the function when its piece is non-empty, so
            # user functions never see an empty slice.
            if vals.size > 0:
                y[condlist[k]] = item(vals, *args, **kw)

    return y

617 

618 

619def _select_dispatcher(condlist, choicelist, default=None): 

620 yield from condlist 

621 yield from choicelist 

622 

623 

@array_function_dispatch(_select_dispatcher)
def select(condlist, choicelist, default=0):
    """
    Return an array drawn from elements in choicelist, depending on
    conditions.

    Parameters
    ----------
    condlist : list of bool ndarrays
        The list of conditions which determine from which array in
        `choicelist` the output elements are taken.  When multiple
        conditions are satisfied, the first one encountered in `condlist`
        is used.
    choicelist : list of ndarrays
        The list of arrays from which the output elements are taken.  It
        has to be of the same length as `condlist`.
    default : scalar, optional
        The element inserted in `output` when all conditions evaluate to
        False.

    Returns
    -------
    output : ndarray
        The output at position m is the m-th element of the array in
        `choicelist` where the m-th element of the corresponding array in
        `condlist` is True.

    See Also
    --------
    where : Return elements from one of two arrays depending on condition.
    take, choose, compress, diag, diagonal

    Examples
    --------
    >>> x = np.arange(10)
    >>> condlist = [x<3, x>5]
    >>> choicelist = [x, x**2]
    >>> np.select(condlist, choicelist)
    array([ 0,  1,  2, ..., 49, 64, 81])

    """
    # Check the size of condlist and choicelist are the same, or abort.
    if len(condlist) != len(choicelist):
        raise ValueError(
            'list of cases must be same length as list of conditions')

    # Now that the dtype is known, handle the deprecated select([], []) case
    if len(condlist) == 0:
        raise ValueError("select with an empty condition list is not possible")

    choicelist = [np.asarray(choice) for choice in choicelist]
    # `default` is appended as the final "choice" used where nothing matches.
    choicelist.append(np.asarray(default))

    # need to get the result type before broadcasting for correct scalar
    # behaviour
    dtype = np.result_type(*choicelist)

    # Convert conditions to arrays and broadcast conditions and choices
    # as the shape is needed for the result. Doing it separately optimizes
    # for example when all choices are scalars.
    condlist = np.broadcast_arrays(*condlist)
    choicelist = np.broadcast_arrays(*choicelist)

    # If cond array is not an ndarray in boolean format or scalar bool, abort.
    for i in range(len(condlist)):
        cond = condlist[i]
        if cond.dtype.type is not np.bool_:
            raise TypeError(
                'invalid entry {} in condlist: should be boolean ndarray'.format(i))

    if choicelist[0].ndim == 0:
        # This may be common, so avoid the call.
        result_shape = condlist[0].shape
    else:
        result_shape = np.broadcast_arrays(condlist[0], choicelist[0])[0].shape

    # Start from the default (the appended last choice) everywhere.
    result = np.full(result_shape, choicelist[-1], dtype)

    # Use np.copyto to burn each choicelist array onto result, using the
    # corresponding condlist as a boolean mask. This is done in reverse
    # order since the first choice should take precedence.
    choicelist = choicelist[-2::-1]
    condlist = condlist[::-1]
    for choice, cond in zip(choicelist, condlist):
        np.copyto(result, choice, where=cond)

    return result

708 

709 

710def _copy_dispatcher(a, order=None, subok=None): 

711 return (a,) 

712 

713 

@array_function_dispatch(_copy_dispatcher)
def copy(a, order='K', subok=False):
    """
    Return an array copy of the given object.

    Parameters
    ----------
    a : array_like
        Input data.
    order : {'C', 'F', 'A', 'K'}, optional
        Controls the memory layout of the copy. 'C' means C-order,
        'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
        'C' otherwise. 'K' means match the layout of `a` as closely
        as possible. (Note that this function and :meth:`ndarray.copy` are
        very similar, but have different default values for their order=
        arguments.)
    subok : bool, optional
        If True, then sub-classes will be passed-through, otherwise the
        returned array will be forced to be a base-class array (defaults
        to False).

        .. versionadded:: 1.19.0

    Returns
    -------
    arr : ndarray
        Array interpretation of `a`.

    See Also
    --------
    ndarray.copy : Preferred method for creating an array copy

    Notes
    -----
    This is equivalent to ``np.array(a, copy=True)``.

    Note that ``np.copy`` is a shallow copy: for arrays of dtype
    ``object`` the new array contains the *same* Python objects, so
    mutating those objects is visible through both arrays.  Use
    `copy.deepcopy` to copy the contained objects as well.

    Examples
    --------
    >>> x = np.array([1, 2, 3])
    >>> z = np.copy(x)
    >>> x[0] = 10
    >>> x[0] == z[0]
    False

    """
    # Delegate to np.array, which performs the actual copy.
    return array(a, order=order, subok=subok, copy=True)

793 

794# Basic operations 

795 

796 

797def _gradient_dispatcher(f, *varargs, axis=None, edge_order=None): 

798 yield f 

799 yield from varargs 

800 

801 

802@array_function_dispatch(_gradient_dispatcher) 

803def gradient(f, *varargs, axis=None, edge_order=1): 

804 """ 

805 Return the gradient of an N-dimensional array. 

806 

807 The gradient is computed using second order accurate central differences 

808 in the interior points and either first or second order accurate one-sides 

809 (forward or backwards) differences at the boundaries. 

810 The returned gradient hence has the same shape as the input array. 

811 

812 Parameters 

813 ---------- 

814 f : array_like 

815 An N-dimensional array containing samples of a scalar function. 

816 varargs : list of scalar or array, optional 

817 Spacing between f values. Default unitary spacing for all dimensions. 

818 Spacing can be specified using: 

819 

820 1. single scalar to specify a sample distance for all dimensions. 

821 2. N scalars to specify a constant sample distance for each dimension. 

822 i.e. `dx`, `dy`, `dz`, ... 

823 3. N arrays to specify the coordinates of the values along each 

824 dimension of F. The length of the array must match the size of 

825 the corresponding dimension 

826 4. Any combination of N scalars/arrays with the meaning of 2. and 3. 

827 

828 If `axis` is given, the number of varargs must equal the number of axes. 

829 Default: 1. 

830 

831 edge_order : {1, 2}, optional 

832 Gradient is calculated using N-th order accurate differences 

833 at the boundaries. Default: 1. 

834 

835 .. versionadded:: 1.9.1 

836 

837 axis : None or int or tuple of ints, optional 

838 Gradient is calculated only along the given axis or axes 

839 The default (axis = None) is to calculate the gradient for all the axes 

840 of the input array. axis may be negative, in which case it counts from 

841 the last to the first axis. 

842 

843 .. versionadded:: 1.11.0 

844 

845 Returns 

846 ------- 

847 gradient : ndarray or list of ndarray 

848 A set of ndarrays (or a single ndarray if there is only one dimension) 

849 corresponding to the derivatives of f with respect to each dimension. 

850 Each derivative has the same shape as f. 

851 

852 Examples 

853 -------- 

854 >>> f = np.array([1, 2, 4, 7, 11, 16], dtype=float) 

855 >>> np.gradient(f) 

856 array([1. , 1.5, 2.5, 3.5, 4.5, 5. ]) 

857 >>> np.gradient(f, 2) 

858 array([0.5 , 0.75, 1.25, 1.75, 2.25, 2.5 ]) 

859 

860 Spacing can be also specified with an array that represents the coordinates 

861 of the values F along the dimensions. 

862 For instance a uniform spacing: 

863 

864 >>> x = np.arange(f.size) 

865 >>> np.gradient(f, x) 

866 array([1. , 1.5, 2.5, 3.5, 4.5, 5. ]) 

867 

868 Or a non uniform one: 

869 

870 >>> x = np.array([0., 1., 1.5, 3.5, 4., 6.], dtype=float) 

871 >>> np.gradient(f, x) 

872 array([1. , 3. , 3.5, 6.7, 6.9, 2.5]) 

873 

874 For two dimensional arrays, the return will be two arrays ordered by 

875 axis. In this example the first array stands for the gradient in 

876 rows and the second one in columns direction: 

877 

878 >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=float)) 

879 [array([[ 2., 2., -1.], 

880 [ 2., 2., -1.]]), array([[1. , 2.5, 4. ], 

881 [1. , 1. , 1. ]])] 

882 

883 In this example the spacing is also specified: 

884 uniform for axis=0 and non uniform for axis=1 

885 

886 >>> dx = 2. 

887 >>> y = [1., 1.5, 3.5] 

888 >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=float), dx, y) 

889 [array([[ 1. , 1. , -0.5], 

890 [ 1. , 1. , -0.5]]), array([[2. , 2. , 2. ], 

891 [2. , 1.7, 0.5]])] 

892 

893 It is possible to specify how boundaries are treated using `edge_order` 

894 

895 >>> x = np.array([0, 1, 2, 3, 4]) 

896 >>> f = x**2 

897 >>> np.gradient(f, edge_order=1) 

898 array([1., 2., 4., 6., 7.]) 

899 >>> np.gradient(f, edge_order=2) 

900 array([0., 2., 4., 6., 8.]) 

901 

902 The `axis` keyword can be used to specify a subset of axes of which the 

903 gradient is calculated 

904 

905 >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=float), axis=0) 

906 array([[ 2., 2., -1.], 

907 [ 2., 2., -1.]]) 

908 

909 Notes 

910 ----- 

911 Assuming that :math:`f\\in C^{3}` (i.e., :math:`f` has at least 3 continuous 

912 derivatives) and let :math:`h_{*}` be a non-homogeneous stepsize, we 

913 minimize the "consistency error" :math:`\\eta_{i}` between the true gradient 

914 and its estimate from a linear combination of the neighboring grid-points: 

915 

916 .. math:: 

917 

918 \\eta_{i} = f_{i}^{\\left(1\\right)} - 

919 \\left[ \\alpha f\\left(x_{i}\\right) + 

920 \\beta f\\left(x_{i} + h_{d}\\right) + 

921 \\gamma f\\left(x_{i}-h_{s}\\right) 

922 \\right] 

923 

924 By substituting :math:`f(x_{i} + h_{d})` and :math:`f(x_{i} - h_{s})` 

925 with their Taylor series expansion, this translates into solving 

926 the following the linear system: 

927 

928 .. math:: 

929 

930 \\left\\{ 

931 \\begin{array}{r} 

932 \\alpha+\\beta+\\gamma=0 \\\\ 

933 \\beta h_{d}-\\gamma h_{s}=1 \\\\ 

934 \\beta h_{d}^{2}+\\gamma h_{s}^{2}=0 

935 \\end{array} 

936 \\right. 

937 

938 The resulting approximation of :math:`f_{i}^{(1)}` is the following: 

939 

940 .. math:: 

941 

942 \\hat f_{i}^{(1)} = 

943 \\frac{ 

944 h_{s}^{2}f\\left(x_{i} + h_{d}\\right) 

945 + \\left(h_{d}^{2} - h_{s}^{2}\\right)f\\left(x_{i}\\right) 

946 - h_{d}^{2}f\\left(x_{i}-h_{s}\\right)} 

947 { h_{s}h_{d}\\left(h_{d} + h_{s}\\right)} 

948 + \\mathcal{O}\\left(\\frac{h_{d}h_{s}^{2} 

949 + h_{s}h_{d}^{2}}{h_{d} 

950 + h_{s}}\\right) 

951 

952 It is worth noting that if :math:`h_{s}=h_{d}` 

953 (i.e., data are evenly spaced) 

954 we find the standard second order approximation: 

955 

956 .. math:: 

957 

958 \\hat f_{i}^{(1)}= 

959 \\frac{f\\left(x_{i+1}\\right) - f\\left(x_{i-1}\\right)}{2h} 

960 + \\mathcal{O}\\left(h^{2}\\right) 

961 

962 With a similar procedure the forward/backward approximations used for 

963 boundaries can be derived. 

964 

965 References 

966 ---------- 

967 .. [1] Quarteroni A., Sacco R., Saleri F. (2007) Numerical Mathematics 

968 (Texts in Applied Mathematics). New York: Springer. 

969 .. [2] Durran D. R. (1999) Numerical Methods for Wave Equations 

970 in Geophysical Fluid Dynamics. New York: Springer. 

971 .. [3] Fornberg B. (1988) Generation of Finite Difference Formulas on 

972 Arbitrarily Spaced Grids, 

973 Mathematics of Computation 51, no. 184 : 699-706. 

974 `PDF <http://www.ams.org/journals/mcom/1988-51-184/ 

975 S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf>`_. 

976 """ 

977 f = np.asanyarray(f) 

978 N = f.ndim # number of dimensions 

979 

980 if axis is None: 

981 axes = tuple(range(N)) 

982 else: 

983 axes = _nx.normalize_axis_tuple(axis, N) 

984 

985 len_axes = len(axes) 

986 n = len(varargs) 

987 if n == 0: 

988 # no spacing argument - use 1 in all axes 

989 dx = [1.0] * len_axes 

990 elif n == 1 and np.ndim(varargs[0]) == 0: 

991 # single scalar for all axes 

992 dx = varargs * len_axes 

993 elif n == len_axes: 

994 # scalar or 1d array for each axis 

995 dx = list(varargs) 

996 for i, distances in enumerate(dx): 

997 distances = np.asanyarray(distances) 

998 if distances.ndim == 0: 

999 continue 

1000 elif distances.ndim != 1: 

1001 raise ValueError("distances must be either scalars or 1d") 

1002 if len(distances) != f.shape[axes[i]]: 

1003 raise ValueError("when 1d, distances must match " 

1004 "the length of the corresponding dimension") 

1005 if np.issubdtype(distances.dtype, np.integer): 

1006 # Convert numpy integer types to float64 to avoid modular 

1007 # arithmetic in np.diff(distances). 

1008 distances = distances.astype(np.float64) 

1009 diffx = np.diff(distances) 

1010 # if distances are constant reduce to the scalar case 

1011 # since it brings a consistent speedup 

1012 if (diffx == diffx[0]).all(): 

1013 diffx = diffx[0] 

1014 dx[i] = diffx 

1015 else: 

1016 raise TypeError("invalid number of arguments") 

1017 

1018 if edge_order > 2: 

1019 raise ValueError("'edge_order' greater than 2 not supported") 

1020 

1021 # use central differences on interior and one-sided differences on the 

1022 # endpoints. This preserves second order-accuracy over the full domain. 

1023 

1024 outvals = [] 

1025 

1026 # create slice objects --- initially all are [:, :, ..., :] 

1027 slice1 = [slice(None)]*N 

1028 slice2 = [slice(None)]*N 

1029 slice3 = [slice(None)]*N 

1030 slice4 = [slice(None)]*N 

1031 

1032 otype = f.dtype 

1033 if otype.type is np.datetime64: 

1034 # the timedelta dtype with the same unit information 

1035 otype = np.dtype(otype.name.replace('datetime', 'timedelta')) 

1036 # view as timedelta to allow addition 

1037 f = f.view(otype) 

1038 elif otype.type is np.timedelta64: 

1039 pass 

1040 elif np.issubdtype(otype, np.inexact): 

1041 pass 

1042 else: 

1043 # All other types convert to floating point. 

1044 # First check if f is a numpy integer type; if so, convert f to float64 

1045 # to avoid modular arithmetic when computing the changes in f. 

1046 if np.issubdtype(otype, np.integer): 

1047 f = f.astype(np.float64) 

1048 otype = np.float64 

1049 

1050 for axis, ax_dx in zip(axes, dx): 

1051 if f.shape[axis] < edge_order + 1: 

1052 raise ValueError( 

1053 "Shape of array too small to calculate a numerical gradient, " 

1054 "at least (edge_order + 1) elements are required.") 

1055 # result allocation 

1056 out = np.empty_like(f, dtype=otype) 

1057 

1058 # spacing for the current axis 

1059 uniform_spacing = np.ndim(ax_dx) == 0 

1060 

1061 # Numerical differentiation: 2nd order interior 

1062 slice1[axis] = slice(1, -1) 

1063 slice2[axis] = slice(None, -2) 

1064 slice3[axis] = slice(1, -1) 

1065 slice4[axis] = slice(2, None) 

1066 

1067 if uniform_spacing: 

1068 out[tuple(slice1)] = (f[tuple(slice4)] - f[tuple(slice2)]) / (2. * ax_dx) 

1069 else: 

1070 dx1 = ax_dx[0:-1] 

1071 dx2 = ax_dx[1:] 

1072 a = -(dx2)/(dx1 * (dx1 + dx2)) 

1073 b = (dx2 - dx1) / (dx1 * dx2) 

1074 c = dx1 / (dx2 * (dx1 + dx2)) 

1075 # fix the shape for broadcasting 

1076 shape = np.ones(N, dtype=int) 

1077 shape[axis] = -1 

1078 a.shape = b.shape = c.shape = shape 

1079 # 1D equivalent -- out[1:-1] = a * f[:-2] + b * f[1:-1] + c * f[2:] 

1080 out[tuple(slice1)] = a * f[tuple(slice2)] + b * f[tuple(slice3)] + c * f[tuple(slice4)] 

1081 

1082 # Numerical differentiation: 1st order edges 

1083 if edge_order == 1: 

1084 slice1[axis] = 0 

1085 slice2[axis] = 1 

1086 slice3[axis] = 0 

1087 dx_0 = ax_dx if uniform_spacing else ax_dx[0] 

1088 # 1D equivalent -- out[0] = (f[1] - f[0]) / (x[1] - x[0]) 

1089 out[tuple(slice1)] = (f[tuple(slice2)] - f[tuple(slice3)]) / dx_0 

1090 

1091 slice1[axis] = -1 

1092 slice2[axis] = -1 

1093 slice3[axis] = -2 

1094 dx_n = ax_dx if uniform_spacing else ax_dx[-1] 

1095 # 1D equivalent -- out[-1] = (f[-1] - f[-2]) / (x[-1] - x[-2]) 

1096 out[tuple(slice1)] = (f[tuple(slice2)] - f[tuple(slice3)]) / dx_n 

1097 

1098 # Numerical differentiation: 2nd order edges 

1099 else: 

1100 slice1[axis] = 0 

1101 slice2[axis] = 0 

1102 slice3[axis] = 1 

1103 slice4[axis] = 2 

1104 if uniform_spacing: 

1105 a = -1.5 / ax_dx 

1106 b = 2. / ax_dx 

1107 c = -0.5 / ax_dx 

1108 else: 

1109 dx1 = ax_dx[0] 

1110 dx2 = ax_dx[1] 

1111 a = -(2. * dx1 + dx2)/(dx1 * (dx1 + dx2)) 

1112 b = (dx1 + dx2) / (dx1 * dx2) 

1113 c = - dx1 / (dx2 * (dx1 + dx2)) 

1114 # 1D equivalent -- out[0] = a * f[0] + b * f[1] + c * f[2] 

1115 out[tuple(slice1)] = a * f[tuple(slice2)] + b * f[tuple(slice3)] + c * f[tuple(slice4)] 

1116 

1117 slice1[axis] = -1 

1118 slice2[axis] = -3 

1119 slice3[axis] = -2 

1120 slice4[axis] = -1 

1121 if uniform_spacing: 

1122 a = 0.5 / ax_dx 

1123 b = -2. / ax_dx 

1124 c = 1.5 / ax_dx 

1125 else: 

1126 dx1 = ax_dx[-2] 

1127 dx2 = ax_dx[-1] 

1128 a = (dx2) / (dx1 * (dx1 + dx2)) 

1129 b = - (dx2 + dx1) / (dx1 * dx2) 

1130 c = (2. * dx2 + dx1) / (dx2 * (dx1 + dx2)) 

1131 # 1D equivalent -- out[-1] = a * f[-3] + b * f[-2] + c * f[-1] 

1132 out[tuple(slice1)] = a * f[tuple(slice2)] + b * f[tuple(slice3)] + c * f[tuple(slice4)] 

1133 

1134 outvals.append(out) 

1135 

1136 # reset the slice object in this dimension to ":" 

1137 slice1[axis] = slice(None) 

1138 slice2[axis] = slice(None) 

1139 slice3[axis] = slice(None) 

1140 slice4[axis] = slice(None) 

1141 

1142 if len_axes == 1: 

1143 return outvals[0] 

1144 else: 

1145 return outvals 

1146 

1147 

1148def _diff_dispatcher(a, n=None, axis=None, prepend=None, append=None): 

1149 return (a, prepend, append) 

1150 

1151 

1152@array_function_dispatch(_diff_dispatcher) 

def diff(a, n=1, axis=-1, prepend=np._NoValue, append=np._NoValue):
    """
    Calculate the n-th discrete difference along the given axis.

    The first difference is ``out[i] = a[i+1] - a[i]`` along `axis`;
    higher orders are obtained by applying `diff` recursively.

    Parameters
    ----------
    a : array_like
        Input array.
    n : int, optional
        Number of times values are differenced. If zero, the input is
        returned as-is.
    axis : int, optional
        Axis along which the difference is taken; default is the last axis.
    prepend, append : array_like, optional
        Values to concatenate to `a` along `axis` before differencing.
        Scalars are broadcast to length 1 along `axis`; otherwise shape
        must match `a` except along `axis`.

        .. versionadded:: 1.16.0

    Returns
    -------
    diff : ndarray
        The n-th differences, with the `axis` dimension shrunk by `n`.
        The output type matches the type of a difference of two elements
        of `a`: booleans stay boolean (via ``!=``) and ``datetime64``
        becomes ``timedelta64``.

    See Also
    --------
    gradient, ediff1d, cumsum
    """
    if n == 0:
        # Zero-order difference: hand back the input untouched.
        return a
    if n < 0:
        raise ValueError(
            "order must be non-negative but got " + repr(n))

    a = np.asanyarray(a)
    nd = a.ndim
    if nd == 0:
        raise ValueError("diff requires input that is at least one dimensional")
    axis = normalize_axis_index(axis, nd)

    # Optionally pad the array with `prepend`/`append` before differencing.
    pieces = []
    if prepend is not np._NoValue:
        prepend = np.asanyarray(prepend)
        if prepend.ndim == 0:
            shape = list(a.shape)
            shape[axis] = 1
            prepend = np.broadcast_to(prepend, tuple(shape))
        pieces.append(prepend)

    pieces.append(a)

    if append is not np._NoValue:
        append = np.asanyarray(append)
        if append.ndim == 0:
            shape = list(a.shape)
            shape[axis] = 1
            append = np.broadcast_to(append, tuple(shape))
        pieces.append(append)

    if len(pieces) > 1:
        a = np.concatenate(pieces, axis)

    # Views shifted by one element along `axis`.
    upper = [slice(None)] * nd
    lower = [slice(None)] * nd
    upper[axis] = slice(1, None)
    lower[axis] = slice(None, -1)
    upper = tuple(upper)
    lower = tuple(lower)

    # Subtraction is undefined for booleans; use != so the type survives.
    op = np.not_equal if a.dtype == np.bool_ else np.subtract
    for _ in range(n):
        a = op(a[upper], a[lower])

    return a

1283 

1284 

1285def _interp_dispatcher(x, xp, fp, left=None, right=None, period=None): 

1286 return (x, xp, fp) 

1287 

1288 

1289@array_function_dispatch(_interp_dispatcher) 

def interp(x, xp, fp, left=None, right=None, period=None):
    """
    One-dimensional linear interpolation.

    Returns the piecewise-linear interpolant to a function with given
    discrete data points (`xp`, `fp`), evaluated at `x`.

    Parameters
    ----------
    x : array_like
        The x-coordinates at which to evaluate the interpolated values.
    xp : 1-D sequence of floats
        The x-coordinates of the data points; must be increasing unless
        `period` is given, in which case they are normalized with
        ``xp = xp % period`` and sorted internally.
    fp : 1-D sequence of float or complex
        The y-coordinates of the data points, same length as `xp`.
    left : optional float or complex corresponding to fp
        Value returned for ``x < xp[0]``; defaults to ``fp[0]``.
    right : optional float or complex corresponding to fp
        Value returned for ``x > xp[-1]``; defaults to ``fp[-1]``.
    period : None or float, optional
        Period of the x-coordinates, for proper interpolation of angular
        data. `left` and `right` are ignored when `period` is given.

        .. versionadded:: 1.10.0

    Returns
    -------
    y : float or complex (corresponding to fp) or ndarray
        The interpolated values, same shape as `x`.

    Raises
    ------
    ValueError
        If `xp` and `fp` have different length, are not 1-D sequences,
        or ``period == 0``.

    Notes
    -----
    `xp` is expected to be increasing; this is not enforced, and a
    non-increasing sequence (including NaNs, which are unsortable)
    yields meaningless results.
    """
    fp = np.asarray(fp)

    # Complex sample values use the complex compiled kernel.
    if np.iscomplexobj(fp):
        interp_func = compiled_interp_complex
        input_dtype = np.complex128
    else:
        interp_func = compiled_interp
        input_dtype = np.float64

    if period is not None:
        if period == 0:
            raise ValueError("period must be a non-zero value")
        period = abs(period)
        # Periodic interpolation ignores the out-of-range fill values.
        left = None
        right = None

        x = np.asarray(x, dtype=np.float64)
        xp = np.asarray(xp, dtype=np.float64)
        fp = np.asarray(fp, dtype=input_dtype)

        if xp.ndim != 1 or fp.ndim != 1:
            raise ValueError("Data points must be 1-D sequences")
        if xp.shape[0] != fp.shape[0]:
            raise ValueError("fp and xp are not of the same length")

        # Map everything into one period and sort the sample points.
        x = x % period
        xp = xp % period
        order = np.argsort(xp)
        xp = xp[order]
        fp = fp[order]
        # Wrap one sample around each end so the edges interpolate too.
        xp = np.concatenate((xp[-1:] - period, xp, xp[0:1] + period))
        fp = np.concatenate((fp[-1:], fp, fp[0:1]))

    return interp_func(x, xp, fp, left, right)

1424 

1425 

1426def _angle_dispatcher(z, deg=None): 

1427 return (z,) 

1428 

1429 

1430@array_function_dispatch(_angle_dispatcher) 

def angle(z, deg=False):
    """
    Return the angle of the complex argument.

    Parameters
    ----------
    z : array_like
        A complex number or sequence of complex numbers.
    deg : bool, optional
        Return the angle in degrees if True, radians if False (default).

    Returns
    -------
    angle : ndarray or scalar
        Counterclockwise angle from the positive real axis on the complex
        plane, in ``(-pi, pi]``, with dtype numpy.float64.

        ..versionchanged:: 1.16.0
            This function works on subclasses of ndarray like `ma.array`.

    See Also
    --------
    arctan2
    absolute

    Notes
    -----
    Although the angle of the complex number 0 is undefined,
    ``numpy.angle(0)`` returns the value 0.
    """
    z = np.asanyarray(z)
    if issubclass(z.dtype.type, np.complexfloating):
        real_part = z.real
        imag_part = z.imag
    else:
        # Real input: the angle is 0 or pi depending on the sign.
        real_part = z
        imag_part = 0

    result = np.arctan2(imag_part, real_part)
    if deg:
        result *= 180 / np.pi
    return result

1481 

1482 

1483def _unwrap_dispatcher(p, discont=None, axis=None): 

1484 return (p,) 

1485 

1486 

1487@array_function_dispatch(_unwrap_dispatcher) 

def unwrap(p, discont=pi, axis=-1):
    """
    Unwrap by changing deltas between values to 2*pi complement.

    Unwrap radian phase `p` by changing absolute jumps greater than
    `discont` along `axis` to their 2*pi complement.

    Parameters
    ----------
    p : array_like
        Input array.
    discont : float, optional
        Maximum discontinuity between values, default is ``pi``.
    axis : int, optional
        Axis along which unwrap will operate, default is the last axis.

    Returns
    -------
    out : ndarray
        Unwrapped output array (float64).

    See Also
    --------
    rad2deg, deg2rad

    Notes
    -----
    If the discontinuity in `p` is smaller than ``pi`` but larger than
    `discont`, no unwrapping is done, because taking the 2*pi complement
    would only make the discontinuity larger.
    """
    p = np.asarray(p)
    nd = p.ndim
    dd = np.diff(p, axis=axis)

    # Everything except the first element along `axis`.
    tail = [slice(None, None)] * nd
    tail[axis] = slice(1, None)
    tail = tuple(tail)

    # Wrap each delta into (-pi, pi]; keep +pi (not -pi) for positive jumps.
    ddmod = np.mod(dd + pi, 2 * pi) - pi
    np.copyto(ddmod, pi, where=(ddmod == -pi) & (dd > 0))

    # Correction = wrapped delta minus raw delta; deltas below `discont`
    # need no correction at all.
    ph_correct = ddmod - dd
    np.copyto(ph_correct, 0, where=abs(dd) < discont)

    up = np.array(p, copy=True, dtype='d')
    up[tail] = p[tail] + ph_correct.cumsum(axis)
    return up

1542 

1543 

1544def _sort_complex(a): 

1545 return (a,) 

1546 

1547 

1548@array_function_dispatch(_sort_complex) 

def sort_complex(a):
    """
    Sort a complex array using the real part first, then the imaginary part.

    Parameters
    ----------
    a : array_like
        Input array.

    Returns
    -------
    out : complex ndarray
        A sorted copy of `a`, always of a complex dtype.
    """
    b = np.array(a, copy=True)
    b.sort()
    if issubclass(b.dtype.type, np.complexfloating):
        # Already complex: the sorted copy is the result.
        return b
    # Promote real results to the matching complex precision:
    # small ints -> csingle, long double -> clongdouble, else cdouble.
    char = b.dtype.char
    if char in 'bhBH':
        return b.astype('F')
    if char == 'g':
        return b.astype('G')
    return b.astype('D')

1583 

1584 

1585def _trim_zeros(filt, trim=None): 

1586 return (filt,) 

1587 

1588 

1589@array_function_dispatch(_trim_zeros) 

def trim_zeros(filt, trim='fb'):
    """
    Trim the leading and/or trailing zeros from a 1-D array or sequence.

    Parameters
    ----------
    filt : 1-D array or sequence
        Input array.
    trim : str, optional
        A string with 'f' representing trim from front and 'b' to trim
        from back. Default is 'fb': trim from both ends.

    Returns
    -------
    trimmed : 1-D array or sequence
        The result of trimming the input. The input data type is
        preserved (list in, list out; array in, array out).
    """
    mode = trim.upper()

    # Count leading zeros.
    start = 0
    if 'F' in mode:
        for value in filt:
            if value != 0.:
                break
            start += 1

    # Count trailing zeros by walking the sequence backwards.
    stop = len(filt)
    if 'B' in mode:
        for value in filt[::-1]:
            if value != 0.:
                break
            stop -= 1

    return filt[start:stop]

1639 

1640 

1641def _extract_dispatcher(condition, arr): 

1642 return (condition, arr) 

1643 

1644 

1645@array_function_dispatch(_extract_dispatcher) 

def extract(condition, arr):
    """
    Return the elements of an array that satisfy some condition.

    This is equivalent to ``np.compress(ravel(condition), ravel(arr))``.
    If `condition` is boolean, ``np.extract`` is equivalent to
    ``arr[condition]``. Note that `place` does the exact opposite.

    Parameters
    ----------
    condition : array_like
        An array whose nonzero or True entries indicate the elements of
        `arr` to extract.
    arr : array_like
        Input array of the same size as `condition`.

    Returns
    -------
    extract : ndarray
        Rank 1 array of values from `arr` where `condition` is True.

    See Also
    --------
    take, put, copyto, compress, place
    """
    flat_condition = np.ravel(condition)
    flat_arr = np.ravel(arr)
    return np.take(flat_arr, np.nonzero(flat_condition)[0])

1695 

1696 

1697def _place_dispatcher(arr, mask, vals): 

1698 return (arr, mask, vals) 

1699 

1700 

1701@array_function_dispatch(_place_dispatcher) 

def place(arr, mask, vals):
    """
    Change elements of an array based on conditional and input values.

    Similar to ``np.copyto(arr, vals, where=mask)``; the difference is
    that `place` uses the first N elements of `vals`, where N is the
    number of True values in `mask`, while `copyto` uses the elements
    where `mask` is True. `vals` is repeated if it holds fewer than N
    elements. Note that `extract` does the exact opposite of `place`.

    Parameters
    ----------
    arr : ndarray
        Array to put data into (modified in place).
    mask : array_like
        Boolean mask array, same size as `arr`.
    vals : 1-D sequence
        Values to put into `arr`; must be non-empty if any element of
        `arr` is to be masked.

    See Also
    --------
    copyto, put, take, extract
    """
    if isinstance(arr, np.ndarray):
        return _insert(arr, mask, vals)
    raise TypeError("argument 1 must be numpy.ndarray, "
                    "not {name}".format(name=type(arr).__name__))

1743 

1744 

def disp(mesg, device=None, linefeed=True):
    """
    Display a message on a device.

    Parameters
    ----------
    mesg : str
        Message to display.
    device : object
        Device to write the message to; must have ``write()`` and
        ``flush()`` methods. If None, defaults to ``sys.stdout``,
        which behaves much like ``print``.
    linefeed : bool, optional
        Whether to append a line feed. Defaults to True.

    Raises
    ------
    AttributeError
        If `device` does not have a ``write()`` or ``flush()`` method.
    """
    target = sys.stdout if device is None else device
    if linefeed:
        target.write('%s\n' % mesg)
    else:
        target.write('%s' % mesg)
    target.flush()

1785 

1786 

# See https://docs.scipy.org/doc/numpy/reference/c-api.generalized-ufuncs.html
# Regex fragments used to validate and parse gufunc signature strings
# such as "(m,n),(n,p)->(m,p)".
_DIMENSION_NAME = r'\w+'  # one core-dimension name, e.g. "m"
_CORE_DIMENSION_LIST = '(?:{0:}(?:,{0:})*)?'.format(_DIMENSION_NAME)  # "m,n" (may be empty)
_ARGUMENT = r'\({}\)'.format(_CORE_DIMENSION_LIST)  # one argument, e.g. "(m,n)"
_ARGUMENT_LIST = '{0:}(?:,{0:})*'.format(_ARGUMENT)  # "(m,n),(n,p)"
_SIGNATURE = '^{0:}->{0:}$'.format(_ARGUMENT_LIST)  # full "inputs->outputs" form

1793 

1794 

def _parse_gufunc_signature(signature):
    """
    Parse a string signature for a generalized universal function.

    Arguments
    ---------
    signature : string
        Generalized universal function signature, e.g.,
        ``(m,n),(n,p)->(m,p)`` for ``np.matmul``.

    Returns
    -------
    Tuple of input and output core dimensions parsed from the signature,
    each of the form List[Tuple[str, ...]].
    """
    if not re.match(_SIGNATURE, signature):
        raise ValueError(
            'not a valid gufunc signature: {}'.format(signature))
    # The validated signature has exactly one '->' separating the input
    # argument list from the output argument list.
    in_spec, out_spec = signature.split('->')
    parsed = []
    for arg_list in (in_spec, out_spec):
        parsed.append([tuple(re.findall(_DIMENSION_NAME, arg))
                       for arg in re.findall(_ARGUMENT, arg_list)])
    return tuple(parsed)

1816 

1817 

1818def _update_dim_sizes(dim_sizes, arg, core_dims): 

1819 """ 

1820 Incrementally check and update core dimension sizes for a single argument. 

1821 

1822 Arguments 

1823 --------- 

1824 dim_sizes : Dict[str, int] 

1825 Sizes of existing core dimensions. Will be updated in-place. 

1826 arg : ndarray 

1827 Argument to examine. 

1828 core_dims : Tuple[str, ...] 

1829 Core dimensions for this argument. 

1830 """ 

1831 if not core_dims: 

1832 return 

1833 

1834 num_core_dims = len(core_dims) 

1835 if arg.ndim < num_core_dims: 

1836 raise ValueError( 

1837 '%d-dimensional argument does not have enough ' 

1838 'dimensions for all core dimensions %r' 

1839 % (arg.ndim, core_dims)) 

1840 

1841 core_shape = arg.shape[-num_core_dims:] 

1842 for dim, size in zip(core_dims, core_shape): 

1843 if dim in dim_sizes: 

1844 if size != dim_sizes[dim]: 

1845 raise ValueError( 

1846 'inconsistent size for core dimension %r: %r vs %r' 

1847 % (dim, size, dim_sizes[dim])) 

1848 else: 

1849 dim_sizes[dim] = size 

1850 

1851 

def _parse_input_dimensions(args, input_core_dims):
    """
    Parse broadcast and core dimensions for vectorize with a signature.

    Arguments
    ---------
    args : Tuple[ndarray, ...]
        Tuple of input arguments to examine.
    input_core_dims : List[Tuple[str, ...]]
        List of core dimensions corresponding to each input.

    Returns
    -------
    broadcast_shape : Tuple[int, ...]
        Common shape to broadcast all non-core dimensions to.
    dim_sizes : Dict[str, int]
        Common sizes for named core dimensions.
    """
    dim_sizes = {}
    dummy_arrays = []
    for arg, core_dims in zip(args, input_core_dims):
        _update_dim_sizes(dim_sizes, arg, core_dims)
        # Only the leading (non-core) axes take part in broadcasting;
        # a zero-strided dummy avoids copying any data.
        n_broadcast = arg.ndim - len(core_dims)
        dummy_arrays.append(
            np.lib.stride_tricks.as_strided(0, arg.shape[:n_broadcast]))
    broadcast_shape = np.lib.stride_tricks._broadcast_shape(*dummy_arrays)
    return broadcast_shape, dim_sizes

1879 

1880 

1881def _calculate_shapes(broadcast_shape, dim_sizes, list_of_core_dims): 

1882 """Helper for calculating broadcast shapes with core dimensions.""" 

1883 return [broadcast_shape + tuple(dim_sizes[dim] for dim in core_dims) 

1884 for core_dims in list_of_core_dims] 

1885 

1886 

def _create_arrays(broadcast_shape, dim_sizes, list_of_core_dims, dtypes):
    """Helper for creating output arrays in vectorize."""
    shapes = _calculate_shapes(broadcast_shape, dim_sizes, list_of_core_dims)
    return tuple(np.empty(shape, dtype=dtype)
                 for shape, dtype in zip(shapes, dtypes))

1893 

1894 

1895@set_module('numpy') 

1896class vectorize: 

1897 """ 

1898 vectorize(pyfunc, otypes=None, doc=None, excluded=None, cache=False, 

1899 signature=None) 

1900 

1901 Generalized function class. 

1902 

1903 Define a vectorized function which takes a nested sequence of objects or 

1904 numpy arrays as inputs and returns a single numpy array or a tuple of numpy 

1905 arrays. The vectorized function evaluates `pyfunc` over successive tuples 

1906 of the input arrays like the python map function, except it uses the 

1907 broadcasting rules of numpy. 

1908 

1909 The data type of the output of `vectorized` is determined by calling 

1910 the function with the first element of the input. This can be avoided 

1911 by specifying the `otypes` argument. 

1912 

1913 Parameters 

1914 ---------- 

1915 pyfunc : callable 

1916 A python function or method. 

1917 otypes : str or list of dtypes, optional 

1918 The output data type. It must be specified as either a string of 

1919 typecode characters or a list of data type specifiers. There should 

1920 be one data type specifier for each output. 

1921 doc : str, optional 

1922 The docstring for the function. If None, the docstring will be the 

1923 ``pyfunc.__doc__``. 

1924 excluded : set, optional 

1925 Set of strings or integers representing the positional or keyword 

1926 arguments for which the function will not be vectorized. These will be 

1927 passed directly to `pyfunc` unmodified. 

1928 

1929 .. versionadded:: 1.7.0 

1930 

1931 cache : bool, optional 

1932 If `True`, then cache the first function call that determines the number 

1933 of outputs if `otypes` is not provided. 

1934 

1935 .. versionadded:: 1.7.0 

1936 

1937 signature : string, optional 

1938 Generalized universal function signature, e.g., ``(m,n),(n)->(m)`` for 

1939 vectorized matrix-vector multiplication. If provided, ``pyfunc`` will 

1940 be called with (and expected to return) arrays with shapes given by the 

1941 size of corresponding core dimensions. By default, ``pyfunc`` is 

1942 assumed to take scalars as input and output. 

1943 

1944 .. versionadded:: 1.12.0 

1945 

1946 Returns 

1947 ------- 

1948 vectorized : callable 

1949 Vectorized function. 

1950 

1951 See Also 

1952 -------- 

1953 frompyfunc : Takes an arbitrary Python function and returns a ufunc 

1954 

1955 Notes 

1956 ----- 

1957 The `vectorize` function is provided primarily for convenience, not for 

1958 performance. The implementation is essentially a for loop. 

1959 

1960 If `otypes` is not specified, then a call to the function with the 

1961 first argument will be used to determine the number of outputs. The 

1962 results of this call will be cached if `cache` is `True` to prevent 

1963 calling the function twice. However, to implement the cache, the 

1964 original function must be wrapped which will slow down subsequent 

1965 calls, so only do this if your function is expensive. 

1966 

1967 The new keyword argument interface and `excluded` argument support 

1968 further degrades performance. 

1969 

1970 References 

1971 ---------- 

1972 .. [1] NumPy Reference, section `Generalized Universal Function API 

1973 <https://docs.scipy.org/doc/numpy/reference/c-api.generalized-ufuncs.html>`_. 

1974 

1975 Examples 

1976 -------- 

1977 >>> def myfunc(a, b): 

1978 ... "Return a-b if a>b, otherwise return a+b" 

1979 ... if a > b: 

1980 ... return a - b 

1981 ... else: 

1982 ... return a + b 

1983 

1984 >>> vfunc = np.vectorize(myfunc) 

1985 >>> vfunc([1, 2, 3, 4], 2) 

1986 array([3, 4, 1, 2]) 

1987 

1988 The docstring is taken from the input function to `vectorize` unless it 

1989 is specified: 

1990 

1991 >>> vfunc.__doc__ 

1992 'Return a-b if a>b, otherwise return a+b' 

1993 >>> vfunc = np.vectorize(myfunc, doc='Vectorized `myfunc`') 

1994 >>> vfunc.__doc__ 

1995 'Vectorized `myfunc`' 

1996 

1997 The output type is determined by evaluating the first element of the input, 

1998 unless it is specified: 

1999 

2000 >>> out = vfunc([1, 2, 3, 4], 2) 

2001 >>> type(out[0]) 

2002 <class 'numpy.int64'> 

2003 >>> vfunc = np.vectorize(myfunc, otypes=[float]) 

2004 >>> out = vfunc([1, 2, 3, 4], 2) 

2005 >>> type(out[0]) 

2006 <class 'numpy.float64'> 

2007 

2008 The `excluded` argument can be used to prevent vectorizing over certain 

2009 arguments. This can be useful for array-like arguments of a fixed length 

2010 such as the coefficients for a polynomial as in `polyval`: 

2011 

2012 >>> def mypolyval(p, x): 

2013 ... _p = list(p) 

2014 ... res = _p.pop(0) 

2015 ... while _p: 

2016 ... res = res*x + _p.pop(0) 

2017 ... return res 

2018 >>> vpolyval = np.vectorize(mypolyval, excluded=['p']) 

2019 >>> vpolyval(p=[1, 2, 3], x=[0, 1]) 

2020 array([3, 6]) 

2021 

2022 Positional arguments may also be excluded by specifying their position: 

2023 

2024 >>> vpolyval.excluded.add(0) 

2025 >>> vpolyval([1, 2, 3], x=[0, 1]) 

2026 array([3, 6]) 

2027 

2028 The `signature` argument allows for vectorizing functions that act on 

2029 non-scalar arrays of fixed length. For example, you can use it for a 

2030 vectorized calculation of Pearson correlation coefficient and its p-value: 

2031 

2032 >>> import scipy.stats 

2033 >>> pearsonr = np.vectorize(scipy.stats.pearsonr, 

2034 ... signature='(n),(n)->(),()') 

2035 >>> pearsonr([[0, 1, 2, 3]], [[1, 2, 3, 4], [4, 3, 2, 1]]) 

2036 (array([ 1., -1.]), array([ 0., 0.])) 

2037 

2038 Or for a vectorized convolution: 

2039 

2040 >>> convolve = np.vectorize(np.convolve, signature='(n),(m)->(k)') 

2041 >>> convolve(np.eye(4), [1, 2, 1]) 

2042 array([[1., 2., 1., 0., 0., 0.], 

2043 [0., 1., 2., 1., 0., 0.], 

2044 [0., 0., 1., 2., 1., 0.], 

2045 [0., 0., 0., 1., 2., 1.]]) 

2046 

2047 """ 

2048 def __init__(self, pyfunc, otypes=None, doc=None, excluded=None, 

2049 cache=False, signature=None): 

2050 self.pyfunc = pyfunc 

2051 self.cache = cache 

2052 self.signature = signature 

2053 self._ufunc = {} # Caching to improve default performance 

2054 

2055 if doc is None: 

2056 self.__doc__ = pyfunc.__doc__ 

2057 else: 

2058 self.__doc__ = doc 

2059 

2060 if isinstance(otypes, str): 

2061 for char in otypes: 

2062 if char not in typecodes['All']: 

2063 raise ValueError("Invalid otype specified: %s" % (char,)) 

2064 elif iterable(otypes): 

2065 otypes = ''.join([_nx.dtype(x).char for x in otypes]) 

2066 elif otypes is not None: 

2067 raise ValueError("Invalid otype specification") 

2068 self.otypes = otypes 

2069 

2070 # Excluded variable support 

2071 if excluded is None: 

2072 excluded = set() 

2073 self.excluded = set(excluded) 

2074 

2075 if signature is not None: 

2076 self._in_and_out_core_dims = _parse_gufunc_signature(signature) 

2077 else: 

2078 self._in_and_out_core_dims = None 

2079 

2080 def __call__(self, *args, **kwargs): 

2081 """ 

2082 Return arrays with the results of `pyfunc` broadcast (vectorized) over 

2083 `args` and `kwargs` not in `excluded`. 

2084 """ 

2085 excluded = self.excluded 

2086 if not kwargs and not excluded: 

2087 func = self.pyfunc 

2088 vargs = args 

2089 else: 

2090 # The wrapper accepts only positional arguments: we use `names` and 

2091 # `inds` to mutate `the_args` and `kwargs` to pass to the original 

2092 # function. 

2093 nargs = len(args) 

2094 

2095 names = [_n for _n in kwargs if _n not in excluded] 

2096 inds = [_i for _i in range(nargs) if _i not in excluded] 

2097 the_args = list(args) 

2098 

2099 def func(*vargs): 

2100 for _n, _i in enumerate(inds): 

2101 the_args[_i] = vargs[_n] 

2102 kwargs.update(zip(names, vargs[len(inds):])) 

2103 return self.pyfunc(*the_args, **kwargs) 

2104 

2105 vargs = [args[_i] for _i in inds] 

2106 vargs.extend([kwargs[_n] for _n in names]) 

2107 

2108 return self._vectorize_call(func=func, args=vargs) 

2109 

    def _get_ufunc_and_otypes(self, func, args):
        """Return (ufunc, otypes).

        Build (or fetch from the per-arity cache ``self._ufunc``) a ufunc
        wrapping `func`, together with the string of output dtype
        characters.  When ``self.otypes`` was not supplied, the output
        count and dtypes are discovered by calling `func` once on the
        first element of every argument.
        """
        # frompyfunc will fail if args is empty
        if not args:
            raise ValueError('args can not be empty')

        if self.otypes is not None:
            otypes = self.otypes

            # self._ufunc is a dictionary whose keys are the number of
            # arguments (i.e. len(args)) and whose values are ufuncs created
            # by frompyfunc. len(args) can be different for different calls if
            # self.pyfunc has parameters with default values. We only use the
            # cache when func is self.pyfunc, which occurs when the call uses
            # only positional arguments and no arguments are excluded.

            nin = len(args)
            nout = len(self.otypes)
            if func is not self.pyfunc or nin not in self._ufunc:
                ufunc = frompyfunc(func, nin, nout)
            else:
                ufunc = None  # We'll get it from self._ufunc
            if func is self.pyfunc:
                # setdefault both stores a freshly built ufunc and retrieves
                # a previously cached one for this arity.
                ufunc = self._ufunc.setdefault(nin, ufunc)
        else:
            # Get number of outputs and output types by calling the function on
            # the first entries of args. We also cache the result to prevent
            # the subsequent call when the ufunc is evaluated.
            # Assumes that ufunc first evaluates the 0th elements in the input
            # arrays (the input values are not checked to ensure this)
            args = [asarray(arg) for arg in args]
            if builtins.any(arg.size == 0 for arg in args):
                raise ValueError('cannot call `vectorize` on size 0 inputs '
                                 'unless `otypes` is set')

            inputs = [arg.flat[0] for arg in args]
            outputs = func(*inputs)

            # Performance note: profiling indicates that -- for simple
            # functions at least -- this wrapping can almost double the
            # execution time.
            # Hence we make it optional.
            if self.cache:
                # One-shot cache: the first ufunc evaluation pops the result
                # already computed above instead of calling func again.
                _cache = [outputs]

                def _func(*vargs):
                    if _cache:
                        return _cache.pop()
                    else:
                        return func(*vargs)
            else:
                _func = func

            if isinstance(outputs, tuple):
                nout = len(outputs)
            else:
                nout = 1
                outputs = (outputs,)

            otypes = ''.join([asarray(outputs[_k]).dtype.char
                              for _k in range(nout)])

            # Performance note: profiling indicates that creating the ufunc is
            # not a significant cost compared with wrapping so it seems not
            # worth trying to cache this.
            ufunc = frompyfunc(_func, len(args), nout)

        return ufunc, otypes

2178 

2179 def _vectorize_call(self, func, args): 

2180 """Vectorized call to `func` over positional `args`.""" 

2181 if self.signature is not None: 

2182 res = self._vectorize_call_with_signature(func, args) 

2183 elif not args: 

2184 res = func() 

2185 else: 

2186 ufunc, otypes = self._get_ufunc_and_otypes(func=func, args=args) 

2187 

2188 # Convert args to object arrays first 

2189 inputs = [array(a, copy=False, subok=True, dtype=object) 

2190 for a in args] 

2191 

2192 outputs = ufunc(*inputs) 

2193 

2194 if ufunc.nout == 1: 

2195 res = array(outputs, copy=False, subok=True, dtype=otypes[0]) 

2196 else: 

2197 res = tuple([array(x, copy=False, subok=True, dtype=t) 

2198 for x, t in zip(outputs, otypes)]) 

2199 return res 

2200 

    def _vectorize_call_with_signature(self, func, args):
        """Vectorized call over positional arguments with a signature.

        Broadcasts the loop (non-core) dimensions of `args`, calls `func`
        once per loop index on the core-dimension slices, and scatters
        each result into output arrays allocated on the first call.
        """
        input_core_dims, output_core_dims = self._in_and_out_core_dims

        if len(args) != len(input_core_dims):
            raise TypeError('wrong number of positional arguments: '
                            'expected %r, got %r'
                            % (len(input_core_dims), len(args)))
        args = tuple(asanyarray(arg) for arg in args)

        # Split each argument's shape into shared loop dimensions and
        # per-argument core dimensions, then broadcast the loop parts.
        broadcast_shape, dim_sizes = _parse_input_dimensions(
            args, input_core_dims)
        input_shapes = _calculate_shapes(broadcast_shape, dim_sizes,
                                         input_core_dims)
        args = [np.broadcast_to(arg, shape, subok=True)
                for arg, shape in zip(args, input_shapes)]

        # Outputs are allocated lazily: only the first result reveals the
        # output core-dimension sizes (and dtypes, when otypes is unset).
        outputs = None
        otypes = self.otypes
        nout = len(output_core_dims)

        for index in np.ndindex(*broadcast_shape):
            results = func(*(arg[index] for arg in args))

            n_results = len(results) if isinstance(results, tuple) else 1

            if nout != n_results:
                raise ValueError(
                    'wrong number of outputs from pyfunc: expected %r, got %r'
                    % (nout, n_results))

            if nout == 1:
                # Normalize to a tuple so the scatter loop below is uniform.
                results = (results,)

            if outputs is None:
                # First iteration: record output core-dim sizes, infer
                # dtypes if necessary, then allocate the result arrays.
                for result, core_dims in zip(results, output_core_dims):
                    _update_dim_sizes(dim_sizes, result, core_dims)

                if otypes is None:
                    otypes = [asarray(result).dtype for result in results]

                outputs = _create_arrays(broadcast_shape, dim_sizes,
                                         output_core_dims, otypes)

            for output, result in zip(outputs, results):
                output[index] = result

        if outputs is None:
            # did not call the function even once
            if otypes is None:
                raise ValueError('cannot call `vectorize` on size 0 inputs '
                                 'unless `otypes` is set')
            if builtins.any(dim not in dim_sizes
                            for dims in output_core_dims
                            for dim in dims):
                raise ValueError('cannot call `vectorize` with a signature '
                                 'including new output dimensions on size 0 '
                                 'inputs')
            # Zero-sized loop: return empty arrays of the right shape/dtype.
            outputs = _create_arrays(broadcast_shape, dim_sizes,
                                     output_core_dims, otypes)

        return outputs[0] if nout == 1 else outputs

2263 

2264 

2265def _cov_dispatcher(m, y=None, rowvar=None, bias=None, ddof=None, 

2266 fweights=None, aweights=None): 

2267 return (m, y, fweights, aweights) 

2268 

2269 

@array_function_dispatch(_cov_dispatcher)
def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None,
        aweights=None):
    """
    Estimate a covariance matrix, given data and weights.

    Covariance indicates the level to which two variables vary together.
    If we examine N-dimensional samples, :math:`X = [x_1, x_2, ... x_N]^T`,
    then the covariance matrix element :math:`C_{ij}` is the covariance of
    :math:`x_i` and :math:`x_j`. The element :math:`C_{ii}` is the variance
    of :math:`x_i`.

    See the notes for an outline of the algorithm.

    Parameters
    ----------
    m : array_like
        A 1-D or 2-D array containing multiple variables and observations.
        Each row of `m` represents a variable, and each column a single
        observation of all those variables. Also see `rowvar` below.
    y : array_like, optional
        An additional set of variables and observations. `y` has the same form
        as that of `m`.
    rowvar : bool, optional
        If `rowvar` is True (default), then each row represents a
        variable, with observations in the columns. Otherwise, the relationship
        is transposed: each column represents a variable, while the rows
        contain observations.
    bias : bool, optional
        Default normalization (False) is by ``(N - 1)``, where ``N`` is the
        number of observations given (unbiased estimate). If `bias` is True,
        then normalization is by ``N``. These values can be overridden by using
        the keyword ``ddof`` in numpy versions >= 1.5.
    ddof : int, optional
        If not ``None`` the default value implied by `bias` is overridden.
        Note that ``ddof=1`` will return the unbiased estimate, even if both
        `fweights` and `aweights` are specified, and ``ddof=0`` will return
        the simple average. See the notes for the details. The default value
        is ``None``.

        .. versionadded:: 1.5
    fweights : array_like, int, optional
        1-D array of integer frequency weights; the number of times each
        observation vector should be repeated.

        .. versionadded:: 1.10
    aweights : array_like, optional
        1-D array of observation vector weights. These relative weights are
        typically large for observations considered "important" and smaller for
        observations considered less "important". If ``ddof=0`` the array of
        weights can be used to assign probabilities to observation vectors.

        .. versionadded:: 1.10

    Returns
    -------
    out : ndarray
        The covariance matrix of the variables.

    See Also
    --------
    corrcoef : Normalized covariance matrix

    Notes
    -----
    Assume that the observations are in the columns of the observation
    array `m` and let ``f = fweights`` and ``a = aweights`` for brevity. The
    steps to compute the weighted covariance are as follows::

        >>> m = np.arange(10, dtype=np.float64)
        >>> f = np.arange(10) * 2
        >>> a = np.arange(10) ** 2.
        >>> ddof = 1
        >>> w = f * a
        >>> v1 = np.sum(w)
        >>> v2 = np.sum(w * a)
        >>> m -= np.sum(m * w, axis=None, keepdims=True) / v1
        >>> cov = np.dot(m * w, m.T) * v1 / (v1**2 - ddof * v2)

    Note that when ``a == 1``, the normalization factor
    ``v1 / (v1**2 - ddof * v2)`` goes over to ``1 / (np.sum(f) - ddof)``
    as it should.

    Examples
    --------
    Consider two variables, :math:`x_0` and :math:`x_1`, which
    correlate perfectly, but in opposite directions:

    >>> x = np.array([[0, 2], [1, 1], [2, 0]]).T
    >>> x
    array([[0, 1, 2],
           [2, 1, 0]])

    Note how :math:`x_0` increases while :math:`x_1` decreases. The covariance
    matrix shows this clearly:

    >>> np.cov(x)
    array([[ 1., -1.],
           [-1.,  1.]])

    Note that element :math:`C_{0,1}`, which shows the correlation between
    :math:`x_0` and :math:`x_1`, is negative.

    Further, note how `x` and `y` are combined:

    >>> x = [-2.1, -1, 4.3]
    >>> y = [3, 1.1, 0.12]
    >>> X = np.stack((x, y), axis=0)
    >>> np.cov(X)
    array([[11.71      , -4.286     ], # may vary
           [-4.286     ,  2.144133]])
    >>> np.cov(x, y)
    array([[11.71      , -4.286     ], # may vary
           [-4.286     ,  2.144133]])
    >>> np.cov(x)
    array(11.71)

    """
    # Check inputs
    if ddof is not None and ddof != int(ddof):
        raise ValueError(
            "ddof must be integer")

    # Handles complex arrays too
    m = np.asarray(m)
    if m.ndim > 2:
        raise ValueError("m has more than 2 dimensions")

    # Result dtype is at least float64; complex inputs promote further.
    if y is None:
        dtype = np.result_type(m, np.float64)
    else:
        y = np.asarray(y)
        if y.ndim > 2:
            raise ValueError("y has more than 2 dimensions")
        dtype = np.result_type(m, y, np.float64)

    # Work with variables as rows.  `array` copies by default, so the
    # in-place operations below cannot modify the caller's data.
    X = array(m, ndmin=2, dtype=dtype)
    if not rowvar and X.shape[0] != 1:
        X = X.T
    if X.shape[0] == 0:
        # No variables at all: empty (0, 0) covariance matrix.
        return np.array([]).reshape(0, 0)
    if y is not None:
        y = array(y, copy=False, ndmin=2, dtype=dtype)
        if not rowvar and y.shape[0] != 1:
            y = y.T
        # Stack the extra variables from `y` below those from `m`.
        X = np.concatenate((X, y), axis=0)

    # Translate `bias` into a ddof value when ddof was not given explicitly.
    if ddof is None:
        if bias == 0:
            ddof = 1
        else:
            ddof = 0

    # Get the product of frequencies and weights
    w = None
    if fweights is not None:
        fweights = np.asarray(fweights, dtype=float)
        if not np.all(fweights == np.around(fweights)):
            raise TypeError(
                "fweights must be integer")
        if fweights.ndim > 1:
            raise RuntimeError(
                "cannot handle multidimensional fweights")
        if fweights.shape[0] != X.shape[1]:
            raise RuntimeError(
                "incompatible numbers of samples and fweights")
        if any(fweights < 0):
            raise ValueError(
                "fweights cannot be negative")
        w = fweights
    if aweights is not None:
        aweights = np.asarray(aweights, dtype=float)
        if aweights.ndim > 1:
            raise RuntimeError(
                "cannot handle multidimensional aweights")
        if aweights.shape[0] != X.shape[1]:
            raise RuntimeError(
                "incompatible numbers of samples and aweights")
        if any(aweights < 0):
            raise ValueError(
                "aweights cannot be negative")
        if w is None:
            w = aweights
        else:
            w *= aweights

    # Weighted mean per variable; w_sum is broadcast per row, so any
    # single entry gives the scalar total weight.
    avg, w_sum = average(X, axis=1, weights=w, returned=True)
    w_sum = w_sum[0]

    # Determine the normalization
    if w is None:
        fact = X.shape[1] - ddof
    elif ddof == 0:
        fact = w_sum
    elif aweights is None:
        fact = w_sum - ddof
    else:
        fact = w_sum - ddof*sum(w*aweights)/w_sum

    if fact <= 0:
        warnings.warn("Degrees of freedom <= 0 for slice",
                      RuntimeWarning, stacklevel=3)
        fact = 0.0

    # Center the data, then form the (weighted) outer product of
    # deviations; conj() makes this correct for complex inputs.
    X -= avg[:, None]
    if w is None:
        X_T = X.T
    else:
        X_T = (X*w).T
    c = dot(X, X_T.conj())
    # true_divide gives inf/nan (with a warning) when fact == 0.
    c *= np.true_divide(1, fact)
    return c.squeeze()

2482 

2483 

2484def _corrcoef_dispatcher(x, y=None, rowvar=None, bias=None, ddof=None): 

2485 return (x, y) 

2486 

2487 

@array_function_dispatch(_corrcoef_dispatcher)
def corrcoef(x, y=None, rowvar=True, bias=np._NoValue, ddof=np._NoValue):
    """
    Return Pearson product-moment correlation coefficients.

    The correlation coefficient matrix `R` is computed from the covariance
    matrix `C` (see `cov`) as

    .. math:: R_{ij} = \\frac{ C_{ij} } { \\sqrt{ C_{ii} * C_{jj} } }

    so every entry of `R` lies in [-1, 1], inclusive.

    Parameters
    ----------
    x : array_like
        A 1-D or 2-D array containing multiple variables and observations.
        Each row of `x` represents a variable, and each column a single
        observation of all those variables. Also see `rowvar` below.
    y : array_like, optional
        An additional set of variables and observations. `y` has the same
        shape as `x`.
    rowvar : bool, optional
        If `rowvar` is True (default), then each row represents a
        variable, with observations in the columns. Otherwise, the
        relationship is transposed: each column represents a variable,
        while the rows contain observations.
    bias : _NoValue, optional
        Has no effect, do not use.

        .. deprecated:: 1.10.0
    ddof : _NoValue, optional
        Has no effect, do not use.

        .. deprecated:: 1.10.0

    Returns
    -------
    R : ndarray
        The correlation coefficient matrix of the variables.

    See Also
    --------
    cov : Covariance matrix

    Notes
    -----
    Due to floating point rounding the resulting array may not be Hermitian,
    the diagonal elements may not be 1, and the elements may not satisfy the
    inequality abs(a) <= 1. The real and imaginary parts are clipped to the
    interval [-1, 1] in an attempt to improve on that situation but is not
    much help in the complex case.

    This function accepts but discards arguments `bias` and `ddof`, kept
    only for backwards compatibility; they never affected the result.
    """
    if bias is not np._NoValue or ddof is not np._NoValue:
        # 2015-03-15, 1.10
        warnings.warn('bias and ddof have no effect and are deprecated',
                      DeprecationWarning, stacklevel=3)
    c = cov(x, y, rowvar)
    try:
        variances = diag(c)
    except ValueError:
        # Zero-dimensional covariance: normalize the scalar by itself,
        # giving 1, or nan for degenerate values (nan, inf, 0).
        return c / c

    # Divide rows and columns by the standard deviations.
    sigma = sqrt(variances.real)
    c /= sigma[:, None]
    c /= sigma[None, :]

    # Clip real and imaginary parts to [-1, 1].  This does not guarantee
    # abs(c[i, j]) <= 1 for complex arrays, but is the best we can do
    # without excessive work.
    np.clip(c.real, -1, 1, out=c.real)
    if np.iscomplexobj(c):
        np.clip(c.imag, -1, 1, out=c.imag)

    return c

2570 

2571 

@set_module('numpy')
def blackman(M):
    """
    Return the Blackman window.

    The Blackman window is a three-term cosine-sum taper designed to have
    close to the minimal leakage possible; it is nearly optimal, only
    slightly worse than a Kaiser window.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.

    Returns
    -------
    out : ndarray
        The window, with the maximum value normalized to one (the value
        one appears only if the number of samples is odd).

    See Also
    --------
    bartlett, hamming, hanning, kaiser

    Notes
    -----
    The Blackman window is defined as

    .. math:: w(n) = 0.42 - 0.5 \\cos(2\\pi n/M) + 0.08 \\cos(4\\pi n/M)

    It is frequently used in signal processing as a windowing
    (apodization or tapering) function for smoothing values.

    References
    ----------
    Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
    spectra, Dover Publications, New York.

    Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing.
    Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471.

    Examples
    --------
    >>> np.blackman(12)
    array([-1.38777878e-17,  3.26064346e-02,  1.59903635e-01,  # may vary
            4.14397981e-01,  7.36045180e-01,  9.67046769e-01,
            9.67046769e-01,  7.36045180e-01,  4.14397981e-01,
            1.59903635e-01,  3.26064346e-02, -1.38777878e-17])
    """
    # Degenerate sizes: empty window, or a single unit sample.
    if M < 1:
        return array([])
    if M == 1:
        return ones(1, float)
    k = arange(0, M)
    theta = 2.0*pi*k/(M-1)
    return 0.42 - 0.5*cos(theta) + 0.08*cos(2.0*theta)

2670 

2671 

@set_module('numpy')
def bartlett(M):
    """
    Return the Bartlett window.

    The Bartlett window is very similar to a triangular window, except
    that the end points are at zero. It is often used in signal processing
    for tapering a signal, without generating too much ripple in the
    frequency domain.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an
        empty array is returned.

    Returns
    -------
    out : array
        The triangular window, with the maximum value normalized to one
        (the value one appears only if the number of samples is odd), with
        the first and last samples equal to zero.

    See Also
    --------
    blackman, hamming, hanning, kaiser

    Notes
    -----
    The Bartlett window is defined as

    .. math:: w(n) = \\frac{2}{M-1} \\left(
              \\frac{M-1}{2} - \\left|n - \\frac{M-1}{2}\\right|
              \\right)

    Convolution with this window produces linear interpolation, and its
    Fourier transform is the product of two sinc functions. It is commonly
    used as an apodization (tapering) function in signal processing; see
    the excellent discussion in Kanasewich.

    References
    ----------
    .. [1] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
           Biometrika 37, 1-16, 1950.
    .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
           The University of Alberta Press, 1975, pp. 109-110.
    .. [3] A.V. Oppenheim and R.W. Schafer, "Discrete-Time Signal
           Processing", Prentice-Hall, 1999, pp. 468-471.
    .. [4] Wikipedia, "Window function",
           https://en.wikipedia.org/wiki/Window_function
    .. [5] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
           "Numerical Recipes", Cambridge University Press, 1986, page 429.

    Examples
    --------
    >>> np.bartlett(12)
    array([ 0.        ,  0.18181818,  0.36363636,  0.54545455,  # may vary
            0.72727273,  0.90909091,  0.90909091,  0.72727273,
            0.54545455,  0.36363636,  0.18181818,  0.        ])
    """
    # Degenerate sizes: empty window, or a single unit sample.
    if M < 1:
        return array([])
    if M == 1:
        return ones(1, float)
    k = arange(0, M)
    ramp = 2.0*k/(M-1)
    # Rise linearly to one at the midpoint, then mirror back down.
    return where(less_equal(k, (M-1)/2.0), ramp, 2.0 - ramp)

2779 

2780 

@set_module('numpy')
def hanning(M):
    """
    Return the Hanning window.

    The Hanning window is a taper formed by using a weighted cosine.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an
        empty array is returned.

    Returns
    -------
    out : ndarray, shape(M,)
        The window, with the maximum value normalized to one (the value
        one appears only if `M` is odd).

    See Also
    --------
    bartlett, blackman, hamming, kaiser

    Notes
    -----
    The Hanning window is defined as

    .. math:: w(n) = 0.5 - 0.5cos\\left(\\frac{2\\pi{n}}{M-1}\\right)
              \\qquad 0 \\leq n \\leq M-1

    Named for Julius von Hann, an Austrian meteorologist, it is also
    known as the Cosine Bell; some authors prefer "Hann window" to avoid
    confusion with the very similar Hamming window. It is widely used as
    an apodization (tapering) function in signal processing.

    References
    ----------
    .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
           spectra, Dover Publications, New York.
    .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
           The University of Alberta Press, 1975, pp. 106-108.
    .. [3] Wikipedia, "Window function",
           https://en.wikipedia.org/wiki/Window_function
    .. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
           "Numerical Recipes", Cambridge University Press, 1986, page 425.

    Examples
    --------
    >>> np.hanning(12)
    array([0.        , 0.07937323, 0.29229249, 0.57115742, 0.82743037,
           0.97974649, 0.97974649, 0.82743037, 0.57115742, 0.29229249,
           0.07937323, 0.        ])
    """
    # Degenerate sizes: empty window, or a single unit sample.
    if M < 1:
        return array([])
    if M == 1:
        return ones(1, float)
    k = arange(0, M)
    return 0.5 - 0.5*cos(2.0*pi*k/(M-1))

2883 

2884 

@set_module('numpy')
def hamming(M):
    """
    Return the Hamming window.

    The Hamming window is a taper formed by using a weighted cosine.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an
        empty array is returned.

    Returns
    -------
    out : ndarray
        The window, with the maximum value normalized to one (the value
        one appears only if the number of samples is odd).

    See Also
    --------
    bartlett, blackman, hanning, kaiser

    Notes
    -----
    The Hamming window is defined as

    .. math::  w(n) = 0.54 - 0.46cos\\left(\\frac{2\\pi{n}}{M-1}\\right)
               \\qquad 0 \\leq n \\leq M-1

    The Hamming was named for R. W. Hamming, an associate of J. W. Tukey
    and is described in Blackman and Tukey.  It was recommended for
    smoothing the truncated autocovariance function in the time domain.
    It is commonly used in signal processing as one of many windowing
    (apodization, tapering) functions for smoothing values.

    References
    ----------
    .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
           spectra, Dover Publications, New York.
    .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
           University of Alberta Press, 1975, pp. 109-110.
    .. [3] Wikipedia, "Window function",
           https://en.wikipedia.org/wiki/Window_function
    .. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
           "Numerical Recipes", Cambridge University Press, 1986, page 425.

    Examples
    --------
    >>> np.hamming(12)
    array([ 0.08      ,  0.15302337,  0.34890909,  0.60546483,  0.84123594, # may vary
            0.98136677,  0.98136677,  0.84123594,  0.60546483,  0.34890909,
            0.15302337,  0.08      ])

    """
    # Degenerate sizes: no samples at all, or a single unit sample.
    if M < 1:
        return array([])
    if M == 1:
        return ones(1, float)
    samples = arange(0, M)
    return 0.54 - 0.46 * cos(2.0 * pi * samples / (M - 1))

2983 

2984 

2985## Code from cephes for i0 

2986 

# Chebyshev coefficients (from the Cephes library) for exp(-x) * I0(x)
# on the interval [0, 8]; the argument is mapped onto [-2, 2] via
# x/2 - 2 before evaluation by _chbevl.
_i0A = [
    -4.41534164647933937950E-18,
    3.33079451882223809783E-17,
    -2.43127984654795469359E-16,
    1.71539128555513303061E-15,
    -1.16853328779934516808E-14,
    7.67618549860493561688E-14,
    -4.85644678311192946090E-13,
    2.95505266312963983461E-12,
    -1.72682629144155570723E-11,
    9.67580903537323691224E-11,
    -5.18979560163526290666E-10,
    2.65982372468238665035E-9,
    -1.30002500998624804212E-8,
    6.04699502254191894932E-8,
    -2.67079385394061173391E-7,
    1.11738753912010371815E-6,
    -4.41673835845875056359E-6,
    1.64484480707288970893E-5,
    -5.75419501008210370398E-5,
    1.88502885095841655729E-4,
    -5.76375574538582365885E-4,
    1.63947561694133579842E-3,
    -4.32430999505057594430E-3,
    1.05464603945949983183E-2,
    -2.37374148058994688156E-2,
    4.93052842396707084878E-2,
    -9.49010970480476444210E-2,
    1.71620901522208775349E-1,
    -3.04682672343198398683E-1,
    6.76795274409476084995E-1
    ]

3019 

# Chebyshev coefficients (from the Cephes library) for
# exp(-x) * sqrt(x) * I0(x) on the interval (8, inf); the argument is
# mapped onto [-2, 2] via 32/x - 2 before evaluation by _chbevl.
_i0B = [
    -7.23318048787475395456E-18,
    -4.83050448594418207126E-18,
    4.46562142029675999901E-17,
    3.46122286769746109310E-17,
    -2.82762398051658348494E-16,
    -3.42548561967721913462E-16,
    1.77256013305652638360E-15,
    3.81168066935262242075E-15,
    -9.55484669882830764870E-15,
    -4.15056934728722208663E-14,
    1.54008621752140982691E-14,
    3.85277838274214270114E-13,
    7.18012445138366623367E-13,
    -1.79417853150680611778E-12,
    -1.32158118404477131188E-11,
    -3.14991652796324136454E-11,
    1.18891471078464383424E-11,
    4.94060238822496958910E-10,
    3.39623202570838634515E-9,
    2.26666899049817806459E-8,
    2.04891858946906374183E-7,
    2.89137052083475648297E-6,
    6.88975834691682398426E-5,
    3.36911647825569408990E-3,
    8.04490411014108831608E-1
    ]

3047 

3048 

3049def _chbevl(x, vals): 

3050 b0 = vals[0] 

3051 b1 = 0.0 

3052 

3053 for i in range(1, len(vals)): 

3054 b2 = b1 

3055 b1 = b0 

3056 b0 = x*b1 - b2 + vals[i] 

3057 

3058 return 0.5*(b0 - b2) 

3059 

3060 

def _i0_1(x):
    # Small-argument branch of i0 (0 <= x <= 8): map x onto [-2, 2] via
    # x/2 - 2 and evaluate the Chebyshev series; the exp(x) factor
    # undoes the exp(-x) scaling baked into the _i0A coefficients.
    t = x / 2.0 - 2
    return exp(x) * _chbevl(t, _i0A)

3063 

3064 

def _i0_2(x):
    # Large-argument branch of i0 (x > 8): series in 32/x - 2, with the
    # exp(x)/sqrt(x) asymptotic factor applied explicitly.
    t = 32.0 / x - 2.0
    return exp(x) * _chbevl(t, _i0B) / sqrt(x)

3067 

3068 

def _i0_dispatcher(x):
    # __array_function__ dispatcher for i0: the only dispatchable
    # argument is `x` itself.
    return (x,)

3071 

3072 

@array_function_dispatch(_i0_dispatcher)
def i0(x):
    """
    Modified Bessel function of the first kind, order 0.

    Usually denoted :math:`I_0`.  This function does broadcast, but will
    *not* "up-cast" int dtype arguments unless accompanied by at least
    one float or complex dtype argument (see Raises below).

    Parameters
    ----------
    x : array_like, dtype float or complex
        Argument of the Bessel function.

    Returns
    -------
    out : ndarray, shape = x.shape, dtype = x.dtype
        The modified Bessel function evaluated at each of the elements
        of `x`.

    Raises
    ------
    TypeError: array cannot be safely cast to required type
        If argument consists exclusively of int dtypes.

    See Also
    --------
    scipy.special.i0, scipy.special.iv, scipy.special.ive

    Notes
    -----
    The scipy implementation is recommended over this function: it is a
    proper ufunc written in C, and more than an order of magnitude
    faster.

    We use the algorithm published by Clenshaw [1]_ and referenced by
    Abramowitz and Stegun [2]_, for which the function domain is
    partitioned into the two intervals [0,8] and (8,inf), and Chebyshev
    polynomial expansions are employed in each interval.  Relative error
    on the domain [0,30] using IEEE arithmetic is documented [3]_ as
    having a peak of 5.8e-16 with an rms of 1.4e-16 (n = 30000).

    References
    ----------
    .. [1] C. W. Clenshaw, "Chebyshev series for mathematical functions",
           in *National Physical Laboratory Mathematical Tables*, vol. 5,
           London: Her Majesty's Stationery Office, 1962.
    .. [2] M. Abramowitz and I. A. Stegun, *Handbook of Mathematical
           Functions*, 10th printing, New York: Dover, 1964, pp. 379.
           http://www.math.sfu.ca/~cbm/aands/page_379.htm
    .. [3] http://kobesearch.cpan.org/htdocs/Math-Cephes/Math/Cephes.html

    Examples
    --------
    >>> np.i0(0.)
    array(1.0)  # may vary

    """
    values = np.asanyarray(x)
    values = np.abs(values)
    # Two Chebyshev expansions, one per interval: [0, 8] and (8, inf).
    return piecewise(values, [values <= 8.0], [_i0_1, _i0_2])

3134 

3135## End of cephes code for i0 

3136 

3137 

@set_module('numpy')
def kaiser(M, beta):
    """
    Return the Kaiser window.

    The Kaiser window is a taper formed by using a Bessel function.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an
        empty array is returned.
    beta : float
        Shape parameter for window.

    Returns
    -------
    out : array
        The window, with the maximum value normalized to one (the value
        one appears only if the number of samples is odd).

    See Also
    --------
    bartlett, blackman, hamming, hanning

    Notes
    -----
    The Kaiser window is defined as

    .. math::  w(n) = I_0\\left( \\beta \\sqrt{1-\\frac{4n^2}{(M-1)^2}}
               \\right)/I_0(\\beta)

    with

    .. math:: \\quad -\\frac{M-1}{2} \\leq n \\leq \\frac{M-1}{2},

    where :math:`I_0` is the modified zeroth-order Bessel function.

    The Kaiser was named for Jim Kaiser, who discovered a simple
    approximation to the DPSS (Slepian) window based on Bessel
    functions.  Varying beta approximates other windows:

    ====  =======================
    beta  Window shape
    ====  =======================
    0     Rectangular
    5     Similar to a Hamming
    6     Similar to a Hanning
    8.6   Similar to a Blackman
    ====  =======================

    A beta value of 14 is probably a good starting point.  Note that as
    beta gets large, the window narrows, and so the number of samples
    needs to be large enough to sample the increasingly narrow spike,
    otherwise NaNs will get returned.

    References
    ----------
    .. [1] J. F. Kaiser, "Digital Filters" - Ch 7 in "Systems analysis by
           digital computer", Editors: F.F. Kuo and J.F. Kaiser, p 218-285.
           John Wiley and Sons, New York, (1966).
    .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
           University of Alberta Press, 1975, pp. 177-178.
    .. [3] Wikipedia, "Window function",
           https://en.wikipedia.org/wiki/Window_function

    Examples
    --------
    >>> np.kaiser(12, 14)
    array([7.72686684e-06, 3.46009194e-03, 4.65200189e-02, # may vary
           2.29737120e-01, 5.99885316e-01, 9.45674898e-01,
           9.45674898e-01, 5.99885316e-01, 2.29737120e-01,
           4.65200189e-02, 3.46009194e-03, 7.72686684e-06])

    """
    # This used to pull ``i0`` from ``numpy.dual`` (possibly SciPy's
    # implementation); ``numpy.dual`` is deprecated, so use this
    # module's own ``i0`` directly.
    if M == 1:
        return np.array([1.])
    n = arange(0, M)
    alpha = (M-1)/2.0
    return i0(beta * sqrt(1-((n-alpha)/alpha)**2.0))/i0(float(beta))

3265 

3266 

def _sinc_dispatcher(x):
    # __array_function__ dispatcher for sinc.
    return (x,)


@array_function_dispatch(_sinc_dispatcher)
def sinc(x):
    """
    Return the sinc function.

    The sinc function is :math:`\\sin(\\pi x)/(\\pi x)`.

    Parameters
    ----------
    x : ndarray
        Array (possibly multi-dimensional) of values for which to
        calculate ``sinc(x)``.

    Returns
    -------
    out : ndarray
        ``sinc(x)``, which has the same shape as the input.

    Notes
    -----
    ``sinc(0)`` is the limit value 1.

    The name sinc is short for "sine cardinal" or "sinus cardinalis".

    The sinc function is used in various signal processing
    applications, including anti-aliasing, the construction of a
    Lanczos resampling filter, and interpolation; for bandlimited
    interpolation of discrete-time signals the ideal interpolation
    kernel is proportional to it.

    References
    ----------
    .. [1] Weisstein, Eric W. "Sinc Function." From MathWorld--A Wolfram
           Web Resource. http://mathworld.wolfram.com/SincFunction.html
    .. [2] Wikipedia, "Sinc function",
           https://en.wikipedia.org/wiki/Sinc_function

    Examples
    --------
    >>> np.sinc(0.5)  # doctest: +SKIP
    0.6366197723675814

    """
    vals = np.asanyarray(x)
    # Substitute a tiny nonzero value where x == 0 so the division is
    # defined; sin(y)/y rounds to exactly 1.0 for such a tiny y, which
    # gives the correct limit at zero.
    y = pi * where(vals == 0, 1.0e-20, vals)
    return sin(y) / y

3343 

3344 

def _msort_dispatcher(a):
    # __array_function__ dispatcher for msort.
    return (a,)


@array_function_dispatch(_msort_dispatcher)
def msort(a):
    """
    Return a copy of an array sorted along the first axis.

    Parameters
    ----------
    a : array_like
        Array to be sorted.

    Returns
    -------
    sorted_array : ndarray
        Array of the same type and shape as `a`.

    See Also
    --------
    sort

    Notes
    -----
    ``np.msort(a)`` is equivalent to ``np.sort(a, axis=0)``.

    """
    # Copy (preserving subclasses), then sort the copy in place along
    # the first axis.
    result = array(a, subok=True, copy=True)
    result.sort(axis=0)
    return result

3376 

3377 

3378def _ureduce(a, func, **kwargs): 

3379 """ 

3380 Internal Function. 

3381 Call `func` with `a` as first argument swapping the axes to use extended 

3382 axis on functions that don't support it natively. 

3383 

3384 Returns result and a.shape with axis dims set to 1. 

3385 

3386 Parameters 

3387 ---------- 

3388 a : array_like 

3389 Input array or object that can be converted to an array. 

3390 func : callable 

3391 Reduction function capable of receiving a single axis argument. 

3392 It is called with `a` as first argument followed by `kwargs`. 

3393 kwargs : keyword arguments 

3394 additional keyword arguments to pass to `func`. 

3395 

3396 Returns 

3397 ------- 

3398 result : tuple 

3399 Result of func(a, **kwargs) and a.shape with axis dims set to 1 

3400 which can be used to reshape the result to the same shape a ufunc with 

3401 keepdims=True would produce. 

3402 

3403 """ 

3404 a = np.asanyarray(a) 

3405 axis = kwargs.get('axis', None) 

3406 if axis is not None: 

3407 keepdim = list(a.shape) 

3408 nd = a.ndim 

3409 axis = _nx.normalize_axis_tuple(axis, nd) 

3410 

3411 for ax in axis: 

3412 keepdim[ax] = 1 

3413 

3414 if len(axis) == 1: 

3415 kwargs['axis'] = axis[0] 

3416 else: 

3417 keep = set(range(nd)) - set(axis) 

3418 nkeep = len(keep) 

3419 # swap axis that should not be reduced to front 

3420 for i, s in enumerate(sorted(keep)): 

3421 a = a.swapaxes(i, s) 

3422 # merge reduced axis 

3423 a = a.reshape(a.shape[:nkeep] + (-1,)) 

3424 kwargs['axis'] = -1 

3425 keepdim = tuple(keepdim) 

3426 else: 

3427 keepdim = (1,) * a.ndim 

3428 

3429 r = func(a, **kwargs) 

3430 return r, keepdim 

3431 

3432 

def _median_dispatcher(
        a, axis=None, out=None, overwrite_input=None, keepdims=None):
    # __array_function__ dispatcher for median: the input array and the
    # optional output array are the dispatchable arguments.
    return (a, out)

3436 

3437 

@array_function_dispatch(_median_dispatcher)
def median(a, axis=None, out=None, overwrite_input=False, keepdims=False):
    """
    Compute the median along the specified axis.

    Returns the median of the array elements.

    Parameters
    ----------
    a : array_like
        Input array or object that can be converted to an array.
    axis : {int, sequence of int, None}, optional
        Axis or axes along which the medians are computed.  The default
        is to compute the median along a flattened version of the
        array.  A sequence of axes is supported since version 1.9.0.
    out : ndarray, optional
        Alternative output array in which to place the result.  It must
        have the same shape and buffer length as the expected output,
        but the type (of the output) will be cast if necessary.
    overwrite_input : bool, optional
        If True, allow use of the memory of input array `a` for
        calculations; `a` may be left fully or partially sorted.  Treat
        its contents as undefined afterwards.  Default is False.  If
        `overwrite_input` is ``True`` and `a` is not already an
        `ndarray`, an error will be raised.
    keepdims : bool, optional
        If this is set to True, the axes which are reduced are left in
        the result as dimensions with size one, so the result
        broadcasts correctly against the original `arr`.

        .. versionadded:: 1.9.0

    Returns
    -------
    median : ndarray
        A new array holding the result.  If the input contains integers
        or floats smaller than ``float64``, the output data-type is
        ``np.float64``; otherwise it matches the input.  If `out` is
        specified, that array is returned instead.

    See Also
    --------
    mean, percentile

    Notes
    -----
    Given a vector ``V`` of length ``N``, the median of ``V`` is the
    middle value of a sorted copy ``V_sorted`` — i.e.
    ``V_sorted[(N-1)/2]`` when ``N`` is odd, and the average of the two
    middle values of ``V_sorted`` when ``N`` is even.

    Examples
    --------
    >>> a = np.array([[10, 7, 4], [3, 2, 1]])
    >>> np.median(a)
    3.5
    >>> np.median(a, axis=0)
    array([6.5, 4.5, 2.5])
    >>> np.median(a, axis=1)
    array([7., 2.])

    """
    # _ureduce supplies tuple-of-axes support and the keepdims shape.
    result, keepdims_shape = _ureduce(a, func=_median, axis=axis, out=out,
                                      overwrite_input=overwrite_input)
    if keepdims:
        return result.reshape(keepdims_shape)
    return result

3526 

3527 

3528def _median(a, axis=None, out=None, overwrite_input=False): 

3529 # can't be reasonably be implemented in terms of percentile as we have to 

3530 # call mean to not break astropy 

3531 a = np.asanyarray(a) 

3532 

3533 # Set the partition indexes 

3534 if axis is None: 

3535 sz = a.size 

3536 else: 

3537 sz = a.shape[axis] 

3538 if sz % 2 == 0: 

3539 szh = sz // 2 

3540 kth = [szh - 1, szh] 

3541 else: 

3542 kth = [(sz - 1) // 2] 

3543 # Check if the array contains any nan's 

3544 if np.issubdtype(a.dtype, np.inexact): 

3545 kth.append(-1) 

3546 

3547 if overwrite_input: 

3548 if axis is None: 

3549 part = a.ravel() 

3550 part.partition(kth) 

3551 else: 

3552 a.partition(kth, axis=axis) 

3553 part = a 

3554 else: 

3555 part = partition(a, kth, axis=axis) 

3556 

3557 if part.shape == (): 

3558 # make 0-D arrays work 

3559 return part.item() 

3560 if axis is None: 

3561 axis = 0 

3562 

3563 indexer = [slice(None)] * part.ndim 

3564 index = part.shape[axis] // 2 

3565 if part.shape[axis] % 2 == 1: 

3566 # index with slice to allow mean (below) to work 

3567 indexer[axis] = slice(index, index+1) 

3568 else: 

3569 indexer[axis] = slice(index-1, index+1) 

3570 indexer = tuple(indexer) 

3571 

3572 # Check if the array contains any nan's 

3573 if np.issubdtype(a.dtype, np.inexact) and sz > 0: 

3574 # warn and return nans like mean would 

3575 rout = mean(part[indexer], axis=axis, out=out) 

3576 return np.lib.utils._median_nancheck(part, rout, axis, out) 

3577 else: 

3578 # if there are no nans 

3579 # Use mean in odd and even case to coerce data type 

3580 # and check, use out array. 

3581 return mean(part[indexer], axis=axis, out=out) 

3582 

3583 

def _percentile_dispatcher(a, q, axis=None, out=None, overwrite_input=None,
                           interpolation=None, keepdims=None):
    # __array_function__ dispatcher for percentile: the input array, the
    # percentile values and the optional output array can dispatch.
    return (a, q, out)

3588 

@array_function_dispatch(_percentile_dispatcher)
def percentile(a, q, axis=None, out=None,
               overwrite_input=False, interpolation='linear', keepdims=False):
    """
    Compute the q-th percentile of the data along the specified axis.

    Returns the q-th percentile(s) of the array elements.

    Parameters
    ----------
    a : array_like
        Input array or object that can be converted to an array.
    q : array_like of float
        Percentile or sequence of percentiles to compute, which must be
        between 0 and 100 inclusive.
    axis : {int, tuple of int, None}, optional
        Axis or axes along which the percentiles are computed.  The
        default is to compute the percentile(s) along a flattened
        version of the array.

        .. versionchanged:: 1.9.0
            A tuple of axes is supported
    out : ndarray, optional
        Alternative output array in which to place the result.  It must
        have the same shape and buffer length as the expected output,
        but the type (of the output) will be cast if necessary.
    overwrite_input : bool, optional
        If True, allow the input array `a` to be modified by
        intermediate calculations to save memory; its contents are
        undefined afterwards.
    interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
        Interpolation method used when the desired percentile lies
        between two data points ``i < j``:

        * 'linear': ``i + (j - i) * fraction``, where ``fraction``
          is the fractional part of the index surrounded by ``i``
          and ``j``.
        * 'lower': ``i``.
        * 'higher': ``j``.
        * 'nearest': ``i`` or ``j``, whichever is nearest.
        * 'midpoint': ``(i + j) / 2``.

        .. versionadded:: 1.9.0
    keepdims : bool, optional
        If this is set to True, the axes which are reduced are left in
        the result as dimensions with size one, so the result
        broadcasts correctly against the original array `a`.

        .. versionadded:: 1.9.0

    Returns
    -------
    percentile : scalar or ndarray
        A scalar for a single percentile with ``axis=None``; otherwise
        an array whose first axis corresponds to the percentiles.
        Integer and small-float inputs produce ``float64`` output;
        otherwise the output dtype matches the input.  If `out` is
        specified, that array is returned instead.

    See Also
    --------
    mean
    median : equivalent to ``percentile(..., 50)``
    nanpercentile
    quantile : equivalent to percentile, except with q in the range [0, 1].

    Notes
    -----
    Given a vector ``V`` of length ``N``, the q-th percentile of ``V``
    is the value ``q/100`` of the way from the minimum to the maximum
    in a sorted copy of ``V``.  The `interpolation` parameter decides
    the result when the normalized ranking does not match the location
    of ``q`` exactly.  ``q=50`` is the median, ``q=0`` the minimum and
    ``q=100`` the maximum.

    Examples
    --------
    >>> a = np.array([[10, 7, 4], [3, 2, 1]])
    >>> np.percentile(a, 50)
    3.5
    >>> np.percentile(a, 50, axis=0)
    array([6.5, 4.5, 2.5])
    >>> np.percentile(a, 50, axis=1)
    array([7., 2.])

    """
    q = np.true_divide(q, 100)
    # true_divide may decay a 0-d array to a scalar; restore the array
    # wrapper before validation (see gh-13105).
    q = asanyarray(q)
    if not _quantile_is_valid(q):
        raise ValueError("Percentiles must be in the range [0, 100]")
    return _quantile_unchecked(
        a, q, axis, out, overwrite_input, interpolation, keepdims)

3734 

3735 

def _quantile_dispatcher(a, q, axis=None, out=None, overwrite_input=None,
                         interpolation=None, keepdims=None):
    # __array_function__ dispatcher for quantile: the input array, the
    # quantile values and the optional output array can dispatch.
    return (a, q, out)

3739 

3740 

@array_function_dispatch(_quantile_dispatcher)
def quantile(a, q, axis=None, out=None,
             overwrite_input=False, interpolation='linear', keepdims=False):
    """
    Compute the q-th quantile of the data along the specified axis.

    .. versionadded:: 1.15.0

    Parameters
    ----------
    a : array_like
        Input array or object that can be converted to an array.
    q : array_like of float
        Quantile or sequence of quantiles to compute; every value must
        lie in the closed interval [0, 1].
    axis : {int, tuple of int, None}, optional
        Axis or axes along which the quantiles are computed.  By default
        the quantile(s) are taken over a flattened version of the array.
    out : ndarray, optional
        Alternative output array in which to place the result.  It must
        have the same shape and buffer length as the expected output, but
        the type (of the output) will be cast if necessary.
    overwrite_input : bool, optional
        If True, `a` may be modified by intermediate calculations to save
        memory.  Its contents are undefined after the call completes.
    interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
        How to interpolate when the desired quantile lies between two
        data points ``i < j``:

        * linear: ``i + (j - i) * fraction``, where ``fraction``
          is the fractional part of the index surrounded by ``i``
          and ``j``.
        * lower: ``i``.
        * higher: ``j``.
        * nearest: ``i`` or ``j``, whichever is nearest.
        * midpoint: ``(i + j) / 2``.
    keepdims : bool, optional
        If True, the reduced axes are left in the result as dimensions of
        size one, so the result broadcasts correctly against `a`.

    Returns
    -------
    quantile : scalar or ndarray
        A scalar when `q` is a single quantile and ``axis=None``.  For
        multiple quantiles, the first axis of the result corresponds to
        the quantiles and the remaining axes are those left after the
        reduction of `a`.  Integer inputs and floats narrower than
        ``float64`` produce ``float64`` output; otherwise the output
        dtype matches the input.  If `out` is given, that array is
        returned instead.

    Raises
    ------
    ValueError
        If any value in `q` is outside the interval [0, 1].

    See Also
    --------
    mean
    percentile : equivalent to quantile, but with q in the range [0, 100].
    median : equivalent to ``quantile(..., 0.5)``
    nanquantile

    Notes
    -----
    Given a vector ``V`` of length ``N``, the q-th quantile of ``V`` is
    the value ``q`` of the way from the minimum to the maximum in a
    sorted copy of ``V``.  The values and distances of the two nearest
    neighbors, together with `interpolation`, determine the quantile when
    the normalized ranking does not match the location of ``q`` exactly.
    ``q=0.5`` is the median, ``q=0.0`` the minimum and ``q=1.0`` the
    maximum.

    Examples
    --------
    >>> a = np.array([[10, 7, 4], [3, 2, 1]])
    >>> np.quantile(a, 0.5)
    3.5
    >>> np.quantile(a, 0.5, axis=0)
    array([6.5, 4.5, 2.5])
    >>> np.quantile(a, 0.5, axis=1)
    array([7., 2.])
    >>> np.quantile(a, 0.5, axis=1, keepdims=True)
    array([[7.],
           [2.]])
    """
    # Validate q once up front; the helper assumes q is an in-range ndarray.
    q = np.asanyarray(q)
    if _quantile_is_valid(q):
        return _quantile_unchecked(
            a, q, axis, out, overwrite_input, interpolation, keepdims)
    raise ValueError("Quantiles must be in the range [0, 1]")

3846 

3847 

def _quantile_unchecked(a, q, axis=None, out=None, overwrite_input=False,
                        interpolation='linear', keepdims=False):
    """Assumes that q is in [0, 1], and is an ndarray"""
    # _ureduce handles tuple axes and returns the reduced result together
    # with the shape of the collapsed dimensions (for keepdims).
    result, reduced_shape = _ureduce(a, func=_quantile_ureduce_func, q=q,
                                     axis=axis, out=out,
                                     overwrite_input=overwrite_input,
                                     interpolation=interpolation)
    if not keepdims:
        return result
    # Re-insert the reduced axes as length-1 dimensions after the q axes.
    return result.reshape(q.shape + reduced_shape)

3858 

3859 

3860def _quantile_is_valid(q): 

3861 # avoid expensive reductions, relevant for arrays with < O(1000) elements 

3862 if q.ndim == 1 and q.size < 10: 

3863 for i in range(q.size): 

3864 if q[i] < 0.0 or q[i] > 1.0: 

3865 return False 

3866 else: 

3867 # faster than any() 

3868 if np.count_nonzero(q < 0.0) or np.count_nonzero(q > 1.0): 

3869 return False 

3870 return True 

3871 

3872 

def _quantile_ureduce_func(a, q, axis=None, out=None, overwrite_input=False,
                           interpolation='linear', keepdims=False):
    """
    Core quantile computation along a single axis.

    Assumes `q` has already been validated to lie in [0, 1].  Uses
    ``partition`` (not a full sort) to place the needed order statistics,
    and tracks NaNs in inexact inputs by partitioning a sentinel ``-1``
    index (the maximum, which is NaN if any NaN is present) to the end.
    """
    a = asarray(a)
    if q.ndim == 0:
        # Do not allow 0-d arrays because following code fails for scalar
        zerod = True
        q = q[None]
    else:
        zerod = False

    # prepare a for partitioning
    # (partition mutates, so copy unless the caller allowed overwriting;
    # ravel() may return a view, flatten() always copies)
    if overwrite_input:
        if axis is None:
            ap = a.ravel()
        else:
            ap = a
    else:
        if axis is None:
            ap = a.flatten()
        else:
            ap = a.copy()

    if axis is None:
        axis = 0

    # Fractional positions of the requested quantiles in the sorted data.
    Nx = ap.shape[axis]
    indices = q * (Nx - 1)

    # round fractional indices according to interpolation method
    if interpolation == 'lower':
        indices = floor(indices).astype(intp)
    elif interpolation == 'higher':
        indices = ceil(indices).astype(intp)
    elif interpolation == 'midpoint':
        indices = 0.5 * (floor(indices) + ceil(indices))
    elif interpolation == 'nearest':
        indices = around(indices).astype(intp)
    elif interpolation == 'linear':
        pass  # keep index as fraction and interpolate
    else:
        raise ValueError(
            "interpolation can only be 'linear', 'lower' 'higher', "
            "'midpoint', or 'nearest'")

    n = np.array(False, dtype=bool)  # check for nan's flag
    if np.issubdtype(indices.dtype, np.integer):  # take the points along axis
        # Integer indices: no interpolation needed, a single take suffices.
        # Check if the array contains any nan's
        if np.issubdtype(a.dtype, np.inexact):
            # Partition the last element too; if any NaN exists it sorts
            # to the end and is detected below.
            indices = concatenate((indices, [-1]))

        ap.partition(indices, axis=axis)
        # ensure axis with q-th is first
        ap = np.moveaxis(ap, axis, 0)
        axis = 0

        # Check if the array contains any nan's
        if np.issubdtype(a.dtype, np.inexact):
            indices = indices[:-1]
            n = np.isnan(ap[-1:, ...])

        if zerod:
            indices = indices[0]
        r = take(ap, indices, axis=axis, out=out)

    else:  # weight the points above and below the indices
        indices_below = floor(indices).astype(intp)
        indices_above = indices_below + 1
        # Clamp so q == 1.0 does not index past the end.
        indices_above[indices_above > Nx - 1] = Nx - 1

        # Check if the array contains any nan's
        if np.issubdtype(a.dtype, np.inexact):
            indices_above = concatenate((indices_above, [-1]))

        weights_above = indices - indices_below
        weights_below = 1 - weights_above

        # Reshape weights so they broadcast along `axis` only.
        weights_shape = [1, ] * ap.ndim
        weights_shape[axis] = len(indices)
        weights_below.shape = weights_shape
        weights_above.shape = weights_shape

        ap.partition(concatenate((indices_below, indices_above)), axis=axis)

        # ensure axis with q-th is first
        ap = np.moveaxis(ap, axis, 0)
        weights_below = np.moveaxis(weights_below, axis, 0)
        weights_above = np.moveaxis(weights_above, axis, 0)
        axis = 0

        # Check if the array contains any nan's
        if np.issubdtype(a.dtype, np.inexact):
            indices_above = indices_above[:-1]
            n = np.isnan(ap[-1:, ...])

        # Linear interpolation between the two bracketing order statistics.
        x1 = take(ap, indices_below, axis=axis) * weights_below
        x2 = take(ap, indices_above, axis=axis) * weights_above

        # ensure axis with q-th is first
        x1 = np.moveaxis(x1, axis, 0)
        x2 = np.moveaxis(x2, axis, 0)

        if zerod:
            x1 = x1.squeeze(0)
            x2 = x2.squeeze(0)

        if out is not None:
            r = add(x1, x2, out=out)
        else:
            r = add(x1, x2)

    # Propagate NaN into every output slot whose input slice held a NaN.
    if np.any(n):
        if zerod:
            if ap.ndim == 1:
                if out is not None:
                    out[...] = a.dtype.type(np.nan)
                    r = out
                else:
                    r = a.dtype.type(np.nan)
            else:
                r[..., n.squeeze(0)] = a.dtype.type(np.nan)
        else:
            if r.ndim == 1:
                r[:] = a.dtype.type(np.nan)
            else:
                r[..., n.repeat(q.size, 0)] = a.dtype.type(np.nan)

    return r

4000 

4001 

def _trapz_dispatcher(y, x=None, dx=None, axis=None):
    # Dispatcher for np.trapz: only y and x participate in
    # __array_function__ dispatch.
    return (y, x)

4004 

4005 

@array_function_dispatch(_trapz_dispatcher)
def trapz(y, x=None, dx=1.0, axis=-1):
    """
    Integrate along the given axis using the composite trapezoidal rule.

    Integrate `y` (`x`) along given axis.

    Parameters
    ----------
    y : array_like
        Input array to integrate.
    x : array_like, optional
        The sample points corresponding to the `y` values.  If `x` is
        None, the sample points are assumed to be evenly spaced `dx`
        apart.  The default is None.
    dx : scalar, optional
        The spacing between sample points when `x` is None.  The default
        is 1.
    axis : int, optional
        The axis along which to integrate.

    Returns
    -------
    trapz : float
        Definite integral as approximated by trapezoidal rule.

    See Also
    --------
    sum, cumsum

    Notes
    -----
    The y-axis locations of the sample points are taken from `y`; their
    x-axis spacing comes from `x` when given, otherwise from the scalar
    `dx`.  The return value is the combined area of the trapezoids so
    defined.

    References
    ----------
    .. [1] Wikipedia page: https://en.wikipedia.org/wiki/Trapezoidal_rule

    Examples
    --------
    >>> np.trapz([1,2,3])
    4.0
    >>> np.trapz([1,2,3], x=[4,6,8])
    8.0
    >>> np.trapz([1,2,3], dx=2)
    8.0
    >>> a = np.arange(6).reshape(2, 3)
    >>> np.trapz(a, axis=0)
    array([1.5, 2.5, 3.5])
    >>> np.trapz(a, axis=1)
    array([2., 8.])
    """
    y = asanyarray(y)
    if x is None:
        spacing = dx
    else:
        x = asanyarray(x)
        if x.ndim == 1:
            spacing = diff(x)
            # Broadcast the 1-d spacings along the integration axis.
            bshape = [1] * y.ndim
            bshape[axis] = spacing.shape[0]
            spacing = spacing.reshape(bshape)
        else:
            spacing = diff(x, axis=axis)

    nd = y.ndim
    upper = [slice(None)] * nd
    lower = [slice(None)] * nd
    upper[axis] = slice(1, None)
    lower[axis] = slice(None, -1)
    try:
        ret = (spacing * (y[tuple(upper)] + y[tuple(lower)]) / 2.0).sum(axis)
    except ValueError:
        # Operations didn't work, cast to ndarray
        spacing = np.asarray(spacing)
        y = np.asarray(y)
        ret = add.reduce(
            spacing * (y[tuple(upper)] + y[tuple(lower)]) / 2.0, axis)
    return ret

4095 

4096 

def _meshgrid_dispatcher(*xi, copy=None, sparse=None, indexing=None):
    # Dispatcher for np.meshgrid: every positional input is an array-like
    # candidate for __array_function__ dispatch.
    return xi

4099 

4100 

# Based on scitools meshgrid
@array_function_dispatch(_meshgrid_dispatcher)
def meshgrid(*xi, copy=True, sparse=False, indexing='xy'):
    """
    Return coordinate matrices from coordinate vectors.

    Make N-D coordinate arrays for vectorized evaluations of N-D
    scalar/vector fields over N-D grids, given one-dimensional coordinate
    arrays x1, x2,..., xn.

    .. versionchanged:: 1.9
       1-D and 0-D cases are allowed.

    Parameters
    ----------
    x1, x2,..., xn : array_like
        1-D arrays representing the coordinates of a grid.
    indexing : {'xy', 'ij'}, optional
        Cartesian ('xy', default) or matrix ('ij') indexing of output.
        See Notes for more details.

        .. versionadded:: 1.7.0
    sparse : bool, optional
        If True a sparse grid is returned in order to conserve memory.
        Default is False.

        .. versionadded:: 1.7.0
    copy : bool, optional
        If False, a view into the original arrays are returned in order
        to conserve memory.  Default is True.  Please note that
        ``sparse=False, copy=False`` will likely return non-contiguous
        arrays, and more than one element of a broadcast array may refer
        to a single memory location.  If you need to write to the arrays,
        make copies first.

        .. versionadded:: 1.7.0

    Returns
    -------
    X1, X2,..., XN : ndarray
        For vectors `x1`, `x2`,..., 'xn' with lengths ``Ni=len(xi)``,
        return ``(N1, N2, N3,...Nn)`` shaped arrays if indexing='ij'
        or ``(N2, N1, N3,...Nn)`` shaped arrays if indexing='xy',
        with the elements of `xi` repeated to fill the matrix along
        the first dimension for `x1`, the second for `x2` and so on.

    Notes
    -----
    With 'ij' indexing and 2-D inputs of length M and N the outputs have
    shape (M, N); with 'xy' indexing they have shape (N, M) — i.e. the
    first two axes are swapped.  In the 1-D and 0-D case, the `indexing`
    and `sparse` keywords have no effect.

    See Also
    --------
    index_tricks.mgrid : Construct a multi-dimensional "meshgrid"
                         using indexing notation.
    index_tricks.ogrid : Construct an open multi-dimensional "meshgrid"
                         using indexing notation.

    Examples
    --------
    >>> nx, ny = (3, 2)
    >>> x = np.linspace(0, 1, nx)
    >>> y = np.linspace(0, 1, ny)
    >>> xv, yv = np.meshgrid(x, y)
    >>> xv
    array([[0. , 0.5, 1. ],
           [0. , 0.5, 1. ]])
    >>> yv
    array([[0., 0., 0.],
           [1., 1., 1.]])
    >>> xv, yv = np.meshgrid(x, y, sparse=True)  # make sparse output arrays
    >>> xv
    array([[0. , 0.5, 1. ]])
    >>> yv
    array([[0.],
           [1.]])
    """
    ndim = len(xi)

    if indexing not in ['xy', 'ij']:
        raise ValueError(
            "Valid values for `indexing` are 'xy' and 'ij'.")

    unit = (1,) * ndim
    # Give the i-th input shape (1, ..., -1, ..., 1) with -1 in slot i,
    # so the open grids broadcast against one another.
    grids = [np.asanyarray(x).reshape(unit[:i] + (-1,) + unit[i + 1:])
             for i, x in enumerate(xi)]

    if indexing == 'xy' and ndim > 1:
        # Cartesian indexing: swap the roles of the first two axes.
        grids[0] = grids[0].reshape((1, -1) + unit[2:])
        grids[1] = grids[1].reshape((-1, 1) + unit[2:])

    if not sparse:
        # Expand the open grids into full N-D coordinate arrays.
        grids = np.broadcast_arrays(*grids, subok=True)

    if copy:
        grids = [g.copy() for g in grids]

    return grids

4230 

4231 

def _delete_dispatcher(arr, obj, axis=None):
    # Dispatcher for np.delete: arr and obj participate in
    # __array_function__ dispatch.
    return (arr, obj)

4234 

4235 

@array_function_dispatch(_delete_dispatcher)
def delete(arr, obj, axis=None):
    """
    Return a new array with sub-arrays along an axis deleted. For a one
    dimensional array, this returns those entries not returned by
    `arr[obj]`.

    Parameters
    ----------
    arr : array_like
        Input array.
    obj : slice, int or array of ints
        Indicate indices of sub-arrays to remove along the specified axis.

        .. versionchanged:: 1.19.0
            Boolean indices are now treated as a mask of elements to remove,
            rather than being cast to the integers 0 and 1.

    axis : int, optional
        The axis along which to delete the subarray defined by `obj`.
        If `axis` is None, `obj` is applied to the flattened array.

    Returns
    -------
    out : ndarray
        A copy of `arr` with the elements specified by `obj` removed. Note
        that `delete` does not occur in-place. If `axis` is None, `out` is
        a flattened array.

    See Also
    --------
    insert : Insert elements into an array.
    append : Append elements at the end of an array.

    Notes
    -----
    Often it is preferable to use a boolean mask. For example:

    >>> arr = np.arange(12) + 1
    >>> mask = np.ones(len(arr), dtype=bool)
    >>> mask[[0,2,4]] = False
    >>> result = arr[mask,...]

    Is equivalent to `np.delete(arr, [0,2,4], axis=0)`, but allows further
    use of `mask`.

    Examples
    --------
    >>> arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])
    >>> arr
    array([[ 1,  2,  3,  4],
           [ 5,  6,  7,  8],
           [ 9, 10, 11, 12]])
    >>> np.delete(arr, 1, 0)
    array([[ 1,  2,  3,  4],
           [ 9, 10, 11, 12]])

    >>> np.delete(arr, np.s_[::2], 1)
    array([[ 2,  4],
           [ 6,  8],
           [10, 12]])
    >>> np.delete(arr, [1,3,5], None)
    array([ 1,  3,  5,  7,  8,  9, 10, 11, 12])

    """
    # Preserve subclass wrapping (e.g. np.matrix) for non-ndarray inputs.
    wrap = None
    if type(arr) is not ndarray:
        try:
            wrap = arr.__array_wrap__
        except AttributeError:
            pass

    arr = asarray(arr)
    ndim = arr.ndim
    # fnc = Fortran-contiguous but not C-contiguous; keep the input's order.
    arrorder = 'F' if arr.flags.fnc else 'C'
    if axis is None:
        if ndim != 1:
            arr = arr.ravel()
        # needed for np.matrix, which is still not 1d after being ravelled
        ndim = arr.ndim
        axis = ndim - 1
    else:
        axis = normalize_axis_index(axis, ndim)

    slobj = [slice(None)]*ndim
    N = arr.shape[axis]
    newshape = list(arr.shape)

    if isinstance(obj, slice):
        # Slice case: copy the kept pieces chunk-wise instead of building
        # an index array.
        start, stop, step = obj.indices(N)
        xr = range(start, stop, step)
        numtodel = len(xr)

        if numtodel <= 0:
            # Nothing to delete: return a copy (delete never aliases input).
            if wrap:
                return wrap(arr.copy(order=arrorder))
            else:
                return arr.copy(order=arrorder)

        # Invert if step is negative:
        if step < 0:
            step = -step
            start = xr[-1]
            stop = xr[0] + 1

        newshape[axis] -= numtodel
        new = empty(newshape, arr.dtype, arrorder)
        # copy initial chunk
        if start == 0:
            pass
        else:
            slobj[axis] = slice(None, start)
            new[tuple(slobj)] = arr[tuple(slobj)]
        # copy end chunk
        if stop == N:
            pass
        else:
            slobj[axis] = slice(stop-numtodel, None)
            slobj2 = [slice(None)]*ndim
            slobj2[axis] = slice(stop, None)
            new[tuple(slobj)] = arr[tuple(slobj2)]
        # copy middle pieces
        if step == 1:
            pass
        else:  # use array indexing.
            keep = ones(stop-start, dtype=bool)
            keep[:stop-start:step] = False
            slobj[axis] = slice(start, stop-numtodel)
            slobj2 = [slice(None)]*ndim
            slobj2[axis] = slice(start, stop)
            arr = arr[tuple(slobj2)]
            slobj2[axis] = keep
            new[tuple(slobj)] = arr[tuple(slobj2)]
        if wrap:
            return wrap(new)
        else:
            return new

    if isinstance(obj, (int, integer)) and not isinstance(obj, bool):
        # optimization for a single value
        if (obj < -N or obj >= N):
            raise IndexError(
                "index %i is out of bounds for axis %i with "
                "size %i" % (obj, axis, N))
        if (obj < 0):
            obj += N
        newshape[axis] -= 1
        new = empty(newshape, arr.dtype, arrorder)
        # Copy everything before and after the removed position.
        slobj[axis] = slice(None, obj)
        new[tuple(slobj)] = arr[tuple(slobj)]
        slobj[axis] = slice(obj, None)
        slobj2 = [slice(None)]*ndim
        slobj2[axis] = slice(obj+1, None)
        new[tuple(slobj)] = arr[tuple(slobj2)]
    else:
        # General case: build a boolean keep-mask along `axis`.
        _obj = obj
        obj = np.asarray(obj)
        if obj.size == 0 and not isinstance(_obj, np.ndarray):
            # Empty list/tuple: safe to treat as an empty integer index.
            obj = obj.astype(intp)

        if obj.dtype == bool:
            if obj.shape != (N,):
                raise ValueError('boolean array argument obj to delete '
                                 'must be one dimensional and match the axis '
                                 'length of {}'.format(N))

            # optimization, the other branch is slower
            keep = ~obj
        else:
            keep = ones(N, dtype=bool)
            keep[obj,] = False

        slobj[axis] = keep
        new = arr[tuple(slobj)]

    if wrap:
        return wrap(new)
    else:
        return new

4415 

4416 

def _insert_dispatcher(arr, obj, values, axis=None):
    # Dispatcher for np.insert: arr, obj and values participate in
    # __array_function__ dispatch.
    return (arr, obj, values)

4419 

4420 

@array_function_dispatch(_insert_dispatcher)
def insert(arr, obj, values, axis=None):
    """
    Insert values along the given axis before the given indices.

    Parameters
    ----------
    arr : array_like
        Input array.
    obj : int, slice or sequence of ints
        Object that defines the index or indices before which `values` is
        inserted.

        .. versionadded:: 1.8.0

        Support for multiple insertions when `obj` is a single scalar or a
        sequence with one element (similar to calling insert multiple
        times).
    values : array_like
        Values to insert into `arr`. If the type of `values` is different
        from that of `arr`, `values` is converted to the type of `arr`.
        `values` should be shaped so that ``arr[...,obj,...] = values``
        is legal.
    axis : int, optional
        Axis along which to insert `values`. If `axis` is None then `arr`
        is flattened first.

    Returns
    -------
    out : ndarray
        A copy of `arr` with `values` inserted. Note that `insert`
        does not occur in-place: a new array is returned. If
        `axis` is None, `out` is a flattened array.

    See Also
    --------
    append : Append elements at the end of an array.
    concatenate : Join a sequence of arrays along an existing axis.
    delete : Delete elements from an array.

    Notes
    -----
    Note that for higher dimensional inserts `obj=0` behaves very different
    from `obj=[0]` just like `arr[:,0,:] = values` is different from
    `arr[:,[0],:] = values`.

    Examples
    --------
    >>> a = np.array([[1, 1], [2, 2], [3, 3]])
    >>> a
    array([[1, 1],
           [2, 2],
           [3, 3]])
    >>> np.insert(a, 1, 5)
    array([1, 5, 1, ..., 2, 3, 3])
    >>> np.insert(a, 1, 5, axis=1)
    array([[1, 5, 1],
           [2, 5, 2],
           [3, 5, 3]])

    Difference between sequence and scalars:

    >>> np.insert(a, [1], [[1],[2],[3]], axis=1)
    array([[1, 1, 1],
           [2, 2, 2],
           [3, 3, 3]])
    >>> np.array_equal(np.insert(a, 1, [1, 2, 3], axis=1),
    ...                np.insert(a, [1], [[1],[2],[3]], axis=1))
    True

    >>> b = a.flatten()
    >>> b
    array([1, 1, 2, 2, 3, 3])
    >>> np.insert(b, [2, 2], [5, 6])
    array([1, 1, 5, ..., 2, 3, 3])

    >>> np.insert(b, slice(2, 4), [5, 6])
    array([1, 1, 5, ..., 2, 3, 3])

    >>> np.insert(b, [2, 2], [7.13, False]) # type casting
    array([1, 1, 7, ..., 2, 3, 3])

    >>> x = np.arange(8).reshape(2, 4)
    >>> idx = (1, 3)
    >>> np.insert(x, idx, 999, axis=1)
    array([[  0, 999,   1,   2, 999,   3],
           [  4, 999,   5,   6, 999,   7]])

    """
    # Preserve subclass wrapping (e.g. np.matrix) for non-ndarray inputs.
    wrap = None
    if type(arr) is not ndarray:
        try:
            wrap = arr.__array_wrap__
        except AttributeError:
            pass

    arr = asarray(arr)
    ndim = arr.ndim
    # fnc = Fortran-contiguous but not C-contiguous; keep the input's order.
    arrorder = 'F' if arr.flags.fnc else 'C'
    if axis is None:
        if ndim != 1:
            arr = arr.ravel()
        # needed for np.matrix, which is still not 1d after being ravelled
        ndim = arr.ndim
        axis = ndim - 1
    else:
        axis = normalize_axis_index(axis, ndim)
    slobj = [slice(None)]*ndim
    N = arr.shape[axis]
    newshape = list(arr.shape)

    if isinstance(obj, slice):
        # turn it into a range object
        indices = arange(*obj.indices(N), dtype=intp)
    else:
        # need to copy obj, because indices will be changed in-place
        indices = np.array(obj)
        if indices.dtype == bool:
            # See also delete
            # 2012-10-11, NumPy 1.8
            warnings.warn(
                "in the future insert will treat boolean arrays and "
                "array-likes as a boolean index instead of casting it to "
                "integer", FutureWarning, stacklevel=3)
            indices = indices.astype(intp)
            # Code after warning period:
            #if obj.ndim != 1:
            #    raise ValueError('boolean array argument obj to insert '
            #                     'must be one dimensional')
            #indices = np.flatnonzero(obj)
        elif indices.ndim > 1:
            raise ValueError(
                "index array argument obj to insert must be one dimensional "
                "or scalar")
    if indices.size == 1:
        index = indices.item()
        if index < -N or index > N:
            # BUG FIX: format the extracted scalar `index`, not `obj`.
            # When obj is a one-element sequence (e.g. [100]), formatting
            # obj with %i raised TypeError instead of this IndexError.
            raise IndexError(
                "index %i is out of bounds for axis %i with "
                "size %i" % (index, axis, N))
        if (index < 0):
            index += N

        # There are some object array corner cases here, but we cannot avoid
        # that:
        values = array(values, copy=False, ndmin=arr.ndim, dtype=arr.dtype)
        if indices.ndim == 0:
            # broadcasting is very different here, since a[:,0,:] = ... behaves
            # very different from a[:,[0],:] = ...! This changes values so that
            # it works likes the second case. (here a[:,0:1,:])
            values = np.moveaxis(values, 0, axis)
        numnew = values.shape[axis]
        newshape[axis] += numnew
        new = empty(newshape, arr.dtype, arrorder)
        # Copy the part before the insertion point, the new values, then
        # the remainder of the original array.
        slobj[axis] = slice(None, index)
        new[tuple(slobj)] = arr[tuple(slobj)]
        slobj[axis] = slice(index, index+numnew)
        new[tuple(slobj)] = values
        slobj[axis] = slice(index+numnew, None)
        slobj2 = [slice(None)] * ndim
        slobj2[axis] = slice(index, None)
        new[tuple(slobj)] = arr[tuple(slobj2)]
        if wrap:
            return wrap(new)
        return new
    elif indices.size == 0 and not isinstance(obj, np.ndarray):
        # Can safely cast the empty list to intp
        indices = indices.astype(intp)

    indices[indices < 0] += N

    # Shift each insertion index by the number of earlier insertions so
    # all of them land at their final positions in the enlarged array.
    numnew = len(indices)
    order = indices.argsort(kind='mergesort')   # stable sort
    indices[order] += np.arange(numnew)

    newshape[axis] += numnew
    old_mask = ones(newshape[axis], dtype=bool)
    old_mask[indices] = False

    new = empty(newshape, arr.dtype, arrorder)
    slobj2 = [slice(None)]*ndim
    slobj[axis] = indices
    slobj2[axis] = old_mask
    new[tuple(slobj)] = values
    new[tuple(slobj2)] = arr

    if wrap:
        return wrap(new)
    return new

4610 

4611 

def _append_dispatcher(arr, values, axis=None):
    # Dispatcher for np.append: arr and values participate in
    # __array_function__ dispatch.
    return (arr, values)

4614 

4615 

@array_function_dispatch(_append_dispatcher)
def append(arr, values, axis=None):
    """
    Append values to the end of an array.

    Parameters
    ----------
    arr : array_like
        Values are appended to a copy of this array.
    values : array_like
        These values are appended to a copy of `arr`.  It must be of the
        correct shape (the same shape as `arr`, excluding `axis`).  If
        `axis` is not specified, `values` can be any shape and will be
        flattened before use.
    axis : int, optional
        The axis along which `values` are appended.  If `axis` is not
        given, both `arr` and `values` are flattened before use.

    Returns
    -------
    append : ndarray
        A copy of `arr` with `values` appended to `axis`.  Note that
        `append` does not occur in-place: a new array is allocated and
        filled.  If `axis` is None, `out` is a flattened array.

    See Also
    --------
    insert : Insert elements into an array.
    delete : Delete elements from an array.

    Examples
    --------
    >>> np.append([1, 2, 3], [[4, 5, 6], [7, 8, 9]])
    array([1, 2, 3, ..., 7, 8, 9])

    When `axis` is specified, `values` must have the correct shape.

    >>> np.append([[1, 2, 3], [4, 5, 6]], [[7, 8, 9]], axis=0)
    array([[1, 2, 3],
           [4, 5, 6],
           [7, 8, 9]])
    """
    arr = asanyarray(arr)
    if axis is not None:
        # Shapes must already be compatible; concatenate raises otherwise.
        return concatenate((arr, values), axis=axis)
    # axis=None: flatten both operands and join along the only axis left.
    if arr.ndim != 1:
        arr = arr.ravel()
    return concatenate((arr, ravel(values)), axis=arr.ndim - 1)

4672 

4673 

def _digitize_dispatcher(x, bins, right=None):
    # Dispatcher for np.digitize: x and bins participate in
    # __array_function__ dispatch.
    return (x, bins)

4676 

4677 

@array_function_dispatch(_digitize_dispatcher)
def digitize(x, bins, right=False):
    """
    Return the indices of the bins to which each value in input array belongs.

    =========  =============  ============================
    `right`    order of bins  returned index `i` satisfies
    =========  =============  ============================
    ``False``  increasing     ``bins[i-1] <= x < bins[i]``
    ``True``   increasing     ``bins[i-1] < x <= bins[i]``
    ``False``  decreasing     ``bins[i-1] > x >= bins[i]``
    ``True``   decreasing     ``bins[i-1] >= x > bins[i]``
    =========  =============  ============================

    If values in `x` are beyond the bounds of `bins`, 0 or ``len(bins)``
    is returned as appropriate.

    Parameters
    ----------
    x : array_like
        Input array to be binned.  May have any shape.
    bins : array_like
        Array of bins.  It has to be 1-dimensional and monotonic.
    right : bool, optional
        Whether the intervals include the right or the left bin edge.
        The default (``right=False``) means the interval does not include
        the right edge, i.e. ``bins[i-1] <= x < bins[i]`` for
        monotonically increasing bins.

    Returns
    -------
    indices : ndarray of ints
        Output array of indices, of same shape as `x`.

    Raises
    ------
    ValueError
        If `bins` is not monotonic.
    TypeError
        If the type of the input is complex.

    See Also
    --------
    bincount, histogram, unique, searchsorted

    Notes
    -----
    If values in `x` fall outside the bin range, indexing `bins` with the
    indices that `digitize` returns will result in an IndexError.

    `np.digitize` is implemented in terms of `np.searchsorted`, so a
    binary search is used to bin the values.  For monotonically
    increasing `bins`, ``np.digitize(x, bins, right=True)`` is equivalent
    to ``np.searchsorted(bins, x, side='left')`` (note the side flips
    because the argument order is reversed).

    Examples
    --------
    >>> x = np.array([0.2, 6.4, 3.0, 1.6])
    >>> bins = np.array([0.0, 1.0, 2.5, 4.0, 10.0])
    >>> np.digitize(x, bins)
    array([1, 4, 3, 2])

    >>> x = np.array([1.2, 10.0, 12.4, 15.5, 20.])
    >>> bins = np.array([0, 5, 10, 15, 20])
    >>> np.digitize(x,bins,right=True)
    array([1, 2, 3, 4, 4])
    >>> np.digitize(x,bins,right=False)
    array([1, 3, 3, 4, 5])
    """
    x = _nx.asarray(x)
    bins = _nx.asarray(bins)

    # here for compatibility, searchsorted below is happy to take this
    if np.issubdtype(x.dtype, _nx.complexfloating):
        raise TypeError("x may not be complex")

    direction = _monotonicity(bins)
    if direction == 0:
        raise ValueError("bins must be monotonically increasing or decreasing")

    # `right` describes the bins, but searchsorted's arguments are swapped
    # relative to digitize, so the side must be flipped too.
    side = 'right' if not right else 'left'
    if direction == 1:
        return _nx.searchsorted(bins, x, side=side)
    # Decreasing bins: search the reversed bins and invert the result.
    return len(bins) - _nx.searchsorted(bins[::-1], x, side=side)