1""" 

2A module for converting numbers or color arguments to *RGB* or *RGBA*. 

3 

4*RGB* and *RGBA* are sequences of, respectively, 3 or 4 floats in the 

5range 0-1. 

6 

7This module includes functions and classes for color specification 

8conversions, and for mapping numbers to colors in a 1-D array of colors called 

9a colormap. 

10 

11Mapping data onto colors using a colormap typically involves two steps: 

12a data array is first mapped onto the range 0-1 using a subclass of 

13:class:`Normalize`, then this number is mapped to a color using 

14a subclass of :class:`Colormap`. Two are provided here: 

15:class:`LinearSegmentedColormap`, which uses piecewise-linear interpolation 

16to define colormaps, and :class:`ListedColormap`, which makes a colormap 

17from a list of colors. 

18 

19.. seealso:: 

20 

21 :doc:`/tutorials/colors/colormap-manipulation` for examples of how to 

22 make colormaps and 

23 

24 :doc:`/tutorials/colors/colormaps` for a list of built-in colormaps. 

25 

26 :doc:`/tutorials/colors/colormapnorms` for more details about data 

27 normalization 

28 

29 More colormaps are available at palettable_. 

30 

31The module also provides functions for checking whether an object can be 

32interpreted as a color (:func:`is_color_like`), for converting such an object 

33to an RGBA tuple (:func:`to_rgba`) or to an HTML-like hex string in the 

34`#rrggbb` format (:func:`to_hex`), and a sequence of colors to an `(n, 4)` 

35RGBA array (:func:`to_rgba_array`). Caching is used for efficiency. 

36 

37Matplotlib recognizes the following formats to specify a color: 

38 

39* an RGB or RGBA (red, green, blue, alpha) tuple of float values in closed 

40 interval ``[0, 1]`` (e.g., ``(0.1, 0.2, 0.5)`` or ``(0.1, 0.2, 0.5, 0.3)``); 

41* a hex RGB or RGBA string (e.g., ``'#0f0f0f'`` or ``'#0f0f0f80'``; 

42 case-insensitive); 

43* a shorthand hex RGB or RGBA string, equivalent to the hex RGB or RGBA 

44 string obtained by duplicating each character, (e.g., ``'#abc'``, equivalent 

45 to ``'#aabbcc'``, or ``'#abcd'``, equivalent to ``'#aabbccdd'``; 

46 case-insensitive); 

47* a string representation of a float value in ``[0, 1]`` inclusive for gray 

48 level (e.g., ``'0.5'``); 

49* one of ``{'b', 'g', 'r', 'c', 'm', 'y', 'k', 'w'}``, which are the single

50 character short-hand notations for blue, green, red, cyan, magenta, yellow,

51 black, and white;

52* an X11/CSS4 color name (case-insensitive);

53* a name from the `xkcd color survey`_, prefixed with ``'xkcd:'`` (e.g., 

54 ``'xkcd:sky blue'``; case-insensitive);

55* one of the Tableau Colors from the 'T10' categorical palette (the default 

56 color cycle): ``{'tab:blue', 'tab:orange', 'tab:green', 'tab:red', 

57 'tab:purple', 'tab:brown', 'tab:pink', 'tab:gray', 'tab:olive', 'tab:cyan'}`` 

58 (case-insensitive); 

59* a "CN" color spec, i.e. `'C'` followed by a number, which is an index into 

60 the default property cycle (``matplotlib.rcParams['axes.prop_cycle']``); the 

61 indexing is intended to occur at rendering time, and defaults to black if the 

62 cycle does not include a color.

63 

64.. _palettable: https://jiffyclub.github.io/palettable/ 

65.. _xkcd color survey: https://xkcd.com/color/rgb/ 

66""" 

67 

68from collections.abc import Sized 

69import functools 

70import itertools 

71import re 

72 

73import numpy as np 

74import matplotlib.cbook as cbook 

75from matplotlib import docstring 

76from ._color_data import BASE_COLORS, TABLEAU_COLORS, CSS4_COLORS, XKCD_COLORS 

77 

78 

79class _ColorMapping(dict): 

80 def __init__(self, mapping): 

81 super().__init__(mapping) 

82 self.cache = {} 

83 

84 def __setitem__(self, key, value): 

85 super().__setitem__(key, value) 

86 self.cache.clear() 

87 

88 def __delitem__(self, key): 

89 super().__delitem__(key) 

90 self.cache.clear() 

91 

92 

93_colors_full_map = {} 

94# Set by reverse priority order. 

95_colors_full_map.update(XKCD_COLORS) 

96_colors_full_map.update({k.replace('grey', 'gray'): v 

97 for k, v in XKCD_COLORS.items() 

98 if 'grey' in k}) 

99_colors_full_map.update(CSS4_COLORS) 

100_colors_full_map.update(TABLEAU_COLORS) 

101_colors_full_map.update({k.replace('gray', 'grey'): v 

102 for k, v in TABLEAU_COLORS.items() 

103 if 'gray' in k}) 

104_colors_full_map.update(BASE_COLORS) 

105_colors_full_map = _ColorMapping(_colors_full_map) 

106 

107 

108def get_named_colors_mapping(): 

109 """Return the global mapping of names to named colors.""" 

110 return _colors_full_map 

111 

112 

113def _sanitize_extrema(ex): 

114 if ex is None: 

115 return ex 

116 try: 

117 ret = ex.item() 

118 except AttributeError: 

119 ret = float(ex) 

120 return ret 

121 

122 

123def _is_nth_color(c): 

124 """Return whether *c* can be interpreted as an item in the color cycle.""" 

125 return isinstance(c, str) and re.match(r"\AC[0-9]+\Z", c) 

126 

127 

128def is_color_like(c): 

129 """Return whether *c* can be interpreted as an RGB(A) color.""" 

130 # Special-case nth color syntax because it cannot be parsed during setup. 

131 if _is_nth_color(c): 

132 return True 

133 try: 

134 to_rgba(c) 

135 except ValueError: 

136 return False 

137 else: 

138 return True 

139 
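# A minimal usage sketch (added for illustration; results follow from the
# conversion rules above):
#
#     >>> is_color_like((0.1, 0.2, 0.5))      # RGB tuple
#     True
#     >>> is_color_like('#0f0f0f80')          # hex RGBA string
#     True
#     >>> is_color_like('C3')                 # color-cycle spec
#     True
#     >>> is_color_like((0.1, 0.2))           # wrong length
#     False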

140 

141def same_color(c1, c2): 

142 """ 

143 Compare two colors to see if they are the same. 

144 

145 Parameters 

146 ---------- 

147 c1, c2 : Matplotlib colors 

148 

149 Returns 

150 ------- 

151 bool 

152 ``True`` if *c1* and *c2* are the same color, otherwise ``False``. 

153 """ 

154 return (to_rgba_array(c1) == to_rgba_array(c2)).all() 

155 
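# A minimal usage sketch (added for illustration): colors are compared after
# conversion, so different spellings of the same color compare equal.
#
#     >>> same_color('red', (1, 0, 0))
#     True
#     >>> same_color((1, 0, 0), (1, 0, 0, 0.5))   # alphas differ
#     False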

156 

157def to_rgba(c, alpha=None): 

158 """ 

159 Convert *c* to an RGBA color. 

160 

161 Parameters 

162 ---------- 

163 c : Matplotlib color or ``np.ma.masked`` 

164 

165 alpha : scalar, optional 

166 If *alpha* is not ``None``, it forces the alpha value, except if *c* is 

167 ``"none"`` (case-insensitive), which always maps to ``(0, 0, 0, 0)``. 

168 

169 Returns 

170 ------- 

171 tuple 

172 Tuple of ``(r, g, b, a)`` scalars. 

173 """ 

174 # Special-case nth color syntax because it should not be cached. 

175 if _is_nth_color(c): 

176 from matplotlib import rcParams 

177 prop_cycler = rcParams['axes.prop_cycle'] 

178 colors = prop_cycler.by_key().get('color', ['k']) 

179 c = colors[int(c[1:]) % len(colors)] 

180 try: 

181 rgba = _colors_full_map.cache[c, alpha] 

182 except (KeyError, TypeError): # Not in cache, or unhashable. 

183 rgba = None 

184 if rgba is None: # Suppress exception chaining of cache lookup failure. 

185 rgba = _to_rgba_no_colorcycle(c, alpha) 

186 try: 

187 _colors_full_map.cache[c, alpha] = rgba 

188 except TypeError: 

189 pass 

190 return rgba 

191 
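# A minimal usage sketch (added for illustration): any of the formats listed
# in the module docstring is accepted, and the result is a 4-tuple of floats;
# *alpha* overrides the alpha channel except for "none".
#
#     >>> to_rgba((0.1, 0.2, 0.5), alpha=0.3)   # -> (0.1, 0.2, 0.5, 0.3)
#     >>> to_rgba('0.5')                        # string gray level
#     (0.5, 0.5, 0.5, 1.0)
#     >>> to_rgba('none')                       # always fully transparent
#     (0.0, 0.0, 0.0, 0.0)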

192 

193def _to_rgba_no_colorcycle(c, alpha=None): 

194 """Convert *c* to an RGBA color, with no support for color-cycle syntax. 

195 

196 If *alpha* is not ``None``, it forces the alpha value, except if *c* is 

197 ``"none"`` (case-insensitive), which always maps to ``(0, 0, 0, 0)``. 

198 """ 

199 orig_c = c 

200 if c is np.ma.masked: 

201 return (0., 0., 0., 0.) 

202 if isinstance(c, str): 

203 if c.lower() == "none": 

204 return (0., 0., 0., 0.) 

205 # Named color. 

206 try: 

207 # This may turn c into a non-string, so we check again below. 

208 c = _colors_full_map[c] 

209 except KeyError: 

210 try: 

211 c = _colors_full_map[c.lower()] 

212 except KeyError: 

213 pass 

214 else: 

215 if len(orig_c) == 1: 

216 cbook.warn_deprecated( 

217 "3.1", message="Support for uppercase " 

218 "single-letter colors is deprecated since Matplotlib " 

219 "%(since)s and will be removed %(removal)s; please " 

220 "use lowercase instead.") 

221 if isinstance(c, str): 

222 # hex color in #rrggbb format. 

223 match = re.match(r"\A#[a-fA-F0-9]{6}\Z", c) 

224 if match: 

225 return (tuple(int(n, 16) / 255 

226 for n in [c[1:3], c[3:5], c[5:7]]) 

227 + (alpha if alpha is not None else 1.,)) 

228 # hex color in #rgb format, shorthand for #rrggbb. 

229 match = re.match(r"\A#[a-fA-F0-9]{3}\Z", c) 

230 if match: 

231 return (tuple(int(n, 16) / 255 

232 for n in [c[1]*2, c[2]*2, c[3]*2]) 

233 + (alpha if alpha is not None else 1.,)) 

234 # hex color with alpha in #rrggbbaa format. 

235 match = re.match(r"\A#[a-fA-F0-9]{8}\Z", c) 

236 if match: 

237 color = [int(n, 16) / 255 

238 for n in [c[1:3], c[3:5], c[5:7], c[7:9]]] 

239 if alpha is not None: 

240 color[-1] = alpha 

241 return tuple(color) 

242 # hex color with alpha in #rgba format, shorthand for #rrggbbaa. 

243 match = re.match(r"\A#[a-fA-F0-9]{4}\Z", c) 

244 if match: 

245 color = [int(n, 16) / 255 

246 for n in [c[1]*2, c[2]*2, c[3]*2, c[4]*2]] 

247 if alpha is not None: 

248 color[-1] = alpha 

249 return tuple(color) 

250 # string gray. 

251 try: 

252 c = float(c) 

253 except ValueError: 

254 pass 

255 else: 

256 if not (0 <= c <= 1): 

257 raise ValueError( 

258 f"Invalid string grayscale value {orig_c!r}. " 

259 f"Value must be within 0-1 range") 

260 return c, c, c, alpha if alpha is not None else 1. 

261 raise ValueError(f"Invalid RGBA argument: {orig_c!r}") 

262 # tuple color. 

263 c = np.array(c) 

264 if not np.can_cast(c.dtype, float, "same_kind") or c.ndim != 1: 

265 # Test the dtype explicitly as `map(float, ...)`, `np.array(..., 

266 # float)` and `np.array(...).astype(float)` all convert "0.5" to 0.5. 

267 # Test dimensionality to reject single floats. 

268 raise ValueError(f"Invalid RGBA argument: {orig_c!r}") 

269 # Return a tuple to prevent the cached value from being modified. 

270 c = tuple(c.astype(float)) 

271 if len(c) not in [3, 4]: 

272 raise ValueError("RGBA sequence should have length 3 or 4") 

273 if len(c) == 3 and alpha is None: 

274 alpha = 1 

275 if alpha is not None: 

276 c = c[:3] + (alpha,) 

277 if any(elem < 0 or elem > 1 for elem in c): 

278 raise ValueError("RGBA values should be within 0-1 range") 

279 return c 

280 

281 

282def to_rgba_array(c, alpha=None): 

283 """Convert *c* to a (n, 4) array of RGBA colors. 

284 

285 If *alpha* is not ``None``, it forces the alpha value. If *c* is 

286 ``"none"`` (case-insensitive) or an empty list, an empty array is returned. 

287 If *c* is a masked array, an ndarray is returned with a (0, 0, 0, 0) 

288 row for each masked value or row in *c*. 

289 """ 

290 # Special-case inputs that are already arrays, for performance. (If the 

291 # array has the wrong kind or shape, raise the error during one-at-a-time 

292 # conversion.) 

293 if (isinstance(c, np.ndarray) and c.dtype.kind in "if" 

294 and c.ndim == 2 and c.shape[1] in [3, 4]): 

295 mask = c.mask.any(axis=1) if np.ma.is_masked(c) else None 

296 c = np.ma.getdata(c) 

297 if c.shape[1] == 3: 

298 result = np.column_stack([c, np.zeros(len(c))]) 

299 result[:, -1] = alpha if alpha is not None else 1. 

300 elif c.shape[1] == 4: 

301 result = c.copy() 

302 if alpha is not None: 

303 result[:, -1] = alpha 

304 if mask is not None: 

305 result[mask] = 0 

306 if np.any((result < 0) | (result > 1)): 

307 raise ValueError("RGBA values should be within 0-1 range") 

308 return result 

309 # Handle single values. 

310 # Note that this occurs *after* handling inputs that are already arrays, as 

311 # `to_rgba(c, alpha)` (below) is expensive for such inputs, due to the need 

312 # to format the array in the ValueError message(!). 

313 if cbook._str_lower_equal(c, "none"): 

314 return np.zeros((0, 4), float) 

315 try: 

316 return np.array([to_rgba(c, alpha)], float) 

317 except (ValueError, TypeError): 

318 pass 

319 

320 # Convert one at a time. 

321 if isinstance(c, str): 

322 # Single string as color sequence. 

323 # This is deprecated and will be removed in the future. 

324 try: 

325 result = np.array([to_rgba(cc, alpha) for cc in c]) 

326 except ValueError: 

327 raise ValueError( 

328 "'%s' is neither a valid single color nor a color sequence " 

329 "consisting of single character color specifiers such as " 

330 "'rgb'. Note also that the latter is deprecated." % c) 

331 else: 

332 cbook.warn_deprecated("3.2", message="Using a string of single " 

333 "character colors as a color sequence is " 

334 "deprecated. Use an explicit list instead.") 

335 return result 

336 

337 if len(c) == 0: 

338 return np.zeros((0, 4), float) 

339 else: 

340 return np.array([to_rgba(cc, alpha) for cc in c]) 

341 
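# A minimal usage sketch (added for illustration): a sequence of color specs
# becomes an (n, 4) float array, and a forced *alpha* applies to every row.
#
#     >>> to_rgba_array([(1, 0, 0), (0, 0, 1)], alpha=0.5)
#     array([[1. , 0. , 0. , 0.5],
#            [0. , 0. , 1. , 0.5]])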

342 

343def to_rgb(c): 

344 """Convert *c* to an RGB color, silently dropping the alpha channel.""" 

345 return to_rgba(c)[:3] 

346 

347 

348def to_hex(c, keep_alpha=False): 

349 """ 

350 Convert *c* to a hex color. 

351 

352 Uses the ``#rrggbb`` format if *keep_alpha* is False (the default), 

353 ``#rrggbbaa`` otherwise. 

354 """ 

355 c = to_rgba(c) 

356 if not keep_alpha: 

357 c = c[:3] 

358 return "#" + "".join(format(int(round(val * 255)), "02x") for val in c) 

359 
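# A minimal usage sketch (added for illustration): each channel is rounded to
# the nearest of 256 levels before formatting.
#
#     >>> to_hex('r')
#     '#ff0000'
#     >>> to_hex((0, 0, 1, 0.5), keep_alpha=True)
#     '#0000ff80'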

360 

361### Backwards-compatible color-conversion API 

362 

363 

364cnames = CSS4_COLORS 

365hexColorPattern = re.compile(r"\A#[a-fA-F0-9]{6}\Z") 

366rgb2hex = to_hex 

367hex2color = to_rgb 

368 

369 

370class ColorConverter: 

371 """ 

372 This class is only kept for backwards compatibility. 

373 

374 Its functionality is entirely provided by module-level functions. 

375 """ 

376 colors = _colors_full_map 

377 cache = _colors_full_map.cache 

378 to_rgb = staticmethod(to_rgb) 

379 to_rgba = staticmethod(to_rgba) 

380 to_rgba_array = staticmethod(to_rgba_array) 

381 

382 

383colorConverter = ColorConverter() 

384 

385 

386### End of backwards-compatible color-conversion API 

387 

388 

389def _create_lookup_table(N, data, gamma=1.0): 

390 r"""Create an *N* -element 1-d lookup table. 

391 

392 This assumes a mapping :math:`f : [0, 1] \rightarrow [0, 1]`. The returned 

393 data is an array of N values :math:`y = f(x)` where x is sampled from 

394 [0, 1]. 

395 

396 By default (*gamma* = 1) x is equidistantly sampled from [0, 1]. The 

397 *gamma* correction factor :math:`\gamma` distorts this equidistant 

398 sampling by :math:`x \rightarrow x^\gamma`. 

399 

400 Parameters 

401 ---------- 

402 N : int 

403 The number of elements of the created lookup table. 

404 This must be N >= 1. 

405 data : Mx3 array-like or callable 

406 Defines the mapping :math:`f`. 

407 

408 If a Mx3 array-like, the rows define values (x, y0, y1). The x values 

409 must start with x=0, end with x=1, and all x values be in increasing 

410 order. 

411 

412 A value between :math:`x_i` and :math:`x_{i+1}` is mapped to the range 

413 :math:`y^1_i \ldots y^0_{i+1}` by linear interpolation.

414 

415 For the simple case of a y-continuous mapping, y0 and y1 are identical. 

416 

417 The two values of y are to allow for discontinuous mapping functions. 

418 E.g. a sawtooth with a period of 0.2 and an amplitude of 1 would be:: 

419 

420 [(0, 1, 0), (0.2, 1, 0), (0.4, 1, 0), ..., (1, 1, 0)]

421 

422 In the special case of ``N == 1``, by convention the returned value 

423 is y0 for x == 1. 

424 

425 If *data* is a callable, it must accept and return numpy arrays:: 

426 

427 data(x : ndarray) -> ndarray 

428 

429 and map values between 0 - 1 to 0 - 1. 

430 gamma : float 

431 Gamma correction factor for input distribution x of the mapping. 

432 

433 See also https://en.wikipedia.org/wiki/Gamma_correction. 

434 

435 Returns 

436 ------- 

437 lut : array 

438 The lookup table where ``lut[x * (N-1)]`` gives the closest value 

439 for values of x between 0 and 1. 

440 

441 Notes 

442 ----- 

443 This function is internally used for `.LinearSegmentedColormap`.

444 """ 

445 

446 if callable(data): 

447 xind = np.linspace(0, 1, N) ** gamma 

448 lut = np.clip(np.array(data(xind), dtype=float), 0, 1) 

449 return lut 

450 

451 try: 

452 adata = np.array(data) 

453 except Exception: 

454 raise TypeError("data must be convertible to an array") 

455 shape = adata.shape 

456 if len(shape) != 2 or shape[1] != 3: 

457 raise ValueError("data must be nx3 format") 

458 

459 x = adata[:, 0] 

460 y0 = adata[:, 1] 

461 y1 = adata[:, 2] 

462 

463 if x[0] != 0. or x[-1] != 1.0: 

464 raise ValueError( 

465 "data mapping points must start with x=0 and end with x=1") 

466 if (np.diff(x) < 0).any(): 

467 raise ValueError("data mapping points must have x in increasing order") 

468 # begin generation of lookup table 

469 if N == 1: 

470 # convention: use the y = f(x=1) value for a 1-element lookup table 

471 lut = np.array(y0[-1]) 

472 else: 

473 x = x * (N - 1) 

474 xind = (N - 1) * np.linspace(0, 1, N) ** gamma 

475 ind = np.searchsorted(x, xind)[1:-1] 

476 

477 distance = (xind[1:-1] - x[ind - 1]) / (x[ind] - x[ind - 1]) 

478 lut = np.concatenate([ 

479 [y1[0]], 

480 distance * (y0[ind] - y1[ind - 1]) + y1[ind - 1], 

481 [y0[-1]], 

482 ]) 

483 # ensure that the lut is confined to values between 0 and 1 by clipping it 

484 return np.clip(lut, 0.0, 1.0) 

485 
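# A minimal usage sketch (added for illustration): a two-point segment from 0
# to 1 yields an evenly spaced ramp of N values.
#
#     >>> _create_lookup_table(5, [(0., 0., 0.), (1., 1., 1.)])
#     array([0.  , 0.25, 0.5 , 0.75, 1.  ])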

486 

487@cbook.deprecated("3.2", 

488 addendum='This is not considered public API any longer.') 

489@docstring.copy(_create_lookup_table) 

490def makeMappingArray(N, data, gamma=1.0): 

491 return _create_lookup_table(N, data, gamma) 

492 

493 

494class Colormap: 

495 """ 

496 Baseclass for all scalar to RGBA mappings. 

497 

498 Typically Colormap instances are used to convert data values (floats) from 

499 the interval ``[0, 1]`` to the RGBA color that the respective Colormap 

500 represents. For scaling of data into the ``[0, 1]`` interval see 

501 :class:`matplotlib.colors.Normalize`. It is worth noting that 

502 :class:`matplotlib.cm.ScalarMappable` subclasses make heavy use of this 

503 ``data->normalize->map-to-color`` processing chain. 

504 

505 """ 

506 def __init__(self, name, N=256): 

507 """ 

508 Parameters 

509 ---------- 

510 name : str 

511 The name of the colormap. 

512 N : int 

513 The number of rgb quantization levels. 

514 

515 """ 

516 self.name = name 

517 self.N = int(N) # ensure that N is always int 

518 self._rgba_bad = (0.0, 0.0, 0.0, 0.0) # If bad, don't paint anything. 

519 self._rgba_under = None 

520 self._rgba_over = None 

521 self._i_under = self.N 

522 self._i_over = self.N + 1 

523 self._i_bad = self.N + 2 

524 self._isinit = False 

525 

526 #: When this colormap exists on a scalar mappable and colorbar_extend 

527 #: is not False, colorbar creation will pick up ``colorbar_extend`` as 

528 #: the default value for the ``extend`` keyword in the 

529 #: :class:`matplotlib.colorbar.Colorbar` constructor. 

530 self.colorbar_extend = False 

531 

532 def __call__(self, X, alpha=None, bytes=False): 

533 """ 

534 Parameters 

535 ---------- 

536 X : scalar, ndarray 

537 The data value(s) to convert to RGBA. 

538 For floats, X should be in the interval ``[0.0, 1.0]`` to 

539 return the RGBA values ``X*100`` percent along the Colormap line. 

540 For integers, X should be in the interval ``[0, Colormap.N)`` to 

541 return RGBA values *indexed* from the Colormap with index ``X``. 

542 alpha : float, None 

543 Alpha must be a scalar between 0 and 1, or None. 

544 bytes : bool 

545 If False (default), the returned RGBA values will be floats in the 

546 interval ``[0, 1]`` otherwise they will be uint8s in the interval 

547 ``[0, 255]``. 

548 

549 Returns 

550 ------- 

551 Tuple of RGBA values if X is scalar, otherwise an array of 

552 RGBA values with a shape of ``X.shape + (4, )``. 

553 

554 """ 

555 # See class docstring for arg/kwarg documentation. 

556 if not self._isinit: 

557 self._init() 

558 mask_bad = None 

559 if np.ma.is_masked(X): 

560 mask_bad = X.mask 

561 elif np.any(np.isnan(X)): 

562 # mask nan's 

563 mask_bad = np.isnan(X) 

564 

565 xa = np.array(X, copy=True) 

566 # Fill bad values to avoid warnings 

567 # in the boolean comparisons below. 

568 if mask_bad is not None: 

569 xa[mask_bad] = 0. 

570 

571 # Calculations with native byteorder are faster, and avoid a 

572 # bug that otherwise can occur with putmask when the last 

573 # argument is a numpy scalar. 

574 if not xa.dtype.isnative: 

575 xa = xa.byteswap().newbyteorder() 

576 

577 if xa.dtype.kind == "f": 

578 xa *= self.N 

579 # Negative values are out of range, but astype(int) would truncate 

580 # them towards zero. 

581 xa[xa < 0] = -1 

582 # xa == 1 (== N after multiplication) is not out of range. 

583 xa[xa == self.N] = self.N - 1 

584 # Avoid converting large positive values to negative integers. 

585 np.clip(xa, -1, self.N, out=xa) 

586 xa = xa.astype(int) 

587 # Set the over-range indices before the under-range; 

588 # otherwise the under-range values get converted to over-range. 

589 xa[xa > self.N - 1] = self._i_over 

590 xa[xa < 0] = self._i_under 

591 if mask_bad is not None: 

592 xa[mask_bad] = self._i_bad 

593 

594 if bytes: 

595 lut = (self._lut * 255).astype(np.uint8) 

596 else: 

597 lut = self._lut.copy() # Don't let alpha modify original _lut. 

598 

599 if alpha is not None: 

600 alpha = np.clip(alpha, 0, 1) 

601 if bytes: 

602 alpha = int(alpha * 255) 

603 if (lut[-1] == 0).all(): 

604 lut[:-1, -1] = alpha 

605 # All zeros is taken as a flag for the default bad 

606 # color, which is no color--fully transparent. We 

607 # don't want to override this. 

608 else: 

609 lut[:, -1] = alpha 

610 # If the bad value is set to have a color, then we 

611 # override its alpha just as for any other value. 

612 

613 rgba = lut.take(xa, axis=0, mode='clip') 

614 if not np.iterable(X): 

615 # Return a tuple if the input was a scalar 

616 rgba = tuple(rgba) 

617 return rgba 

618 

619 def __copy__(self): 

620 """Create new object with the same class, update attributes 

621 """ 

622 cls = self.__class__ 

623 cmapobject = cls.__new__(cls) 

624 cmapobject.__dict__.update(self.__dict__) 

625 if self._isinit: 

626 cmapobject._lut = np.copy(self._lut) 

627 return cmapobject 

628 

629 def set_bad(self, color='k', alpha=None): 

630 """Set color to be used for masked values. 

631 """ 

632 self._rgba_bad = to_rgba(color, alpha) 

633 if self._isinit: 

634 self._set_extremes() 

635 

636 def set_under(self, color='k', alpha=None): 

637 """ 

638 Set the color for low out-of-range values when ``norm.clip = False``. 

639 """ 

640 self._rgba_under = to_rgba(color, alpha) 

641 if self._isinit: 

642 self._set_extremes() 

643 

644 def set_over(self, color='k', alpha=None): 

645 """ 

646 Set the color for high out-of-range values when ``norm.clip = False``. 

647 """ 

648 self._rgba_over = to_rgba(color, alpha) 

649 if self._isinit: 

650 self._set_extremes() 

651 

652 def _set_extremes(self): 

653 if self._rgba_under: 

654 self._lut[self._i_under] = self._rgba_under 

655 else: 

656 self._lut[self._i_under] = self._lut[0] 

657 if self._rgba_over: 

658 self._lut[self._i_over] = self._rgba_over 

659 else: 

660 self._lut[self._i_over] = self._lut[self.N - 1] 

661 self._lut[self._i_bad] = self._rgba_bad 

662 

663 def _init(self): 

664 """Generate the lookup table, self._lut""" 

665 raise NotImplementedError("Abstract class only") 

666 

667 def is_gray(self): 

668 if not self._isinit: 

669 self._init() 

670 return (np.all(self._lut[:, 0] == self._lut[:, 1]) and 

671 np.all(self._lut[:, 0] == self._lut[:, 2])) 

672 

673 def _resample(self, lutsize): 

674 """ 

675 Return a new color map with *lutsize* entries. 

676 """ 

677 raise NotImplementedError() 

678 

679 def reversed(self, name=None): 

680 """ 

681 Make a reversed instance of the Colormap. 

682 

683 .. note:: Function not implemented for base class. 

684 

685 Parameters 

686 ---------- 

687 name : str, optional 

688 The name for the reversed colormap. If it's None the 

689 name will be the name of the parent colormap + "_r". 

690 

691 See Also 

692 -------- 

693 LinearSegmentedColormap.reversed 

694 ListedColormap.reversed 

695 """ 

696 raise NotImplementedError() 

697 
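# A minimal usage sketch (added for illustration, using the built-in 'viridis'
# colormap from matplotlib.cm): concrete subclasses are called like functions;
# a float scalar gives an RGBA tuple, an array gives an ``X.shape + (4,)``
# array, and ``bytes=True`` switches to uint8 output.
#
#     >>> import matplotlib.cm as cm
#     >>> cm.viridis(0.5)                              # RGBA tuple of floats
#     >>> cm.viridis(np.linspace(0, 1, 8))             # (8, 4) float array
#     >>> cm.viridis(np.linspace(0, 1, 8), bytes=True) # (8, 4) uint8 array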

698 

699class LinearSegmentedColormap(Colormap): 

700 """ 

701 Colormap objects based on lookup tables using linear segments. 

702 

703 The lookup table is generated using linear interpolation for each 

704 primary color, with the 0-1 domain divided into any number of 

705 segments. 

706 """ 

707 

708 def __init__(self, name, segmentdata, N=256, gamma=1.0): 

709 """ 

710 Create color map from linear mapping segments 

711 

712 The *segmentdata* argument is a dictionary with red, green, and blue

713 entries. Each entry should be a list of *x*, *y0*, *y1* tuples, 

714 forming rows in a table. Entries for alpha are optional. 

715 

716 Example: suppose you want red to increase from 0 to 1 over 

717 the bottom half, green to do the same over the middle half, 

718 and blue over the top half. Then you would use:: 

719 

720 cdict = {'red': [(0.0, 0.0, 0.0), 

721 (0.5, 1.0, 1.0), 

722 (1.0, 1.0, 1.0)], 

723 

724 'green': [(0.0, 0.0, 0.0), 

725 (0.25, 0.0, 0.0), 

726 (0.75, 1.0, 1.0), 

727 (1.0, 1.0, 1.0)], 

728 

729 'blue': [(0.0, 0.0, 0.0), 

730 (0.5, 0.0, 0.0), 

731 (1.0, 1.0, 1.0)]} 

732 

733 Each row in the table for a given color is a sequence of 

734 *x*, *y0*, *y1* tuples. In each sequence, *x* must increase 

735 monotonically from 0 to 1. For any input value *z* falling 

736 between *x[i]* and *x[i+1]*, the output value of a given color 

737 will be linearly interpolated between *y1[i]* and *y0[i+1]*:: 

738 

739 row i: x y0 y1 

740 / 

741 / 

742 row i+1: x y0 y1 

743 

744 Hence y0 in the first row and y1 in the last row are never used. 

745 

746 See Also 

747 -------- 

748 LinearSegmentedColormap.from_list 

749 Static method; factory function for generating a smoothly-varying 

750 LinearSegmentedColormap. 

751 

752 makeMappingArray 

753 For information about making a mapping array. 

754 """ 

755 # True only if all colors in map are identical; needed for contouring. 

756 self.monochrome = False 

757 Colormap.__init__(self, name, N) 

758 self._segmentdata = segmentdata 

759 self._gamma = gamma 

760 

761 def _init(self): 

762 self._lut = np.ones((self.N + 3, 4), float) 

763 self._lut[:-3, 0] = _create_lookup_table( 

764 self.N, self._segmentdata['red'], self._gamma) 

765 self._lut[:-3, 1] = _create_lookup_table( 

766 self.N, self._segmentdata['green'], self._gamma) 

767 self._lut[:-3, 2] = _create_lookup_table( 

768 self.N, self._segmentdata['blue'], self._gamma) 

769 if 'alpha' in self._segmentdata: 

770 self._lut[:-3, 3] = _create_lookup_table( 

771 self.N, self._segmentdata['alpha'], 1) 

772 self._isinit = True 

773 self._set_extremes() 

774 

775 def set_gamma(self, gamma): 

776 """ 

777 Set a new gamma value and regenerate color map. 

778 """ 

779 self._gamma = gamma 

780 self._init() 

781 

782 @staticmethod 

783 def from_list(name, colors, N=256, gamma=1.0): 

784 """ 

785 Make a linear segmented colormap with *name* from a sequence 

786 of *colors* which evenly transitions from colors[0] at val=0 

787 to colors[-1] at val=1. *N* is the number of rgb quantization 

788 levels. 

789 Alternatively, a list of (value, color) tuples can be given 

790 to divide the range unevenly. 

791 """ 

792 

793 if not np.iterable(colors): 

794 raise ValueError('colors must be iterable') 

795 

796 if (isinstance(colors[0], Sized) and len(colors[0]) == 2 

797 and not isinstance(colors[0], str)): 

798 # List of value, color pairs 

799 vals, colors = zip(*colors) 

800 else: 

801 vals = np.linspace(0, 1, len(colors)) 

802 

803 cdict = dict(red=[], green=[], blue=[], alpha=[]) 

804 for val, color in zip(vals, colors): 

805 r, g, b, a = to_rgba(color) 

806 cdict['red'].append((val, r, r)) 

807 cdict['green'].append((val, g, g)) 

808 cdict['blue'].append((val, b, b)) 

809 cdict['alpha'].append((val, a, a)) 

810 

811 return LinearSegmentedColormap(name, cdict, N, gamma) 

812 

813 def _resample(self, lutsize): 

814 """ 

815 Return a new color map with *lutsize* entries. 

816 """ 

817 return LinearSegmentedColormap(self.name, self._segmentdata, lutsize) 

818 

819 # Helper ensuring picklability of the reversed cmap. 

820 @staticmethod 

821 def _reverser(func, x): 

822 return func(1 - x) 

823 

824 def reversed(self, name=None): 

825 """ 

826 Make a reversed instance of the Colormap. 

827 

828 Parameters 

829 ---------- 

830 name : str, optional 

831 The name for the reversed colormap. If it's None the 

832 name will be the name of the parent colormap + "_r". 

833 

834 Returns 

835 ------- 

836 LinearSegmentedColormap 

837 The reversed colormap. 

838 """ 

839 if name is None: 

840 name = self.name + "_r" 

841 

842 # Using a partial object keeps the cmap picklable. 

843 data_r = {key: (functools.partial(self._reverser, data) 

844 if callable(data) else 

845 [(1.0 - x, y1, y0) for x, y0, y1 in reversed(data)]) 

846 for key, data in self._segmentdata.items()} 

847 

848 return LinearSegmentedColormap(name, data_r, self.N, self._gamma) 

849 
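# A minimal usage sketch (added for illustration): `from_list` is the usual
# shortcut for building a smooth colormap; (value, color) pairs space the
# transitions unevenly.
#
#     >>> cmap1 = LinearSegmentedColormap.from_list('ramp', ['white', 'red'])
#     >>> cmap2 = LinearSegmentedColormap.from_list(
#     ...     'uneven', [(0.0, 'black'), (0.8, 'red'), (1.0, 'yellow')])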

850 

851class ListedColormap(Colormap): 

852 """ 

853 Colormap object generated from a list of colors. 

854 

855 This may be most useful when indexing directly into a colormap, 

856 but it can also be used to generate special colormaps for ordinary 

857 mapping. 

858 

859 Parameters 

860 ---------- 

861 colors : list, array 

862 List of Matplotlib color specifications, or an equivalent Nx3 or Nx4 

863 floating point array (*N* rgb or rgba values). 

864 name : str, optional 

865 String to identify the colormap. 

866 N : int, optional 

867 Number of entries in the map. The default is *None*, in which case 

868 there is one colormap entry for each element in the list of colors. 

869 If :: 

870 

871 N < len(colors) 

872 

873 the list will be truncated at *N*. If :: 

874 

875 N > len(colors) 

876 

877 the list will be extended by repetition. 

878 """ 

879 def __init__(self, colors, name='from_list', N=None): 

880 self.monochrome = False # Are all colors identical? (for contour.py) 

881 if N is None: 

882 self.colors = colors 

883 N = len(colors) 

884 else: 

885 if isinstance(colors, str): 

886 self.colors = [colors] * N 

887 self.monochrome = True 

888 elif np.iterable(colors): 

889 if len(colors) == 1: 

890 self.monochrome = True 

891 self.colors = list( 

892 itertools.islice(itertools.cycle(colors), N)) 

893 else: 

894 try: 

895 gray = float(colors) 

896 except TypeError: 

897 pass 

898 else: 

899 self.colors = [gray] * N 

900 self.monochrome = True 

901 Colormap.__init__(self, name, N) 

902 

903 def _init(self): 

904 self._lut = np.zeros((self.N + 3, 4), float) 

905 self._lut[:-3] = to_rgba_array(self.colors) 

906 self._isinit = True 

907 self._set_extremes() 

908 

909 def _resample(self, lutsize): 

910 """ 

911 Return a new color map with *lutsize* entries. 

912 """ 

913 colors = self(np.linspace(0, 1, lutsize)) 

914 return ListedColormap(colors, name=self.name) 

915 

916 def reversed(self, name=None): 

917 """ 

918 Make a reversed instance of the Colormap. 

919 

920 Parameters 

921 ---------- 

922 name : str, optional 

923 The name for the reversed colormap. If it's None the 

924 name will be the name of the parent colormap + "_r". 

925 

926 Returns 

927 ------- 

928 ListedColormap 

929 A reversed instance of the colormap. 

930 """ 

931 if name is None: 

932 name = self.name + "_r" 

933 

934 colors_r = list(reversed(self.colors)) 

935 return ListedColormap(colors_r, name=name, N=self.N) 

936 
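# A minimal usage sketch (added for illustration): a ListedColormap simply
# indexes into the given list; with *N* it truncates or cycles the colors.
#
#     >>> cmap = ListedColormap(['white', 'tab:blue', 'black'])
#     >>> cmap(0)                            # -> (1.0, 1.0, 1.0, 1.0)
#     >>> ListedColormap(['white', 'black'], N=4)   # cycles to 4 entries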

937 

938class Normalize: 

939 """ 

940 A class which, when called, can normalize data into 

941 the ``[0.0, 1.0]`` interval. 

942 

943 """ 

944 def __init__(self, vmin=None, vmax=None, clip=False): 

945 """ 

946 If *vmin* or *vmax* is not given, they are initialized from the 

947 minimum and maximum value respectively of the first input 

948 processed. That is, *__call__(A)* calls *autoscale_None(A)*. 

949 If *clip* is *True* and the given value falls outside the range, 

950 the returned value will be 0 or 1, whichever is closer. 

951 Returns 0 if :: 

952 

953 vmin==vmax 

954 

955 Works with scalars or arrays, including masked arrays. If 

956 *clip* is *True*, masked values are set to 1; otherwise they 

957 remain masked. Clipping silently defeats the purpose of setting 

958 the over, under, and masked colors in the colormap, so it is 

959 likely to lead to surprises; therefore the default is 

960 *clip* = *False*. 

961 """ 

962 self.vmin = _sanitize_extrema(vmin) 

963 self.vmax = _sanitize_extrema(vmax) 

964 self.clip = clip 

965 

966 @staticmethod 

967 def process_value(value): 

968 """ 

969 Homogenize the input *value* for easy and efficient normalization. 

970 

971 *value* can be a scalar or sequence. 

972 

973 Returns *result*, *is_scalar*, where *result* is a 

974 masked array matching *value*. Float dtypes are preserved; 

975 integer types with two bytes or smaller are converted to 

976 np.float32, and larger types are converted to np.float64. 

977 Preserving float32 when possible, and using in-place operations, 

978 can greatly improve speed for large arrays. 

979 

980 Experimental; we may want to add an option to force the 

981 use of float32. 

982 """ 

983 is_scalar = not np.iterable(value) 

984 if is_scalar: 

985 value = [value] 

986 dtype = np.min_scalar_type(value) 

987 if np.issubdtype(dtype, np.integer) or dtype.type is np.bool_: 

988 # bool_/int8/int16 -> float32; int32/int64 -> float64 

989 dtype = np.promote_types(dtype, np.float32) 

990 # ensure data passed in as an ndarray subclass are interpreted as 

991 # an ndarray. See issue #6622. 

992 mask = np.ma.getmask(value) 

993 data = np.asarray(value) 

994 result = np.ma.array(data, mask=mask, dtype=dtype, copy=True) 

995 return result, is_scalar 

996 

997 def __call__(self, value, clip=None): 

998 """ 

999 Normalize *value* data in the ``[vmin, vmax]`` interval into 

1000 the ``[0.0, 1.0]`` interval and return it. *clip* defaults 

1001 to *self.clip* (which defaults to *False*). If not already 

1002 initialized, *vmin* and *vmax* are initialized using 

1003 *autoscale_None(value)*. 

1004 """ 

1005 if clip is None: 

1006 clip = self.clip 

1007 

1008 result, is_scalar = self.process_value(value) 

1009 

1010 self.autoscale_None(result) 

1011 # Convert at least to float, without losing precision. 

1012 (vmin,), _ = self.process_value(self.vmin) 

1013 (vmax,), _ = self.process_value(self.vmax) 

1014 if vmin == vmax: 

1015 result.fill(0) # Or should it be all masked? Or 0.5? 

1016 elif vmin > vmax: 

1017 raise ValueError("minvalue must be less than or equal to maxvalue") 

1018 else: 

1019 if clip: 

1020 mask = np.ma.getmask(result) 

1021 result = np.ma.array(np.clip(result.filled(vmax), vmin, vmax), 

1022 mask=mask) 

1023 # ma division is very slow; we can take a shortcut 

1024 resdat = result.data 

1025 resdat -= vmin 

1026 resdat /= (vmax - vmin) 

1027 result = np.ma.array(resdat, mask=result.mask, copy=False) 

1028 if is_scalar: 

1029 result = result[0] 

1030 return result 

1031 

1032 def inverse(self, value): 

1033 if not self.scaled(): 

1034 raise ValueError("Not invertible until scaled") 

1035 (vmin,), _ = self.process_value(self.vmin) 

1036 (vmax,), _ = self.process_value(self.vmax) 

1037 

1038 if np.iterable(value): 

1039 val = np.ma.asarray(value) 

1040 return vmin + val * (vmax - vmin) 

1041 else: 

1042 return vmin + value * (vmax - vmin) 

1043 

1044 def autoscale(self, A): 

1045 """Set *vmin*, *vmax* to min, max of *A*.""" 

1046 A = np.asanyarray(A) 

1047 self.vmin = A.min() 

1048 self.vmax = A.max() 

1049 

1050 def autoscale_None(self, A): 

1051 """Autoscale only None-valued vmin or vmax.""" 

1052 A = np.asanyarray(A) 

1053 if self.vmin is None and A.size: 

1054 self.vmin = A.min() 

1055 if self.vmax is None and A.size: 

1056 self.vmax = A.max() 

1057 

1058 def scaled(self): 

1059 """Return whether vmin and vmax are set.""" 

1060 return self.vmin is not None and self.vmax is not None 

1061 
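# A minimal usage sketch (added for illustration): Normalize maps
# [vmin, vmax] linearly onto [0, 1]; arrays are handled elementwise and
# `inverse` undoes the mapping.
#
#     >>> norm = Normalize(vmin=0, vmax=10)
#     >>> norm(5)
#     0.5
#     >>> norm.inverse(0.5)
#     5.0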

1062 

1063class TwoSlopeNorm(Normalize): 

1064 def __init__(self, vcenter, vmin=None, vmax=None): 

1065 """ 

1066 Normalize data with a set center. 

1067 

1068 Useful when mapping data with unequal rates of change around a

1069 conceptual center, e.g., data that range from -2 to 4, with 0 as 

1070 the midpoint. 

1071 

1072 Parameters 

1073 ---------- 

1074 vcenter : float 

1075 The data value that defines ``0.5`` in the normalization. 

1076 vmin : float, optional 

1077 The data value that defines ``0.0`` in the normalization. 

1078 Defaults to the min value of the dataset. 

1079 vmax : float, optional 

1080 The data value that defines ``1.0`` in the normalization. 

1081 Defaults to the max value of the dataset.

1082 

1083 Examples 

1084 -------- 

1085 This maps data value -4000 to 0., 0 to 0.5, and +10000 to 1.0; data 

1086 between is linearly interpolated:: 

1087 

1088 >>> import matplotlib.colors as mcolors 

1089 >>> offset = mcolors.TwoSlopeNorm(vmin=-4000., 

1090 ... vcenter=0., vmax=10000)

1091 >>> data = [-4000., -2000., 0., 2500., 5000., 7500., 10000.] 

1092 >>> offset(data) 

1093 array([0., 0.25, 0.5, 0.625, 0.75, 0.875, 1.0]) 

1094 """ 

1095 

1096 self.vcenter = vcenter 

1097 self.vmin = vmin 

1098 self.vmax = vmax 

1099 if vcenter is not None and vmax is not None and vcenter >= vmax: 

1100 raise ValueError('vmin, vcenter, and vmax must be in ' 

1101 'ascending order') 

1102 if vcenter is not None and vmin is not None and vcenter <= vmin: 

1103 raise ValueError('vmin, vcenter, and vmax must be in ' 

1104 'ascending order') 

1105 

1106 def autoscale_None(self, A): 

1107 """ 

1108 Get vmin and vmax, and then clip at vcenter 

1109 """ 

1110 super().autoscale_None(A) 

1111 if self.vmin > self.vcenter: 

1112 self.vmin = self.vcenter 

1113 if self.vmax < self.vcenter: 

1114 self.vmax = self.vcenter 

1115 

1116 def __call__(self, value, clip=None): 

1117 """ 

1118 Map value to the interval [0, 1]. The clip argument is unused. 

1119 """ 

1120 result, is_scalar = self.process_value(value) 

1121 self.autoscale_None(result) # sets self.vmin, self.vmax if None 

1122 

1123 if not self.vmin <= self.vcenter <= self.vmax: 

1124 raise ValueError("vmin, vcenter, vmax must increase monotonically") 

1125 result = np.ma.masked_array( 

1126 np.interp(result, [self.vmin, self.vcenter, self.vmax], 

1127 [0, 0.5, 1.]), mask=np.ma.getmask(result)) 

1128 if is_scalar: 

1129 result = np.atleast_1d(result)[0] 

1130 return result 

1131 

1132 

1133@cbook.deprecation.deprecated('3.2', alternative='TwoSlopeNorm') 

1134class DivergingNorm(TwoSlopeNorm): 

1135 ... 

1136 

1137 

1138class LogNorm(Normalize): 

1139 """Normalize a given value to the 0-1 range on a log scale.""" 

1140 

1141 def _check_vmin_vmax(self): 

1142 if self.vmin > self.vmax: 

1143 raise ValueError("minvalue must be less than or equal to maxvalue") 

1144 elif self.vmin <= 0: 

1145 raise ValueError("minvalue must be positive") 

1146 

1147 def __call__(self, value, clip=None): 

1148 if clip is None: 

1149 clip = self.clip 

1150 

1151 result, is_scalar = self.process_value(value) 

1152 

1153 result = np.ma.masked_less_equal(result, 0, copy=False) 

1154 

1155 self.autoscale_None(result) 

1156 self._check_vmin_vmax() 

1157 vmin, vmax = self.vmin, self.vmax 

1158 if vmin == vmax: 

1159 result.fill(0) 

1160 else: 

1161 if clip: 

1162 mask = np.ma.getmask(result) 

1163 result = np.ma.array(np.clip(result.filled(vmax), vmin, vmax), 

1164 mask=mask) 

1165 # in-place equivalent of above can be much faster 

1166 resdat = result.data 

1167 mask = result.mask 

1168 if mask is np.ma.nomask: 

1169 mask = (resdat <= 0) 

1170 else: 

1171 mask |= resdat <= 0 

1172 np.copyto(resdat, 1, where=mask) 

1173 np.log(resdat, resdat) 

1174 resdat -= np.log(vmin) 

1175 resdat /= (np.log(vmax) - np.log(vmin)) 

1176 result = np.ma.array(resdat, mask=mask, copy=False) 

1177 if is_scalar: 

1178 result = result[0] 

1179 return result 

1180 

1181 def inverse(self, value): 

1182 if not self.scaled(): 

1183 raise ValueError("Not invertible until scaled") 

1184 self._check_vmin_vmax() 

1185 vmin, vmax = self.vmin, self.vmax 

1186 

1187 if np.iterable(value): 

1188 val = np.ma.asarray(value) 

1189 return vmin * np.ma.power((vmax / vmin), val) 

1190 else: 

1191 return vmin * pow((vmax / vmin), value) 

1192 

1193 def autoscale(self, A): 

1194 # docstring inherited. 

1195 super().autoscale(np.ma.masked_less_equal(A, 0, copy=False)) 

1196 

1197 def autoscale_None(self, A): 

1198 # docstring inherited. 

1199 super().autoscale_None(np.ma.masked_less_equal(A, 0, copy=False)) 

1200 
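# A minimal usage sketch (added for illustration): LogNorm places values by
# their logarithm, so the geometric midpoint of [vmin, vmax] lands at 0.5.
#
#     >>> norm = LogNorm(vmin=1, vmax=100)
#     >>> norm(10)            # geometric midpoint -> 0.5 (up to rounding)
#     >>> norm.inverse(0.5)
#     10.0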

1201 

1202class SymLogNorm(Normalize): 

1203 """ 

1204 The symmetrical logarithmic scale is logarithmic in both the 

1205 positive and negative directions from the origin. 

1206 

1207 Since the values close to zero tend toward infinity, there is a 

1208 need to have a range around zero that is linear. The parameter 

1209 *linthresh* allows the user to specify the size of this range 

1210 (-*linthresh*, *linthresh*). 

1211 """ 

1212 def __init__(self, linthresh, linscale=1.0, vmin=None, vmax=None, 

1213 clip=False, *, base=None): 

1214 """ 

1215 Parameters 

1216 ---------- 

1217 linthresh : float 

1218 The range within which the plot is linear (to avoid having the plot 

1219 go to infinity around zero). 

1220 linscale : float, default: 1 

1221 This allows the linear range (-*linthresh* to *linthresh*) 

1222 to be stretched relative to the logarithmic range. Its 

1223 value is the number of powers of *base* to use for each 

1224 half of the linear range. 

1225 

1226 For example, when *linscale* == 1.0 (the default) and 

1227 ``base=10``, then space used for the positive and negative 

1228 halves of the linear range will be equal to a decade in 

1229 the logarithmic. 

1230 

1231 base : float, default: None 

1232 If not given, defaults to ``np.e`` (consistent with prior 

1233 behavior) and warns. 

1234 

1235 In v3.3 the default value will change to 10 to be consistent with 

1236 `.SymLogNorm`. 

1237 

1238 To suppress the warning pass *base* as a keyword argument. 

1239 

1240 """ 

1241 Normalize.__init__(self, vmin, vmax, clip) 

1242 if base is None: 

1243 self._base = np.e 

1244 cbook.warn_deprecated("3.3", message="default base may change " 

1245 "from np.e to 10. To suppress this warning specify the base " 

1246 "keyword argument.") 

1247 else: 

1248 self._base = base 

1249 self._log_base = np.log(self._base) 

1250 

1251 self.linthresh = float(linthresh) 

1252 self._linscale_adj = (linscale / (1.0 - self._base ** -1)) 

1253 if vmin is not None and vmax is not None: 

1254 self._transform_vmin_vmax() 

1255 

1256 def __call__(self, value, clip=None): 

1257 if clip is None: 

1258 clip = self.clip 

1259 

1260 result, is_scalar = self.process_value(value) 

1261 self.autoscale_None(result) 

1262 vmin, vmax = self.vmin, self.vmax 

1263 

1264 if vmin > vmax: 

1265 raise ValueError("minvalue must be less than or equal to maxvalue") 

1266 elif vmin == vmax: 

1267 result.fill(0) 

1268 else: 

1269 if clip: 

1270 mask = np.ma.getmask(result) 

1271 result = np.ma.array(np.clip(result.filled(vmax), vmin, vmax), 

1272 mask=mask) 

1273 # in-place equivalent of above can be much faster 

1274 resdat = self._transform(result.data) 

1275 resdat -= self._lower 

1276 resdat /= (self._upper - self._lower) 

1277 

1278 if is_scalar: 

1279 result = result[0] 

1280 return result 

1281 

1282 def _transform(self, a): 

1283 """Inplace transformation.""" 

1284 with np.errstate(invalid="ignore"): 

1285 masked = np.abs(a) > self.linthresh 

1286 sign = np.sign(a[masked]) 

1287 log = (self._linscale_adj + 

1288 np.log(np.abs(a[masked]) / self.linthresh) / self._log_base) 

1289 log *= sign * self.linthresh 

1290 a[masked] = log 

1291 a[~masked] *= self._linscale_adj 

1292 return a 

1293 

1294 def _inv_transform(self, a): 

1295 """Inverse inplace Transformation.""" 

1296 masked = np.abs(a) > (self.linthresh * self._linscale_adj) 

1297 sign = np.sign(a[masked]) 

1298 exp = np.power(self._base, 

1299 sign * a[masked] / self.linthresh - self._linscale_adj) 

1300 exp *= sign * self.linthresh 

1301 a[masked] = exp 

1302 a[~masked] /= self._linscale_adj 

1303 return a 

1304 

1305 def _transform_vmin_vmax(self): 

1306 """Calculates vmin and vmax in the transformed system.""" 

1307 vmin, vmax = self.vmin, self.vmax 

1308 arr = np.array([vmax, vmin]).astype(float) 

1309 self._upper, self._lower = self._transform(arr) 

1310 

1311 def inverse(self, value): 

1312 if not self.scaled(): 

1313 raise ValueError("Not invertible until scaled") 

1314 val = np.ma.asarray(value) 

1315 val = val * (self._upper - self._lower) + self._lower 

1316 return self._inv_transform(val) 

1317 

1318 def autoscale(self, A): 

1319 # docstring inherited. 

1320 super().autoscale(A) 

1321 self._transform_vmin_vmax() 

1322 

1323 def autoscale_None(self, A): 

1324 # docstring inherited. 

1325 super().autoscale_None(A) 

1326 self._transform_vmin_vmax() 

1327 
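# A minimal usage sketch (added for illustration): within
# ``[-linthresh, linthresh]`` the mapping is linear, outside it is
# logarithmic; with symmetric limits the origin lands at 0.5.
#
#     >>> norm = SymLogNorm(linthresh=1, vmin=-10, vmax=10, base=10)
#     >>> norm(0)
#     0.5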

1328 

1329class PowerNorm(Normalize): 

1330 """ 

1331 Linearly map a given value to the 0-1 range and then apply 

1332 a power-law normalization over that range. 

1333 """ 

1334 def __init__(self, gamma, vmin=None, vmax=None, clip=False): 

1335 Normalize.__init__(self, vmin, vmax, clip) 

1336 self.gamma = gamma 

1337 

1338 def __call__(self, value, clip=None): 

1339 if clip is None: 

1340 clip = self.clip 

1341 

1342 result, is_scalar = self.process_value(value) 

1343 

1344 self.autoscale_None(result) 

1345 gamma = self.gamma 

1346 vmin, vmax = self.vmin, self.vmax 

1347 if vmin > vmax: 

1348 raise ValueError("minvalue must be less than or equal to maxvalue") 

1349 elif vmin == vmax: 

1350 result.fill(0) 

1351 else: 

1352 if clip: 

1353 mask = np.ma.getmask(result) 

1354 result = np.ma.array(np.clip(result.filled(vmax), vmin, vmax), 

1355 mask=mask) 

1356 resdat = result.data 

1357 resdat -= vmin 

1358 resdat[resdat < 0] = 0 

1359 np.power(resdat, gamma, resdat) 

1360 resdat /= (vmax - vmin) ** gamma 

1361 

1362 result = np.ma.array(resdat, mask=result.mask, copy=False) 

1363 if is_scalar: 

1364 result = result[0] 

1365 return result 

1366 

1367 def inverse(self, value): 

1368 if not self.scaled(): 

1369 raise ValueError("Not invertible until scaled") 

1370 gamma = self.gamma 

1371 vmin, vmax = self.vmin, self.vmax 

1372 

1373 if np.iterable(value): 

1374 val = np.ma.asarray(value) 

1375 return np.ma.power(val, 1. / gamma) * (vmax - vmin) + vmin 

1376 else: 

1377 return pow(value, 1. / gamma) * (vmax - vmin) + vmin 

1378 
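# A minimal usage sketch (added for illustration): PowerNorm shifts by *vmin*
# and then applies ``(x - vmin)**gamma / (vmax - vmin)**gamma``.
#
#     >>> norm = PowerNorm(gamma=0.5, vmin=0, vmax=4)
#     >>> norm(1)
#     0.5
#     >>> norm.inverse(0.5)
#     1.0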

1379 

1380class BoundaryNorm(Normalize): 

1381 """ 

1382 Generate a colormap index based on discrete intervals. 

1383 

1384 Unlike `Normalize` or `LogNorm`, `BoundaryNorm` maps values to integers 

1385 instead of to the interval 0-1. 

1386 

1387 Mapping to the 0-1 interval could have been done via piece-wise linear 

1388 interpolation, but using integers seems simpler, and reduces the number of 

1389 conversions back and forth between integer and floating point. 

1390 """ 

1391 def __init__(self, boundaries, ncolors, clip=False): 

1392 """ 

1393 Parameters 

1394 ---------- 

1395 boundaries : array-like 

1396 Monotonically increasing sequence of boundaries 

1397 ncolors : int 

1398 Number of colors in the colormap to be used 

1399 clip : bool, optional 

1400 If clip is ``True``, out of range values are mapped to 0 if they 

1401 are below ``boundaries[0]`` or mapped to ncolors - 1 if they are 

1402 above ``boundaries[-1]``. 

1403 

1404 If clip is ``False``, out of range values are mapped to -1 if 

1405 they are below ``boundaries[0]`` or mapped to ncolors if they are 

1406 above ``boundaries[-1]``. These are then converted to valid indices 

1407 by :meth:`Colormap.__call__`. 

1408 

1409 Notes 

1410 ----- 

1411 *boundaries* defines the edges of bins, and data falling within a bin 

1412 is mapped to the color with the same index. 

1413 

1414 If the number of bins doesn't equal *ncolors*, the color is chosen 

1415 by linear interpolation of the bin number onto color numbers. 

1416 """ 

1417 self.clip = clip 

1418 self.vmin = boundaries[0] 

1419 self.vmax = boundaries[-1] 

1420 self.boundaries = np.asarray(boundaries) 

1421 self.N = len(self.boundaries) 

1422 self.Ncmap = ncolors 

1423 if self.N - 1 == self.Ncmap: 

1424 self._interp = False 

1425 else: 

1426 self._interp = True 

1427 

1428 def __call__(self, value, clip=None): 

1429 if clip is None: 

1430 clip = self.clip 

1431 

1432 xx, is_scalar = self.process_value(value) 

1433 mask = np.ma.getmaskarray(xx) 

1434 xx = np.atleast_1d(xx.filled(self.vmax + 1)) 

1435 if clip: 

1436 np.clip(xx, self.vmin, self.vmax, out=xx) 

1437 max_col = self.Ncmap - 1 

1438 else: 

1439 max_col = self.Ncmap 

1440 iret = np.zeros(xx.shape, dtype=np.int16) 

1441 for i, b in enumerate(self.boundaries): 

1442 iret[xx >= b] = i 

1443 if self._interp: 

1444 scalefac = (self.Ncmap - 1) / (self.N - 2) 

1445 iret = (iret * scalefac).astype(np.int16) 

1446 iret[xx < self.vmin] = -1 

1447 iret[xx >= self.vmax] = max_col 

1448 ret = np.ma.array(iret, mask=mask) 

1449 if is_scalar: 

1450 ret = int(ret[0]) # assume python scalar 

1451 return ret 

1452 

1453 def inverse(self, value): 

1454 """ 

1455 Raises 

1456 ------ 

1457 ValueError 

1458 BoundaryNorm is not invertible, so calling this method will always 

1459 raise an error 

1460 """ 

1461 raise ValueError("BoundaryNorm is not invertible") 

1462 
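# A minimal usage sketch (added for illustration): with
# ``len(boundaries) - 1 == ncolors`` each bin maps straight to one color
# index, with no interpolation step.
#
#     >>> norm = BoundaryNorm([0, 1, 2, 4], ncolors=3)
#     >>> norm(0.5), norm(1.5), norm(3.0)
#     (0, 1, 2)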

1463 

1464class NoNorm(Normalize): 

1465 """ 

1466 Dummy replacement for `Normalize`, for the case where we want to use 

1467 indices directly in a `~matplotlib.cm.ScalarMappable`. 

1468 """ 

1469 def __call__(self, value, clip=None): 

1470 return value 

1471 

1472 def inverse(self, value): 

1473 return value 

1474 

1475 

1476def rgb_to_hsv(arr): 

1477 """ 

1478 Convert float rgb values (in the range [0, 1]), in a numpy array, to hsv

1479 values. 

1480 

1481 Parameters 

1482 ---------- 

1483 arr : (..., 3) array-like 

1484 All values must be in the range [0, 1] 

1485 

1486 Returns 

1487 ------- 

1488 hsv : (..., 3) ndarray 

1489 Colors converted to hsv values in range [0, 1] 

1490 """ 

1491 arr = np.asarray(arr) 

1492 

1493 # check length of the last dimension, should be _some_ sort of rgb 

1494 if arr.shape[-1] != 3: 

1495 raise ValueError("Last dimension of input array must be 3; " 

1496 "shape {} was found.".format(arr.shape)) 

1497 

1498 in_shape = arr.shape 

1499 arr = np.array( 

1500 arr, copy=False, 

1501 dtype=np.promote_types(arr.dtype, np.float32), # Don't work on ints. 

1502 ndmin=2, # In case input was 1D. 

1503 ) 

1504 out = np.zeros_like(arr) 

1505 arr_max = arr.max(-1) 

1506 ipos = arr_max > 0 

1507 delta = arr.ptp(-1) 

1508 s = np.zeros_like(delta) 

1509 s[ipos] = delta[ipos] / arr_max[ipos] 

1510 ipos = delta > 0 

1511 # red is max 

1512 idx = (arr[..., 0] == arr_max) & ipos 

1513 out[idx, 0] = (arr[idx, 1] - arr[idx, 2]) / delta[idx] 

1514 # green is max 

1515 idx = (arr[..., 1] == arr_max) & ipos 

1516 out[idx, 0] = 2. + (arr[idx, 2] - arr[idx, 0]) / delta[idx] 

1517 # blue is max 

1518 idx = (arr[..., 2] == arr_max) & ipos 

1519 out[idx, 0] = 4. + (arr[idx, 0] - arr[idx, 1]) / delta[idx] 

1520 

1521 out[..., 0] = (out[..., 0] / 6.0) % 1.0 

1522 out[..., 1] = s 

1523 out[..., 2] = arr_max 

1524 

1525 return out.reshape(in_shape) 

1526 

1527 

1528def hsv_to_rgb(hsv): 

1529 """ 

1530 Convert hsv values to rgb. 

1531 

1532 Parameters 

1533 ---------- 

1534 hsv : (..., 3) array-like 

1535 All values assumed to be in range [0, 1] 

1536 

1537 Returns 

1538 ------- 

1539 rgb : (..., 3) ndarray 

1540 Colors converted to RGB values in range [0, 1] 

1541 """ 

1542 hsv = np.asarray(hsv) 

1543 

1544 # check length of the last dimension, should be _some_ sort of rgb 

1545 if hsv.shape[-1] != 3: 

1546 raise ValueError("Last dimension of input array must be 3; " 

1547 "shape {shp} was found.".format(shp=hsv.shape)) 

1548 

1549 in_shape = hsv.shape 

1550 hsv = np.array( 

1551 hsv, copy=False, 

1552 dtype=np.promote_types(hsv.dtype, np.float32), # Don't work on ints. 

1553 ndmin=2, # In case input was 1D. 

1554 ) 

1555 

1556 h = hsv[..., 0] 

1557 s = hsv[..., 1] 

1558 v = hsv[..., 2] 

1559 

1560 r = np.empty_like(h) 

1561 g = np.empty_like(h) 

1562 b = np.empty_like(h) 

1563 

1564 i = (h * 6.0).astype(int) 

1565 f = (h * 6.0) - i 

1566 p = v * (1.0 - s) 

1567 q = v * (1.0 - s * f) 

1568 t = v * (1.0 - s * (1.0 - f)) 

1569 

1570 idx = i % 6 == 0 

1571 r[idx] = v[idx] 

1572 g[idx] = t[idx] 

1573 b[idx] = p[idx] 

1574 

1575 idx = i == 1 

1576 r[idx] = q[idx] 

1577 g[idx] = v[idx] 

1578 b[idx] = p[idx] 

1579 

1580 idx = i == 2 

1581 r[idx] = p[idx] 

1582 g[idx] = v[idx] 

1583 b[idx] = t[idx] 

1584 

1585 idx = i == 3 

1586 r[idx] = p[idx] 

1587 g[idx] = q[idx] 

1588 b[idx] = v[idx] 

1589 

1590 idx = i == 4 

1591 r[idx] = t[idx] 

1592 g[idx] = p[idx] 

1593 b[idx] = v[idx] 

1594 

1595 idx = i == 5 

1596 r[idx] = v[idx] 

1597 g[idx] = p[idx] 

1598 b[idx] = q[idx] 

1599 

1600 idx = s == 0 

1601 r[idx] = v[idx] 

1602 g[idx] = v[idx] 

1603 b[idx] = v[idx] 

1604 

1605 rgb = np.stack([r, g, b], axis=-1) 

1606 

1607 return rgb.reshape(in_shape) 

1608 
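# A minimal usage sketch (added for illustration): the two conversions are
# inverses of each other for in-range values.
#
#     >>> rgb_to_hsv([1., 0., 0.])          # pure red
#     array([0., 1., 1.])
#     >>> hsv_to_rgb([0., 1., 1.])
#     array([1., 0., 0.])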

1609 

1610def _vector_magnitude(arr): 

1611 # things that don't work here: 

1612 # * np.linalg.norm 

1613 # - doesn't broadcast in numpy 1.7 

1614 # - drops the mask from ma.array 

1615 # * using keepdims - broken on ma.array until 1.11.2 

1616 # * using sum - discards mask on ma.array unless entire vector is masked 

1617 

1618 sum_sq = 0 

1619 for i in range(arr.shape[-1]): 

1620 sum_sq += np.square(arr[..., i, np.newaxis]) 

1621 return np.sqrt(sum_sq) 

1622 

1623 

1624class LightSource: 

1625 """ 

1626 Create a light source coming from the specified azimuth and elevation. 

1627 Angles are in degrees, with the azimuth measured 

1628 clockwise from north and elevation up from the zero plane of the surface. 

1629 

1630 The :meth:`shade` method is used to produce "shaded" rgb values for a data array.

1631 The :meth:`shade_rgb` method can be used to combine an rgb image with an

1632 elevation map.

1633 The :meth:`hillshade` method produces an illumination map of a surface.

1634 """ 

1635 def __init__(self, azdeg=315, altdeg=45, hsv_min_val=0, hsv_max_val=1, 

1636 hsv_min_sat=1, hsv_max_sat=0): 

1637 """ 

1638 Specify the azimuth (measured clockwise from north) and altitude

1639 (measured up from the plane of the surface) of the light source 

1640 in degrees. 

1641 

1642 Parameters 

1643 ---------- 

1644 azdeg : number, optional 

1645 The azimuth (0-360, degrees clockwise from North) of the light 

1646 source. Defaults to 315 degrees (from the northwest). 

1647 altdeg : number, optional 

1648 The altitude (0-90, degrees up from horizontal) of the light 

1649 source. Defaults to 45 degrees from horizontal. 

1650 

1651 Notes 

1652 ----- 

1653 For backwards compatibility, the parameters *hsv_min_val*, 

1654 *hsv_max_val*, *hsv_min_sat*, and *hsv_max_sat* may be supplied at 

1655 initialization as well. However, these parameters will only be used if 

1656 "blend_mode='hsv'" is passed into :meth:`shade` or :meth:`shade_rgb`. 

1657 See the documentation for :meth:`blend_hsv` for more details. 

1658 """ 

1659 self.azdeg = azdeg 

1660 self.altdeg = altdeg 

1661 self.hsv_min_val = hsv_min_val 

1662 self.hsv_max_val = hsv_max_val 

1663 self.hsv_min_sat = hsv_min_sat 

1664 self.hsv_max_sat = hsv_max_sat 

1665 

1666 @property 

1667 def direction(self): 

1668 """The unit vector direction towards the light source.""" 

1669 # Azimuth is in degrees clockwise from North. Convert to radians 

1670 # counterclockwise from East (mathematical notation). 

1671 az = np.radians(90 - self.azdeg) 

1672 alt = np.radians(self.altdeg) 

1673 return np.array([ 

1674 np.cos(az) * np.cos(alt), 

1675 np.sin(az) * np.cos(alt), 

1676 np.sin(alt) 

1677 ]) 

1678 
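    # A minimal sketch of the convention (added for illustration): with the
    # default azdeg=315, altdeg=45 the unit vector points up and towards the
    # north-west, roughly (-0.5, 0.5, 0.707) in (east, north, up) components.
    #
    #     >>> LightSource(azdeg=90, altdeg=0).direction   # due east, horizontal
    #     array([1., 0., 0.])
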

1679 def hillshade(self, elevation, vert_exag=1, dx=1, dy=1, fraction=1.): 

1680 """ 

1681 Calculates the illumination intensity for a surface using the defined 

1682 azimuth and elevation for the light source. 

1683 

1684 This computes the normal vectors for the surface, and then passes them 

1685 on to :meth:`shade_normals`. 

1686 

1687 Parameters 

1688 ---------- 

1689 elevation : array-like 

1690 A 2d array (or equivalent) of the height values used to generate an 

1691 illumination map 

1692 vert_exag : number, optional 

1693 The amount to exaggerate the elevation values by when calculating 

1694 illumination. This can be used either to correct for differences in 

1695 units between the x-y coordinate system and the elevation 

1696 coordinate system (e.g. decimal degrees vs. meters) or to 

1697 exaggerate or de-emphasize topographic effects. 

1698 dx : number, optional 

1699 The x-spacing (columns) of the input *elevation* grid. 

1700 dy : number, optional 

1701 The y-spacing (rows) of the input *elevation* grid. 

1702 fraction : number, optional 

1703 Increases or decreases the contrast of the hillshade. Values 

1704 greater than one will cause intermediate values to move closer to 

1705 full illumination or shadow (and clipping any values that move 

1706 beyond 0 or 1). Note that this is not visually or mathematically 

1707 the same as vertical exaggeration. 

1708 

1709 Returns 

1710 ------- 

1711 intensity : ndarray 

1712 A 2d array of illumination values between 0-1, where 0 is 

1713 completely in shadow and 1 is completely illuminated. 

1714 """ 

1715 

1716 # Because most image and raster GIS data has the first row in the array 

1717 # as the "top" of the image, dy is implicitly negative. This is 

1718 # consistent with what `imshow` assumes, as well. 

1719 dy = -dy 

1720 

1721 # compute the normal vectors from the partial derivatives 

1722 e_dy, e_dx = np.gradient(vert_exag * elevation, dy, dx) 

1723 

1724 # .view is to keep subclasses 

1725 normal = np.empty(elevation.shape + (3,)).view(type(elevation)) 

1726 normal[..., 0] = -e_dx 

1727 normal[..., 1] = -e_dy 

1728 normal[..., 2] = 1 

1729 normal /= _vector_magnitude(normal) 

1730 

1731 return self.shade_normals(normal, fraction) 
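
# Illustrative usage sketch (names such as `ls`, `x`, `y`, `z` are
# hypothetical and used only for demonstration):
#
#     ls = LightSource(azdeg=315, altdeg=45)
#     y, x = np.mgrid[0:1:128j, 0:1:128j]
#     z = np.sin(10 * x) * np.cos(10 * y)        # toy elevation grid
#     intensity = ls.hillshade(z, vert_exag=0.1,
#                              dx=x[0, 1] - x[0, 0], dy=y[1, 0] - y[0, 0])
#     # `intensity` is a 128x128 array in [0, 1], suitable for a grayscale
#     # imshow of the illumination alone.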

1732 

1733 def shade_normals(self, normals, fraction=1.): 

1734 """ 

1735 Calculates the illumination intensity for the normal vectors of a 

1736 surface using the defined azimuth and elevation for the light source. 

1737 

1738 Imagine an artificial sun placed at infinity in some azimuth and 

1739 elevation position illuminating our surface. The parts of the surface 

1740 that slope toward the sun should brighten while those sides facing away 

1741 should become darker. 

1742 

1743 Parameters 

1744 ---------- 

normals : array-like 

An (..., 3) array of unit normal vectors for the surface. 

1745 fraction : number, optional 

1746 Increases or decreases the contrast of the hillshade. Values 

1747 greater than one will cause intermediate values to move closer to 

1748 full illumination or shadow (and clipping any values that move 

1749 beyond 0 or 1). Note that this is not visually or mathematically 

1750 the same as vertical exaggeration. 

1751 

1752 Returns 

1753 ------- 

1754 intensity : ndarray 

1755 A 2d array of illumination values between 0-1, where 0 is 

1756 completely in shadow and 1 is completely illuminated. 

1757 """ 

1758 

1759 intensity = normals.dot(self.direction) 

1760 

1761 # Apply contrast stretch 

1762 imin, imax = intensity.min(), intensity.max() 

1763 intensity *= fraction 

1764 

1765 # Rescale to 0-1, keeping range before contrast stretch 

1766 # If constant slope, keep relative scaling (i.e. flat should be 0.5, 

1767 # fully occluded 0, etc.) 

1768 if (imax - imin) > 1e-6: 

1769 # Strictly speaking, this is incorrect. Negative values should be 

1770 # clipped to 0 because they're fully occluded. However, rescaling 

1771 # in this manner is consistent with the previous implementation and 

1772 # visually appears better than a "hard" clip. 

1773 intensity -= imin 

1774 intensity /= (imax - imin) 

1775 intensity = np.clip(intensity, 0, 1) 

1776 

1777 return intensity 
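
# Worked example (illustrative): with the default direction of roughly
# [-0.5, 0.5, 0.707], a unit normal pointing straight at the light gives a
# raw dot product of 1, a flat surface ([0, 0, 1]) gives ~0.707, and a facet
# tilted 90 degrees away from the light ([0.5, -0.5, 0.707]) gives 0.  After
# the contrast stretch with fraction=1 these three rescale to intensities of
# 1, ~0.707 and 0, respectively.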

1778 

1779 def shade(self, data, cmap, norm=None, blend_mode='overlay', vmin=None, 

1780 vmax=None, vert_exag=1, dx=1, dy=1, fraction=1, **kwargs): 

1781 """ 

1782 Combine colormapped data values with an illumination intensity map 

1783 (a.k.a. "hillshade") of the values. 

1784 

1785 Parameters 

1786 ---------- 

1787 data : array-like 

1788 A 2d array (or equivalent) of the height values used to generate a 

1789 shaded map. 

1790 cmap : `~matplotlib.colors.Colormap` instance 

1791 The colormap used to color the *data* array. Note that this must be 

1792 a `~matplotlib.colors.Colormap` instance. For example, rather than 

1793 passing in `cmap='gist_earth'`, use 

1794 `cmap=plt.get_cmap('gist_earth')` instead. 

1795 norm : `~matplotlib.colors.Normalize` instance, optional 

1796 The normalization used to scale values before colormapping. If 

1797 None, the input will be linearly scaled between its min and max. 

1798 blend_mode : {'hsv', 'overlay', 'soft'} or callable, optional 

1799 The type of blending used to combine the colormapped data 

1800 values with the illumination intensity. Default is 

1801 "overlay". Note that for most topographic surfaces, 

1802 "overlay" or "soft" appear more visually realistic. If a 

1803 user-defined function is supplied, it is expected to 

1804 combine an MxNx3 RGB array of floats (ranging 0 to 1) with 

1805 an MxNx1 hillshade array (also 0 to 1). (Call signature 

1806 `func(rgb, illum, **kwargs)`) Additional kwargs supplied 

1807 to this function will be passed on to the *blend_mode* 

1808 function. 

1809 vmin : scalar or None, optional 

1810 The minimum value used in colormapping *data*. If *None* the 

1811 minimum value in *data* is used. If *norm* is specified, then this 

1812 argument will be ignored. 

1813 vmax : scalar or None, optional 

1814 The maximum value used in colormapping *data*. If *None* the 

1815 maximum value in *data* is used. If *norm* is specified, then this 

1816 argument will be ignored. 

1817 vert_exag : number, optional 

1818 The amount to exaggerate the elevation values by when calculating 

1819 illumination. This can be used either to correct for differences in 

1820 units between the x-y coordinate system and the elevation 

1821 coordinate system (e.g. decimal degrees vs. meters) or to 

1822 exaggerate or de-emphasize topography. 

1823 dx : number, optional 

1824 The x-spacing (columns) of the input *elevation* grid. 

1825 dy : number, optional 

1826 The y-spacing (rows) of the input *elevation* grid. 

1827 fraction : number, optional 

1828 Increases or decreases the contrast of the hillshade. Values 

1829 greater than one will cause intermediate values to move closer to 

1830 full illumination or shadow (and clipping any values that move 

1831 beyond 0 or 1). Note that this is not visually or mathematically 

1832 the same as vertical exaggeration. 

1833 Additional kwargs are passed on to the *blend_mode* function. 

1834 

1835 Returns 

1836 ------- 

1837 rgba : ndarray 

1838 An MxNx4 array of floats ranging between 0-1. 

1839 """ 

1840 if vmin is None: 

1841 vmin = data.min() 

1842 if vmax is None: 

1843 vmax = data.max() 

1844 if norm is None: 

1845 norm = Normalize(vmin=vmin, vmax=vmax) 

1846 

1847 rgb0 = cmap(norm(data)) 

1848 rgb1 = self.shade_rgb(rgb0, elevation=data, blend_mode=blend_mode, 

1849 vert_exag=vert_exag, dx=dx, dy=dy, 

1850 fraction=fraction, **kwargs) 

1851 # Don't overwrite the alpha channel, if present. 

1852 rgb0[..., :3] = rgb1[..., :3] 

1853 return rgb0 
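
# Illustrative usage sketch (hypothetical names; `z` stands for a 2-D
# elevation array):
#
#     import matplotlib.pyplot as plt
#     ls = LightSource(azdeg=315, altdeg=45)
#     rgba = ls.shade(z, cmap=plt.get_cmap('gist_earth'),
#                     blend_mode='overlay', vert_exag=10,
#                     vmin=z.min(), vmax=z.max())
#     plt.imshow(rgba)        # rgba is an MxNx4 float array in [0, 1]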

1854 

1855 def shade_rgb(self, rgb, elevation, fraction=1., blend_mode='hsv', 

1856 vert_exag=1, dx=1, dy=1, **kwargs): 

1857 """ 

1858 Use this light source to adjust the colors of the *rgb* input array to 

1859 give the impression of a shaded relief map with the given `elevation`. 

1860 

1861 Parameters 

1862 ---------- 

1863 rgb : array-like 

1864 An (M, N, 3) RGB array, assumed to be in the range of 0 to 1. 

1865 elevation : array-like 

1866 An (M, N) array of the height values used to generate a shaded map. 

1867 fraction : number 

1868 Increases or decreases the contrast of the hillshade. Values 

1869 greater than one will cause intermediate values to move closer to 

1870 full illumination or shadow (and clipping any values that move 

1871 beyond 0 or 1). Note that this is not visually or mathematically 

1872 the same as vertical exaggeration. 

1873 blend_mode : {'hsv', 'overlay', 'soft'} or callable, optional 

1874 The type of blending used to combine the colormapped data values 

1875 with the illumination intensity. For backwards compatibility, this 

1876 defaults to "hsv". Note that for most topographic surfaces, 

1877 "overlay" or "soft" appear more visually realistic. If a 

1878 user-defined function is supplied, it is expected to combine an 

1879 MxNx3 RGB array of floats (ranging 0 to 1) with an MxNx1 hillshade 

1880 array (also 0 to 1). (Call signature `func(rgb, illum, **kwargs)`) 

1881 Additional kwargs supplied to this function will be passed on to 

1882 the *blend_mode* function. 

1883 vert_exag : number, optional 

1884 The amount to exaggerate the elevation values by when calculating 

1885 illumination. This can be used either to correct for differences in 

1886 units between the x-y coordinate system and the elevation 

1887 coordinate system (e.g. decimal degrees vs. meters) or to 

1888 exaggerate or de-emphasize topography. 

1889 dx : number, optional 

1890 The x-spacing (columns) of the input *elevation* grid. 

1891 dy : number, optional 

1892 The y-spacing (rows) of the input *elevation* grid. 

1893 Additional kwargs are passed on to the *blend_mode* function. 

1894 

1895 Returns 

1896 ------- 

1897 shaded_rgb : ndarray 

1898 An (M, N, 3) array of floats ranging between 0 and 1. 

1899 """ 

1900 # Calculate the "hillshade" intensity. 

1901 intensity = self.hillshade(elevation, vert_exag, dx, dy, fraction) 

1902 intensity = intensity[..., np.newaxis] 

1903 

1904 # Blend the hillshade and rgb data using the specified mode 

1905 lookup = { 

1906 'hsv': self.blend_hsv, 

1907 'soft': self.blend_soft_light, 

1908 'overlay': self.blend_overlay, 

1909 } 

1910 if blend_mode in lookup: 

1911 blend = lookup[blend_mode](rgb, intensity, **kwargs) 

1912 else: 

1913 try: 

1914 blend = blend_mode(rgb, intensity, **kwargs) 

1915 except TypeError: 

1916 raise ValueError('"blend_mode" must be callable or one of {}' 

1917 .format(list(lookup))) 

1918 

1919 # Only apply result where hillshade intensity isn't masked 

1920 if hasattr(intensity, 'mask'): 

1921 mask = intensity.mask[..., 0] 

1922 for i in range(3): 

1923 blend[..., i][mask] = rgb[..., i][mask] 

1924 

1925 return blend 
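
# Illustrative sketch (hypothetical names: `ls`, `rgb_image`, `dem`): shade an
# RGB image that was built independently of the elevation data, here with a
# user-supplied blend callable instead of one of the named modes:
#
#     def half_blend(rgb, illum, **kwargs):      # callable blend_mode
#         return 0.5 * rgb + 0.5 * illum
#
#     shaded = ls.shade_rgb(rgb_image, elevation=dem, blend_mode=half_blend,
#                           vert_exag=5, dx=30, dy=30)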

1926 

1927 def blend_hsv(self, rgb, intensity, hsv_max_sat=None, hsv_max_val=None, 

1928 hsv_min_val=None, hsv_min_sat=None): 

1929 """ 

1930 Take the input RGB array, convert it to HSV, and then adjust those 

1931 color values to give the impression of a shaded relief map with the 

1932 specified light source. RGB values are returned, which can then be 

1933 used to plot the shaded image with imshow. 

1934 

1935 The color of the resulting image will be darkened by moving the (s, v) 

1936 values (in hsv colorspace) toward (hsv_min_sat, hsv_min_val) in the 

1937 shaded regions, or lightened by sliding (s, v) toward (hsv_max_sat, 

1938 hsv_max_val) in regions that are illuminated. The default extremes are 

1939 chosen so that completely shaded points are nearly black (s = 1, v = 0) 

1940 and completely illuminated points are nearly white (s = 0, v = 1). 

1941 

1942 Parameters 

1943 ---------- 

1944 rgb : ndarray 

1945 An MxNx3 RGB array of floats ranging from 0 to 1 (color image). 

1946 intensity : ndarray 

1947 An MxNx1 array of floats ranging from 0 to 1 (grayscale image). 

1948 hsv_max_sat : number, optional 

1949 The maximum saturation value that the *intensity* map can shift the 

1950 output image to. Defaults to 0. 

1951 hsv_min_sat : number, optional 

1952 The minimum saturation value that the *intensity* map can shift the 

1953 output image to. Defaults to 1. 

1954 hsv_max_val : number, optional 

1955 The maximum value ("v" in "hsv") that the *intensity* map can shift 

1956 the output image to. Defaults to 1. 

1957 hsv_min_val : number, optional 

1958 The minimum value ("v" in "hsv") that the *intensity* map can shift 

1959 the output image to. Defaults to 0. 

1960 

1961 Returns 

1962 ------- 

1963 rgb : ndarray 

1964 An MxNx3 RGB array representing the combined images. 

1965 """ 

1966 # Backward compatibility... 

1967 if hsv_max_sat is None: 

1968 hsv_max_sat = self.hsv_max_sat 

1969 if hsv_max_val is None: 

1970 hsv_max_val = self.hsv_max_val 

1971 if hsv_min_sat is None: 

1972 hsv_min_sat = self.hsv_min_sat 

1973 if hsv_min_val is None: 

1974 hsv_min_val = self.hsv_min_val 

1975 

1976 # Expects a 2D intensity array scaled between -1 and 1... 

1977 intensity = intensity[..., 0] 

1978 intensity = 2 * intensity - 1 

1979 

1980 # Convert to rgb, then rgb to hsv 

1981 hsv = rgb_to_hsv(rgb[:, :, 0:3]) 

1982 hue, sat, val = np.moveaxis(hsv, -1, 0) 

1983 

1984 # Modify hsv values (in place) to simulate illumination. 

1985 # putmask(A, mask, B) <=> A[mask] = B[mask] 

1986 np.putmask(sat, (np.abs(sat) > 1.e-10) & (intensity > 0), 

1987 (1 - intensity) * sat + intensity * hsv_max_sat) 

1988 np.putmask(sat, (np.abs(sat) > 1.e-10) & (intensity < 0), 

1989 (1 + intensity) * sat - intensity * hsv_min_sat) 

1990 np.putmask(val, intensity > 0, 

1991 (1 - intensity) * val + intensity * hsv_max_val) 

1992 np.putmask(val, intensity < 0, 

1993 (1 + intensity) * val - intensity * hsv_min_val) 

1994 np.clip(hsv[:, :, 1:], 0, 1, out=hsv[:, :, 1:]) 

1995 

1996 # Convert modified hsv back to rgb. 

1997 return hsv_to_rgb(hsv) 
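
# Worked example (illustrative): for a pixel whose input intensity is 0.75
# (rescaled internally to 2*0.75 - 1 = +0.5) and the default extremes
# (hsv_max_sat=0, hsv_max_val=1), a starting (s, v) of (0.8, 0.6) becomes
#     s -> (1 - 0.5) * 0.8 + 0.5 * 0 = 0.4
#     v -> (1 - 0.5) * 0.6 + 0.5 * 1 = 0.8
# i.e. the color is pushed toward white; a negative rescaled intensity would
# instead pull (s, v) toward (hsv_min_sat, hsv_min_val) = (1, 0), darkening it.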

1998 

1999 def blend_soft_light(self, rgb, intensity): 

2000 """ 

2001 Combines an rgb image with an intensity map using "soft light" 

2002 blending. Uses the "pegtop" formula. 

2003 

2004 Parameters 

2005 ---------- 

2006 rgb : ndarray 

2007 An MxNx3 RGB array of floats ranging from 0 to 1 (color image). 

2008 intensity : ndarray 

2009 An MxNx1 array of floats ranging from 0 to 1 (grayscale image). 

2010 

2011 Returns 

2012 ------- 

2013 rgb : ndarray 

2014 An MxNx3 RGB array representing the combined images. 

2015 """ 

2016 return 2 * intensity * rgb + (1 - 2 * intensity) * rgb**2 
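
# Worked example (illustrative): the pegtop formula
#     2 * intensity * rgb + (1 - 2 * intensity) * rgb**2
# leaves the image unchanged at intensity = 0.5 (2*0.5*rgb + 0 == rgb), while
# a brighter intensity of 0.75 lifts rgb = 0.5 to
#     2*0.75*0.5 + (1 - 1.5)*0.25 = 0.625.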

2017 

2018 def blend_overlay(self, rgb, intensity): 

2019 """ 

2020 Combines an rgb image with an intensity map using "overlay" blending. 

2021 

2022 Parameters 

2023 ---------- 

2024 rgb : ndarray 

2025 An MxNx3 RGB array of floats ranging from 0 to 1 (color image). 

2026 intensity : ndarray 

2027 An MxNx1 array of floats ranging from 0 to 1 (grayscale image). 

2028 

2029 Returns 

2030 ------- 

2031 rgb : ndarray 

2032 An MxNx3 RGB array representing the combined images. 

2033 """ 

2034 low = 2 * intensity * rgb 

2035 high = 1 - 2 * (1 - intensity) * (1 - rgb) 

2036 return np.where(rgb <= 0.5, low, high) 
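
# Worked example (illustrative): with intensity = 0.75, a dark channel value
# rgb = 0.25 takes the "low" branch, 2*0.75*0.25 = 0.375, while a bright value
# rgb = 0.75 takes the "high" branch, 1 - 2*(1 - 0.75)*(1 - 0.75) = 0.875;
# both move toward full illumination, with midtones stretched the most.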

2037 

2038 

2039 def from_levels_and_colors(levels, colors, extend='neither'): 

2040 """ 

2041 A helper routine to generate a cmap and a norm instance which 

2042 behave similarly to contourf's levels and colors arguments. 

2043 

2044 Parameters 

2045 ---------- 

2046 levels : sequence of numbers 

2047 The quantization levels used to construct the :class:`BoundaryNorm`. 

2048 Value ``v`` is quantized to level ``i`` if ``lev[i] <= v < lev[i+1]``. 

2049 colors : sequence of colors 

2050 The fill color to use for each level. If `extend` is "neither" there 

2051 must be ``n_level - 1`` colors. For an `extend` of "min" or "max" add 

2052 one extra color, and for an `extend` of "both" add two colors. 

2053 extend : {'neither', 'min', 'max', 'both'}, optional 

2054 The behaviour when a value falls out of range of the given levels. 

2055 See :func:`~matplotlib.pyplot.contourf` for details. 

2056 

2057 Returns 

2058 ------- 

2059 cmap : `~matplotlib.colors.Colormap` 

2060 norm : `~matplotlib.colors.Normalize` 

2061 """ 

2062 slice_map = { 

2063 'both': slice(1, -1), 

2064 'min': slice(1, None), 

2065 'max': slice(0, -1), 

2066 'neither': slice(0, None), 

2067 } 

2068 cbook._check_in_list(slice_map, extend=extend) 

2069 color_slice = slice_map[extend] 

2070 

2071 n_data_colors = len(levels) - 1 

2072 n_expected = n_data_colors + color_slice.start - (color_slice.stop or 0) 

2073 if len(colors) != n_expected: 

2074 raise ValueError( 

2075 f'With extend == {extend!r} and {len(levels)} levels, ' 

2076 f'expected {n_expected} colors, but got {len(colors)}') 

2077 

2078 cmap = ListedColormap(colors[color_slice], N=n_data_colors) 

2079 

2080 if extend in ['min', 'both']: 

2081 cmap.set_under(colors[0]) 

2082 else: 

2083 cmap.set_under('none') 

2084 

2085 if extend in ['max', 'both']: 

2086 cmap.set_over(colors[-1]) 

2087 else: 

2088 cmap.set_over('none') 

2089 

2090 cmap.colorbar_extend = extend 

2091 

2092 norm = BoundaryNorm(levels, ncolors=n_data_colors) 

2093 return cmap, norm
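
# Illustrative sketch (a hypothetical helper, not part of the module): build a
# three-color quantized map from four level boundaries, with dedicated
# under/over colors for out-of-range values.
def _example_levels_and_colors():
    levels = [0, 1, 2, 4]
    colors = ['0.25', 'tab:orange', 'tab:blue', 'crimson', 'navy']
    # extend='both' needs len(levels) - 1 + 2 == 5 colors: the first is used
    # below levels[0] and the last at or above levels[-1].
    cmap, norm = from_levels_and_colors(levels, colors, extend='both')
    # -1 and 5 fall outside the levels and pick up the under/over colors; the
    # remaining values are quantized into the three in-range bins.
    return cmap(norm([-1, 0.5, 1.5, 3, 5]))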