Coverage for /home/martinb/.local/share/virtualenvs/camcops/lib/python3.6/site-packages/matplotlib/tri/triinterpolate.py : 15%

Hot-keys on this page
r m x p toggle line displays
j k next/prev highlighted chunk
0 (zero) top of page
1 (one) first highlighted chunk
1"""
2Interpolation inside triangular grids.
3"""
5import numpy as np
7from matplotlib import cbook
8from matplotlib.tri import Triangulation
9from matplotlib.tri.trifinder import TriFinder
10from matplotlib.tri.tritools import TriAnalyzer
12__all__ = ('TriInterpolator', 'LinearTriInterpolator', 'CubicTriInterpolator')
15class TriInterpolator:
16 """
17 Abstract base class for classes used to perform interpolation on
18 triangular grids.
20 Derived classes implement the following methods:
22 - ``__call__(x, y)`` ,
23 where x, y are array-like point coordinates of the same shape, and
24 that returns a masked array of the same shape containing the
25 interpolated z-values.
27 - ``gradient(x, y)`` ,
28 where x, y are array-like point coordinates of the same
29 shape, and that returns a list of 2 masked arrays of the same shape
30 containing the 2 derivatives of the interpolator (derivatives of
31 interpolated z values with respect to x and y).
33 """
34 def __init__(self, triangulation, z, trifinder=None):
35 cbook._check_isinstance(Triangulation, triangulation=triangulation)
36 self._triangulation = triangulation
38 self._z = np.asarray(z)
39 if self._z.shape != self._triangulation.x.shape:
40 raise ValueError("z array must have same length as triangulation x"
41 " and y arrays")
43 cbook._check_isinstance((TriFinder, None), trifinder=trifinder)
44 self._trifinder = trifinder or self._triangulation.get_trifinder()
46 # Default scaling factors : 1.0 (= no scaling)
47 # Scaling may be used for interpolations for which the order of
48 # magnitude of x, y has an impact on the interpolant definition.
49 # Please refer to :meth:`_interpolate_multikeys` for details.
50 self._unit_x = 1.0
51 self._unit_y = 1.0
53 # Default triangle renumbering: None (= no renumbering)
54 # Renumbering may be used to avoid unnecessary computations
55 # if complex calculations are done inside the Interpolator.
56 # Please refer to :meth:`_interpolate_multikeys` for details.
57 self._tri_renum = None
59 # __call__ and gradient docstrings are shared by all subclasses
60 # (except, if needed, relevant additions).
61 # However these methods are only implemented in subclasses to avoid
62 # confusion in the documentation.
63 _docstring__call__ = """
64 Returns a masked array containing interpolated values at the specified
65 (x, y) points.
67 Parameters
68 ----------
69 x, y : array-like
70 x and y coordinates of the same shape and any number of
71 dimensions.
73 Returns
74 -------
75 z : np.ma.array
76 Masked array of the same shape as *x* and *y*; values corresponding
77 to (*x*, *y*) points outside of the triangulation are masked out.
79 """
81 _docstringgradient = r"""
82 Returns a list of 2 masked arrays containing interpolated derivatives
83 at the specified (x, y) points.
85 Parameters
86 ----------
87 x, y : array-like
88 x and y coordinates of the same shape and any number of
89 dimensions.
91 Returns
92 -------
93 dzdx, dzdy : np.ma.array
94 2 masked arrays of the same shape as *x* and *y*; values
95 corresponding to (x, y) points outside of the triangulation
96 are masked out.
97 The first returned array contains the values of
98 :math:`\frac{\partial z}{\partial x}` and the second those of
99 :math:`\frac{\partial z}{\partial y}`.
101 """
103 def _interpolate_multikeys(self, x, y, tri_index=None,
104 return_keys=('z',)):
105 """
106 Versatile (private) method defined for all TriInterpolators.
108 :meth:`_interpolate_multikeys` is a wrapper around method
109 :meth:`_interpolate_single_key` (to be defined in the child
110 subclasses).
111 :meth:`_interpolate_single_key actually performs the interpolation,
112 but only for 1-dimensional inputs and at valid locations (inside
113 unmasked triangles of the triangulation).
115 The purpose of :meth:`_interpolate_multikeys` is to implement the
116 following common tasks needed in all subclasses implementations:
118 - calculation of containing triangles
119 - dealing with more than one interpolation request at the same
120 location (e.g., if the 2 derivatives are requested, it is
121 unnecessary to compute the containing triangles twice)
122 - scaling according to self._unit_x, self._unit_y
123 - dealing with points outside of the grid (with fill value np.nan)
124 - dealing with multi-dimensional *x*, *y* arrays: flattening for
125 :meth:`_interpolate_params` call and final reshaping.
127 (Note that np.vectorize could do most of those things very well for
128 you, but it does it by function evaluations over successive tuples of
129 the input arrays. Therefore, this tends to be more time consuming than
130 using optimized numpy functions - e.g., np.dot - which can be used
131 easily on the flattened inputs, in the child-subclass methods
132 :meth:`_interpolate_single_key`.)
134 It is guaranteed that the calls to :meth:`_interpolate_single_key`
135 will be done with flattened (1-d) array-like input parameters *x*, *y*
136 and with flattened, valid `tri_index` arrays (no -1 index allowed).
138 Parameters
139 ----------
140 x, y : array-like
141 x and y coordinates indicating where interpolated values are
142 requested.
143 tri_index : array-like of int, optional
144 Array of the containing triangle indices, same shape as
145 *x* and *y*. Defaults to None. If None, these indices
146 will be computed by a TriFinder instance.
147 (Note: For point outside the grid, tri_index[ipt] shall be -1).
148 return_keys : tuple of keys from {'z', 'dzdx', 'dzdy'}
149 Defines the interpolation arrays to return, and in which order.
151 Returns
152 -------
153 ret : list of arrays
154 Each array-like contains the expected interpolated values in the
155 order defined by *return_keys* parameter.
156 """
157 # Flattening and rescaling inputs arrays x, y
158 # (initial shape is stored for output)
159 x = np.asarray(x, dtype=np.float64)
160 y = np.asarray(y, dtype=np.float64)
161 sh_ret = x.shape
162 if x.shape != y.shape:
163 raise ValueError("x and y shall have same shapes."
164 " Given: {0} and {1}".format(x.shape, y.shape))
165 x = np.ravel(x)
166 y = np.ravel(y)
167 x_scaled = x/self._unit_x
168 y_scaled = y/self._unit_y
169 size_ret = np.size(x_scaled)
171 # Computes & ravels the element indexes, extract the valid ones.
172 if tri_index is None:
173 tri_index = self._trifinder(x, y)
174 else:
175 if tri_index.shape != sh_ret:
176 raise ValueError(
177 "tri_index array is provided and shall"
178 " have same shape as x and y. Given: "
179 "{0} and {1}".format(tri_index.shape, sh_ret))
180 tri_index = np.ravel(tri_index)
182 mask_in = (tri_index != -1)
183 if self._tri_renum is None:
184 valid_tri_index = tri_index[mask_in]
185 else:
186 valid_tri_index = self._tri_renum[tri_index[mask_in]]
187 valid_x = x_scaled[mask_in]
188 valid_y = y_scaled[mask_in]
190 ret = []
191 for return_key in return_keys:
192 # Find the return index associated with the key.
193 try:
194 return_index = {'z': 0, 'dzdx': 1, 'dzdy': 2}[return_key]
195 except KeyError:
196 raise ValueError("return_keys items shall take values in"
197 " {'z', 'dzdx', 'dzdy'}")
199 # Sets the scale factor for f & df components
200 scale = [1., 1./self._unit_x, 1./self._unit_y][return_index]
202 # Computes the interpolation
203 ret_loc = np.empty(size_ret, dtype=np.float64)
204 ret_loc[~mask_in] = np.nan
205 ret_loc[mask_in] = self._interpolate_single_key(
206 return_key, valid_tri_index, valid_x, valid_y) * scale
207 ret += [np.ma.masked_invalid(ret_loc.reshape(sh_ret), copy=False)]
209 return ret
211 def _interpolate_single_key(self, return_key, tri_index, x, y):
212 """
213 Performs the interpolation at points belonging to the triangulation
214 (inside an unmasked triangles).
216 Parameters
217 ----------
218 return_index : {'z', 'dzdx', 'dzdy'}
219 Identifies the requested values (z or its derivatives)
220 tri_index : 1d integer array
221 Valid triangle index (-1 prohibited)
222 x, y : 1d arrays, same shape as `tri_index`
223 Valid locations where interpolation is requested.
225 Returns
226 -------
227 ret : 1-d array
228 Returned array of the same size as *tri_index*
229 """
230 raise NotImplementedError("TriInterpolator subclasses" +
231 "should implement _interpolate_single_key!")
234class LinearTriInterpolator(TriInterpolator):
235 """
236 A LinearTriInterpolator performs linear interpolation on a triangular grid.
238 Each triangle is represented by a plane so that an interpolated value at
239 point (x, y) lies on the plane of the triangle containing (x, y).
240 Interpolated values are therefore continuous across the triangulation, but
241 their first derivatives are discontinuous at edges between triangles.
243 Parameters
244 ----------
245 triangulation : :class:`~matplotlib.tri.Triangulation` object
246 The triangulation to interpolate over.
247 z : array-like of shape (npoints,)
248 Array of values, defined at grid points, to interpolate between.
249 trifinder : :class:`~matplotlib.tri.TriFinder` object, optional
250 If this is not specified, the Triangulation's default TriFinder will
251 be used by calling
252 :func:`matplotlib.tri.Triangulation.get_trifinder`.
254 Methods
255 -------
256 `__call__` (x, y) : Returns interpolated values at (x, y) points.
257 `gradient` (x, y) : Returns interpolated derivatives at (x, y) points.
259 """
260 def __init__(self, triangulation, z, trifinder=None):
261 TriInterpolator.__init__(self, triangulation, z, trifinder)
263 # Store plane coefficients for fast interpolation calculations.
264 self._plane_coefficients = \
265 self._triangulation.calculate_plane_coefficients(self._z)
267 def __call__(self, x, y):
268 return self._interpolate_multikeys(x, y, tri_index=None,
269 return_keys=('z',))[0]
270 __call__.__doc__ = TriInterpolator._docstring__call__
272 def gradient(self, x, y):
273 return self._interpolate_multikeys(x, y, tri_index=None,
274 return_keys=('dzdx', 'dzdy'))
275 gradient.__doc__ = TriInterpolator._docstringgradient
277 def _interpolate_single_key(self, return_key, tri_index, x, y):
278 if return_key == 'z':
279 return (self._plane_coefficients[tri_index, 0]*x +
280 self._plane_coefficients[tri_index, 1]*y +
281 self._plane_coefficients[tri_index, 2])
282 elif return_key == 'dzdx':
283 return self._plane_coefficients[tri_index, 0]
284 elif return_key == 'dzdy':
285 return self._plane_coefficients[tri_index, 1]
286 else:
287 raise ValueError("Invalid return_key: " + return_key)
290class CubicTriInterpolator(TriInterpolator):
291 r"""
292 A CubicTriInterpolator performs cubic interpolation on triangular grids.
294 In one-dimension - on a segment - a cubic interpolating function is
295 defined by the values of the function and its derivative at both ends.
296 This is almost the same in 2-d inside a triangle, except that the values
297 of the function and its 2 derivatives have to be defined at each triangle
298 node.
300 The CubicTriInterpolator takes the value of the function at each node -
301 provided by the user - and internally computes the value of the
302 derivatives, resulting in a smooth interpolation.
303 (As a special feature, the user can also impose the value of the
304 derivatives at each node, but this is not supposed to be the common
305 usage.)
307 Parameters
308 ----------
309 triangulation : :class:`~matplotlib.tri.Triangulation` object
310 The triangulation to interpolate over.
311 z : array-like of shape (npoints,)
312 Array of values, defined at grid points, to interpolate between.
313 kind : {'min_E', 'geom', 'user'}, optional
314 Choice of the smoothing algorithm, in order to compute
315 the interpolant derivatives (defaults to 'min_E'):
317 - if 'min_E': (default) The derivatives at each node is computed
318 to minimize a bending energy.
319 - if 'geom': The derivatives at each node is computed as a
320 weighted average of relevant triangle normals. To be used for
321 speed optimization (large grids).
322 - if 'user': The user provides the argument *dz*, no computation
323 is hence needed.
325 trifinder : :class:`~matplotlib.tri.TriFinder` object, optional
326 If not specified, the Triangulation's default TriFinder will
327 be used by calling
328 :func:`matplotlib.tri.Triangulation.get_trifinder`.
329 dz : tuple of array-likes (dzdx, dzdy), optional
330 Used only if *kind* ='user'. In this case *dz* must be provided as
331 (dzdx, dzdy) where dzdx, dzdy are arrays of the same shape as *z* and
332 are the interpolant first derivatives at the *triangulation* points.
334 Methods
335 -------
336 `__call__` (x, y) : Returns interpolated values at (x, y) points.
337 `gradient` (x, y) : Returns interpolated derivatives at (x, y) points.
339 Notes
340 -----
341 This note is a bit technical and details the way a
342 :class:`~matplotlib.tri.CubicTriInterpolator` computes a cubic
343 interpolation.
345 The interpolation is based on a Clough-Tocher subdivision scheme of
346 the *triangulation* mesh (to make it clearer, each triangle of the
347 grid will be divided in 3 child-triangles, and on each child triangle
348 the interpolated function is a cubic polynomial of the 2 coordinates).
349 This technique originates from FEM (Finite Element Method) analysis;
350 the element used is a reduced Hsieh-Clough-Tocher (HCT)
351 element. Its shape functions are described in [1]_.
352 The assembled function is guaranteed to be C1-smooth, i.e. it is
353 continuous and its first derivatives are also continuous (this
354 is easy to show inside the triangles but is also true when crossing the
355 edges).
357 In the default case (*kind* ='min_E'), the interpolant minimizes a
358 curvature energy on the functional space generated by the HCT element
359 shape functions - with imposed values but arbitrary derivatives at each
360 node. The minimized functional is the integral of the so-called total
361 curvature (implementation based on an algorithm from [2]_ - PCG sparse
362 solver):
364 .. math::
366 E(z) = \frac{1}{2} \int_{\Omega} \left(
367 \left( \frac{\partial^2{z}}{\partial{x}^2} \right)^2 +
368 \left( \frac{\partial^2{z}}{\partial{y}^2} \right)^2 +
369 2\left( \frac{\partial^2{z}}{\partial{y}\partial{x}} \right)^2
370 \right) dx\,dy
372 If the case *kind* ='geom' is chosen by the user, a simple geometric
373 approximation is used (weighted average of the triangle normal
374 vectors), which could improve speed on very large grids.
376 References
377 ----------
378 .. [1] Michel Bernadou, Kamal Hassan, "Basis functions for general
379 Hsieh-Clough-Tocher triangles, complete or reduced.",
380 International Journal for Numerical Methods in Engineering,
381 17(5):784 - 789. 2.01.
382 .. [2] C.T. Kelley, "Iterative Methods for Optimization".
384 """
385 def __init__(self, triangulation, z, kind='min_E', trifinder=None,
386 dz=None):
387 TriInterpolator.__init__(self, triangulation, z, trifinder)
389 # Loads the underlying c++ _triangulation.
390 # (During loading, reordering of triangulation._triangles may occur so
391 # that all final triangles are now anti-clockwise)
392 self._triangulation.get_cpp_triangulation()
394 # To build the stiffness matrix and avoid zero-energy spurious modes
395 # we will only store internally the valid (unmasked) triangles and
396 # the necessary (used) points coordinates.
397 # 2 renumbering tables need to be computed and stored:
398 # - a triangle renum table in order to translate the result from a
399 # TriFinder instance into the internal stored triangle number.
400 # - a node renum table to overwrite the self._z values into the new
401 # (used) node numbering.
402 tri_analyzer = TriAnalyzer(self._triangulation)
403 (compressed_triangles, compressed_x, compressed_y, tri_renum,
404 node_renum) = tri_analyzer._get_compressed_triangulation(True, True)
405 self._triangles = compressed_triangles
406 self._tri_renum = tri_renum
407 # Taking into account the node renumbering in self._z:
408 node_mask = (node_renum == -1)
409 self._z[node_renum[~node_mask]] = self._z
410 self._z = self._z[~node_mask]
412 # Computing scale factors
413 self._unit_x = np.ptp(compressed_x)
414 self._unit_y = np.ptp(compressed_y)
415 self._pts = np.column_stack([compressed_x / self._unit_x,
416 compressed_y / self._unit_y])
417 # Computing triangle points
418 self._tris_pts = self._pts[self._triangles]
419 # Computing eccentricities
420 self._eccs = self._compute_tri_eccentricities(self._tris_pts)
421 # Computing dof estimations for HCT triangle shape function
422 self._dof = self._compute_dof(kind, dz=dz)
423 # Loading HCT element
424 self._ReferenceElement = _ReducedHCT_Element()
426 def __call__(self, x, y):
427 return self._interpolate_multikeys(x, y, tri_index=None,
428 return_keys=('z',))[0]
429 __call__.__doc__ = TriInterpolator._docstring__call__
431 def gradient(self, x, y):
432 return self._interpolate_multikeys(x, y, tri_index=None,
433 return_keys=('dzdx', 'dzdy'))
434 gradient.__doc__ = TriInterpolator._docstringgradient
436 def _interpolate_single_key(self, return_key, tri_index, x, y):
437 tris_pts = self._tris_pts[tri_index]
438 alpha = self._get_alpha_vec(x, y, tris_pts)
439 ecc = self._eccs[tri_index]
440 dof = np.expand_dims(self._dof[tri_index], axis=1)
441 if return_key == 'z':
442 return self._ReferenceElement.get_function_values(
443 alpha, ecc, dof)
444 elif return_key in ['dzdx', 'dzdy']:
445 J = self._get_jacobian(tris_pts)
446 dzdx = self._ReferenceElement.get_function_derivatives(
447 alpha, J, ecc, dof)
448 if return_key == 'dzdx':
449 return dzdx[:, 0, 0]
450 else:
451 return dzdx[:, 1, 0]
452 else:
453 raise ValueError("Invalid return_key: " + return_key)
455 def _compute_dof(self, kind, dz=None):
456 """
457 Computes and returns nodal dofs according to kind
459 Parameters
460 ----------
461 kind : {'min_E', 'geom', 'user'}
462 Choice of the _DOF_estimator subclass to perform the gradient
463 estimation.
464 dz : tuple of array-likes (dzdx, dzdy), optional
465 Used only if *kind*=user; in this case passed to the
466 :class:`_DOF_estimator_user`.
468 Returns
469 -------
470 dof : array-like, shape (npts, 2)
471 Estimation of the gradient at triangulation nodes (stored as
472 degree of freedoms of reduced-HCT triangle elements).
473 """
474 if kind == 'user':
475 if dz is None:
476 raise ValueError("For a CubicTriInterpolator with "
477 "*kind*='user', a valid *dz* "
478 "argument is expected.")
479 TE = _DOF_estimator_user(self, dz=dz)
480 elif kind == 'geom':
481 TE = _DOF_estimator_geom(self)
482 elif kind == 'min_E':
483 TE = _DOF_estimator_min_E(self)
484 else:
485 raise ValueError("CubicTriInterpolator *kind* proposed: {0}; "
486 "should be one of: "
487 "'user', 'geom', 'min_E'".format(kind))
488 return TE.compute_dof_from_df()
490 @staticmethod
491 def _get_alpha_vec(x, y, tris_pts):
492 """
493 Fast (vectorized) function to compute barycentric coordinates alpha.
495 Parameters
496 ----------
497 x, y : array-like of dim 1 (shape (nx,))
498 Coordinates of the points whose points barycentric
499 coordinates are requested
500 tris_pts : array like of dim 3 (shape: (nx, 3, 2))
501 Coordinates of the containing triangles apexes.
503 Returns
504 -------
505 alpha : array of dim 2 (shape (nx, 3))
506 Barycentric coordinates of the points inside the containing
507 triangles.
508 """
509 ndim = tris_pts.ndim-2
511 a = tris_pts[:, 1, :] - tris_pts[:, 0, :]
512 b = tris_pts[:, 2, :] - tris_pts[:, 0, :]
513 abT = np.stack([a, b], axis=-1)
514 ab = _transpose_vectorized(abT)
515 OM = np.stack([x, y], axis=1) - tris_pts[:, 0, :]
517 metric = _prod_vectorized(ab, abT)
518 # Here we try to deal with the colinear cases.
519 # metric_inv is in this case set to the Moore-Penrose pseudo-inverse
520 # meaning that we will still return a set of valid barycentric
521 # coordinates.
522 metric_inv = _pseudo_inv22sym_vectorized(metric)
523 Covar = _prod_vectorized(ab, _transpose_vectorized(
524 np.expand_dims(OM, ndim)))
525 ksi = _prod_vectorized(metric_inv, Covar)
526 alpha = _to_matrix_vectorized([
527 [1-ksi[:, 0, 0]-ksi[:, 1, 0]], [ksi[:, 0, 0]], [ksi[:, 1, 0]]])
528 return alpha
530 @staticmethod
531 def _get_jacobian(tris_pts):
532 """
533 Fast (vectorized) function to compute triangle jacobian matrix.
535 Parameters
536 ----------
537 tris_pts : array like of dim 3 (shape: (nx, 3, 2))
538 Coordinates of the containing triangles apexes.
540 Returns
541 -------
542 J : array of dim 3 (shape (nx, 2, 2))
543 Barycentric coordinates of the points inside the containing
544 triangles.
545 J[itri,:,:] is the jacobian matrix at apex 0 of the triangle
546 itri, so that the following (matrix) relationship holds:
547 [dz/dksi] = [J] x [dz/dx]
548 with x: global coordinates
549 ksi: element parametric coordinates in triangle first apex
550 local basis.
551 """
552 a = np.array(tris_pts[:, 1, :] - tris_pts[:, 0, :])
553 b = np.array(tris_pts[:, 2, :] - tris_pts[:, 0, :])
554 J = _to_matrix_vectorized([[a[:, 0], a[:, 1]],
555 [b[:, 0], b[:, 1]]])
556 return J
558 @staticmethod
559 def _compute_tri_eccentricities(tris_pts):
560 """
561 Computes triangle eccentricities
563 Parameters
564 ----------
565 tris_pts : array like of dim 3 (shape: (nx, 3, 2))
566 Coordinates of the triangles apexes.
568 Returns
569 -------
570 ecc : array like of dim 2 (shape: (nx, 3))
571 The so-called eccentricity parameters [1] needed for
572 HCT triangular element.
573 """
574 a = np.expand_dims(tris_pts[:, 2, :] - tris_pts[:, 1, :], axis=2)
575 b = np.expand_dims(tris_pts[:, 0, :] - tris_pts[:, 2, :], axis=2)
576 c = np.expand_dims(tris_pts[:, 1, :] - tris_pts[:, 0, :], axis=2)
577 # Do not use np.squeeze, this is dangerous if only one triangle
578 # in the triangulation...
579 dot_a = _prod_vectorized(_transpose_vectorized(a), a)[:, 0, 0]
580 dot_b = _prod_vectorized(_transpose_vectorized(b), b)[:, 0, 0]
581 dot_c = _prod_vectorized(_transpose_vectorized(c), c)[:, 0, 0]
582 # Note that this line will raise a warning for dot_a, dot_b or dot_c
583 # zeros, but we choose not to support triangles with duplicate points.
584 return _to_matrix_vectorized([[(dot_c-dot_b) / dot_a],
585 [(dot_a-dot_c) / dot_b],
586 [(dot_b-dot_a) / dot_c]])
589# FEM element used for interpolation and for solving minimisation
590# problem (Reduced HCT element)
591class _ReducedHCT_Element:
592 """
593 Implementation of reduced HCT triangular element with explicit shape
594 functions.
596 Computes z, dz, d2z and the element stiffness matrix for bending energy:
597 E(f) = integral( (d2z/dx2 + d2z/dy2)**2 dA)
599 *** Reference for the shape functions: ***
600 [1] Basis functions for general Hsieh-Clough-Tocher _triangles, complete or
601 reduced.
602 Michel Bernadou, Kamal Hassan
603 International Journal for Numerical Methods in Engineering.
604 17(5):784 - 789. 2.01
606 *** Element description: ***
607 9 dofs: z and dz given at 3 apex
608 C1 (conform)
610 """
611 # 1) Loads matrices to generate shape functions as a function of
612 # triangle eccentricities - based on [1] p.11 '''
613 M = np.array([
614 [ 0.00, 0.00, 0.00, 4.50, 4.50, 0.00, 0.00, 0.00, 0.00, 0.00],
615 [-0.25, 0.00, 0.00, 0.50, 1.25, 0.00, 0.00, 0.00, 0.00, 0.00],
616 [-0.25, 0.00, 0.00, 1.25, 0.50, 0.00, 0.00, 0.00, 0.00, 0.00],
617 [ 0.50, 1.00, 0.00, -1.50, 0.00, 3.00, 3.00, 0.00, 0.00, 3.00],
618 [ 0.00, 0.00, 0.00, -0.25, 0.25, 0.00, 1.00, 0.00, 0.00, 0.50],
619 [ 0.25, 0.00, 0.00, -0.50, -0.25, 1.00, 0.00, 0.00, 0.00, 1.00],
620 [ 0.50, 0.00, 1.00, 0.00, -1.50, 0.00, 0.00, 3.00, 3.00, 3.00],
621 [ 0.25, 0.00, 0.00, -0.25, -0.50, 0.00, 0.00, 0.00, 1.00, 1.00],
622 [ 0.00, 0.00, 0.00, 0.25, -0.25, 0.00, 0.00, 1.00, 0.00, 0.50]])
623 M0 = np.array([
624 [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
625 [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
626 [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
627 [-1.00, 0.00, 0.00, 1.50, 1.50, 0.00, 0.00, 0.00, 0.00, -3.00],
628 [-0.50, 0.00, 0.00, 0.75, 0.75, 0.00, 0.00, 0.00, 0.00, -1.50],
629 [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
630 [ 1.00, 0.00, 0.00, -1.50, -1.50, 0.00, 0.00, 0.00, 0.00, 3.00],
631 [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
632 [ 0.50, 0.00, 0.00, -0.75, -0.75, 0.00, 0.00, 0.00, 0.00, 1.50]])
633 M1 = np.array([
634 [-0.50, 0.00, 0.00, 1.50, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
635 [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
636 [-0.25, 0.00, 0.00, 0.75, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
637 [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
638 [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
639 [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
640 [ 0.50, 0.00, 0.00, -1.50, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
641 [ 0.25, 0.00, 0.00, -0.75, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
642 [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00]])
643 M2 = np.array([
644 [ 0.50, 0.00, 0.00, 0.00, -1.50, 0.00, 0.00, 0.00, 0.00, 0.00],
645 [ 0.25, 0.00, 0.00, 0.00, -0.75, 0.00, 0.00, 0.00, 0.00, 0.00],
646 [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
647 [-0.50, 0.00, 0.00, 0.00, 1.50, 0.00, 0.00, 0.00, 0.00, 0.00],
648 [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
649 [-0.25, 0.00, 0.00, 0.00, 0.75, 0.00, 0.00, 0.00, 0.00, 0.00],
650 [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
651 [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
652 [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00]])
654 # 2) Loads matrices to rotate components of gradient & Hessian
655 # vectors in the reference basis of triangle first apex (a0)
656 rotate_dV = np.array([[ 1., 0.], [ 0., 1.],
657 [ 0., 1.], [-1., -1.],
658 [-1., -1.], [ 1., 0.]])
660 rotate_d2V = np.array([[1., 0., 0.], [0., 1., 0.], [ 0., 0., 1.],
661 [0., 1., 0.], [1., 1., 1.], [ 0., -2., -1.],
662 [1., 1., 1.], [1., 0., 0.], [-2., 0., -1.]])
664 # 3) Loads Gauss points & weights on the 3 sub-_triangles for P2
665 # exact integral - 3 points on each subtriangles.
666 # NOTE: as the 2nd derivative is discontinuous , we really need those 9
667 # points!
668 n_gauss = 9
669 gauss_pts = np.array([[13./18., 4./18., 1./18.],
670 [ 4./18., 13./18., 1./18.],
671 [ 7./18., 7./18., 4./18.],
672 [ 1./18., 13./18., 4./18.],
673 [ 1./18., 4./18., 13./18.],
674 [ 4./18., 7./18., 7./18.],
675 [ 4./18., 1./18., 13./18.],
676 [13./18., 1./18., 4./18.],
677 [ 7./18., 4./18., 7./18.]], dtype=np.float64)
678 gauss_w = np.ones([9], dtype=np.float64) / 9.
680 # 4) Stiffness matrix for curvature energy
681 E = np.array([[1., 0., 0.], [0., 1., 0.], [0., 0., 2.]])
683 # 5) Loads the matrix to compute DOF_rot from tri_J at apex 0
684 J0_to_J1 = np.array([[-1., 1.], [-1., 0.]])
685 J0_to_J2 = np.array([[ 0., -1.], [ 1., -1.]])
687 def get_function_values(self, alpha, ecc, dofs):
688 """
689 Parameters
690 ----------
691 alpha : is a (N x 3 x 1) array (array of column-matrices) of
692 barycentric coordinates,
693 ecc : is a (N x 3 x 1) array (array of column-matrices) of triangle
694 eccentricities,
695 dofs : is a (N x 1 x 9) arrays (arrays of row-matrices) of computed
696 degrees of freedom.
698 Returns
699 -------
700 Returns the N-array of interpolated function values.
701 """
702 subtri = np.argmin(alpha, axis=1)[:, 0]
703 ksi = _roll_vectorized(alpha, -subtri, axis=0)
704 E = _roll_vectorized(ecc, -subtri, axis=0)
705 x = ksi[:, 0, 0]
706 y = ksi[:, 1, 0]
707 z = ksi[:, 2, 0]
708 x_sq = x*x
709 y_sq = y*y
710 z_sq = z*z
711 V = _to_matrix_vectorized([
712 [x_sq*x], [y_sq*y], [z_sq*z], [x_sq*z], [x_sq*y], [y_sq*x],
713 [y_sq*z], [z_sq*y], [z_sq*x], [x*y*z]])
714 prod = _prod_vectorized(self.M, V)
715 prod += _scalar_vectorized(E[:, 0, 0],
716 _prod_vectorized(self.M0, V))
717 prod += _scalar_vectorized(E[:, 1, 0],
718 _prod_vectorized(self.M1, V))
719 prod += _scalar_vectorized(E[:, 2, 0],
720 _prod_vectorized(self.M2, V))
721 s = _roll_vectorized(prod, 3*subtri, axis=0)
722 return _prod_vectorized(dofs, s)[:, 0, 0]
724 def get_function_derivatives(self, alpha, J, ecc, dofs):
725 """
726 Parameters
727 ----------
728 *alpha* is a (N x 3 x 1) array (array of column-matrices of
729 barycentric coordinates)
730 *J* is a (N x 2 x 2) array of jacobian matrices (jacobian matrix at
731 triangle first apex)
732 *ecc* is a (N x 3 x 1) array (array of column-matrices of triangle
733 eccentricities)
734 *dofs* is a (N x 1 x 9) arrays (arrays of row-matrices) of computed
735 degrees of freedom.
737 Returns
738 -------
739 Returns the values of interpolated function derivatives [dz/dx, dz/dy]
740 in global coordinates at locations alpha, as a column-matrices of
741 shape (N x 2 x 1).
742 """
743 subtri = np.argmin(alpha, axis=1)[:, 0]
744 ksi = _roll_vectorized(alpha, -subtri, axis=0)
745 E = _roll_vectorized(ecc, -subtri, axis=0)
746 x = ksi[:, 0, 0]
747 y = ksi[:, 1, 0]
748 z = ksi[:, 2, 0]
749 x_sq = x*x
750 y_sq = y*y
751 z_sq = z*z
752 dV = _to_matrix_vectorized([
753 [ -3.*x_sq, -3.*x_sq],
754 [ 3.*y_sq, 0.],
755 [ 0., 3.*z_sq],
756 [ -2.*x*z, -2.*x*z+x_sq],
757 [-2.*x*y+x_sq, -2.*x*y],
758 [ 2.*x*y-y_sq, -y_sq],
759 [ 2.*y*z, y_sq],
760 [ z_sq, 2.*y*z],
761 [ -z_sq, 2.*x*z-z_sq],
762 [ x*z-y*z, x*y-y*z]])
763 # Puts back dV in first apex basis
764 dV = _prod_vectorized(dV, _extract_submatrices(
765 self.rotate_dV, subtri, block_size=2, axis=0))
767 prod = _prod_vectorized(self.M, dV)
768 prod += _scalar_vectorized(E[:, 0, 0],
769 _prod_vectorized(self.M0, dV))
770 prod += _scalar_vectorized(E[:, 1, 0],
771 _prod_vectorized(self.M1, dV))
772 prod += _scalar_vectorized(E[:, 2, 0],
773 _prod_vectorized(self.M2, dV))
774 dsdksi = _roll_vectorized(prod, 3*subtri, axis=0)
775 dfdksi = _prod_vectorized(dofs, dsdksi)
776 # In global coordinates:
777 # Here we try to deal with the simplest colinear cases, returning a
778 # null matrix.
779 J_inv = _safe_inv22_vectorized(J)
780 dfdx = _prod_vectorized(J_inv, _transpose_vectorized(dfdksi))
781 return dfdx
783 def get_function_hessians(self, alpha, J, ecc, dofs):
784 """
785 Parameters
786 ----------
787 *alpha* is a (N x 3 x 1) array (array of column-matrices) of
788 barycentric coordinates
789 *J* is a (N x 2 x 2) array of jacobian matrices (jacobian matrix at
790 triangle first apex)
791 *ecc* is a (N x 3 x 1) array (array of column-matrices) of triangle
792 eccentricities
793 *dofs* is a (N x 1 x 9) arrays (arrays of row-matrices) of computed
794 degrees of freedom.
796 Returns
797 -------
798 Returns the values of interpolated function 2nd-derivatives
799 [d2z/dx2, d2z/dy2, d2z/dxdy] in global coordinates at locations alpha,
800 as a column-matrices of shape (N x 3 x 1).
801 """
802 d2sdksi2 = self.get_d2Sidksij2(alpha, ecc)
803 d2fdksi2 = _prod_vectorized(dofs, d2sdksi2)
804 H_rot = self.get_Hrot_from_J(J)
805 d2fdx2 = _prod_vectorized(d2fdksi2, H_rot)
806 return _transpose_vectorized(d2fdx2)
808 def get_d2Sidksij2(self, alpha, ecc):
809 """
810 Parameters
811 ----------
812 *alpha* is a (N x 3 x 1) array (array of column-matrices) of
813 barycentric coordinates
814 *ecc* is a (N x 3 x 1) array (array of column-matrices) of triangle
815 eccentricities
817 Returns
818 -------
819 Returns the arrays d2sdksi2 (N x 3 x 1) Hessian of shape functions
820 expressed in covariant coordinates in first apex basis.
821 """
822 subtri = np.argmin(alpha, axis=1)[:, 0]
823 ksi = _roll_vectorized(alpha, -subtri, axis=0)
824 E = _roll_vectorized(ecc, -subtri, axis=0)
825 x = ksi[:, 0, 0]
826 y = ksi[:, 1, 0]
827 z = ksi[:, 2, 0]
828 d2V = _to_matrix_vectorized([
829 [ 6.*x, 6.*x, 6.*x],
830 [ 6.*y, 0., 0.],
831 [ 0., 6.*z, 0.],
832 [ 2.*z, 2.*z-4.*x, 2.*z-2.*x],
833 [2.*y-4.*x, 2.*y, 2.*y-2.*x],
834 [2.*x-4.*y, 0., -2.*y],
835 [ 2.*z, 0., 2.*y],
836 [ 0., 2.*y, 2.*z],
837 [ 0., 2.*x-4.*z, -2.*z],
838 [ -2.*z, -2.*y, x-y-z]])
839 # Puts back d2V in first apex basis
840 d2V = _prod_vectorized(d2V, _extract_submatrices(
841 self.rotate_d2V, subtri, block_size=3, axis=0))
842 prod = _prod_vectorized(self.M, d2V)
843 prod += _scalar_vectorized(E[:, 0, 0],
844 _prod_vectorized(self.M0, d2V))
845 prod += _scalar_vectorized(E[:, 1, 0],
846 _prod_vectorized(self.M1, d2V))
847 prod += _scalar_vectorized(E[:, 2, 0],
848 _prod_vectorized(self.M2, d2V))
849 d2sdksi2 = _roll_vectorized(prod, 3*subtri, axis=0)
850 return d2sdksi2
852 def get_bending_matrices(self, J, ecc):
853 """
854 Parameters
855 ----------
856 *J* is a (N x 2 x 2) array of jacobian matrices (jacobian matrix at
857 triangle first apex)
858 *ecc* is a (N x 3 x 1) array (array of column-matrices) of triangle
859 eccentricities
861 Returns
862 -------
863 Returns the element K matrices for bending energy expressed in
864 GLOBAL nodal coordinates.
865 K_ij = integral [ (d2zi/dx2 + d2zi/dy2) * (d2zj/dx2 + d2zj/dy2) dA]
866 tri_J is needed to rotate dofs from local basis to global basis
867 """
868 n = np.size(ecc, 0)
870 # 1) matrix to rotate dofs in global coordinates
871 J1 = _prod_vectorized(self.J0_to_J1, J)
872 J2 = _prod_vectorized(self.J0_to_J2, J)
873 DOF_rot = np.zeros([n, 9, 9], dtype=np.float64)
874 DOF_rot[:, 0, 0] = 1
875 DOF_rot[:, 3, 3] = 1
876 DOF_rot[:, 6, 6] = 1
877 DOF_rot[:, 1:3, 1:3] = J
878 DOF_rot[:, 4:6, 4:6] = J1
879 DOF_rot[:, 7:9, 7:9] = J2
881 # 2) matrix to rotate Hessian in global coordinates.
882 H_rot, area = self.get_Hrot_from_J(J, return_area=True)
884 # 3) Computes stiffness matrix
885 # Gauss quadrature.
886 K = np.zeros([n, 9, 9], dtype=np.float64)
887 weights = self.gauss_w
888 pts = self.gauss_pts
889 for igauss in range(self.n_gauss):
890 alpha = np.tile(pts[igauss, :], n).reshape(n, 3)
891 alpha = np.expand_dims(alpha, 2)
892 weight = weights[igauss]
893 d2Skdksi2 = self.get_d2Sidksij2(alpha, ecc)
894 d2Skdx2 = _prod_vectorized(d2Skdksi2, H_rot)
895 K += weight * _prod_vectorized(_prod_vectorized(d2Skdx2, self.E),
896 _transpose_vectorized(d2Skdx2))
898 # 4) With nodal (not elem) dofs
899 K = _prod_vectorized(_prod_vectorized(_transpose_vectorized(DOF_rot),
900 K), DOF_rot)
902 # 5) Need the area to compute total element energy
903 return _scalar_vectorized(area, K)
905 def get_Hrot_from_J(self, J, return_area=False):
906 """
907 Parameters
908 ----------
909 *J* is a (N x 2 x 2) array of jacobian matrices (jacobian matrix at
910 triangle first apex)
912 Returns
913 -------
914 Returns H_rot used to rotate Hessian from local basis of first apex,
915 to global coordinates.
916 if *return_area* is True, returns also the triangle area (0.5*det(J))
917 """
918 # Here we try to deal with the simplest colinear cases; a null
919 # energy and area is imposed.
920 J_inv = _safe_inv22_vectorized(J)
921 Ji00 = J_inv[:, 0, 0]
922 Ji11 = J_inv[:, 1, 1]
923 Ji10 = J_inv[:, 1, 0]
924 Ji01 = J_inv[:, 0, 1]
925 H_rot = _to_matrix_vectorized([
926 [Ji00*Ji00, Ji10*Ji10, Ji00*Ji10],
927 [Ji01*Ji01, Ji11*Ji11, Ji01*Ji11],
928 [2*Ji00*Ji01, 2*Ji11*Ji10, Ji00*Ji11+Ji10*Ji01]])
929 if not return_area:
930 return H_rot
931 else:
932 area = 0.5 * (J[:, 0, 0]*J[:, 1, 1] - J[:, 0, 1]*J[:, 1, 0])
933 return H_rot, area
935 def get_Kff_and_Ff(self, J, ecc, triangles, Uc):
936 """
937 Builds K and F for the following elliptic formulation:
938 minimization of curvature energy with value of function at node
939 imposed and derivatives 'free'.
940 Builds the global Kff matrix in cco format.
941 Builds the full Ff vec Ff = - Kfc x Uc
943 Parameters
944 ----------
945 *J* is a (N x 2 x 2) array of jacobian matrices (jacobian matrix at
946 triangle first apex)
947 *ecc* is a (N x 3 x 1) array (array of column-matrices) of triangle
948 eccentricities
949 *triangles* is a (N x 3) array of nodes indexes.
950 *Uc* is (N x 3) array of imposed displacements at nodes
952 Returns
953 -------
954 (Kff_rows, Kff_cols, Kff_vals) Kff matrix in coo format - Duplicate
955 (row, col) entries must be summed.
956 Ff: force vector - dim npts * 3
957 """
958 ntri = np.size(ecc, 0)
959 vec_range = np.arange(ntri, dtype=np.int32)
960 c_indices = np.full(ntri, -1, dtype=np.int32) # for unused dofs, -1
961 f_dof = [1, 2, 4, 5, 7, 8]
962 c_dof = [0, 3, 6]
964 # vals, rows and cols indices in global dof numbering
965 f_dof_indices = _to_matrix_vectorized([[
966 c_indices, triangles[:, 0]*2, triangles[:, 0]*2+1,
967 c_indices, triangles[:, 1]*2, triangles[:, 1]*2+1,
968 c_indices, triangles[:, 2]*2, triangles[:, 2]*2+1]])
970 expand_indices = np.ones([ntri, 9, 1], dtype=np.int32)
971 f_row_indices = _prod_vectorized(_transpose_vectorized(f_dof_indices),
972 _transpose_vectorized(expand_indices))
973 f_col_indices = _prod_vectorized(expand_indices, f_dof_indices)
974 K_elem = self.get_bending_matrices(J, ecc)
976 # Extracting sub-matrices
977 # Explanation & notations:
978 # * Subscript f denotes 'free' degrees of freedom (i.e. dz/dx, dz/dx)
979 # * Subscript c denotes 'condensated' (imposed) degrees of freedom
980 # (i.e. z at all nodes)
981 # * F = [Ff, Fc] is the force vector
982 # * U = [Uf, Uc] is the imposed dof vector
983 # [ Kff Kfc ]
984 # * K = [ ] is the laplacian stiffness matrix
985 # [ Kcf Kff ]
986 # * As F = K x U one gets straightforwardly: Ff = - Kfc x Uc
988 # Computing Kff stiffness matrix in sparse coo format
989 Kff_vals = np.ravel(K_elem[np.ix_(vec_range, f_dof, f_dof)])
990 Kff_rows = np.ravel(f_row_indices[np.ix_(vec_range, f_dof, f_dof)])
991 Kff_cols = np.ravel(f_col_indices[np.ix_(vec_range, f_dof, f_dof)])
993 # Computing Ff force vector in sparse coo format
994 Kfc_elem = K_elem[np.ix_(vec_range, f_dof, c_dof)]
995 Uc_elem = np.expand_dims(Uc, axis=2)
996 Ff_elem = - _prod_vectorized(Kfc_elem, Uc_elem)[:, :, 0]
997 Ff_indices = f_dof_indices[np.ix_(vec_range, [0], f_dof)][:, 0, :]
999 # Extracting Ff force vector in dense format
1000 # We have to sum duplicate indices - using bincount
1001 Ff = np.bincount(np.ravel(Ff_indices), weights=np.ravel(Ff_elem))
1002 return Kff_rows, Kff_cols, Kff_vals, Ff
1005# :class:_DOF_estimator, _DOF_estimator_user, _DOF_estimator_geom,
1006# _DOF_estimator_min_E
1007# Private classes used to compute the degree of freedom of each triangular
1008# element for the TriCubicInterpolator.
1009class _DOF_estimator:
1010 """
1011 Abstract base class for classes used to perform estimation of a function
1012 first derivatives, and deduce the dofs for a CubicTriInterpolator using a
1013 reduced HCT element formulation.
1014 Derived classes implement compute_df(self,**kwargs), returning
1015 np.vstack([dfx,dfy]).T where : dfx, dfy are the estimation of the 2
1016 gradient coordinates.
1017 """
1018 def __init__(self, interpolator, **kwargs):
1019 cbook._check_isinstance(
1020 CubicTriInterpolator, interpolator=interpolator)
1021 self._pts = interpolator._pts
1022 self._tris_pts = interpolator._tris_pts
1023 self.z = interpolator._z
1024 self._triangles = interpolator._triangles
1025 (self._unit_x, self._unit_y) = (interpolator._unit_x,
1026 interpolator._unit_y)
1027 self.dz = self.compute_dz(**kwargs)
1028 self.compute_dof_from_df()
1030 def compute_dz(self, **kwargs):
1031 raise NotImplementedError
1033 def compute_dof_from_df(self):
1034 """
1035 Computes reduced-HCT elements degrees of freedom, knowing the
1036 gradient.
1037 """
1038 J = CubicTriInterpolator._get_jacobian(self._tris_pts)
1039 tri_z = self.z[self._triangles]
1040 tri_dz = self.dz[self._triangles]
1041 tri_dof = self.get_dof_vec(tri_z, tri_dz, J)
1042 return tri_dof
1044 @staticmethod
1045 def get_dof_vec(tri_z, tri_dz, J):
1046 """
1047 Computes the dof vector of a triangle, knowing the value of f, df and
1048 of the local Jacobian at each node.
1050 *tri_z*: array of shape (3,) of f nodal values
1051 *tri_dz*: array of shape (3, 2) of df/dx, df/dy nodal values
1052 *J*: Jacobian matrix in local basis of apex 0
1054 Returns dof array of shape (9,) so that for each apex iapex:
1055 dof[iapex*3+0] = f(Ai)
1056 dof[iapex*3+1] = df(Ai).(AiAi+)
1057 dof[iapex*3+2] = df(Ai).(AiAi-)]
1058 """
1059 npt = tri_z.shape[0]
1060 dof = np.zeros([npt, 9], dtype=np.float64)
1061 J1 = _prod_vectorized(_ReducedHCT_Element.J0_to_J1, J)
1062 J2 = _prod_vectorized(_ReducedHCT_Element.J0_to_J2, J)
1064 col0 = _prod_vectorized(J, np.expand_dims(tri_dz[:, 0, :], axis=2))
1065 col1 = _prod_vectorized(J1, np.expand_dims(tri_dz[:, 1, :], axis=2))
1066 col2 = _prod_vectorized(J2, np.expand_dims(tri_dz[:, 2, :], axis=2))
1068 dfdksi = _to_matrix_vectorized([
1069 [col0[:, 0, 0], col1[:, 0, 0], col2[:, 0, 0]],
1070 [col0[:, 1, 0], col1[:, 1, 0], col2[:, 1, 0]]])
1071 dof[:, 0:7:3] = tri_z
1072 dof[:, 1:8:3] = dfdksi[:, 0]
1073 dof[:, 2:9:3] = dfdksi[:, 1]
1074 return dof
1077class _DOF_estimator_user(_DOF_estimator):
1078 """dz is imposed by user; accounts for scaling if any."""
1080 def compute_dz(self, dz):
1081 (dzdx, dzdy) = dz
1082 dzdx = dzdx * self._unit_x
1083 dzdy = dzdy * self._unit_y
1084 return np.vstack([dzdx, dzdy]).T
1087class _DOF_estimator_geom(_DOF_estimator):
1088 """Fast 'geometric' approximation, recommended for large arrays."""
1090 def compute_dz(self):
1091 """
1092 self.df is computed as weighted average of _triangles sharing a common
1093 node. On each triangle itri f is first assumed linear (= ~f), which
1094 allows to compute d~f[itri]
1095 Then the following approximation of df nodal values is then proposed:
1096 f[ipt] = SUM ( w[itri] x d~f[itri] , for itri sharing apex ipt)
1097 The weighted coeff. w[itri] are proportional to the angle of the
1098 triangle itri at apex ipt
1099 """
1100 el_geom_w = self.compute_geom_weights()
1101 el_geom_grad = self.compute_geom_grads()
1103 # Sum of weights coeffs
1104 w_node_sum = np.bincount(np.ravel(self._triangles),
1105 weights=np.ravel(el_geom_w))
1107 # Sum of weighted df = (dfx, dfy)
1108 dfx_el_w = np.empty_like(el_geom_w)
1109 dfy_el_w = np.empty_like(el_geom_w)
1110 for iapex in range(3):
1111 dfx_el_w[:, iapex] = el_geom_w[:, iapex]*el_geom_grad[:, 0]
1112 dfy_el_w[:, iapex] = el_geom_w[:, iapex]*el_geom_grad[:, 1]
1113 dfx_node_sum = np.bincount(np.ravel(self._triangles),
1114 weights=np.ravel(dfx_el_w))
1115 dfy_node_sum = np.bincount(np.ravel(self._triangles),
1116 weights=np.ravel(dfy_el_w))
1118 # Estimation of df
1119 dfx_estim = dfx_node_sum/w_node_sum
1120 dfy_estim = dfy_node_sum/w_node_sum
1121 return np.vstack([dfx_estim, dfy_estim]).T
1123 def compute_geom_weights(self):
1124 """
1125 Builds the (nelems x 3) weights coeffs of _triangles angles,
1126 renormalized so that np.sum(weights, axis=1) == np.ones(nelems)
1127 """
1128 weights = np.zeros([np.size(self._triangles, 0), 3])
1129 tris_pts = self._tris_pts
1130 for ipt in range(3):
1131 p0 = tris_pts[:, (ipt) % 3, :]
1132 p1 = tris_pts[:, (ipt+1) % 3, :]
1133 p2 = tris_pts[:, (ipt-1) % 3, :]
1134 alpha1 = np.arctan2(p1[:, 1]-p0[:, 1], p1[:, 0]-p0[:, 0])
1135 alpha2 = np.arctan2(p2[:, 1]-p0[:, 1], p2[:, 0]-p0[:, 0])
1136 # In the below formula we could take modulo 2. but
1137 # modulo 1. is safer regarding round-off errors (flat triangles).
1138 angle = np.abs(((alpha2-alpha1) / np.pi) % 1)
1139 # Weight proportional to angle up np.pi/2; null weight for
1140 # degenerated cases 0 and np.pi (note that *angle* is normalized
1141 # by np.pi).
1142 weights[:, ipt] = 0.5 - np.abs(angle-0.5)
1143 return weights
1145 def compute_geom_grads(self):
1146 """
1147 Compute the (global) gradient component of f assumed linear (~f).
1148 returns array df of shape (nelems, 2)
1149 df[ielem].dM[ielem] = dz[ielem] i.e. df = dz x dM = dM.T^-1 x dz
1150 """
1151 tris_pts = self._tris_pts
1152 tris_f = self.z[self._triangles]
1154 dM1 = tris_pts[:, 1, :] - tris_pts[:, 0, :]
1155 dM2 = tris_pts[:, 2, :] - tris_pts[:, 0, :]
1156 dM = np.dstack([dM1, dM2])
1157 # Here we try to deal with the simplest colinear cases: a null
1158 # gradient is assumed in this case.
1159 dM_inv = _safe_inv22_vectorized(dM)
1161 dZ1 = tris_f[:, 1] - tris_f[:, 0]
1162 dZ2 = tris_f[:, 2] - tris_f[:, 0]
1163 dZ = np.vstack([dZ1, dZ2]).T
1164 df = np.empty_like(dZ)
1166 # With np.einsum : could be ej,eji -> ej
1167 df[:, 0] = dZ[:, 0]*dM_inv[:, 0, 0] + dZ[:, 1]*dM_inv[:, 1, 0]
1168 df[:, 1] = dZ[:, 0]*dM_inv[:, 0, 1] + dZ[:, 1]*dM_inv[:, 1, 1]
1169 return df
1172class _DOF_estimator_min_E(_DOF_estimator_geom):
1173 """
1174 The 'smoothest' approximation, df is computed through global minimization
1175 of the bending energy:
1176 E(f) = integral[(d2z/dx2 + d2z/dy2 + 2 d2z/dxdy)**2 dA]
1177 """
1178 def __init__(self, Interpolator):
1179 self._eccs = Interpolator._eccs
1180 _DOF_estimator_geom.__init__(self, Interpolator)
1182 def compute_dz(self):
1183 """
1184 Elliptic solver for bending energy minimization.
1185 Uses a dedicated 'toy' sparse Jacobi PCG solver.
1186 """
1187 # Initial guess for iterative PCG solver.
1188 dz_init = _DOF_estimator_geom.compute_dz(self)
1189 Uf0 = np.ravel(dz_init)
1191 reference_element = _ReducedHCT_Element()
1192 J = CubicTriInterpolator._get_jacobian(self._tris_pts)
1193 eccs = self._eccs
1194 triangles = self._triangles
1195 Uc = self.z[self._triangles]
1197 # Building stiffness matrix and force vector in coo format
1198 Kff_rows, Kff_cols, Kff_vals, Ff = reference_element.get_Kff_and_Ff(
1199 J, eccs, triangles, Uc)
1201 # Building sparse matrix and solving minimization problem
1202 # We could use scipy.sparse direct solver; however to avoid this
1203 # external dependency an implementation of a simple PCG solver with
1204 # a simple diagonal Jacobi preconditioner is implemented.
1205 tol = 1.e-10
1206 n_dof = Ff.shape[0]
1207 Kff_coo = _Sparse_Matrix_coo(Kff_vals, Kff_rows, Kff_cols,
1208 shape=(n_dof, n_dof))
1209 Kff_coo.compress_csc()
1210 Uf, err = _cg(A=Kff_coo, b=Ff, x0=Uf0, tol=tol)
1211 # If the PCG did not converge, we return the best guess between Uf0
1212 # and Uf.
1213 err0 = np.linalg.norm(Kff_coo.dot(Uf0) - Ff)
1214 if err0 < err:
1215 # Maybe a good occasion to raise a warning here ?
1216 cbook._warn_external("In TriCubicInterpolator initialization, "
1217 "PCG sparse solver did not converge after "
1218 "1000 iterations. `geom` approximation is "
1219 "used instead of `min_E`")
1220 Uf = Uf0
1222 # Building dz from Uf
1223 dz = np.empty([self._pts.shape[0], 2], dtype=np.float64)
1224 dz[:, 0] = Uf[::2]
1225 dz[:, 1] = Uf[1::2]
1226 return dz
1229# The following private :class:_Sparse_Matrix_coo and :func:_cg provide
1230# a PCG sparse solver for (symmetric) elliptic problems.
1231class _Sparse_Matrix_coo:
1232 def __init__(self, vals, rows, cols, shape):
1233 """
1234 Creates a sparse matrix in coo format
1235 *vals*: arrays of values of non-null entries of the matrix
1236 *rows*: int arrays of rows of non-null entries of the matrix
1237 *cols*: int arrays of cols of non-null entries of the matrix
1238 *shape*: 2-tuple (n, m) of matrix shape
1240 """
1241 self.n, self.m = shape
1242 self.vals = np.asarray(vals, dtype=np.float64)
1243 self.rows = np.asarray(rows, dtype=np.int32)
1244 self.cols = np.asarray(cols, dtype=np.int32)
1246 def dot(self, V):
1247 """
1248 Dot product of self by a vector *V* in sparse-dense to dense format
1249 *V* dense vector of shape (self.m,)
1250 """
1251 assert V.shape == (self.m,)
1252 return np.bincount(self.rows,
1253 weights=self.vals*V[self.cols],
1254 minlength=self.m)
1256 def compress_csc(self):
1257 """
1258 Compress rows, cols, vals / summing duplicates. Sort for csc format.
1259 """
1260 _, unique, indices = np.unique(
1261 self.rows + self.n*self.cols,
1262 return_index=True, return_inverse=True)
1263 self.rows = self.rows[unique]
1264 self.cols = self.cols[unique]
1265 self.vals = np.bincount(indices, weights=self.vals)
1267 def compress_csr(self):
1268 """
1269 Compress rows, cols, vals / summing duplicates. Sort for csr format.
1270 """
1271 _, unique, indices = np.unique(
1272 self.m*self.rows + self.cols,
1273 return_index=True, return_inverse=True)
1274 self.rows = self.rows[unique]
1275 self.cols = self.cols[unique]
1276 self.vals = np.bincount(indices, weights=self.vals)
1278 def to_dense(self):
1279 """
1280 Returns a dense matrix representing self.
1281 Mainly for debugging purposes.
1282 """
1283 ret = np.zeros([self.n, self.m], dtype=np.float64)
1284 nvals = self.vals.size
1285 for i in range(nvals):
1286 ret[self.rows[i], self.cols[i]] += self.vals[i]
1287 return ret
1289 def __str__(self):
1290 return self.to_dense().__str__()
1292 @property
1293 def diag(self):
1294 """
1295 Returns the (dense) vector of the diagonal elements.
1296 """
1297 in_diag = (self.rows == self.cols)
1298 diag = np.zeros(min(self.n, self.n), dtype=np.float64) # default 0.
1299 diag[self.rows[in_diag]] = self.vals[in_diag]
1300 return diag
1303def _cg(A, b, x0=None, tol=1.e-10, maxiter=1000):
1304 """
1305 Use Preconditioned Conjugate Gradient iteration to solve A x = b
1306 A simple Jacobi (diagonal) preconditionner is used.
1308 Parameters
1309 ----------
1310 A : _Sparse_Matrix_coo
1311 *A* must have been compressed before by compress_csc or
1312 compress_csr method.
1314 b : array
1315 Right hand side of the linear system.
1317 Returns
1318 -------
1319 x : array
1320 The converged solution.
1321 err : float
1322 The absolute error np.linalg.norm(A.dot(x) - b)
1324 Other parameters
1325 ----------------
1326 x0 : array
1327 Starting guess for the solution.
1328 tol : float
1329 Tolerance to achieve. The algorithm terminates when the relative
1330 residual is below tol.
1331 maxiter : integer
1332 Maximum number of iterations. Iteration will stop
1333 after maxiter steps even if the specified tolerance has not
1334 been achieved.
1335 """
1336 n = b.size
1337 assert A.n == n
1338 assert A.m == n
1339 b_norm = np.linalg.norm(b)
1341 # Jacobi pre-conditioner
1342 kvec = A.diag
1343 # For diag elem < 1e-6 we keep 1e-6.
1344 kvec = np.maximum(kvec, 1e-6)
1346 # Initial guess
1347 if x0 is None:
1348 x = np.zeros(n)
1349 else:
1350 x = x0
1352 r = b - A.dot(x)
1353 w = r/kvec
1355 p = np.zeros(n)
1356 beta = 0.0
1357 rho = np.dot(r, w)
1358 k = 0
1360 # Following C. T. Kelley
1361 while (np.sqrt(abs(rho)) > tol*b_norm) and (k < maxiter):
1362 p = w + beta*p
1363 z = A.dot(p)
1364 alpha = rho/np.dot(p, z)
1365 r = r - alpha*z
1366 w = r/kvec
1367 rhoold = rho
1368 rho = np.dot(r, w)
1369 x = x + alpha*p
1370 beta = rho/rhoold
1371 #err = np.linalg.norm(A.dot(x) - b) # absolute accuracy - not used
1372 k += 1
1373 err = np.linalg.norm(A.dot(x) - b)
1374 return x, err
1377# The following private functions:
1378# :func:`_safe_inv22_vectorized`
1379# :func:`_pseudo_inv22sym_vectorized`
1380# :func:`_prod_vectorized`
1381# :func:`_scalar_vectorized`
1382# :func:`_transpose_vectorized`
1383# :func:`_roll_vectorized`
1384# :func:`_to_matrix_vectorized`
1385# :func:`_extract_submatrices`
1386# provide fast numpy implementation of some standard operations on arrays of
1387# matrices - stored as (:, n_rows, n_cols)-shaped np.arrays.
1389# Development note: Dealing with pathologic 'flat' triangles in the
1390# CubicTriInterpolator code and impact on (2, 2)-matrix inversion functions
1391# :func:`_safe_inv22_vectorized` and :func:`_pseudo_inv22sym_vectorized`.
1392#
1393# Goals:
1394# 1) The CubicTriInterpolator should be able to handle flat or almost flat
1395# triangles without raising an error,
1396# 2) These degenerated triangles should have no impact on the automatic dof
1397# calculation (associated with null weight for the _DOF_estimator_geom and
1398# with null energy for the _DOF_estimator_min_E),
1399# 3) Linear patch test should be passed exactly on degenerated meshes,
1400# 4) Interpolation (with :meth:`_interpolate_single_key` or
1401# :meth:`_interpolate_multi_key`) shall be correctly handled even *inside*
1402# the pathologic triangles, to interact correctly with a TriRefiner class.
1403#
1404# Difficulties:
1405# Flat triangles have rank-deficient *J* (so-called jacobian matrix) and
1406# *metric* (the metric tensor = J x J.T). Computation of the local
1407# tangent plane is also problematic.
1408#
1409# Implementation:
1410# Most of the time, when computing the inverse of a rank-deficient matrix it
1411# is safe to simply return the null matrix (which is the implementation in
1412# :func:`_safe_inv22_vectorized`). This is because of point 2), itself
1413# enforced by:
1414# - null area hence null energy in :class:`_DOF_estimator_min_E`
1415# - angles close or equal to 0 or np.pi hence null weight in
1416# :class:`_DOF_estimator_geom`.
1417# Note that the function angle -> weight is continuous and maximum for an
1418# angle np.pi/2 (refer to :meth:`compute_geom_weights`)
1419# The exception is the computation of barycentric coordinates, which is done
1420# by inversion of the *metric* matrix. In this case, we need to compute a set
1421# of valid coordinates (1 among numerous possibilities), to ensure point 4).
1422# We benefit here from the symmetry of metric = J x J.T, which makes it easier
1423# to compute a pseudo-inverse in :func:`_pseudo_inv22sym_vectorized`
1424def _safe_inv22_vectorized(M):
1425 """
1426 Inversion of arrays of (2, 2) matrices, returns 0 for rank-deficient
1427 matrices.
1429 *M* : array of (2, 2) matrices to inverse, shape (n, 2, 2)
1430 """
1431 assert M.ndim == 3
1432 assert M.shape[-2:] == (2, 2)
1433 M_inv = np.empty_like(M)
1434 prod1 = M[:, 0, 0]*M[:, 1, 1]
1435 delta = prod1 - M[:, 0, 1]*M[:, 1, 0]
1437 # We set delta_inv to 0. in case of a rank deficient matrix; a
1438 # rank-deficient input matrix *M* will lead to a null matrix in output
1439 rank2 = (np.abs(delta) > 1e-8*np.abs(prod1))
1440 if np.all(rank2):
1441 # Normal 'optimized' flow.
1442 delta_inv = 1./delta
1443 else:
1444 # 'Pathologic' flow.
1445 delta_inv = np.zeros(M.shape[0])
1446 delta_inv[rank2] = 1./delta[rank2]
1448 M_inv[:, 0, 0] = M[:, 1, 1]*delta_inv
1449 M_inv[:, 0, 1] = -M[:, 0, 1]*delta_inv
1450 M_inv[:, 1, 0] = -M[:, 1, 0]*delta_inv
1451 M_inv[:, 1, 1] = M[:, 0, 0]*delta_inv
1452 return M_inv
1455def _pseudo_inv22sym_vectorized(M):
1456 """
1457 Inversion of arrays of (2, 2) SYMMETRIC matrices; returns the
1458 (Moore-Penrose) pseudo-inverse for rank-deficient matrices.
1460 In case M is of rank 1, we have M = trace(M) x P where P is the orthogonal
1461 projection on Im(M), and we return trace(M)^-1 x P == M / trace(M)**2
1462 In case M is of rank 0, we return the null matrix.
1464 *M* : array of (2, 2) matrices to inverse, shape (n, 2, 2)
1465 """
1466 assert M.ndim == 3
1467 assert M.shape[-2:] == (2, 2)
1468 M_inv = np.empty_like(M)
1469 prod1 = M[:, 0, 0]*M[:, 1, 1]
1470 delta = prod1 - M[:, 0, 1]*M[:, 1, 0]
1471 rank2 = (np.abs(delta) > 1e-8*np.abs(prod1))
1473 if np.all(rank2):
1474 # Normal 'optimized' flow.
1475 M_inv[:, 0, 0] = M[:, 1, 1] / delta
1476 M_inv[:, 0, 1] = -M[:, 0, 1] / delta
1477 M_inv[:, 1, 0] = -M[:, 1, 0] / delta
1478 M_inv[:, 1, 1] = M[:, 0, 0] / delta
1479 else:
1480 # 'Pathologic' flow.
1481 # Here we have to deal with 2 sub-cases
1482 # 1) First sub-case: matrices of rank 2:
1483 delta = delta[rank2]
1484 M_inv[rank2, 0, 0] = M[rank2, 1, 1] / delta
1485 M_inv[rank2, 0, 1] = -M[rank2, 0, 1] / delta
1486 M_inv[rank2, 1, 0] = -M[rank2, 1, 0] / delta
1487 M_inv[rank2, 1, 1] = M[rank2, 0, 0] / delta
1488 # 2) Second sub-case: rank-deficient matrices of rank 0 and 1:
1489 rank01 = ~rank2
1490 tr = M[rank01, 0, 0] + M[rank01, 1, 1]
1491 tr_zeros = (np.abs(tr) < 1.e-8)
1492 sq_tr_inv = (1.-tr_zeros) / (tr**2+tr_zeros)
1493 #sq_tr_inv = 1. / tr**2
1494 M_inv[rank01, 0, 0] = M[rank01, 0, 0] * sq_tr_inv
1495 M_inv[rank01, 0, 1] = M[rank01, 0, 1] * sq_tr_inv
1496 M_inv[rank01, 1, 0] = M[rank01, 1, 0] * sq_tr_inv
1497 M_inv[rank01, 1, 1] = M[rank01, 1, 1] * sq_tr_inv
1499 return M_inv
1502def _prod_vectorized(M1, M2):
1503 """
1504 Matrix product between arrays of matrices, or a matrix and an array of
1505 matrices (*M1* and *M2*)
1506 """
1507 sh1 = M1.shape
1508 sh2 = M2.shape
1509 assert len(sh1) >= 2
1510 assert len(sh2) >= 2
1511 assert sh1[-1] == sh2[-2]
1513 ndim1 = len(sh1)
1514 t1_index = [*range(ndim1-2), ndim1-1, ndim1-2]
1515 return np.sum(np.transpose(M1, t1_index)[..., np.newaxis] *
1516 M2[..., np.newaxis, :], -3)
1519def _scalar_vectorized(scalar, M):
1520 """
1521 Scalar product between scalars and matrices.
1522 """
1523 return scalar[:, np.newaxis, np.newaxis]*M
1526def _transpose_vectorized(M):
1527 """
1528 Transposition of an array of matrices *M*.
1529 """
1530 return np.transpose(M, [0, 2, 1])
1533def _roll_vectorized(M, roll_indices, axis):
1534 """
1535 Rolls an array of matrices along an axis according to an array of indices
1536 *roll_indices*
1537 *axis* can be either 0 (rolls rows) or 1 (rolls columns).
1538 """
1539 assert axis in [0, 1]
1540 ndim = M.ndim
1541 assert ndim == 3
1542 ndim_roll = roll_indices.ndim
1543 assert ndim_roll == 1
1544 sh = M.shape
1545 r, c = sh[-2:]
1546 assert sh[0] == roll_indices.shape[0]
1547 vec_indices = np.arange(sh[0], dtype=np.int32)
1549 # Builds the rolled matrix
1550 M_roll = np.empty_like(M)
1551 if axis == 0:
1552 for ir in range(r):
1553 for ic in range(c):
1554 M_roll[:, ir, ic] = M[vec_indices, (-roll_indices+ir) % r, ic]
1555 elif axis == 1:
1556 for ir in range(r):
1557 for ic in range(c):
1558 M_roll[:, ir, ic] = M[vec_indices, ir, (-roll_indices+ic) % c]
1559 return M_roll
1562def _to_matrix_vectorized(M):
1563 """
1564 Builds an array of matrices from individuals np.arrays of identical
1565 shapes.
1566 *M*: ncols-list of nrows-lists of shape sh.
1568 Returns M_res np.array of shape (sh, nrow, ncols) so that:
1569 M_res[..., i, j] = M[i][j]
1570 """
1571 assert isinstance(M, (tuple, list))
1572 assert all(isinstance(item, (tuple, list)) for item in M)
1573 c_vec = np.asarray([len(item) for item in M])
1574 assert np.all(c_vec-c_vec[0] == 0)
1575 r = len(M)
1576 c = c_vec[0]
1577 M00 = np.asarray(M[0][0])
1578 dt = M00.dtype
1579 sh = [M00.shape[0], r, c]
1580 M_ret = np.empty(sh, dtype=dt)
1581 for irow in range(r):
1582 for icol in range(c):
1583 M_ret[:, irow, icol] = np.asarray(M[irow][icol])
1584 return M_ret
1587def _extract_submatrices(M, block_indices, block_size, axis):
1588 """
1589 Extracts selected blocks of a matrices *M* depending on parameters
1590 *block_indices* and *block_size*.
1592 Returns the array of extracted matrices *Mres* so that:
1593 M_res[...,ir,:] = M[(block_indices*block_size+ir), :]
1594 """
1595 assert block_indices.ndim == 1
1596 assert axis in [0, 1]
1598 r, c = M.shape
1599 if axis == 0:
1600 sh = [block_indices.shape[0], block_size, c]
1601 elif axis == 1:
1602 sh = [block_indices.shape[0], r, block_size]
1604 dt = M.dtype
1605 M_res = np.empty(sh, dtype=dt)
1606 if axis == 0:
1607 for ir in range(block_size):
1608 M_res[:, ir, :] = M[(block_indices*block_size+ir), :]
1609 elif axis == 1:
1610 for ic in range(block_size):
1611 M_res[:, :, ic] = M[:, (block_indices*block_size+ic)]
1613 return M_res