Coverage for kwave/kWaveSimulation_helper/create_storage_variables.py: 7%
206 statements
« prev ^ index » next coverage.py v6.5.0, created at 2022-10-24 11:52 -0700
« prev ^ index » next coverage.py v6.5.0, created at 2022-10-24 11:52 -0700
1from numpy.fft import ifftshift
3from kwave import SimulationOptions, kWaveGrid
4from kwave.data import Array
5from kwave.utils import dotdict
6import numpy as np
8# Note from Farid: This function/file is very suspicios. I'm pretty sure that the implementation is not correct.
9# Full test-coverage is required for bug-fixes!
def create_storage_variables(
        kgrid: kWaveGrid, sensor, opt: SimulationOptions,
        values: dotdict, flags: dotdict
):
    """Prepare data masks and pre-allocate the storage used to record sensor data.

    Args:
        kgrid: simulation grid (``dim``, ``Nt``, ``k``, ``k_vec``, spacing are read).
        sensor: sensor definition (``mask``, ``record_start_index``, ... are read).
        opt: simulation options (interpolation mode, PML sizes, streaming, ...).
        values: dotdict of precomputed inputs (``sensor_x``, ``sensor_mask_index``,
            ``record``, transducer flags, ...).
        flags: dotdict of simulation flags; mutated in place by the helpers below.

    Returns:
        The (mutated) ``flags`` dotdict.

    NOTE(review): several results computed here (``sensor_data``,
    ``num_recorded_time_points``, ``stream_data_index``, ``record``) are
    discarded rather than returned -- confirm against the MATLAB
    implementation; see the module-level warning above.
    """
    # =========================================================================
    # PREPARE DATA MASKS AND STORAGE VARIABLES
    # =========================================================================

    # NOTE(review): record stays None, yet create_shift_operators /
    # create_normalized_wavenumber_vectors / compute_triangulation_points
    # assign attributes on it -- this raises AttributeError whenever one of
    # the corresponding record flags is set. TODO confirm intended source
    # (presumably self.record in the caller).
    record = None  # self.record # ???

    # may flip flags.binary_sensor_mask / flags.reorder_data in place
    set_flags(flags, values.sensor_x, sensor.mask, opt.cartesian_interp)

    # preallocate output variables
    if flags.time_rev:
        # in time reversal mode no recording storage is needed
        return flags

    num_sensor_points = get_num_of_sensor_points(flags.blank_sensor, flags.binary_sensor_mask, kgrid.k,
                                                 values.sensor_mask_index, values.sensor_x)

    # NOTE(review): stream_data_index is computed but never used afterwards
    num_recorded_time_points, stream_data_index = \
        get_num_recorded_time_points(kgrid.dim, kgrid.Nt, opt.stream_to_disk, sensor.record_start_index)

    create_shift_operators(record, values.record, kgrid, opt.use_sg)

    create_normalized_wavenumber_vectors(record, kgrid, flags.record_u_split_field)

    # keep only the PML sizes relevant for this dimensionality
    pml_size = [opt.pml_x_size, opt.pml_y_size, opt.pml_z_size]
    pml_size = Array(pml_size[:kgrid.dim])

    all_vars_size = calculate_all_vars_size(kgrid, opt.pml_inside, pml_size)

    # NOTE(review): sensor_data is only threaded into create_transducer_buffer
    # and then dropped -- verify it should not be returned to the caller
    sensor_data = create_sensor_variables(values.record, kgrid, num_sensor_points, num_recorded_time_points, all_vars_size)

    create_transducer_buffer(values.transducer_sensor, values.transducer_receive_elevation_focus, sensor,
                             num_sensor_points, num_recorded_time_points, values.sensor_data_buffer_size,
                             flags, sensor_data)

    compute_triangulation_points(flags, kgrid, record)

    return flags
def set_flags(flags, sensor_x, sensor_mask, is_cartesian_interp):
    """Switch a Cartesian sensor mask with nearest-neighbour interpolation
    over to binary-mask extraction, updating ``flags`` in place.

    Args:
        flags: dotdict of simulation flags (mutated in place).
        sensor_x: Cartesian x-coordinates of the requested sensor points.
        sensor_mask: binary sensor mask built during input checking.
        is_cartesian_interp: interpolation setting; only 'nearest' triggers the switch.
    """
    # nothing to do unless a Cartesian mask is paired with nearest-neighbour
    # interpolation
    if flags.binary_sensor_mask or is_cartesian_interp != 'nearest':
        return

    # extract the data using the binary sensor mask created in inputChecking,
    # but switch on the Cartesian reorder flag so the final data is returned
    # in the correct order (not applicable in time reversal mode)
    flags.binary_sensor_mask = True
    if not flags.time_rev:
        flags.reorder_data = True

    # duplicate Cartesian points collapse onto a single grid node during the
    # Cartesian -> binary conversion; report how many were dropped
    num_discarded_points = len(sensor_x) - sensor_mask.sum()
    if num_discarded_points != 0:
        print(f' WARNING: {num_discarded_points} duplicated sensor points discarded (nearest neighbour interpolation)')
def get_num_of_sensor_points(is_blank_sensor, is_binary_sensor_mask, kgrid_k, sensor_mask_index, sensor_x):
    """Return how many sensor points will be recorded.

    Args:
        is_blank_sensor: True when every grid point acts as a sensor.
        is_binary_sensor_mask: True when a binary mask selects the points.
        kgrid_k: wavenumber matrix; only its ``size`` is read (blank case).
        sensor_mask_index: linear indices of active binary-mask points.
        sensor_x: Cartesian x-coordinates of the sensor points.

    Returns:
        Number of sensor points for the active mask type.
    """
    if is_blank_sensor:
        # blank sensor: every grid point is recorded
        return kgrid_k.size
    if is_binary_sensor_mask:
        # binary mask: one point per active mask index
        return len(sensor_mask_index)
    # Cartesian mask: one point per requested coordinate
    return len(sensor_x)
def get_num_recorded_time_points(kgrid_dim, Nt, stream_to_disk, record_start_index):
    """Calculate how many time points are stored in the sensor_data matrix.

    - If streaming 3D data to disk, the matrix is sized by the
      stream_to_disk chunk length instead of the full simulation.
    - Otherwise the size is reduced by the user's record_start_index.

    Args:
        kgrid_dim: number of spatial dimensions (streaming only applies in 3D).
        Nt: total number of simulation time steps.
        stream_to_disk: chunk length for disk streaming (falsy disables it).
        record_start_index: first time step to record (1-based, inclusive).

    Returns:
        Tuple of (num_recorded_time_points, stream_data_index), where
        stream_data_index is 1 when streaming and None otherwise.
    """
    if kgrid_dim == 3 and stream_to_disk:
        # streaming: store stream_to_disk steps at a time, file index starts at 1
        return stream_to_disk, 1
    # +1 because recording is inclusive of the start index time step
    return Nt - record_start_index + 1, None
def create_shift_operators(record: dotdict, record_old: dotdict, kgrid: kWaveGrid, is_use_sg):
    """Create the shift operators used to move the particle velocity
    components back onto the non-staggered grid (used for both binary
    and Cartesian sensor masks).

    Args:
        record: dotdict that receives the x/y/z_shift_neg operators (mutated).
        record_old: dotdict of record flags; operators are only built when a
            velocity-derived quantity (u_non_staggered, u_split_field, I, I_avg)
            is requested.
        kgrid: simulation grid supplying k_vec and grid spacing.
        is_use_sg: when falsy, staggered grids are disabled and every shift
            operator degenerates to the scalar identity 1.
    """
    wants_shift = (record_old.u_non_staggered or record_old.u_split_field
                   or record_old.I or record_old.I_avg)
    if not wants_shift:
        return

    if not is_use_sg:
        # no staggered grids in use: shifting is the identity for each axis
        record.x_shift_neg = 1
        if kgrid.dim >= 2:
            record.y_shift_neg = 1
        if kgrid.dim == 3:
            record.z_shift_neg = 1
        return

    # spectral half-grid-point shifts, one per axis present in the simulation;
    # y is transposed and z permuted so each broadcasts along its own axis
    record.x_shift_neg = ifftshift(np.exp(-1j * kgrid.k_vec.x * kgrid.dx / 2))
    if kgrid.dim >= 2:
        record.y_shift_neg = ifftshift(np.exp(-1j * kgrid.k_vec.y * kgrid.dy / 2)).T
    if kgrid.dim == 3:
        record.z_shift_neg = np.transpose(ifftshift(np.exp(-1j * kgrid.k_vec.z * kgrid.dz / 2)), [1, 2, 0])
def create_normalized_wavenumber_vectors(record: dotdict, kgrid: kWaveGrid, is_record_u_split_field):
    """Create normalised wavenumber vectors for the k-space dyadics that
    split the particle velocity into compressional and shear components.

    Args:
        record: dotdict that receives kx_norm / ky_norm (and kz_norm in 3D);
            mutated in place.
        kgrid: simulation grid supplying kx, ky, kz and the magnitude k.
        is_record_u_split_field: when falsy, nothing is computed.
    """
    if not is_record_u_split_field:
        return

    def _normalised(component):
        # divide by |k|, zero the DC entry where |k| == 0, then shift to
        # FFT ordering
        norm = component / kgrid.k
        norm[kgrid.k == 0] = 0
        return ifftshift(norm)

    record.kx_norm = _normalised(kgrid.kx)
    record.ky_norm = _normalised(kgrid.ky)

    # z component only exists in 3D simulations
    if kgrid.dim == 3:
        record.kz_norm = _normalised(kgrid.kz)
def create_sensor_variables(record_old: dotdict, kgrid, num_sensor_points, num_recorded_time_points, all_vars_size):
    """Allocate zero-filled storage for every requested sensor output.

    All variables are saved as fields of a dotdict called ``sensor_data``.
    If only p is being stored (no user input for sensor.record), then
    sensor_data.p is copied to sensor_data at the end of the simulation.

    Args:
        record_old: dotdict of boolean record flags (p, u, *_max, *_rms, ...).
        kgrid: simulation grid; only ``dim`` is read here.
        num_sensor_points: rows of every per-point array.
        num_recorded_time_points: columns of every time-history array.
        all_vars_size: shape of the whole-grid *_all arrays.

    Returns:
        sensor_data dotdict holding the pre-allocated arrays.
    """
    sensor_data = dotdict()

    time_history_shape = [num_sensor_points, num_recorded_time_points]
    per_point_shape = [num_sensor_points, 1]

    # velocity component suffixes present for this dimensionality
    axes = ['x', 'y', 'z'][:kgrid.dim]

    def _alloc_per_axis(name_template, shape):
        # one zeroed array per active velocity component, e.g. ux_max, uy_max
        for axis in axes:
            setattr(sensor_data, name_template.format(axis), np.zeros(shape))

    # time history of the acoustic pressure
    if record_old.p or record_old.I or record_old.I_avg:
        sensor_data.p = np.zeros(time_history_shape)

    # per-sensor-point pressure statistics
    if record_old.p_max:
        sensor_data.p_max = np.zeros(per_point_shape)
    if record_old.p_min:
        sensor_data.p_min = np.zeros(per_point_shape)
    if record_old.p_rms:
        sensor_data.p_rms = np.zeros(per_point_shape)

    # pressure extrema over all grid points
    if record_old.p_max_all:
        sensor_data.p_max_all = np.zeros(all_vars_size)
    if record_old.p_min_all:
        sensor_data.p_min_all = np.zeros(all_vars_size)

    # time history of the acoustic particle velocity
    if record_old.u:
        _alloc_per_axis('u{}', time_history_shape)

    # per-sensor-point velocity statistics
    if record_old.u_max:
        _alloc_per_axis('u{}_max', per_point_shape)
    if record_old.u_min:
        _alloc_per_axis('u{}_min', per_point_shape)
    if record_old.u_rms:
        _alloc_per_axis('u{}_rms', per_point_shape)

    # velocity extrema over all grid points
    if record_old.u_max_all:
        _alloc_per_axis('u{}_max_all', all_vars_size)
    if record_old.u_min_all:
        _alloc_per_axis('u{}_min_all', all_vars_size)

    # velocity time history on the non-staggered grid points
    if record_old.u_non_staggered or record_old.I or record_old.I_avg:
        _alloc_per_axis('u{}_non_staggered', time_history_shape)

    # velocity time history split into compressional and shear components
    # (only meaningful in 2D and 3D)
    if record_old.u_split_field and kgrid.dim >= 2:
        _alloc_per_axis('u{}_split_p', time_history_shape)
        _alloc_per_axis('u{}_split_s', time_history_shape)

    return sensor_data
def create_transducer_buffer(is_transducer_sensor, is_transducer_receive_elevation_focus, sensor,
                             num_sensor_points, num_recorded_time_points, sensor_data_buffer_size, flags, sensor_data):
    """Allocate recording storage when a kWaveTransducer object acts as the sensor.

    Args:
        is_transducer_sensor: True when the sensor is a kWaveTransducer object.
        is_transducer_receive_elevation_focus: True when elevation focusing is used.
        sensor: transducer object (elevation_beamforming_delays and
            number_active_elements are read).
        num_sensor_points: number of sensor grid points.
        num_recorded_time_points: number of stored time steps.
        sensor_data_buffer_size: caller-supplied buffer length; overwritten below.
        flags: simulation flags dotdict; transducer_receive_elevation_focus may
            be cleared in place.
        sensor_data: dotdict that receives the ``transducer`` array (mutated).

    NOTE(review): ``sensor_data_buffer`` is a local that is never stored on
    ``sensor_data`` or returned, so the elevation-focus buffer is discarded --
    likely a bug; compare with the MATLAB implementation. The ``del`` below
    only removes the local binding of the parameter, it has no effect on the
    caller.
    """
    # object of the kWaveTransducer class is being used as a sensor
    if is_transducer_sensor:
        if is_transducer_receive_elevation_focus:

            # if there is elevation focusing, a buffer is
            # needed to store a short time history at each
            # sensor point before averaging
            # ???
            sensor_data_buffer_size = sensor.elevation_beamforming_delays.max() + 1
            if sensor_data_buffer_size > 1:
                sensor_data_buffer = np.zeros([num_sensor_points, sensor_data_buffer_size])
            else:
                # all beamforming delays are zero: no buffering needed
                del sensor_data_buffer_size
                flags.transducer_receive_elevation_focus = False

        # the grid points can be summed on the fly and so the
        # sensor is the size of the number of active elements
        sensor_data.transducer = np.zeros([int(sensor.number_active_elements), num_recorded_time_points])
def compute_triangulation_points(flags, kgrid, record):
    """Precompute triangulation points for a Cartesian sensor mask with
    linear interpolation (tri and bc are the Delaunay triangulation and
    Barycentric coordinates).

    Args:
        flags: simulation flags; only binary_sensor_mask is read.
        kgrid: simulation grid supplying dim and the coordinate matrices.
        record: dotdict that receives grid_x / tri / bc (mutated in place).

    NOTE(review): the 2D/3D branches reference ``sensor_x``, ``sensor_y``,
    ``sensor_z``, ``gridDataFast2D`` and ``gridDataFast3D``, none of which
    are defined or imported in this module -- reaching those branches raises
    NameError. Also, the caller passes ``record=None``, so even the 1D branch
    would fail. This function appears to be an unfinished port; see the
    module-level warning.
    """
    if not flags.binary_sensor_mask:
        if kgrid.dim == 1:

            # assign pseudonym for Cartesian grid points in 1D (this
            # is later used for data casting)
            record.grid_x = kgrid.x_vec

        else:

            # update command line status
            print('  calculating Delaunay triangulation...')

            # compute triangulation
            if kgrid.dim == 2:
                if flags.axisymmetric:
                    # axisymmetric grids are triangulated in shifted y-coordinates
                    record.tri, record.bc = gridDataFast2D(kgrid.x, kgrid.y - kgrid.y_vec.min(), sensor_x, sensor_y)
                else:
                    record.tri, record.bc = gridDataFast2D(kgrid.x, kgrid.y, sensor_x, sensor_y)
            elif kgrid.dim == 3:
                record.tri, record.bc = gridDataFast3D(kgrid.x, kgrid.y, kgrid.z, sensor_x, sensor_y, sensor_z)
def calculate_all_vars_size(kgrid, is_pml_inside, pml_size):
    """Calculate the size of the _all and _final output variables.

    If the PML lies outside the grid, the outputs match the user-supplied
    grid size rather than the expanded grid.

    Args:
        kgrid: simulation grid (dim, Nx/Ny/Nz and k.shape are read).
        is_pml_inside: True when the PML is contained within the grid.
        pml_size: per-axis PML thickness with x/y/z attributes.

    Returns:
        Shape of the whole-grid output variables.

    Raises:
        NotImplementedError: for unsupported dimensionalities with an
            outside PML.
    """
    if is_pml_inside:
        # PML inside: outputs span the full simulation grid
        return kgrid.k.shape

    # PML outside: strip 2 * pml thickness from each axis
    if kgrid.dim == 1:
        return [kgrid.Nx - 2 * pml_size.x, 1]
    if kgrid.dim == 2:
        return [kgrid.Nx - 2 * pml_size.x, kgrid.Ny - 2 * pml_size.y]
    if kgrid.dim == 3:
        return [kgrid.Nx - 2 * pml_size.x, kgrid.Ny - 2 * pml_size.y, kgrid.Nz - 2 * pml_size.z]
    raise NotImplementedError