# Coverage for C:\src\imod-python\imod\mf6\boundary_condition.py: 98% (217 statements)

import abc
import pathlib
import warnings
from copy import copy, deepcopy
from typing import Mapping, Optional, Union

import numpy as np
import xarray as xr
import xugrid as xu

from imod.mf6.auxiliary_variables import (
    expand_transient_auxiliary_variables,
    get_variable_names,
)
from imod.mf6.package import Package
from imod.mf6.utilities.package import get_repeat_stress
from imod.mf6.write_context import WriteContext
from imod.typing.grid import GridDataArray


def _dis_recarr(arrdict, layer, notnull):
    # Define the numpy structured array dtype
    index_spec = [("layer", np.int32), ("row", np.int32), ("column", np.int32)]
    field_spec = [(key, np.float64) for key in arrdict]
    sparse_dtype = np.dtype(index_spec + field_spec)
    # Initialize the structured array
    nrow = notnull.sum()
    recarr = np.empty(nrow, dtype=sparse_dtype)
    # Fill in the indices
    if notnull.ndim == 2:
        recarr["row"], recarr["column"] = (np.argwhere(notnull) + 1).transpose()
        recarr["layer"] = layer
    else:
        ilayer, irow, icolumn = np.argwhere(notnull).transpose()
        recarr["row"] = irow + 1
        recarr["column"] = icolumn + 1
        recarr["layer"] = layer[ilayer]
    return recarr
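
# Illustrative sketch (hypothetical inputs): for a 2 x 2 mask with a single
# valid cell at (row 0, column 1) on layer 1,
#
#     notnull = np.array([[False, True], [False, False]])
#     recarr = _dis_recarr({"head": head}, layer=np.array([1]), notnull=notnull)
#
# yields one record with layer=1, row=1, column=2 (MODFLOW 6 indices are
# 1-based). Only the index fields are filled here; the "head" values are
# filled in afterwards by _to_struct_array.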


def _disv_recarr(arrdict, layer, notnull):
    # Define the numpy structured array dtype
    index_spec = [("layer", np.int32), ("cell2d", np.int32)]
    field_spec = [(key, np.float64) for key in arrdict]
    sparse_dtype = np.dtype(index_spec + field_spec)
    # Initialize the structured array
    nrow = notnull.sum()
    recarr = np.empty(nrow, dtype=sparse_dtype)
    # Fill in the indices
    if notnull.ndim == 1 and layer.size == 1:
        recarr["cell2d"] = (np.argwhere(notnull) + 1).transpose()
        recarr["layer"] = layer
    else:
        ilayer, icell2d = np.argwhere(notnull).transpose()
        recarr["cell2d"] = icell2d + 1
        recarr["layer"] = layer[ilayer]
    return recarr
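
# The DISV variant addresses cells by (layer, cell2d) rather than
# (layer, row, column). In the same spirit as the sketch above, a 1D mask
# np.array([False, True, False]) with layer np.array([1]) would yield a
# single record with layer=1, cell2d=2.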


class BoundaryCondition(Package, abc.ABC):
    """
    BoundaryCondition is used to share methods for specific stress packages
    with a time component.

    It is not meant to be used directly; it should only be inherited from to
    implement new packages.

    This class only supports `list input
    <https://water.usgs.gov/water-resources/software/MODFLOW-6/mf6io_6.0.4.pdf#page=19>`_,
    not the array input which is used in :class:`Package`.
    """

    def __init__(self, allargs: Mapping[str, GridDataArray | float | int | bool | str]):
        super().__init__(allargs)
        if "concentration" in allargs.keys() and allargs["concentration"] is None:
            # Remove variables in-place
            del self.dataset["concentration"]
            del self.dataset["concentration_boundary_type"]
        else:
            expand_transient_auxiliary_variables(self)

    def set_repeat_stress(self, times: dict[np.datetime64, np.datetime64]) -> None:
        """
        Set repeat stresses: re-use data of earlier periods.

        Parameters
        ----------
        times: Dict of datetime-like to datetime-like.
            The data of the value datetime is used for the key datetime.
        """
        warnings.warn(
            f"""{self.__class__.__name__}.set_repeat_stress(...) is deprecated.
            In the future, add repeat stresses as constructor parameters. An
            object containing them can be created using 'get_repeat_stress', as
            follows:

            from imod.mf6.utilities.package import get_repeat_stress

            repeat_stress = get_repeat_stress(repeat_periods)  # args previously provided to River.set_repeat_stress
            riv = imod.mf6.River(..., repeat_stress=repeat_stress)

            Note that the location of get_repeat_stress (imod.mf6.utilities.package)
            may change in the future.
            """,
            DeprecationWarning,
        )

        self.dataset["repeat_stress"] = get_repeat_stress(times)
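
    # Illustrative sketch (hypothetical dates): to re-use the data of
    # 2000-01-01 for the stress periods starting at 2001-01-01 and 2002-01-01:
    #
    #     times = {
    #         np.datetime64("2001-01-01"): np.datetime64("2000-01-01"),
    #         np.datetime64("2002-01-01"): np.datetime64("2000-01-01"),
    #     }
    #     riv.set_repeat_stress(times)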

    def _max_active_n(self):
        """
        Determine the maximum number of active cells in any stress period.
        """
        da = self.dataset[self.get_period_varnames()[0]]
        if "time" in da.coords:
            nmax = int(da.groupby("time").count(xr.ALL_DIMS).max())
        else:
            nmax = int(da.count())
        return nmax
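
    # Illustrative example (hypothetical counts): if the first period variable
    # holds 10 non-null cells in one stress period and 25 in another,
    # _max_active_n returns 25, which render() passes on as "maxbound".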

    def _write_binaryfile(self, outpath, struct_array):
        # Open in binary mode: tofile writes raw bytes
        with open(outpath, "wb") as f:
            struct_array.tofile(f)

    def _write_textfile(self, outpath, struct_array):
        fields = struct_array.dtype.fields
        fmt = [self._number_format(field[0]) for field in fields.values()]
        header = " ".join(list(fields.keys()))
        with open(outpath, "w") as f:
            np.savetxt(fname=f, X=struct_array, fmt=fmt, header=header)

    def _write_datafile(self, outpath, ds, binary):
        """
        Writes a MODFLOW 6 data file (binary or text).
        """
        layer = ds["layer"].values if "layer" in ds.coords else None
        arrdict = self._ds_to_arrdict(ds)
        struct_array = self._to_struct_array(arrdict, layer)
        outpath.parent.mkdir(exist_ok=True, parents=True)
        if binary:
            self._write_binaryfile(outpath, struct_array)
        else:
            self._write_textfile(outpath, struct_array)

    def _ds_to_arrdict(self, ds):
        for datavar in ds.data_vars:
            if ds[datavar].shape == ():
                raise ValueError(
                    f"{datavar} in {self._pkg_id} package cannot be a scalar"
                )

        arrdict = {}
        for datavar in ds.data_vars:
            arrdict[datavar] = ds[datavar].values

        return arrdict

    def _to_struct_array(self, arrdict, layer):
        """Convert from dense arrays to list based input"""
        # TODO stream the data per stress period
        # TODO add pkgcheck that period table aligns
        # Get the number of valid values
        if layer is None:
            raise ValueError("Layer should be provided")

        data = next(iter(arrdict.values()))
        notnull = ~np.isnan(data)

        if isinstance(self.dataset, xr.Dataset):
            recarr = _dis_recarr(arrdict, layer, notnull)
        elif isinstance(self.dataset, xu.UgridDataset):
            recarr = _disv_recarr(arrdict, layer, notnull)
        else:
            raise TypeError(
                "self.dataset should be xarray.Dataset or xugrid.UgridDataset,"
                f" is {type(self.dataset)} instead"
            )
        # Fill in the data
        for key, arr in arrdict.items():
            values = arr[notnull].astype(np.float64)
            recarr[key] = values

        return recarr
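
    # Illustrative sketch (hypothetical variables): on a structured grid,
    # arrdict = {"stage": stage, "conductance": cond} produces a record array
    # with columns (layer, row, column, stage, conductance), one record per
    # cell where the first variable is not NaN.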

    def _period_paths(self, directory, pkgname, globaltimes, bin_ds, binary):
        directory = pathlib.Path(directory) / pkgname

        if binary:
            ext = "bin"
        else:
            ext = "dat"

        periods = {}
        if "time" in bin_ds:  # at least one variable in bin_ds has time
            package_times = bin_ds.coords["time"].values
            starts = np.searchsorted(globaltimes, package_times) + 1
            for i, start in enumerate(starts):
                path = directory / f"{self._pkg_id}-{i}.{ext}"
                periods[start] = path.as_posix()

            repeat_stress = self.dataset.get("repeat_stress")
            if repeat_stress is not None and repeat_stress.values[()] is not None:
                keys = repeat_stress.isel(repeat_items=0).values
                values = repeat_stress.isel(repeat_items=1).values
                repeat_starts = np.searchsorted(globaltimes, keys) + 1
                values_index = np.searchsorted(globaltimes, values) + 1
                for i, start in zip(values_index, repeat_starts):
                    periods[start] = periods[i]
                # Now make sure the periods are sorted by key.
                periods = dict(sorted(periods.items()))
        else:
            path = directory / f"{self._pkg_id}.{ext}"
            periods[1] = path.as_posix()

        return periods
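
    # Illustrative sketch (hypothetical paths): for package data defined at
    # the first and third global times, this returns something like
    #
    #     {1: "riv/riv-0.bin", 3: "riv/riv-1.bin"}
    #
    # a mapping of 1-based stress period number to data file path. Repeat
    # stresses map later period numbers onto the paths of earlier periods.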

    def _get_options(
        self, predefined_options: dict, not_options: Optional[list] = None
    ):
        options = copy(predefined_options)

        if not_options is None:
            not_options = self.get_period_varnames()

        for varname in self.dataset.data_vars.keys():  # pylint:disable=no-member
            if varname in not_options:
                continue
            v = self.dataset[varname].values[()]
            if self._valid(v):  # skip None and False
                options[varname] = v
        return options

    def _get_bin_ds(self):
        """
        Get binary dataset data for stress periods; this data will be written
        to datafiles. This method can be overridden to do some extra operations
        on this dataset before writing.
        """
        return self[self.get_period_varnames()]

    def render(self, directory, pkgname, globaltimes, binary):
        """Render fills in the template only; it doesn't write binary data."""
        d = {"binary": binary}
        bin_ds = self._get_bin_ds()
        d["periods"] = self._period_paths(
            directory, pkgname, globaltimes, bin_ds, binary
        )
        # construct the rest (dict for render)
        d = self._get_options(d)
        d["maxbound"] = self._max_active_n()

        if (hasattr(self, "_auxiliary_data")) and (names := get_variable_names(self)):
            d["auxiliary"] = names

        return self._template.render(d)
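
    # Illustrative sketch (assuming MF6 list-input conventions; the exact
    # template output is not shown here): the rendered period block points
    # MF6 at the external files written by _write_perioddata, e.g.
    #
    #     begin period 1
    #       open/close riv/riv-0.bin (binary)
    #     end period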

    def _write_perioddata(self, directory, pkgname, binary):
        if len(self.get_period_varnames()) == 0:
            return
        bin_ds = self._get_bin_ds()

        if binary:
            ext = "bin"
        else:
            ext = "dat"

        if "time" in bin_ds:  # at least one variable in bin_ds has time
            for i in range(len(self.dataset.time)):
                path = directory / pkgname / f"{self._pkg_id}-{i}.{ext}"
                self._write_datafile(
                    path, bin_ds.isel(time=i), binary=binary
                )  # one timestep
        else:
            path = directory / pkgname / f"{self._pkg_id}.{ext}"
            self._write_datafile(path, bin_ds, binary=binary)

    def write(
        self,
        pkgname: str,
        globaltimes: Union[list[np.datetime64], np.ndarray],
        write_context: WriteContext,
    ):
        """
        Writes the blockfile and binary data.

        The directory is the model name.
        """
        super().write(pkgname, globaltimes, write_context)
        directory = write_context.write_directory

        self._write_perioddata(
            directory=directory,
            pkgname=pkgname,
            binary=write_context.use_binary,
        )

    def get_period_varnames(self):
        result = []
        if hasattr(self, "_period_data"):
            result.extend(self._period_data)
        if hasattr(self, "_auxiliary_data"):
            result.extend(get_variable_names(self))

        return result


class AdvancedBoundaryCondition(BoundaryCondition, abc.ABC):
    """
    Class dedicated to advanced boundary conditions, since MF6 does not
    support binary files for advanced boundary conditions.

    The advanced boundary condition packages are: "uzf", "lak", "maw", "sfr".
    """

    def _get_field_spec_from_dtype(self, recarr):
        """
        From https://stackoverflow.com/questions/21777125/how-to-output-dtype-to-a-list-or-dict
        """
        return [
            (x, y[0])
            for x, y in sorted(recarr.dtype.fields.items(), key=lambda k: k[1])
        ]

    def _write_file(self, outpath, sparse_data):
        """
        Write to a text file, which is necessary for advanced stress packages.
        """
        fields = sparse_data.dtype.fields
        fmt = [self._number_format(field[0]) for field in fields.values()]
        header = " ".join(list(fields.keys()))
        np.savetxt(fname=outpath, X=sparse_data, fmt=fmt, header=header)

    @abc.abstractmethod
    def _package_data_to_sparse(self):
        """
        Get package data; override with a method specific to the advanced
        boundary condition at hand.
        """
        return

    def write_packagedata(self, directory, pkgname, binary):
        outpath = directory / pkgname / f"{self._pkg_id}-pkgdata.dat"
        outpath.parent.mkdir(exist_ok=True, parents=True)
        package_data = self._package_data_to_sparse()
        self._write_file(outpath, package_data)

    def write(
        self,
        pkgname: str,
        globaltimes: Union[list[np.datetime64], np.ndarray],
        write_context: WriteContext,
    ):
        boundary_condition_write_context = deepcopy(write_context)
        boundary_condition_write_context.use_binary = False

        self.fill_stress_perioddata()
        super().write(pkgname, globaltimes, boundary_condition_write_context)

        directory = boundary_condition_write_context.write_directory
        self.write_packagedata(directory, pkgname, binary=False)

    @abc.abstractmethod
    def fill_stress_perioddata(self):
        raise NotImplementedError


class DisStructuredBoundaryCondition(BoundaryCondition):
    def _to_struct_array(self, arrdict, layer):
        spec = []
        for key in arrdict:
            if key in ["layer", "row", "column"]:
                spec.append((key, np.int32))
            else:
                spec.append((key, np.float64))

        sparse_dtype = np.dtype(spec)
        nrow = next(iter(arrdict.values())).size
        recarr = np.empty(nrow, dtype=sparse_dtype)
        for key, arr in arrdict.items():
            recarr[key] = arr
        return recarr


class DisVerticesBoundaryCondition(BoundaryCondition):
    def _to_struct_array(self, arrdict, layer):
        spec = []
        for key in arrdict:
            if key in ["layer", "cell2d"]:
                spec.append((key, np.int32))
            else:
                spec.append((key, np.float64))

        sparse_dtype = np.dtype(spec)
        nrow = next(iter(arrdict.values())).size
        recarr = np.empty(nrow, dtype=sparse_dtype)
        for key, arr in arrdict.items():
            recarr[key] = arr
        return recarr
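

# Note on the two subclasses above: unlike BoundaryCondition._to_struct_array,
# which derives (layer, row, column) or (layer, cell2d) indices from the
# positions of non-NaN cells in dense grids, these variants expect the index
# columns to be supplied directly as data variables in arrdict, and they copy
# all values over verbatim (no NaN filtering).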