from __future__ import annotations

import collections
import pathlib
import subprocess
import warnings
from copy import deepcopy
from pathlib import Path
from typing import Any, Callable, DefaultDict, Iterable, Optional, Union, cast

import cftime
import dask
import jinja2
import numpy as np
import tomli
import tomli_w
import xarray as xr
import xugrid as xu

import imod
import imod.logging
import imod.mf6.exchangebase
from imod.logging import standard_log_decorator
from imod.mf6.gwfgwf import GWFGWF
from imod.mf6.gwfgwt import GWFGWT
from imod.mf6.gwtgwt import GWTGWT
from imod.mf6.ims import Solution
from imod.mf6.interfaces.isimulation import ISimulation
from imod.mf6.model import Modflow6Model
from imod.mf6.model_gwf import GroundwaterFlowModel
from imod.mf6.model_gwt import GroundwaterTransportModel
from imod.mf6.multimodel.exchange_creator_structured import ExchangeCreator_Structured
from imod.mf6.multimodel.exchange_creator_unstructured import (
    ExchangeCreator_Unstructured,
)
from imod.mf6.multimodel.modelsplitter import create_partition_info, slice_model
from imod.mf6.out import open_cbc, open_conc, open_hds
from imod.mf6.package import Package
from imod.mf6.ssm import SourceSinkMixing
from imod.mf6.statusinfo import NestedStatusInfo
from imod.mf6.utilities.mask import _mask_all_models
from imod.mf6.utilities.regrid import _regrid_like
from imod.mf6.write_context import WriteContext
from imod.schemata import ValidationError
from imod.typing import GridDataArray, GridDataset
from imod.typing.grid import (
    concat,
    is_equal,
    is_unstructured,
    merge_partitions,
)
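
# Maps each output variable name to the function used to open it and to the
# model type that produces it; both mappings are used by
# Modflow6Simulation._open_output below.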
OUTPUT_FUNC_MAPPING: dict[str, Callable] = {
    "head": open_hds,
    "concentration": open_conc,
    "budget-flow": open_cbc,
    "budget-transport": open_cbc,
}

OUTPUT_MODEL_MAPPING: dict[
    str, type[GroundwaterFlowModel] | type[GroundwaterTransportModel]
] = {
    "head": GroundwaterFlowModel,
    "concentration": GroundwaterTransportModel,
    "budget-flow": GroundwaterFlowModel,
    "budget-transport": GroundwaterTransportModel,
}


def get_models(simulation: Modflow6Simulation) -> dict[str, Modflow6Model]:
    return {k: v for k, v in simulation.items() if isinstance(v, Modflow6Model)}


def get_packages(simulation: Modflow6Simulation) -> dict[str, Package]:
    return {
        pkg_name: pkg
        for pkg_name, pkg in simulation.items()
        if isinstance(pkg, Package)
    }


class Modflow6Simulation(collections.UserDict, ISimulation):
    def _initialize_template(self):
        loader = jinja2.PackageLoader("imod", "templates/mf6")
        env = jinja2.Environment(loader=loader, keep_trailing_newline=True)
        self._template = env.get_template("sim-nam.j2")

    def __init__(self, name):
        super().__init__()
        self.name = name
        self.directory = None
        self._initialize_template()

    def __setitem__(self, key, value):
        super().__setitem__(key, value)

    def update(self, *args, **kwargs):
        for k, v in dict(*args, **kwargs).items():
            self[k] = v

    def time_discretization(self, times):
        warnings.warn(
            f"{self.__class__.__name__}.time_discretization() is deprecated. "
            f"In the future call {self.__class__.__name__}.create_time_discretization().",
            DeprecationWarning,
        )
        self.create_time_discretization(additional_times=times)

    def create_time_discretization(self, additional_times, validate: bool = True):
        """
        Collect all unique times from model packages and the given
        `additional_times`. These unique times are used as stress periods in
        the model. All stress packages must have the same starting time. This
        function creates a TimeDiscretization object which is set to
        self["time_discretization"].

        The time discretization in imod-python works as follows:

        - The datetimes of all packages you send in are always respected
        - Subsequently, the input data you use is always included fully as well
        - All times are treated as starting times for the stress: a stress is
          always applied until the next specified date
        - For this reason, a final time is required to determine the length of
          the last stress period
        - Additional times can be provided to force shorter stress periods &
          more detailed output
        - Every stress has to be defined on the first stress period (this is a
          MODFLOW requirement)

        Or visually (every letter a date in the time axes):

        >>> recharge a - b - c - d - e - f
        >>> river    g - - - - h - - - - j
        >>> times    - - - - - - - - - - - i
        >>> model    a - b - c h d - e - f i

        with the stress periods defined between these dates. I.e. the model
        times are the set of all times you include in the model.

        Parameters
        ----------
        additional_times : str, datetime; or iterable of str, datetimes.
            Times to add to the time discretization. At least one single time
            should be given, which will be used as the ending time of the
            simulation.
        validate : bool, default value: True.
            Whether to validate the created TimeDiscretization package.

        Note
        ----
        To set the other parameters of the TimeDiscretization object, you have
        to set these on the object after calling this function.

        Example
        -------
        >>> simulation = imod.mf6.Modflow6Simulation("example")
        >>> simulation.create_time_discretization(additional_times=["2000-01-01", "2000-01-02"])
        >>> # Set number of timesteps
        >>> simulation["time_discretization"]["n_timesteps"] = 5
        """
        self.use_cftime = any(
            [
                model._use_cftime()
                for model in self.values()
                if isinstance(model, Modflow6Model)
            ]
        )

        times = [
            imod.util.time.to_datetime_internal(time, self.use_cftime)
            for time in additional_times
        ]
        for model in self.values():
            if isinstance(model, Modflow6Model):
                times.extend(model._yield_times())

        # np.unique also sorts
        times = np.unique(np.hstack(times))

        duration = imod.util.time.timestep_duration(times, self.use_cftime)
        # Generate time discretization, just rely on default arguments
        # Probably won't be used that much anyway?
        timestep_duration = xr.DataArray(
            duration, coords={"time": np.array(times)[:-1]}, dims=("time",)
        )
        self["time_discretization"] = imod.mf6.TimeDiscretization(
            timestep_duration=timestep_duration, validate=validate
        )

    def render(self, write_context: WriteContext):
        """Renders simulation namefile"""
        d: dict[str, Any] = {}
        models = []
        solutiongroups = []
        for key, value in self.items():
            if isinstance(value, Modflow6Model):
                model_name_file = pathlib.Path(
                    write_context.root_directory / pathlib.Path(f"{key}", f"{key}.nam")
                ).as_posix()
                models.append((value.model_id(), model_name_file, key))
            elif isinstance(value, Package):
                if value._pkg_id == "tdis":
                    d["tdis6"] = f"{key}.tdis"
                elif value._pkg_id == "ims":
                    slnnames = value["modelnames"].values
                    modeltypes = set()
                    for name in slnnames:
                        try:
                            modeltypes.add(type(self[name]))
                        except KeyError:
                            raise KeyError(f"model {name} of {key} not found")

                    if len(modeltypes) > 1:
                        raise ValueError(
                            "Only a single type of model allowed in a solution"
                        )
                    solutiongroups.append(("ims6", f"{key}.ims", slnnames))

        d["models"] = models
        if len(models) > 1:
            d["exchanges"] = self.get_exchange_relationships()

        d["solutiongroups"] = [solutiongroups]
        return self._template.render(d)

    @standard_log_decorator()
    def write(
        self,
        directory=".",
        binary=True,
        validate: bool = True,
        use_absolute_paths=False,
    ):
        """
        Write Modflow6 simulation, including assigned groundwater flow and
        transport models.

        Parameters
        ----------
        directory: str, pathlib.Path
            Directory to write Modflow 6 simulation to.
        binary: ({True, False}, optional)
            Whether to write time-dependent input for stress packages as binary
            files, which are smaller in size, or more human-readable text files.
        validate: ({True, False}, optional)
            Whether to validate the Modflow6 simulation, including models, at
            write. If True, erroneous model input will throw a
            ``ValidationError``.
        use_absolute_paths: ({True, False}, optional)
            True if all paths written to the mf6 input files should be
            absolute.
        """
        # create write context
        write_context = WriteContext(directory, binary, use_absolute_paths)
        if self.is_split():
            write_context.is_partitioned = True

        # Check models for required content
        for key, model in self.items():
            # skip timedis, exchanges
            if isinstance(model, Modflow6Model):
                model._model_checks(key)

        # Generate GWF-GWT exchanges
        if gwfgwt_exchanges := self._generate_gwfgwt_exchanges():
            self["gwtgwf_exchanges"] = gwfgwt_exchanges

        directory = pathlib.Path(directory)
        directory.mkdir(exist_ok=True, parents=True)

        # Write simulation namefile
        mfsim_content = self.render(write_context)
        mfsim_path = directory / "mfsim.nam"
        with open(mfsim_path, "w") as f:
            f.write(mfsim_content)

        # Write time discretization file
        self["time_discretization"].write(directory, "time_discretization")

        # Write individual models
        status_info = NestedStatusInfo("Simulation validation status")
        globaltimes = self["time_discretization"]["time"].values
        for key, value in self.items():
            model_write_context = write_context.copy_with_new_write_directory(
                write_context.simulation_directory
            )
            # skip timedis, exchanges
            if isinstance(value, Modflow6Model):
                status_info.add(
                    value.write(
                        modelname=key,
                        globaltimes=globaltimes,
                        validate=validate,
                        write_context=model_write_context,
                    )
                )
            elif isinstance(value, Package):
                if value._pkg_id == "ims":
                    ims_write_context = write_context.copy_with_new_write_directory(
                        write_context.simulation_directory
                    )
                    value.write(key, globaltimes, ims_write_context)
            elif isinstance(value, list):
                for exchange in value:
                    if isinstance(exchange, imod.mf6.exchangebase.ExchangeBase):
                        exchange.write(
                            exchange.package_name(), globaltimes, write_context
                        )

        if status_info.has_errors():
            raise ValidationError("\n" + status_info.to_string())

        self.directory = directory

    def run(self, mf6path: Union[str, Path] = "mf6") -> None:
        """
        Run Modflow 6 simulation. This method runs a subprocess calling
        ``mf6path``. This argument is set to ``mf6``, which means the Modflow 6
        executable is expected to be added to your PATH environment variable.
        :doc:`See this writeup on how to add Modflow 6 to your PATH on Windows </examples/mf6/index>`

        Note that the ``write`` method needs to be called before this method is
        called.

        Parameters
        ----------
        mf6path: Union[str, Path]
            Path to the Modflow 6 executable. Defaults to calling ``mf6``.

        Examples
        --------
        Make sure you write your model first

        >>> simulation.write(path/to/model)
        >>> simulation.run()
        """
        if self.directory is None:
            raise RuntimeError(f"Simulation {self.name} has not been written yet.")
        with imod.util.cd(self.directory):
            result = subprocess.run(mf6path, capture_output=True)
            if result.returncode != 0:
                raise RuntimeError(
                    f"Simulation {self.name}: {mf6path} failed to run with returncode "
                    f"{result.returncode}, and error message:\n\n{result.stdout.decode()}"
                )

    def open_head(
        self,
        dry_nan: bool = False,
        simulation_start_time: Optional[np.datetime64] = None,
        time_unit: Optional[str] = "d",
    ) -> GridDataArray:
        """
        Open heads of finished simulation, requires that the ``run`` method has
        been called.

        The data is lazily read per timestep and automatically converted into
        (dense) xr.DataArrays or xu.UgridDataArrays, for DIS and DISV
        respectively. The conversion is done via the information stored in the
        Binary Grid file (GRB).

        Parameters
        ----------
        dry_nan: bool, default value: False.
            Whether to convert dry values to NaN.
        simulation_start_time : Optional datetime
            The time and date corresponding to the beginning of the simulation.
            Use this to convert the time coordinates of the output array to
            calendar time/dates. time_unit must also be present if this
            argument is present.
        time_unit: Optional str
            The time unit MF6 is working in, in string representation.
            Only used if simulation_start_time was provided.
            Admissible values are:
            ns -> nanosecond
            ms -> millisecond
            s -> second
            m -> minute
            h -> hour
            d -> day
            w -> week
            Units "month" or "year" are not supported, as they do not
            represent unambiguous timedelta durations.

        Returns
        -------
        head: Union[xr.DataArray, xu.UgridDataArray]

        Examples
        --------
        Make sure you write and run your model first

        >>> simulation.write(path/to/model)
        >>> simulation.run()

        Then open heads:

        >>> head = simulation.open_head()
        """
        return self._open_output(
            "head",
            dry_nan=dry_nan,
            simulation_start_time=simulation_start_time,
            time_unit=time_unit,
        )

    def open_transport_budget(
        self,
        species_ls: Optional[list[str]] = None,
        simulation_start_time: Optional[np.datetime64] = None,
        time_unit: Optional[str] = "d",
    ) -> GridDataArray | GridDataset:
        """
        Open transport budgets of finished simulation, requires that the
        ``run`` method has been called.

        The data is lazily read per timestep and automatically converted into
        (dense) xr.DataArrays or xu.UgridDataArrays, for DIS and DISV
        respectively. The conversion is done via the information stored in the
        Binary Grid file (GRB).

        Parameters
        ----------
        species_ls: list of strings, default value: None.
            List of species names, which will be used to concatenate the
            concentrations along the ``"species"`` dimension, in case the
            simulation has multiple species and thus multiple transport models.
            If None, transport model names will be used as species names.

        Returns
        -------
        budget: Dict[str, xr.DataArray|xu.UgridDataArray]
            DataArray contains float64 data of the budgets, with dimensions
            ("time", "layer", "y", "x").
430 """
431 return self._open_output(
432 "budget-transport",
433 species_ls=species_ls,
434 simulation_start_time=simulation_start_time,
435 time_unit=time_unit,
436 merge_to_dataset=True,
437 flowja=False,
438 )

    def open_flow_budget(
        self,
        flowja: bool = False,
        simulation_start_time: Optional[np.datetime64] = None,
        time_unit: Optional[str] = "d",
    ) -> GridDataArray | GridDataset:
        """
        Open flow budgets of finished simulation, requires that the ``run``
        method has been called.

        The data is lazily read per timestep and automatically converted into
        (dense) xr.DataArrays or xu.UgridDataArrays, for DIS and DISV
        respectively. The conversion is done via the information stored in the
        Binary Grid file (GRB).

        The ``flowja`` argument controls whether the flow-ja-face array (if
        present) is returned in grid form or "as is". By default
        ``flowja=False`` and the array is returned in "grid form", meaning:

        * DIS: in right, front, and lower face flow. All flows are placed in
          the cell.
        * DISV: in horizontal and lower face flow. The horizontal flows are
          placed on the edges and the lower face flow is placed on the faces.

        When ``flowja=True``, the flow-ja-face array is returned as it is found
        in the CBC file, with a flow for every cell to cell connection.
        Additionally, a ``connectivity`` DataArray is returned describing for
        every cell (n) its connected cells (m).

        Parameters
        ----------
        flowja: bool, default value: False
            Whether to return the flow-ja-face values "as is" (``True``) or in
            a grid form (``False``).

        Returns
        -------
        budget: Dict[str, xr.DataArray|xu.UgridDataArray]
            DataArray contains float64 data of the budgets, with dimensions
            ("time", "layer", "y", "x").

        Examples
        --------
        Make sure you write and run your model first

        >>> simulation.write(path/to/model)
        >>> simulation.run()

        Then open budgets:

        >>> budget = simulation.open_flow_budget()

        Check the contents:

        >>> print(budget.keys())

        Get the drainage budget, compute a time mean for the first layer:

        >>> drn_budget = budget["drn"]
        >>> mean = drn_budget.sel(layer=1).mean("time")
        """
        return self._open_output(
            "budget-flow",
            flowja=flowja,
            simulation_start_time=simulation_start_time,
            time_unit=time_unit,
            merge_to_dataset=True,
        )

    def open_concentration(
        self,
        species_ls: Optional[list[str]] = None,
        dry_nan: bool = False,
        simulation_start_time: Optional[np.datetime64] = None,
        time_unit: Optional[str] = "d",
    ) -> GridDataArray:
        """
        Open concentration of finished simulation, requires that the ``run``
        method has been called.

        The data is lazily read per timestep and automatically converted into
        (dense) xr.DataArrays or xu.UgridDataArrays, for DIS and DISV
        respectively. The conversion is done via the information stored in the
        Binary Grid file (GRB).

        Parameters
        ----------
        species_ls: list of strings, default value: None.
            List of species names, which will be used to concatenate the
            concentrations along the ``"species"`` dimension, in case the
            simulation has multiple species and thus multiple transport models.
            If None, transport model names will be used as species names.
        dry_nan: bool, default value: False.
            Whether to convert dry values to NaN.

        Returns
        -------
        concentration: Union[xr.DataArray, xu.UgridDataArray]

        Examples
        --------
        Make sure you write and run your model first

        >>> simulation.write(path/to/model)
        >>> simulation.run()

        Then open concentrations:

        >>> concentration = simulation.open_concentration()
        """
        return self._open_output(
            "concentration",
            species_ls=species_ls,
            dry_nan=dry_nan,
            simulation_start_time=simulation_start_time,
            time_unit=time_unit,
        )

    def _open_output(self, output: str, **settings) -> GridDataArray | GridDataset:
        """
        Opens output of one or multiple models.

        Parameters
        ----------
        output: str
            Output variable name to open
        **settings:
            Extra settings that need to be passed through to the respective
            output function.
        """
        modeltype = OUTPUT_MODEL_MAPPING[output]
        modelnames = self.get_models_of_type(modeltype._model_id).keys()
        if len(modelnames) == 0:
            raise ValueError(
                f"Could not find any models of appropriate type for {output}, "
                f"make sure a model of type {modeltype} is assigned to simulation."
            )

        if output in ["head", "budget-flow"]:
            return self._open_single_output(modelnames, output, **settings)
        elif output in ["concentration", "budget-transport"]:
            return self._concat_species(output, **settings)
        else:
            raise RuntimeError(
                f"Unexpected error when opening {output} for {modelnames}"
            )

    def _open_single_output(
        self, modelnames: list[str], output: str, **settings
    ) -> GridDataArray | GridDataset:
        """
        Open single output, e.g. concentration of a single species, or heads.
        This can be output of partitioned models that need to be merged.
        """
        if len(modelnames) == 0:
            modeltype = OUTPUT_MODEL_MAPPING[output]
            raise ValueError(
                f"Could not find any models of appropriate type for {output}, "
                f"make sure a model of type {modeltype} is assigned to simulation."
            )
        elif len(modelnames) == 1:
            modelname = next(iter(modelnames))
            return self._open_single_output_single_model(modelname, output, **settings)
        elif self.is_split():
            if "budget" in output:
                return self._merge_budgets(modelnames, output, **settings)
            else:
                return self._merge_states(modelnames, output, **settings)
        raise ValueError(
            f"Unable to open {output}: multiple models present, "
            "but simulation is not split."
        )

    def _merge_states(
        self, modelnames: list[str], output: str, **settings
    ) -> GridDataArray:
        state_partitions = []
        for modelname in modelnames:
            state_partitions.append(
                self._open_single_output_single_model(modelname, output, **settings)
            )
        return merge_partitions(state_partitions)

    def _merge_and_assign_exchange_budgets(self, cbc: GridDataset) -> GridDataset:
        """
        Merge and assign exchange budgets to cell by cell budgets:
        cbc[["gwf-gwf_1", "gwf-gwf_3"]] to cbc["gwf-gwf"]
        """
        exchange_names = [
            key
            for key in cast(Iterable[str], cbc.keys())
            if (("gwf-gwf" in key) or ("gwt-gwt" in key))
        ]
        exchange_budgets = cbc[exchange_names].to_array().sum(dim="variable")
        cbc = cbc.drop_vars(exchange_names)
        # "gwf-gwf" or "gwt-gwt"
        exchange_key = exchange_names[0].split("_")[0]
        cbc[exchange_key] = exchange_budgets
        return cbc

    def _pad_missing_variables(self, cbc_per_partition: list[GridDataset]) -> None:
        """
        Boundary conditions can be missing in certain partitions, as can their
        budgets, in which case we manually assign an empty grid of nans.
        """
        dims_per_unique_key = {
            key: cbc[key].dims for cbc in cbc_per_partition for key in cbc.keys()
        }
        for cbc in cbc_per_partition:
            missing_keys = set(dims_per_unique_key.keys()) - set(cbc.keys())

            for missing in missing_keys:
                missing_dims = dims_per_unique_key[missing]
                missing_coords = {dim: cbc.coords[dim] for dim in missing_dims}

                shape = tuple([len(missing_coords[dim]) for dim in missing_dims])
                # Chunk lazily, one chunk per step along the first (time) dimension.
                chunks = (1,) + shape[1:]
                missing_data = dask.array.full(shape, np.nan, chunks=chunks)

                missing_grid = xr.DataArray(
                    missing_data, dims=missing_dims, coords=missing_coords
                )
                if isinstance(cbc, xu.UgridDataset):
                    missing_grid = xu.UgridDataArray(
                        missing_grid,
                        grid=cbc.ugrid.grid,
                    )
                cbc[missing] = missing_grid

    def _merge_budgets(
        self, modelnames: list[str], output: str, **settings
    ) -> GridDataset:
        if settings["flowja"] is True:
            raise ValueError("``flowja`` cannot be set to True when merging budgets.")

        cbc_per_partition = []
        for modelname in modelnames:
            cbc = self._open_single_output_single_model(modelname, output, **settings)
            # Merge and assign exchange budgets to dataset
            # FUTURE: Refactor to insert these exchange budgets in horizontal
            # flows.
            cbc = self._merge_and_assign_exchange_budgets(cbc)
            if not is_unstructured(cbc):
                cbc = cbc.where(self[modelname].domain, other=np.nan)
            cbc_per_partition.append(cbc)

        self._pad_missing_variables(cbc_per_partition)

        return merge_partitions(cbc_per_partition)

    def _concat_species(
        self, output: str, species_ls: Optional[list[str]] = None, **settings
    ) -> GridDataArray | GridDataset:
        # Group by flow model, to somewhat enforce a consistent transport model
        # ordering. Say:
        # F = Flow model, T = Transport model
        # a = species "a", b = species "b"
        # 1 = partition 1, 2 = partition 2
        # then this:
        # F1Ta1 F1Tb1 F2Ta2 F2Tb2 -> F1: [Ta1, Tb1], F2: [Ta2, Tb2]
        # F1Ta1 F2Tb1 F1Ta1 F2Tb2 -> F1: [Ta1, Tb1], F2: [Ta2, Tb2]
        tpt_models_per_flow_model = self._get_transport_models_per_flow_model()
        all_tpt_names = list(tpt_models_per_flow_model.values())

        # [[Ta_1, Tb_1], [Ta_2, Tb_2]] -> [[Ta_1, Ta_2], [Tb_1, Tb_2]]
        # [[Ta, Tb]] -> [[Ta], [Tb]]
        tpt_names_per_species = list(zip(*all_tpt_names))

        if self.is_split():
            # [[Ta_1, Tb_1], [Ta_2, Tb_2]] -> [Ta, Tb]
            unpartitioned_modelnames = [
                tpt_name.rpartition("_")[0] for tpt_name in all_tpt_names[0]
            ]
        else:
            # [[Ta, Tb]] -> [Ta, Tb]
            unpartitioned_modelnames = all_tpt_names[0]

        if not species_ls:
            species_ls = unpartitioned_modelnames

        if len(species_ls) != len(tpt_names_per_species):
            raise ValueError(
                "length of species_ls does not equal the number of transport models, "
                f"expected length {len(tpt_names_per_species)}, received {species_ls}"
            )

        if len(species_ls) == 1:
            return self._open_single_output(
                list(tpt_names_per_species[0]), output, **settings
            )

        # Concatenate species
        outputs = []
        for species, tpt_names in zip(species_ls, tpt_names_per_species):
            output_data = self._open_single_output(list(tpt_names), output, **settings)
            output_data = output_data.assign_coords(species=species)
            outputs.append(output_data)
        return concat(outputs, dim="species")

    def _open_single_output_single_model(
        self, modelname: str, output: str, **settings
    ) -> GridDataArray | GridDataset:
        """
        Opens single output of single model.

        Parameters
        ----------
        modelname: str
            Name of groundwater model from which output should be read.
        output: str
            Output variable name to open.
        **settings:
            Extra settings that need to be passed through to the respective
            output function.
        """
        open_func = OUTPUT_FUNC_MAPPING[output]
        expected_modeltype = OUTPUT_MODEL_MAPPING[output]

        if self.directory is None:
            raise RuntimeError(f"Simulation {self.name} has not been written yet.")
        model_path = self.directory / modelname

        # Get model
        model = self[modelname]
        if not isinstance(model, expected_modeltype):
            raise TypeError(
                f"{modelname} not a {expected_modeltype}, instead got {type(model)}"
            )
        # Get output file path
        oc_key = model._get_pkgkey("oc")
        oc_pkg = model[oc_key]
        # Ensure "-transport" and "-flow" are stripped from "budget"
        oc_output = output.split("-")[0]
        output_path = oc_pkg._get_output_filepath(model_path, oc_output)
        # Force path to always include simulation directory.
        output_path = self.directory / output_path

        grb_path = self._get_grb_path(modelname)

        if not output_path.exists():
            raise RuntimeError(
                f"Could not find output in {output_path}, check if you already ran simulation {self.name}"
            )

        return open_func(output_path, grb_path, **settings)

    def _get_flow_modelname_coupled_to_transport_model(
        self, transport_modelname: str
    ) -> str:
        """
        Get the name of the flow model coupled to a transport model; throws an
        error if multiple flow models are coupled to one transport model.
        """
        exchanges = self.get_exchange_relationships()
        coupled_flow_models = [
            i[2]
            for i in exchanges
            if (i[3] == transport_modelname) & (i[0] == "GWF6-GWT6")
        ]
        if len(coupled_flow_models) != 1:
            raise ValueError(
                f"Exactly one flow model must be coupled to transport model "
                f"{transport_modelname}, got: {coupled_flow_models}"
            )
        return coupled_flow_models[0]

    def _get_grb_path(self, modelname: str) -> Path:
        """
        Finds the appropriate GRB path belonging to modelname. GRB files are
        not written for transport models, so this method always returns a path
        to a flow model. In case of a transport model, it returns the path to
        the GRB file of its coupled flow model.
        """
        model = self[modelname]
        # Get grb path
        if isinstance(model, GroundwaterTransportModel):
            flow_model_name = self._get_flow_modelname_coupled_to_transport_model(
                modelname
            )
            flow_model_path = self.directory / flow_model_name
        else:
            flow_model_path = self.directory / modelname

        diskey = model._get_diskey()
        dis_id = model[diskey]._pkg_id
        return flow_model_path / f"{diskey}.{dis_id}.grb"

    @standard_log_decorator()
    def dump(
        self, directory=".", validate: bool = True, mdal_compliant: bool = False
    ) -> None:
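        """
        Dump the simulation to a directory: models are dumped with their own
        ``dump`` methods, packages and exchanges are written as netCDF files,
        and a ``{self.name}.toml`` file is written that references all dumped
        files. The result can be reloaded with
        :meth:`Modflow6Simulation.from_file`.

        Parameters
        ----------
        directory: str or pathlib.Path, optional
            Directory into which the simulation is dumped. Defaults to the
            current working directory.
        validate: bool, optional
            Whether to validate model input at dump. Defaults to True.
        mdal_compliant: bool, optional
            Passed through to each model's ``dump`` method. Defaults to False.
        """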
        directory = pathlib.Path(directory)
        directory.mkdir(parents=True, exist_ok=True)

        toml_content: DefaultDict[str, dict] = collections.defaultdict(dict)
        for key, value in self.items():
            cls_name = type(value).__name__
            if isinstance(value, Modflow6Model):
                model_toml_path = value.dump(directory, key, validate, mdal_compliant)
                toml_content[cls_name][key] = model_toml_path.relative_to(
                    directory
                ).as_posix()
            elif key in ["gwtgwf_exchanges", "split_exchanges"]:
                toml_content[key] = collections.defaultdict(list)
                for exchange_package in self[key]:
                    exchange_type, filename, _, _ = exchange_package.get_specification()
                    exchange_class_short = type(exchange_package).__name__
                    path = f"{filename}.nc"
                    exchange_package.dataset.to_netcdf(directory / path)
                    toml_content[key][exchange_class_short].append(path)
            else:
                path = f"{key}.nc"
                value.dataset.to_netcdf(directory / path)
                toml_content[cls_name][key] = path

        with open(directory / f"{self.name}.toml", "wb") as f:
            tomli_w.dump(toml_content, f)

    @staticmethod
    def from_file(toml_path):
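        """
        Instantiate a simulation from a ``.toml`` file written by
        :meth:`Modflow6Simulation.dump`, reloading all models, packages, and
        exchanges referenced in it.
        """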
        classes = {
            item_cls.__name__: item_cls
            for item_cls in (
                GroundwaterFlowModel,
                GroundwaterTransportModel,
                imod.mf6.TimeDiscretization,
                imod.mf6.Solution,
                imod.mf6.GWFGWF,
                imod.mf6.GWFGWT,
                imod.mf6.GWTGWT,
            )
        }

        toml_path = pathlib.Path(toml_path)
        with open(toml_path, "rb") as f:
            toml_content = tomli.load(f)

        simulation = Modflow6Simulation(name=toml_path.stem)
        for key, entry in toml_content.items():
            if key not in ["gwtgwf_exchanges", "split_exchanges"]:
                item_cls = classes[key]
                for name, filename in entry.items():
                    path = toml_path.parent / filename
                    simulation[name] = item_cls.from_file(path)
            else:
                simulation[key] = []
                for exchange_class, exchange_list in entry.items():
                    item_cls = classes[exchange_class]
                    for filename in exchange_list:
                        path = toml_path.parent / filename
                        simulation[key].append(item_cls.from_file(path))

        return simulation

    def get_exchange_relationships(self):
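        """
        Returns a list of exchange specifications (as produced by each
        exchange's ``get_specification`` method), both for GWF-GWT coupling
        and for exchanges created by splitting a simulation.
        """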
        result = []

        if "gwtgwf_exchanges" in self:
            for exchange in self["gwtgwf_exchanges"]:
                result.append(exchange.get_specification())

        # exchange for splitting models
        if self.is_split():
            for exchange in self["split_exchanges"]:
                result.append(exchange.get_specification())
        return result

    def get_models_of_type(self, modeltype):
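        """Returns all models of a given type (e.g. "gwf6" or "gwt6"), keyed by name."""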
        return {
            k: v
            for k, v in self.items()
            if isinstance(v, Modflow6Model) and (v.model_id() == modeltype)
        }

    def get_models(self):
        return {k: v for k, v in self.items() if isinstance(v, Modflow6Model)}

    def clip_box(
        self,
        time_min: Optional[cftime.datetime | np.datetime64 | str] = None,
        time_max: Optional[cftime.datetime | np.datetime64 | str] = None,
        layer_min: Optional[int] = None,
        layer_max: Optional[int] = None,
        x_min: Optional[float] = None,
        x_max: Optional[float] = None,
        y_min: Optional[float] = None,
        y_max: Optional[float] = None,
        states_for_boundary: Optional[dict[str, GridDataArray]] = None,
    ) -> Modflow6Simulation:
        """
        Clip a simulation by a bounding box (time, layer, y, x).

        Slicing intervals may be half-bounded, by providing None:

        * To select 500.0 <= x <= 1000.0:
          ``clip_box(x_min=500.0, x_max=1000.0)``.
        * To select x <= 1000.0: ``clip_box(x_min=None, x_max=1000.0)``
          or ``clip_box(x_max=1000.0)``.
        * To select x >= 500.0: ``clip_box(x_min=500.0, x_max=None)``
          or ``clip_box(x_min=500.0)``.

        Parameters
        ----------
        time_min: optional
        time_max: optional
        layer_min: optional, int
        layer_max: optional, int
        x_min: optional, float
        x_max: optional, float
        y_min: optional, float
        y_max: optional, float
        states_for_boundary : optional, Dict[pkg_name:str, boundary_values:Union[xr.DataArray, xu.UgridDataArray]]

        Returns
        -------
        clipped : Simulation
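
        Examples
        --------
        A sketch of clipping a simulation in time and space (the bounds below
        are arbitrary):

        >>> clipped = simulation.clip_box(
        ...     time_min="2020-01-01",
        ...     x_min=100_000.0,
        ...     x_max=200_000.0,
        ... )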
957 """
959 if self.is_split():
960 raise RuntimeError(
961 "Unable to clip simulation. Clipping can only be done on simulations that haven't been split."
962 + "Therefore clipping should be done before splitting the simulation."
963 )
964 if not self.has_one_flow_model():
965 raise ValueError(
966 "Unable to clip simulation. Clipping can only be done on simulations that have a single flow model ."
967 )
968 for model_name, model in self.get_models().items():
969 supported, error_with_object = model.is_clipping_supported()
970 if not supported:
971 raise ValueError(
972 f"simulation cannot be clipped due to presence of package '{error_with_object}' in model '{model_name}'"
973 )
975 clipped = type(self)(name=self.name)
976 for key, value in self.items():
977 state_for_boundary = (
978 None if states_for_boundary is None else states_for_boundary.get(key)
979 )
981 clipped[key] = value.clip_box(
982 time_min=time_min,
983 time_max=time_max,
984 layer_min=layer_min,
985 layer_max=layer_max,
986 x_min=x_min,
987 x_max=x_max,
988 y_min=y_min,
989 y_max=y_max,
990 state_for_boundary=state_for_boundary,
991 )
992 return clipped

    def split(self, submodel_labels: GridDataArray) -> Modflow6Simulation:
        """
        Split a simulation into different partitions using a submodel_labels
        array.

        The submodel_labels array defines how a simulation will be split. The
        array should have the same topology as the domain being split, i.e. a
        similar shape as a layer in the domain. The values in the array
        indicate to which partition a cell belongs. The values should be zero
        or greater.

        The method returns a new simulation containing all the split models
        and packages.
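
        Examples
        --------
        A sketch of splitting a structured simulation into two halves, shaping
        the labels after a hypothetical ``idomain`` grid:

        >>> submodel_labels = xr.zeros_like(idomain.sel(layer=1), dtype=int)
        >>> submodel_labels[:, 50:] = 1
        >>> split_simulation = simulation.split(submodel_labels)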
1003 """
1004 if self.is_split():
1005 raise RuntimeError(
1006 "Unable to split simulation. Splitting can only be done on simulations that haven't been split."
1007 )
1009 if not self.has_one_flow_model():
1010 raise ValueError(
1011 "splitting of simulations with more (or less) than 1 flow model currently not supported."
1012 )
1013 transport_models = self.get_models_of_type("gwt6")
1014 flow_models = self.get_models_of_type("gwf6")
1015 if not any(flow_models) and not any(transport_models):
1016 raise ValueError("a simulation without any models cannot be split.")
1018 original_models = {**flow_models, **transport_models}
1019 for model_name, model in original_models.items():
1020 supported, error_with_object = model.is_splitting_supported()
1021 if not supported:
1022 raise ValueError(
1023 f"simulation cannot be split due to presence of package '{error_with_object}' in model '{model_name}'"
1024 )
1026 original_packages = get_packages(self)
1028 partition_info = create_partition_info(submodel_labels)
1030 exchange_creator: ExchangeCreator_Unstructured | ExchangeCreator_Structured
1031 if is_unstructured(submodel_labels):
1032 exchange_creator = ExchangeCreator_Unstructured(
1033 submodel_labels, partition_info
1034 )
1035 else:
1036 exchange_creator = ExchangeCreator_Structured(
1037 submodel_labels, partition_info
1038 )
1040 new_simulation = imod.mf6.Modflow6Simulation(f"{self.name}_partioned")
1041 for package_name, package in {**original_packages}.items():
1042 new_simulation[package_name] = deepcopy(package)
1044 for model_name, model in original_models.items():
1045 solution_name = self.get_solution_name(model_name)
1046 new_simulation[solution_name].remove_model_from_solution(model_name)
1047 for submodel_partition_info in partition_info:
1048 new_model_name = f"{model_name}_{submodel_partition_info.id}"
1049 new_simulation[new_model_name] = slice_model(
1050 submodel_partition_info, model
1051 )
1052 new_simulation[solution_name].add_model_to_solution(new_model_name)
1054 exchanges: list[Any] = []
1056 for flow_model_name, flow_model in flow_models.items():
1057 exchanges += exchange_creator.create_gwfgwf_exchanges(
1058 flow_model_name, flow_model.domain.layer
1059 )
1061 if any(transport_models):
1062 for tpt_model_name in transport_models:
1063 exchanges += exchange_creator.create_gwtgwt_exchanges(
1064 tpt_model_name, flow_model_name, model.domain.layer
1065 )
1066 new_simulation._add_modelsplit_exchanges(exchanges)
1067 new_simulation._update_buoyancy_packages()
1068 new_simulation._set_flow_exchange_options()
1069 new_simulation._set_transport_exchange_options()
1070 new_simulation._update_ssm_packages()
1072 new_simulation._filter_inactive_cells_from_exchanges()
1073 return new_simulation

    def regrid_like(
        self,
        regridded_simulation_name: str,
        target_grid: GridDataArray,
        validate: bool = True,
    ) -> "Modflow6Simulation":
        """
        This method creates a new simulation object. The models contained in
        the new simulation are regridded versions of the models in the input
        object (this). Time discretization and solver settings are copied.

        Parameters
        ----------
        regridded_simulation_name: str
            Name given to the output simulation.
        target_grid: xr.DataArray or xu.UgridDataArray
            Discretization onto which the models in this simulation will be
            regridded.
        validate: bool
            Set to True to validate the regridded packages.

        Returns
        -------
        A new simulation object with regridded models.
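
        Examples
        --------
        A sketch, assuming ``target_grid`` is an existing idomain-like grid:

        >>> regridded = simulation.regrid_like("regridded_simulation", target_grid)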
1098 """
1100 return _regrid_like(self, regridded_simulation_name, target_grid, validate)

    def _add_modelsplit_exchanges(self, exchanges_list: list[GWFGWF]) -> None:
        if not self.is_split():
            self["split_exchanges"] = []
        self["split_exchanges"].extend(exchanges_list)

    def _set_flow_exchange_options(self) -> None:
        # collect some options that we will auto-set
        for exchange in self["split_exchanges"]:
            if isinstance(exchange, GWFGWF):
                model_name_1 = exchange.dataset["model_name_1"].values[()]
                model_1 = self[model_name_1]
                exchange.set_options(
                    save_flows=model_1["oc"].is_budget_output,
                    dewatered=model_1["npf"].is_dewatered,
                    variablecv=model_1["npf"].is_variable_vertical_conductance,
                    xt3d=model_1["npf"].get_xt3d_option(),
                    newton=model_1.is_use_newton(),
                )

    def _set_transport_exchange_options(self) -> None:
        for exchange in self["split_exchanges"]:
            if isinstance(exchange, GWTGWT):
                model_name_1 = exchange.dataset["model_name_1"].values[()]
                model_1 = self[model_name_1]
                advection_key = model_1._get_pkgkey("adv")
                dispersion_key = model_1._get_pkgkey("dsp")

                scheme = None
                xt3d_off = None
                xt3d_rhs = None
                if advection_key is not None:
                    scheme = model_1[advection_key].dataset["scheme"].values[()]
                if dispersion_key is not None:
                    xt3d_off = model_1[dispersion_key].dataset["xt3d_off"].values[()]
                    xt3d_rhs = model_1[dispersion_key].dataset["xt3d_rhs"].values[()]
                exchange.set_options(
                    save_flows=model_1["oc"].is_budget_output,
                    adv_scheme=scheme,
                    dsp_xt3d_off=xt3d_off,
                    dsp_xt3d_rhs=xt3d_rhs,
                )

    def _filter_inactive_cells_from_exchanges(self) -> None:
        for ex in self["split_exchanges"]:
            for i in [1, 2]:
                self._filter_inactive_cells_exchange_domain(ex, i)

    def _filter_inactive_cells_exchange_domain(self, ex: GWFGWF, i: int) -> None:
        """Filters inactive cells from one exchange domain in place"""
        modelname = ex[f"model_name_{i}"].values[()]
        domain = self[modelname].domain

        # Convert to zero-based indices; avoid shadowing the builtin ``id``.
        layer = ex.dataset["layer"] - 1
        cell_id = ex.dataset[f"cell_id{i}"] - 1
        if is_unstructured(domain):
            exchange_cells = {
                "layer": layer,
                "mesh2d_nFaces": cell_id,
            }
        else:
            exchange_cells = {
                "layer": layer,
                "y": cell_id.sel({f"cell_dims{i}": f"row_{i}"}),
                "x": cell_id.sel({f"cell_dims{i}": f"column_{i}"}),
            }
        exchange_domain = domain.isel(exchange_cells)
        active_exchange_domain = exchange_domain.where(exchange_domain.values > 0)
        active_exchange_domain = active_exchange_domain.dropna("index")
        ex.dataset = ex.dataset.sel(index=active_exchange_domain["index"])

    def get_solution_name(self, model_name: str) -> Optional[str]:
        for k, v in self.items():
            if isinstance(v, Solution):
                if model_name in v.dataset["modelnames"]:
                    return k
        return None

    def __repr__(self) -> str:
        typename = type(self).__name__
        INDENT = "    "
        attrs = [
            f"{typename}(",
            f"{INDENT}name={repr(self.name)},",
            f"{INDENT}directory={repr(self.directory)}",
        ]
        items = [
            f"{INDENT}{repr(key)}: {type(value).__name__},"
            for key, value in self.items()
        ]
        # Place the empty dict on the same line. Looks silly otherwise.
        if items:
            content = attrs + ["){"] + items + ["}"]
        else:
            content = attrs + ["){}"]
        return "\n".join(content)

    def _get_transport_models_per_flow_model(self) -> dict[str, list[str]]:
        flow_models = self.get_models_of_type("gwf6")
        transport_models = self.get_models_of_type("gwt6")
        # A transport model belongs to a flow model when their domains match.
        result = collections.defaultdict(list)

        for flow_model_name in flow_models:
            flow_model = self[flow_model_name]
            for tpt_model_name in transport_models:
                tpt_model = self[tpt_model_name]
                if is_equal(tpt_model.domain, flow_model.domain):
                    result[flow_model_name].append(tpt_model_name)
        return result

    def _generate_gwfgwt_exchanges(self) -> list[GWFGWT]:
        exchanges = []
        flow_transport_mapping = self._get_transport_models_per_flow_model()
        for flow_name, tpt_models_of_flow_model in flow_transport_mapping.items():
            if len(tpt_models_of_flow_model) > 0:
                for transport_model_name in tpt_models_of_flow_model:
                    exchanges.append(GWFGWT(flow_name, transport_model_name))

        return exchanges

    def _update_ssm_packages(self) -> None:
        flow_transport_mapping = self._get_transport_models_per_flow_model()
        for flow_name, tpt_models_of_flow_model in flow_transport_mapping.items():
            flow_model = self[flow_name]
            for tpt_model_name in tpt_models_of_flow_model:
                tpt_model = self[tpt_model_name]
                ssm_key = tpt_model._get_pkgkey("ssm")
                if ssm_key is not None:
                    old_ssm_package = tpt_model.pop(ssm_key)
                    state_variable_name = old_ssm_package.dataset[
                        "auxiliary_variable_name"
                    ].values[0]
                    ssm_package = SourceSinkMixing.from_flow_model(
                        flow_model, state_variable_name, is_split=self.is_split()
                    )
                    if ssm_package is not None:
                        tpt_model[ssm_key] = ssm_package

    def _update_buoyancy_packages(self) -> None:
        flow_transport_mapping = self._get_transport_models_per_flow_model()
        for flow_name, tpt_models_of_flow_model in flow_transport_mapping.items():
            flow_model = self[flow_name]
            flow_model.update_buoyancy_package(tpt_models_of_flow_model)

    def is_split(self) -> bool:
        return "split_exchanges" in self.keys()

    def has_one_flow_model(self) -> bool:
        flow_models = self.get_models_of_type("gwf6")
        return len(flow_models) == 1

    def mask_all_models(
        self,
        mask: GridDataArray,
    ):
        """
        This function applies a mask to all models in a simulation, provided
        they use the same discretization. The method parameter "mask" is an
        idomain-like array. Masking will overwrite idomain with the mask where
        the mask is 0 or -1. Where the mask is 1, the original value of idomain
        will be kept. Masking will update the packages accordingly, blanking
        their input where needed, and is therefore not a reversible operation.

        Parameters
        ----------
        mask: xr.DataArray, xu.UgridDataArray of ints
            idomain-like integer array. 1 sets cells to active, 0 sets cells to
            inactive, -1 sets cells to vertical passthrough
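
        Examples
        --------
        A sketch, using a hypothetical idomain-like ``mask`` grid:

        >>> simulation.mask_all_models(mask)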
1270 """
1271 _mask_all_models(self, mask)