Coverage for C:\src\imod-python\imod\wq\model.py: 64% (399 statements)

import collections
import os
import pathlib
import warnings

import cftime
import jinja2
import numpy as np
import pandas as pd
import xarray as xr
from scipy.ndimage import binary_dilation

import imod
from imod.util.time import timestep_duration, to_datetime_internal
from imod.wq.pkggroup import PackageGroups


def _relpath(path, to):

    # Wraps os.path.relpath
    try:
        return pathlib.Path(os.path.relpath(path, to))
    except ValueError:
        # Fails when switching between drives, e.g. from C:\ to D:\
        return pathlib.Path(os.path.abspath(path))


# This class allows only imod packages as values
class Model(collections.UserDict):
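    """
    Container mapping package names to imod-wq package objects.

    When ``self.check == "eager"``, a package is validated the moment it is
    assigned: ``__setitem__`` calls the package's ``_pkgcheck()``.
    """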

    def __setitem__(self, key, value):
        # TODO: raise ValueError on setting certain duplicates
        # e.g. two solvers
        if self.check == "eager":
            value._pkgcheck()
        super().__setitem__(key, value)

    def update(self, *args, **kwargs):
        for k, v in dict(*args, **kwargs).items():
            self[k] = v

    def visualize(
        self,
        directory,
        cmap="viridis",
        overlays=(),
        quantile_colorscale=True,
        figsize=(8, 8),
    ):
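        """
        Plot a quick top view of every spatial variable in the model.

        For each package with x and y dimensions, every data variable that
        itself has x and y dimensions is plotted with
        ``imod.visualize.spatial.imshow_topview`` into ``directory / key``.
        """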

        directory = pathlib.Path(directory)

        for key, value in self.items():
            if value._pkg_id == "well":
                pass
                # grouped = value.groupby(["x", "y"])
                # x = grouped["x"].first()
                # y = grouped["y"].first()
            # Select appropriate datasets
            if "x" in value.dims and "y" in value.dims:
                for varname in value.data_vars:
                    da = value[varname]
                    if "x" in da.dims and "y" in da.dims:
                        if da.isnull().all():
                            continue

                        imod.visualize.spatial.imshow_topview(
                            da=da,
                            name=varname,
                            directory=directory / key,
                            cmap=cmap,
                            overlays=overlays,
                            quantile_colorscale=quantile_colorscale,
                            figsize=figsize,
                        )

    def sel(self, **dimensions):
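        """
        Select a subset of the model by coordinate labels, wrapping
        label-based selection (``.loc``) per package.

        A hypothetical example, selecting one layer and a time window (the
        available dimensions depend on the packages in your model):

        >>> submodel = m.sel(layer=[1], time=slice("2000-01-01", "2001-01-01"))
        """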

        selmodel = type(self)(self.modelname, self.check)
        for pkgname, pkg in self.items():
            sel_dims = {k: v for k, v in dimensions.items() if k in pkg.dataset}

            if pkg._pkg_id == "bas6":
                # da.sel() unsets dimensions for scalars, this messes with the
                # bas6 package, because check_ibound is called upon
                # initialization.
                sel_dims = {
                    k: [v] if np.isscalar(v) else v for k, v in sel_dims.items()
                }
            if len(sel_dims) == 0:
                selmodel[pkgname] = pkg
            else:
                if pkg._pkg_id != "wel":
                    selmodel[pkgname] = type(pkg)(**pkg.dataset.loc[sel_dims])
                else:
                    df = pkg.dataset.to_dataframe().drop(columns="save_budget")
                    for k, v in sel_dims.items():
                        try:
                            if isinstance(v, slice):
                                # min/max to account for the reversed order of y
                                low, high = min(v.start, v.stop), max(v.start, v.stop)
                                df = df.loc[(df[k] >= low) & (df[k] <= high)]
                            else:  # list, labels, etc.
                                df = df.loc[df[k].isin(v)]
                        except Exception as e:
                            raise ValueError(
                                "Invalid indexer for Well package, accepts "
                                "slice or list-like of values"
                            ) from e
                    selmodel[pkgname] = imod.wq.Well(
                        save_budget=pkg.dataset["save_budget"], **df
                    )
        return selmodel

    def isel(self, **dimensions):
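        """
        Select a subset of the model by positional indexes, wrapping
        integer-based selection per package.

        A hypothetical example, selecting the first twelve time steps:

        >>> submodel = m.isel(time=slice(0, 12))
        """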

        selmodel = type(self)(self.modelname, self.check)
        for pkgname, pkg in self.items():
            sel_dims = {k: v for k, v in dimensions.items() if k in pkg.dataset}
            if len(sel_dims) == 0:
                selmodel[pkgname] = pkg
            else:
                # Indexing a Dataset with a dict of dimension indexers is
                # equivalent to .isel(); build a new package from the result.
                selmodel[pkgname] = type(pkg)(**pkg.dataset[sel_dims])
        return selmodel

    def to_netcdf(self, directory=".", pattern="{pkgname}.nc", **kwargs):
        """Convenience function to write all model packages to netcdf files.

        Parameters
        ----------
        directory : str, pathlib.Path
            Directory into which the different model packages will be written.
        pattern : str, optional
            Pattern for the filename of each package, in which ``{pkgname}``
            signifies the package name. Default is ``"{pkgname}.nc"``, so
            ``model["river"]`` would get written to ``directory / "river.nc"``.
        kwargs :
            Additional kwargs to be forwarded to ``xarray.Dataset.to_netcdf``.

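        Examples
        --------
        A hypothetical call, writing e.g. ``wq_river.nc`` into ``model_input``
        and forwarding ``engine`` to ``xarray.Dataset.to_netcdf``:

        >>> m.to_netcdf("model_input", pattern="wq_{pkgname}.nc", engine="netcdf4")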
        """
        directory = pathlib.Path(directory)
        for pkgname, pkg in self.items():
            try:
                pkg.dataset.to_netcdf(
                    directory / pattern.format(pkgname=pkgname), **kwargs
                )
            except Exception as e:
                raise type(e)(
                    f"{e}\nPackage {pkgname} cannot be written to NetCDF"
                )

    def _delete_empty_packages(self, verbose=False):
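        """
        Drop packages that contain no data: a package counts as empty when
        its first data variable holds no non-null values at all.
        """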

        to_del = []
        for pkg in self.keys():
            dv = list(self[pkg].dataset.data_vars)[0]
            if not self[pkg][dv].notnull().any().compute():
                if verbose:
                    warnings.warn(
                        f"Deleting package {pkg}, found no data in parameter {dv}"
                    )
                to_del.append(pkg)
        for pkg in to_del:
            del self[pkg]


class SeawatModel(Model):
    """
    iMOD-WQ SEAWAT model.

    Attributes
    ----------
    modelname : str
    check : str, optional
        When to perform model checks {None, "defer", "eager"}.
        Defaults to "defer".

    Examples
    --------
    >>> m = SeawatModel("example")
    >>> m["riv"] = River(...)
    >>> # ...etc.
    >>> m.create_time_discretization(endtime)
    >>> m.write()
    """

    # These templates end up here since they require global information
    # from more than one package
    _PACKAGE_GROUPS = PackageGroups

    _gen_template = jinja2.Template(
        "[gen]\n"
        " runtype = SEAWAT\n"
        " modelname = {{modelname}}\n"
        " writehelp = {{writehelp}}\n"
        " result_dir = {{result_dir}}\n"
        " packages = {{package_set|join(', ')}}\n"
        " coord_xll = {{xmin}}\n"
        " coord_yll = {{ymin}}\n"
        " start_year = {{start_date[:4]}}\n"
        " start_month = {{start_date[4:6]}}\n"
        " start_day = {{start_date[6:8]}}\n"
        " start_hour = {{start_date[8:10]}}\n"
        " start_minute = {{start_date[10:12]}}\n"
    )
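    # For illustration, rendering this template with hypothetical values
    # (modelname="example", result_dir="results", ...) yields a section like:
    #
    #   [gen]
    #    runtype = SEAWAT
    #    modelname = example
    #    writehelp = False
    #    result_dir = results
    #    ...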

    def __init__(self, modelname, check="defer"):
        super().__init__()
        self.modelname = modelname
        self.check = check

    def _get_pkgkey(self, pkg_id):
        """
        Get the package key that belongs to a certain pkg_id, since the keys
        are user specified.
        """
        key = [pkgname for pkgname, pkg in self.items() if pkg._pkg_id == pkg_id]
        nkey = len(key)
        if nkey > 1:
            raise ValueError(f"Multiple instances of {pkg_id} detected: {key}")
        elif nkey == 1:
            return key[0]
        else:
            return None

    def _group(self):
        """
        Group multiple systems of a single package, e.g. all river or
        drainage sub-systems.
        """
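        # For illustration (hypothetical names): two River packages stored
        # under "riv_primary" and "riv_secondary" share the _pkg_id "riv"
        # and therefore end up together in a single RiverGroup.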

        groups = {}
        has_group = set()
        groupable = set(self._PACKAGE_GROUPS.__members__.keys())
        for key, package in self.items():
            pkg_id = package._pkg_id
            if pkg_id in groupable:
                if pkg_id in has_group:  # already exists
                    groups[pkg_id][key] = package
                else:
                    groups[pkg_id] = {key: package}
                    has_group.update([pkg_id])

        package_groups = []
        for pkg_id, group in groups.items():
            # Create a PackageGroup for every package:
            # RiverGroup for rivers, DrainageGroup for drainage, etc.
            package_groups.append(self._PACKAGE_GROUPS[pkg_id].value(**group))

        return package_groups

    def _hastime(self, pkg):
        return (pkg._pkg_id == "wel" and "time" in pkg.dataset) or (
            "time" in pkg.dataset.coords
        )

    def _use_cftime(self):
        """
        Also checks if datetime types are homogeneous across packages.
        """
        types = []
        for pkg in self.values():
            if self._hastime(pkg):
                types.append(type(np.atleast_1d(pkg.dataset["time"].values)[0]))

        # Types will be empty if there's no time dependent input
        set_of_types = set(types)
        if len(set_of_types) == 0:
            return None
        else:  # there is time dependent input
            if not len(set_of_types) == 1:
                raise ValueError(
                    f"Multiple datetime types detected: {set_of_types}. "
                    "Use either cftime or numpy.datetime64[ns]."
                )
            # Since we compare types and not instances, we use issubclass
            if issubclass(types[0], cftime.datetime):
                return True
            elif issubclass(types[0], np.datetime64):
                return False
            else:
                raise ValueError("Use either cftime or numpy.datetime64[ns].")

    def time_discretization(self, times):
        warnings.warn(
            f"{self.__class__.__name__}.time_discretization() is deprecated. "
            f"In the future call {self.__class__.__name__}.create_time_discretization().",
            DeprecationWarning,
        )
        self.create_time_discretization(additional_times=times)

    def create_time_discretization(self, additional_times):
        """
        Collect all unique times from the model packages and the additional
        given times. These unique times are used as stress periods in the
        model. All stress packages must have the same starting time.

        The time discretization in imod-python works as follows:

        - The datetimes of all packages you send in are always respected
        - Subsequently, the input data you use is always included fully as well
        - All times are treated as starting times for the stress: a stress is
          always applied until the next specified date
        - For this reason, a final time is required to determine the length of
          the last stress period
        - Additional times can be provided to force shorter stress periods &
          more detailed output
        - Every stress has to be defined on the first stress period (this is a
          modflow requirement)

        Or visually (every letter a date in the time axes):

        >>> recharge a - b - c - d - e - f
        >>> river    g - - - - h - - - - j
        >>> times    - - - - - - - - - - - i
        >>> model    a - b - c h d - e - f i

        with the stress periods defined between these dates. I.e. the model
        times are the set of all times you include in the model.

        Parameters
        ----------
        additional_times : str, datetime; or iterable of str, datetimes
            Times to add to the time discretization. At least one single time
            should be given, which will be used as the ending time of the
            simulation.

        Note
        ----
        To set the other parameters of the TimeDiscretization object, you
        have to set these on the object after calling this function.

        Examples
        --------
        Add a single time:

        >>> m.create_time_discretization("2001-01-01")

        Add a date range:

        >>> m.create_time_discretization(pd.date_range("2000-01-01", "2001-01-01"))

        Add a list of times:

        >>> m.create_time_discretization(["2000-01-01", "2001-01-01"])
        """

        # Make sure it's an iterable
        if not isinstance(
            additional_times, (np.ndarray, list, tuple, pd.DatetimeIndex)
        ):
            additional_times = [additional_times]

        # Loop through all packages, check if cftime is required.
        self.use_cftime = self._use_cftime()
        # use_cftime is None if no datetimes are present in packages
        # use_cftime is False if np.datetime64 is present in packages
        # use_cftime is True if cftime.datetime is present in packages
        for time in additional_times:
            if issubclass(type(time), cftime.datetime):
                if self.use_cftime is None:
                    self.use_cftime = True
                if self.use_cftime is False:
                    raise ValueError(
                        "Use either cftime or numpy.datetime64[ns]. "
                        f"Received: {type(time)}."
                    )
        if self.use_cftime is None:
            self.use_cftime = False

        times = [
            to_datetime_internal(time, self.use_cftime) for time in additional_times
        ]
        first_times = {}  # first time per package
        for key, pkg in self.items():
            if self._hastime(pkg):
                pkgtimes = list(pkg.dataset["time"].values)
                first_times[key] = sorted(pkgtimes)[0]
                for var in pkg.dataset.data_vars:
                    if "stress_repeats" in pkg.dataset[var].attrs:
                        stress_repeats_times = list(
                            pkg.dataset[var].attrs["stress_repeats"].keys()
                        )
                        pkgtimes.extend(stress_repeats_times)
                times.extend(pkgtimes)

        # np.unique also sorts
        times = np.unique(np.hstack(times))

        # Check if every transient package commences at the same time.
        for key, first_time in first_times.items():
            time0 = times[0]
            if first_time != time0:
                raise ValueError(
                    f"Package {key} does not have a value specified for the "
                    f"first time: {time0}. Every input must be present in the "
                    "first stress period. Values are only filled forward in "
                    "time."
                )

        duration = timestep_duration(times, self.use_cftime)
        # Generate time discretization, just rely on default arguments
        # Probably won't be used that much anyway?
        timestep_duration_da = xr.DataArray(
            duration, coords={"time": np.array(times)[:-1]}, dims=("time",)
        )
        self["time_discretization"] = imod.wq.TimeDiscretization(
            timestep_duration=timestep_duration_da
        )

    def _render_gen(self, modelname, globaltimes, writehelp, result_dir):
        package_set = {
            pkg._pkg_id for pkg in self.values() if pkg._pkg_id not in ("tvc", "mal")
        }
        package_set.update(("btn", "ssm"))
        package_set = sorted(package_set)
        baskey = self._get_pkgkey("bas6")
        bas = self[baskey]
        _, xmin, xmax, _, ymin, ymax = imod.util.spatial.spatial_reference(
            bas["ibound"]
        )

        if not self.use_cftime:
            start_time = pd.to_datetime(globaltimes[0])
        else:
            start_time = globaltimes[0]

        start_date = start_time.strftime("%Y%m%d%H%M%S")

        d = {}
        d["modelname"] = modelname
        d["writehelp"] = writehelp
        d["result_dir"] = result_dir
        d["xmin"] = xmin
        d["xmax"] = xmax
        d["ymin"] = ymin
        d["ymax"] = ymax
        d["package_set"] = package_set
        d["start_date"] = start_date
        return self._gen_template.render(d)

    def _render_pkg(self, key, directory, globaltimes, nlayer):
        """
        Rendering method for straightforward packages.
        """
        # Get the name of the pkg, e.g. lookup "recharge" for the rch _pkg_id
        pkgkey = self._get_pkgkey(key)
        if pkgkey is None:
            # Maybe do an enum lookup for the full package name?
            if (key == "rch") or (key == "evt"):  # since recharge is optional
                return ""
            else:
                raise ValueError(f"No {key} package provided.")
        return self[pkgkey]._render(
            directory=directory / pkgkey, globaltimes=globaltimes, nlayer=nlayer
        )

    def _render_dis(self, directory, globaltimes, nlayer):
        baskey = self._get_pkgkey("bas6")
        diskey = self._get_pkgkey("dis")
        bas_content = self[baskey]._render_dis(
            directory=directory.joinpath(baskey), nlayer=nlayer
        )
        dis_content = self[diskey]._render(globaltimes=globaltimes)
        return bas_content + dis_content

    def _render_groups(self, directory, globaltimes):
        baskey = self._get_pkgkey("bas6")
        nlayer, nrow, ncol = self[baskey]["ibound"].shape
        package_groups = self._group()
        content = "\n\n".join(
            [
                group.render(directory, globaltimes, nlayer, nrow, ncol)
                for group in package_groups
            ]
        )
        ssm_content = "".join(
            [
                group.render_ssm(directory, globaltimes, nlayer)
                for group in package_groups
            ]
        )

        # Calculate the number of sinks and sources
        n_sinkssources = sum([group.max_n_sinkssources() for group in package_groups])
        return content, ssm_content, n_sinkssources

    def _render_flowsolver(self, directory):
        pcgkey = self._get_pkgkey("pcg")
        pksfkey = self._get_pkgkey("pksf")
        if pcgkey and pksfkey:
            raise ValueError("pcg and pksf solver both provided. Provide only one.")
        if not pcgkey and not pksfkey:
            raise ValueError("No flow solver provided")
        if pcgkey:
            return self[pcgkey]._render()
        else:
            baskey = self._get_pkgkey("bas6")
            self[pksfkey]._compute_load_balance_weight(self[baskey]["ibound"])
            return self[pksfkey]._render(directory=directory.joinpath(pksfkey))

    def _render_btn(self, directory, globaltimes, nlayer):
        baskey = self._get_pkgkey("bas6")
        btnkey = self._get_pkgkey("btn")
        diskey = self._get_pkgkey("dis")
        if btnkey is None:
            raise ValueError("No BasicTransport package provided.")
        self[btnkey].dataset["thickness"] = self[baskey].thickness()

        btn_content = self[btnkey]._render(
            directory=directory.joinpath(btnkey), nlayer=nlayer
        )
        dis_content = self[diskey]._render_btn(globaltimes=globaltimes)
        return btn_content + dis_content

    def _render_transportsolver(self, directory):
        gcgkey = self._get_pkgkey("gcg")
        pkstkey = self._get_pkgkey("pkst")
        if gcgkey and pkstkey:
            raise ValueError("gcg and pkst solver both provided. Provide only one.")
        if not gcgkey and not pkstkey:
            raise ValueError("No transport solver provided")
        if gcgkey:
            return self[gcgkey]._render()
        else:
            baskey = self._get_pkgkey("bas6")
            self[pkstkey]._compute_load_balance_weight(self[baskey]["ibound"])
            return self[pkstkey]._render(directory=directory / pkstkey)

    def _render_ssm_rch_evt_mal_tvc(self, directory, globaltimes, nlayer):
        out = ""
        for key, pkg in self.items():
            if pkg._pkg_id in ("rch", "evt", "mal", "tvc"):
                out += pkg._render_ssm(
                    directory=directory / key, globaltimes=globaltimes, nlayer=nlayer
                )
        return out

    def _bas_btn_rch_evt_mal_tvc_sinkssources(self):
        baskey = self._get_pkgkey("bas6")
        btnkey = self._get_pkgkey("btn")
        ibound = self[baskey].dataset["ibound"]
        icbund = self[btnkey].dataset["icbund"]

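        # Cells where ibound < 0 (constant head) or icbund < 0 (constant
        # concentration) each count as a sink/source for the SSM package.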
        n_extra = int(((ibound < 0) | (icbund < 0)).sum())

        nlayer, nrow, ncol = ibound.shape
        for key in ("rch", "evt", "mal", "tvc"):
            pkgkey = self._get_pkgkey(key)
            if pkgkey is not None:
                pkg = self[pkgkey]
                if key in ("rch", "evt"):
                    pkg._set_ssm_layers(ibound)
                    n_extra += pkg._ssm_layers.size * nrow * ncol
                elif key in ("mal", "tvc"):
                    _ = pkg._max_active_n("concentration", nlayer, nrow, ncol)
                    n_extra += pkg._ssm_cellcount

        return n_extra

    def render(self, directory, result_dir, writehelp):
        """
        Render the runfile as a string, package by package.
        """
        diskey = self._get_pkgkey("dis")
        globaltimes = self[diskey]["time"].values
        baskey = self._get_pkgkey("bas6")
        nlayer = self[baskey]["layer"].size

        content = []
        content.append(
            self._render_gen(
                modelname=self.modelname,
                globaltimes=globaltimes,
                writehelp=writehelp,
                result_dir=result_dir,
            )
        )
        content.append(
            self._render_dis(
                directory=directory, globaltimes=globaltimes, nlayer=nlayer
            )
        )
        # Modflow
        for key in ("bas6", "oc", "lpf", "rch", "evt"):
            content.append(
                self._render_pkg(
                    key=key, directory=directory, globaltimes=globaltimes, nlayer=nlayer
                )
            )

        # Multi-system package group: chd, drn, ghb, riv, wel
        modflowcontent, ssm_content, n_sinkssources = self._render_groups(
            directory=directory, globaltimes=globaltimes
        )
        # Add constant head/concentration cells and rch, evt, mal, tvc to the
        # sink and source count
        n_sinkssources += self._bas_btn_rch_evt_mal_tvc_sinkssources()
        # Add recharge, mass loading, and time varying constant concentration
        # to ssm_content
        ssm_content += self._render_ssm_rch_evt_mal_tvc(
            directory=directory, globaltimes=globaltimes, nlayer=nlayer
        )

        # Wrap up the modflow part
        content.append(modflowcontent)
        content.append(self._render_flowsolver(directory=directory))

        # MT3D and SEAWAT settings
        # Make an estimate of the required number of particles.
        advkey = self._get_pkgkey("adv")
        if isinstance(
            self[advkey], (imod.wq.AdvectionMOC, imod.wq.AdvectionHybridMOC)
        ):
            ibound = self[baskey]["ibound"]
            cell_max_nparticles = self[advkey]["cell_max_nparticles"]
            self[advkey]["max_nparticles"] = int(
                np.prod(ibound.shape) * 0.5 * cell_max_nparticles
            )

        content.append(
            self._render_btn(
                directory=directory, globaltimes=globaltimes, nlayer=nlayer
            )
        )
        for key in ("vdf", "adv", "dsp"):
            content.append(
                self._render_pkg(
                    key=key, directory=directory, globaltimes=globaltimes, nlayer=nlayer
                )
            )
        ssm_content = f"[ssm]\n mxss = {n_sinkssources}" + ssm_content

        content.append(ssm_content)
        content.append(self._render_transportsolver(directory=directory))

        return "\n\n".join(content)

    def _set_caching_packages(self, reldir):
        # TODO:
        # Basically every package should rely on bas for checks via ibound:
        # basic domain checking, etc.
        # baskey = self._get_pkgkey("bas6")
        # So far, only slv does? Others only depend on themselves.
        for pkgname, pkg in self.items():
            if hasattr(pkg, "_filehashself"):
                # Clear _outputfiles in case of repeat writes
                pkg._outputfiles = []
                pkg._filehashes[pkgname] = pkg._filehashself
                pkg._reldir = reldir
        # If more complex dependencies do show up, probably push methods down
        # to the individual packages.

    def write(
        self, directory=pathlib.Path("."), result_dir=None, resultdir_is_workdir=False
    ):
        """
        Writes model input files.

        Parameters
        ----------
        directory : str, pathlib.Path
            Directory into which the model input will be written. The model
            input will be written into a directory called modelname.
        result_dir : str, pathlib.Path
            Path to the directory in which output will be written when
            running the model. Is written as the value of the ``result_dir``
            key in the runfile. See the examples.
        resultdir_is_workdir : boolean, optional
            Whether to set all input paths in the runfile relative to the
            output directory. Because iMOD-wq generates a number of files in
            its working directory, it may be advantageous to set the working
            directory to a different path than the runfile location.

        Returns
        -------
        None

        Examples
        --------
        Say we wish to write the model input to a directory called input, and
        we desire that when running the model, the results end up in a
        directory called output. We may run:

        >>> model.write(directory="input", result_dir="output")

        And in the runfile, a value of ``../../output`` will be written for
        result_dir.

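        To run the model from the output directory instead, with all input
        paths in the runfile written relative to that directory, a
        hypothetical call would be:

        >>> model.write(directory="input", result_dir="output",
        ...             resultdir_is_workdir=True)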
        """
        # Coerce to pathlib.Path
        directory = pathlib.Path(directory)
        if result_dir is None:
            result_dir = pathlib.Path("results")
        else:
            result_dir = pathlib.Path(result_dir)

        # Create directories if necessary
        directory.mkdir(exist_ok=True, parents=True)
        result_dir.mkdir(exist_ok=True, parents=True)
        runfilepath = directory / f"{self.modelname}.run"
        results_runfilepath = result_dir / f"{self.modelname}.run"

        # Where will the model run?
        # Default is inputdir, next to the runfile:
        # in that case, resultdir is relative to inputdir.
        # If resultdir_is_workdir, inputdir is relative to resultdir.
        # render_dir is the inputdir that is printed in the runfile.
        # result_dir is the resultdir that is printed in the runfile.
        # caching_reldir is from where to check for files. This location
        # is the same as the eventual model working dir.
        if resultdir_is_workdir:
            caching_reldir = result_dir
            if not directory.is_absolute():
                render_dir = _relpath(directory, result_dir)
            else:
                render_dir = directory
            result_dir = pathlib.Path(".")
        else:
            caching_reldir = directory
            render_dir = pathlib.Path(".")
            if not result_dir.is_absolute():
                result_dir = _relpath(result_dir, directory)

        # Check if any caching packages are present, and set necessary states.
        self._set_caching_packages(caching_reldir)

        if self.check is not None:
            self.package_check()

        # Delete packages without data
        self._delete_empty_packages(verbose=True)

        runfile_content = self.render(
            directory=render_dir, result_dir=result_dir, writehelp=False
        )

        # Start writing
        # Write the runfile
        with open(runfilepath, "w") as f:
            f.write(runfile_content)
        # Also write the runfile in the workdir
        if resultdir_is_workdir:
            with open(results_runfilepath, "w") as f:
                f.write(runfile_content)

        # Write all IDFs and IPFs
        for pkgname, pkg in self.items():
            if (
                "x" in pkg.dataset.coords
                and "y" in pkg.dataset.coords
                or pkg._pkg_id == "wel"
            ):
                try:
                    pkg.save(directory=directory / pkgname)
                except Exception as error:
                    raise RuntimeError(
                        f"An error occurred during saving of package: {pkgname}."
                    ) from error

    def package_check(self):
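        """
        Validate every package, passing the model's ibound (when a bas6
        package is present) so packages can check against the active domain.
        """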

        baskey = self._get_pkgkey("bas6")
        if baskey is not None:
            ibound = self[baskey]["ibound"]
        else:
            ibound = None

        for pkg in self.values():
            pkg._pkgcheck(ibound=ibound)

    def clip(
        self,
        extent,
        heads_boundary=None,
        concentration_boundary=None,
        delete_empty_pkg=False,
    ):
        """
        Clip the model to a certain `extent`. The spatial resolution of the
        clipped model is unchanged. Boundary conditions of the clipped model
        can be derived from parent model calculation results and are applied
        along the edge of `extent` (CHD and TVC). Packages from the parent
        that have no data within the extent are optionally removed.

        Parameters
        ----------
        extent : tuple, geopandas.GeoDataFrame, xarray.DataArray
            Extent of the clipped model. A tuple must be in the form of
            (`xmin`, `xmax`, `ymin`, `ymax`). If a GeoDataFrame, all polygons
            are included in the model extent. If a DataArray, non-null/non-zero
            values are taken as the new extent.
        heads_boundary : xarray.DataArray, optional
            Heads to be applied as a Constant Head boundary condition along
            the edge of the model extent. These heads are assumed to be
            derived from calculations with the parent model. The timestamp of
            the boundary condition is shifted to correct for the difference
            between the 'end of period' timestamp of results and the 'start
            of period' timestamp of the boundary condition.
            If None (default), no constant head boundary condition is applied.
        concentration_boundary : xarray.DataArray, optional
            Concentration to be applied as a Time Varying Concentration
            boundary condition along the edge of the model extent. These
            concentrations can be derived from calculations with the parent
            model. The timestamp of the boundary condition is shifted to
            correct for the difference between the 'end of period' timestamp
            of results and the 'start of period' timestamp of the boundary
            condition.
            If None (default), no time varying concentration boundary
            condition is applied.

            *Note that the Time Varying Concentration boundary sets a constant
            concentration for the entire stress period, unlike the linearly
            varying Constant Head. This will inevitably cause a time shift in
            concentrations along the boundary. This shift becomes more
            significant when stress periods are longer. If necessary, consider
            interpolating concentrations along the time axis, to reduce the
            length of stress periods (see examples).*
        delete_empty_pkg : bool, optional
            Set to True to delete packages that contain no data in the clipped
            model. Defaults to False.

        Examples
        --------
        Given a full model, clip a 1 x 1 km rectangular submodel without
        boundary conditions along its edge:

        >>> extent = (1000., 2000., 5000., 6000.)  # xmin, xmax, ymin, ymax
        >>> clipped = ml.clip(extent)

        Load heads and concentrations from full model results:

        >>> heads = imod.idf.open("head/head_*.idf")
        >>> conc = imod.idf.open("conc/conc_*.idf")
        >>> clipped = ml.clip(extent, heads, conc)

        Use a shape of a model area:

        >>> extent = geopandas.read_file("clipped_model_area.shp")
        >>> clipped = ml.clip(extent, heads, conc)

        Interpolate concentration results to annual results using
        xarray.interp(), to improve the time resolution of the concentration
        boundary:

        >>> conc = imod.idf.open("conc/conc_*.idf")
        >>> dates = pd.date_range(conc.time.values[0], conc.time.values[-1], freq="AS")
        >>> conc_interpolated = conc.load().interp(time=dates, method="linear")
        >>> clipped = ml.clip(extent, heads, conc_interpolated)
        """

        baskey = self._get_pkgkey("bas6")
        like = self[baskey].dataset["ibound"].isel(layer=0).squeeze(drop=True)

        if isinstance(extent, (list, tuple)):
            xmin, xmax, ymin, ymax = extent
            if not ((xmin < xmax) & (ymin < ymax)):
                raise ValueError(
                    "Either xmin or ymin is equal to or larger than xmax or "
                    "ymax. Correct order is xmin, xmax, ymin, ymax."
                )
            extent = xr.ones_like(like)
            extent = extent.where(
                (extent.x >= xmin)
                & (extent.x <= xmax)
                & (extent.y >= ymin)
                & (extent.y <= ymax),
                0,
            )
        elif isinstance(extent, xr.DataArray):
            pass
        else:
            import geopandas as gpd

            if isinstance(extent, gpd.GeoDataFrame):
                extent = imod.prepare.rasterize(extent, like=like)
            else:
                raise ValueError(
                    "extent must be of type tuple, GeoDataFrame or DataArray"
                )

        extent = xr.ones_like(like).where(extent > 0)

        def get_clip_na_slices(da, dims=None):
            """Clips a DataArray to its maximum extent in different dimensions.

            If dims is not given, clips to x and y.
            """
            if dims is None:
                dims = ["x", "y"]
            slices = {}
            for d in dims:
                tmp = da.dropna(dim=d, how="all")[d]

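                # Pad the slice with half a cell size so .sel() keeps the
                # outermost cells; when two or fewer coordinates remain, no
                # cell size is inferred and no padding is applied.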
                if len(tmp) > 2:
                    dtmp = 0.5 * (tmp[1] - tmp[0])
                else:
                    dtmp = 0
                slices[d] = slice(float(tmp[0] - dtmp), float(tmp[-1] + dtmp))
            return slices

        # Create an edge around the extent to apply boundary conditions to
        if not (heads_boundary is None and concentration_boundary is None):
            extentwithedge = extent.copy(data=binary_dilation(extent.fillna(0).values))
            extentwithedge = extentwithedge.where(extentwithedge)
        else:
            extentwithedge = extent

        # Clip to the extent with edge
        clip_slices = get_clip_na_slices(extentwithedge)
        extentwithedge = extentwithedge.sel(**clip_slices)
        extent = extent.sel(**clip_slices)
        edge = extentwithedge.where(extent.isnull())

        # Clip the model to the extent, set cells outside the extent to
        # nodata or 0
        ml = self.sel(**clip_slices)
        for pck in ml.keys():
            for d in ml[pck].dataset.data_vars:
                if d in ["ibound", "icbund"]:
                    ml[pck][d] = ml[pck][d].where(extentwithedge == 1, 0)
                elif "x" in ml[pck][d].dims:
                    ml[pck][d] = ml[pck][d].where(extentwithedge == 1)

        # Create boundary conditions as CHD and/or TVC packages
        if concentration_boundary is not None:
            concentration_boundary = concentration_boundary.sel(**clip_slices)
            concentration_boundary = concentration_boundary.where(edge == 1)
            # Time shifts: assume heads and conc are calculation results.
            # The timestamp of a result is then the _end_ of a stress period,
            # while the timestamp of input is the _start_ of a stress period.
            if "time" in concentration_boundary.dims:
                concentration_boundary = concentration_boundary.shift(
                    time=-1
                ).combine_first(concentration_boundary)

            ml["tvc"] = imod.wq.TimeVaryingConstantConcentration(
                concentration=concentration_boundary
            )

        if heads_boundary is not None:
            heads_boundary = heads_boundary.sel(**clip_slices)
            head_end = heads_boundary.where(edge == 1)
            if "time" in heads_boundary.dims:
                head_start = head_end.shift(time=1).combine_first(head_end)
            else:
                head_start = head_end

            ml["chd"] = imod.wq.ConstantHead(
                head_start=head_start,
                head_end=head_end,
                concentration=concentration_boundary,  # concentration relevant for fwh calc
            )

        # Delete packages if there is no data in the sliced model
        if delete_empty_pkg:
            ml._delete_empty_packages(verbose=True)

        return ml