import json
import logging
import pickle
import re
import warnings
from base64 import b64decode, b64encode
from collections import defaultdict
from copy import deepcopy
from functools import lru_cache

import numpy as np
import pandas as pd
from numpy.lib import recfunctions as rfn
from numtools.serializer import Serializer
from numtools.misc import replace_nan

from nastranio.constants import ELEMENT, PROPERTY, UNKNOWN
from nastranio.decorators import cached_property
from nastranio.fields_writer import (
    DefaultDict,
    fields_to_card,
    get_field,
    nbrows_by_fields,
    trans,
)
from nastranio.pylib import autoconvert
from nastranio.utils import array2dic, calcdiff, dic2array, transform_dict_of_list

try:
    import msgpack

    ISMSGPACK = True
except ImportError:
    ISMSGPACK = False

pat = re.compile(r"(?P<root>\w+)(?P<id>\d+)")


# ============================================================================
# json encoder / decoder
# ============================================================================


class PythonObjectEncoder(json.JSONEncoder):
    """encode sets (and other picklable objects) to JSON"""

    def default(self, obj):
        if isinstance(obj, (list, dict, str, int, float, bool, type(None))):
            return super().default(obj)
        return {"_python_object": b64encode(pickle.dumps(obj)).decode("utf-8")}


def as_python_object(dct):
    if "_python_object" in dct:
        return pickle.loads(b64decode(dct["_python_object"].encode("utf-8")))
    return dct
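
# Round-trip sketch relying only on the standard `json` API (the payload is
# illustrative):
#   dumped = json.dumps({"gids": {1, 2, 3}}, cls=PythonObjectEncoder)
#   restored = json.loads(dumped, object_hook=as_python_object)
#   assert restored["gids"] == {1, 2, 3}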


# ============================================================================
# msgpack encoder / decoder
# ============================================================================


def msgpack_encode(obj):
    if isinstance(obj, set):
        obj = {"__set__": True, "as_list": list(obj)}
    return obj


def msgpack_decode(obj):
    if "__set__" in obj:
        obj = set(obj["as_list"])
    return obj
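
# Round-trip sketch using the standard msgpack hooks (only meaningful when
# ISMSGPACK is True; the payload is illustrative):
#   packed = msgpack.packb({"gids": {1, 2, 3}}, default=msgpack_encode)
#   data = msgpack.unpackb(packed, object_hook=msgpack_decode, raw=False)
#   assert data["gids"] == {1, 2, 3}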


# ============================================================================
# parsing helpers
# ============================================================================


def parse_table(table, asruler=False, linesno=None):
    r"""parse a Nastran user-guide style table,
    and return a tuple (`card`, `fields`, `repeated`)

    >>> table = (
    ...     '| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 |\n'
    ...     '|------+-----+-----+-----+----+----+----+-----+----+----|\n'
    ...     '| PBAR | PID | MID | A | I1 | I2 | J | NSM | |\n'
    ...     '| "" | K1 | K2 | I12 | | | | | |\n')
    >>> card, fields, repeated = parse_table(table)
    >>> card
    'PBAR'
    >>> fields
    {2: 'PID', 3: 'MID', 4: 'A', 5: 'I1', 6: 'I2', 7: 'J', 8: 'NSM', 12: 'K1', 13: 'K2', 14: 'I12'}
    >>> repeated  # -> None

    It also handles tables using -etc.- fields:

    >>> table = (
    ...     '| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 |\n'
    ...     '|-------+------+----+--------+-------+--------+------+--------+-------+----|\n'
    ...     '| PCOMP | PID | Z0 | NSM | SB | FT | TREF | GE | LAM | \n'
    ...     '| "" | MID1 | T1 | THETA1 | SOUT1 | MID2 | T2 | THETA2 | SOUT2 | \n'
    ...     '| "" | MID3 | T3 | THETA3 | SOUT3 | -etc.- | | | | \n')
    >>> card, fields, repeated = parse_table(table)
    >>> card
    'PCOMP'
    >>> fields
    {2: 'PID', 3: 'Z0', 4: 'NSM', 5: 'SB', 6: 'FT', 7: 'TREF', 8: 'GE', 9: 'LAM'}
    >>> repeated
    {'starts@': 12, 'fields': ['MID', 'T', 'THETA', 'SOUT']}

    -etc.- is not required to be the trailing field:

    >>> table = (
    ...     '| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 |\n'
    ...     '|------+------+-----+-----+--------+--------+-----+-----+-----+----|\n'
    ...     '| RBE2 | EID | GN | CM | GM1 | GM2 | GM3 | GM4 | GM5 | | \n'
    ...     '| "" | GM6 | GM7 | GM8 | -etc.- | ALPHA | | | | | \n')
    >>> card, fields, repeated = parse_table(table)
    >>> card
    'RBE2'
    >>> fields
    {2: 'EID', 3: 'GN', 4: 'CM'}
    >>> repeated
    {'starts@': 5, 'fields': ['GM'], 'floating': ['ALPHA']}
    """
    lines = [l.strip() for l in table.split("\n")]
    lines = [l for l in lines if l]
    # ---------------------------------------
    # drop first line if it is the field numbering
    if set(lines[0]) == {" ", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "|"}:
        lines = lines[1:]
    # ---------------------------------------
    # drop next line if it is grid decoration
    if set(lines[0]) == {"+", "-", "|"}:
        lines = lines[1:]
    if linesno:
        lines = [l for i, l in enumerate(lines) if i in linesno]
    # ---------------------------------------
    # get fields
    fields = {}
    for i, line in enumerate(lines):
        _fields = line.split("|")
        # the grid has leading and trailing "|" characters;
        # drop the first and last (empty) items
        _fields = [f.strip() for f in _fields[1:-1]]
        fields.update(dict(zip(range(1 + i * 10, len(_fields) + 1 + i * 10), _fields)))
    # ---------------------------------------
    # clean fields: remove empty fields
    for k, v in fields.copy().items():
        if v.strip() in ("", '""'):
            fields.pop(k)
    if asruler:
        # --------------------------------------------------------------------
        # truncate long field names so they fit within 8-characters fields
        for k, fieldname in fields.items():
            if len(fieldname) > 6:
                fields[k] = fieldname[:5] + ".."
        repeated = {}
    elif "-etc.-" in fields.values():
        etc_key = {v: k for k, v in fields.items()}["-etc.-"]
        floating = list({k: v for k, v in fields.items() if k > etc_key}.values())
        fields = {k: v for k, v in fields.items() if k < etc_key}
        # repeated fields can be one or more;
        # iterate from the last field until we find a repeated pattern
        i = -1
        keys = list(fields.keys())
        repeated = {"starts@": None, "fields": []}
        while True:
            key = keys[i]
            value = fields[key]
            field, nb = pat.search(value).groups()
            if field in repeated["fields"]:
                break
            repeated["fields"].append(field)
            i -= 1
        repeated["fields"] = repeated["fields"][::-1]
        # clean all the repeated fields
        for key in keys[::-1]:
            value = fields[key]
            try:
                field, nb = pat.search(value).groups()
            except AttributeError:
                # `value` does not match the "<root><nb>" pattern:
                # no more repeated fields
                break
            else:
                fields.pop(key)
                repeated["starts@"] = key
        if floating:
            repeated["floating"] = floating
    else:
        repeated = None
    if 1 in fields:
        card = fields.pop(1)
    else:
        card = None
    return card, fields, repeated


class DummyFields:
    """drop-in replacement for fields dict. Used by anonymous cards"""

    def __getitem__(self, x):
        return "FIELD#%d" % x


# ============================================================================
# Cards Skeletons to be derived
# ============================================================================


class SimpleCard(Serializer):
    """generic definition of simple cards (e.g. GRID, CQUAD4)
    or cycling cards (e.g. PCOMP)
    """

    TABLE = None
    DEFAULTS = {}
    MULT_TYPES_FIELDS = {}
    type = UNKNOWN

    def __init__(self, name=None, data=None):
        if not name:
            name = self.__class__.__name__
        self.name = name
        if hasattr(self, "TABLE") and self.TABLE is not None:
            self.card, self.fields, self.repeated = parse_table(self.TABLE)
        else:
            self.card = name
            self.fields = {}  # DummyFields()
            self.repeated = None
        # --------------------------------------------------------------------
        # main property (existing for all cards)
        self.carddata = {"main": defaultdict(list)}
        if data:
            self.resume(data)
        # if _is_dummy:
        #     # build fields based on field number
        #     _fields = dict(self.carddata["main"])
        #     fields = dict(zip(range(0, len(_fields)), _fields.keys()))
        #     self.fields = {
        #         k: v for k, v in fields.items() if (k % 10 != 0 and (k - 1) % 10 != 0)
        #     }

    @classmethod
    def fields_info(cls):
        _data = parse_table(cls.TABLE)
        all_fields = tuple(_data[1].values())
        repeated = _data[2]
        fields = {
            # mandatory needs to keep order
            "mandatory": tuple(f for f in all_fields if f not in cls.DEFAULTS),
            "optional": set(f for f in all_fields if f in cls.DEFAULTS),
        }
        if repeated:
            fields["repeated"] = tuple(f"{field}i" for field in repeated["fields"])
        return fields
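
    # For a PCOMP-like card (see the parse_table doctests above), this would
    # yield something like (sketch; the mandatory/optional split depends on
    # the subclass DEFAULTS):
    #   {'mandatory': ('PID', 'Z0', ...),
    #    'optional': {...},
    #    'repeated': ('MIDi', 'Ti', 'THETAi', 'SOUTi')}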

    @cached_property
    def _rowno2id(self):
        """map carddata row number to card IDs"""
        return {i: id for i, id in enumerate(self.carddata["main"][self.XID_FIELDNAME])}

    @cached_property
    def _id2rowno(self):
        """map card IDs to carddata row number"""
        return {id: i for i, id in enumerate(self.carddata["main"][self.XID_FIELDNAME])}

    def _list_caches(self):
        return {k for k in self.__dict__ if k.startswith(cached_property.CACHED_PREFIX)}

    def clear_caches(self, rebuild=True):
        """delete internal caches"""
        prefix = cached_property.CACHED_PREFIX
        cache_names = self._list_caches()  # _cache_XXX
        cached_names = [k.replace(prefix, "") for k in cache_names]  # XXX
        logging.info("clean cached properties: %s", ", ".join(cached_names))
        _rebuilt = []
        for fcached, cache_name in zip(cached_names, cache_names):
            self.__dict__.pop(cache_name)
            if rebuild:
                getattr(self, fcached)
                _rebuilt.append(fcached)
        if _rebuilt:
            logging.info("rebuilt cache: %s", ", ".join(_rebuilt))

    def default_to_nan(self, transfields=True):
        def _default_to_nan(row):
            """change default values to None"""
            for k, v in row.items():
                v = self._check_default(k, v, None)
                if transfields:
                    v = trans(v)
                row[k] = v
            return row

        df = pd.DataFrame(self.array)
        df = df.astype(object).apply(_default_to_nan, axis=1)
        if transfields:
            df = df.fillna("")
        return df

    def diff(self, other):
        return calcdiff((self.__dict__, other.__dict__))

    def __eq__(self, other):
        return len(self.diff(other)) == 0

    def _reset(self):
        name = self.name
        self.__dict__ = {}
        self.__init__(name=name)

    # def __getattr__(self, attr):
    #     """bind carddata contents"""
    #     if attr in self.carddata:
    #         return self.carddata[attr]
    #     raise AttributeError(attr)

    def __len__(self):
        keys = list(self.carddata["main"].keys())
        if len(keys) == 0:
            return 0
        key = keys[0]
        return len(self.carddata["main"][key])

    def help(self, doprint=False):
        """
        return self.TABLE usable as a ruler
        """
        card, fields, repeated = parse_table(self.TABLE, asruler=True)
        # substitute fields keys with the appropriate placeholders
        ruler_fieldsmap = DefaultDict(fn1=card)
        ruler_fieldsmap.update({"fn%d" % k: field for k, field in fields.items()})
        lines = fields_to_card(ruler_fieldsmap, leading="$", sep="▕")
        if not doprint:
            return lines
        print("\n".join(lines))
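
    # Usage sketch (`card` being any subclass instance defining TABLE):
    #   card.help(doprint=True)   # prints a "$"-prefixed ruler
    #   lines = card.help()       # or collect the ruler lines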

    def _check_default(self, key, value, default=None):
        if default is None:
            default = self.DEFAULTS
        if key in default and value == default[key]:
            value = None
        return value

    def _to_nastran(self, ids=frozenset(), ruler=False, additional_ruler_lines=()):
        """
        common code for all `to_nastran` methods
        """
        # create a DefaultDict mapping placeholders "fnXX" to field names, eg.
        # {'fn1': 'CORD2R', 'fn2': 'CID', ..., 'fn12': 'C1', 'fn13': 'C2', 'fn14': 'C3'}
        fieldsmap = DefaultDict(fn1=self.card)
        try:
            fieldsmap.update({"fn%d" % k: v for k, v in self.fields.items()})
        except Exception:
            logging.critical("cannot build fields map from %s", self.fields)
            raise
        lines = []
        # creating a ruler is as simple as:
        if ruler:
            lines += self.help()
            if additional_ruler_lines:
                lines += additional_ruler_lines
        # ---------------------------------------------------------------
        # associate carddata['main'] entries to fields
        field = self.fields[2]
        all_ids = self.carddata["main"][field]  # eg. [eid1, eid2, ...]
        ids = set(ids) & set(all_ids)
        if not ids:
            ids = set(all_ids)
        # associate card number in the list to card ID
        full_array = self.array
        _isin = np.isin(full_array[field], list(ids))
        array = full_array[_isin]
        ixs = np.flatnonzero(_isin)
        return fieldsmap, lines, zip(ixs, array[field])

    def _newline(self, fieldsmap, ix, with_default):
        # build a new row
        data = DefaultDict(fn1=self.card)
        for fieldcode, fieldname in fieldsmap.items():
            if fieldcode == "fn1" or fieldname == "+":
                continue
            value = replace_nan(self.carddata["main"][fieldname][ix])
            if not with_default:
                value = self._check_default(fieldname, value, default=self.DEFAULTS)
            data[fieldcode] = trans(value)
        return data

    def to_nastran(
        self, ids=frozenset(), ruler=False, with_default=True, comments=None
    ):
        """
        Convert a collection of cards to 8-chars NASTRAN format.
        SimpleCard to_nastran()
        """
        fieldsmap, lines, ixid = self._to_nastran(ids=ids, ruler=ruler)
        for ix, id in ixid:
            # initiate a new line and populate a data dict with fixed fields
            try:
                data = self._newline(fieldsmap, ix, with_default)
            except ValueError:
                logging.critical(f"cannot make new line for {self.name} {ix}")
                raise
            if comments and hasattr(self, "COMMENTS_KEY"):
                comment = comments.get(self.COMMENTS_KEY, dict()).get(id, dict())
                if comment:
                    lines.append(f"$ Femap {self.COMMENTS_KEY} {id} : {comment}")
            lines += fields_to_card(data)
        return lines
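
    # Usage sketch (IDs are illustrative):
    #   lines = card.to_nastran(ids={1001, 1002}, ruler=True)
    #   print("\n".join(lines))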

    def merge(self, other):
        """attempt to provide a universal merge() method working for
        SimpleCard, SimpleCyclingCard and ComplexCard
        """
        # --------------------------------------------------------------------
        # merge all fields but the extended data IDs
        assert self.name == other.name
        # count for later checks
        nb_ini = len(self)
        nb_to_add = len(other)
        for k, v in other.carddata["main"].items():
            # skip reference fields (the "<dataname>ID" columns handled below)
            dataname, *_ = k.split("ID")
            if dataname in self.carddata:
                continue
            self.carddata["main"][k] += v
        # --------------------------------------------------------------------
        # merge extended data
        for dataname in self.carddata.keys():
            if dataname == "main":
                continue
            colname = "%sID" % dataname
            offset = max(self.carddata["main"][colname]) + 1
            other._offset_repeated_ix(colname, offset=offset)
            sd = self.carddata[dataname]
            od = other.carddata[dataname]
            sd += od
            self.carddata["main"][colname] += other.carddata["main"][colname]
        # --------------------------------------------------------------------
        # deduplicate
        self.dedup()

    def dedup(self):
        """
        parse additional data to find and remove duplicates
        """
        for dataname in self.carddata.keys():
            if dataname == "main":
                continue
            colname = "%sID" % dataname
            data = self.carddata[dataname]
            dedup_data = []  # this will override self.carddata[dataname]
            for ix, dataset in enumerate(data):
                if dataset not in dedup_data:
                    dedup_data.append(dataset)
                    continue
                # dataset is duplicated; change referencing ID
                already_ix = dedup_data.index(dataset)
                self.carddata["main"][colname][ix] = already_ix
            self.carddata[dataname] = dedup_data

    def _offset_repeated_ix(self, colname, offset):
        """offset indices of repeated data in both `main` and
        `self.REPEATED_DATA_NAME`
        """
        old = self.carddata["main"][colname]
        new = [(ix + offset) for ix in old]
        old2new = dict(zip(old, new))
        self.carddata["main"][colname] = [
            old2new[ix] for ix in self.carddata["main"][colname]
        ]

    def append_sparams(self, sparams, **kwargs):
        """fields are provided as a `sparams` dictionary {header: value}"""
        _sparams = self.DEFAULTS.copy()
        _sparams.update(sparams)
        # reorder fields
        _sparams = {k: _sparams[k] for k in self.fields.values()}
        # if hasattr(self, "clean_sparams"):
        #     self.clean_params(_sparams)
        for header, value in _sparams.items():
            if value is None:
                value = self.DEFAULTS.get(header, value)
            self.carddata["main"][header].append(value)
            # route multiple types fields
            if header in self.MULT_TYPES_FIELDS:
                alternatives = self.MULT_TYPES_FIELDS[header].copy()
                try:
                    # a value (other than None) has been found;
                    # we can therefore guess the field name
                    routed_header = alternatives.pop(type(value))
                    self.carddata["main"][routed_header].append(value)
                except KeyError:
                    pass
                # only the alternate fields remain
                for typ, routed_header in alternatives.items():
                    self.carddata["main"][routed_header].append(None)
        # =====================================================================
        # repeated fields
        # LOAD:  {'starts@': 4, 'fields': ['S', 'L']}
        # PCOMP: {'starts@': 12, 'fields': ['MID', 'T', 'THETA', 'SOUT']}
        # RBE2:  {'starts@': 5, 'fields': ['GM'], 'floating': ['ALPHA']}
        # RBE3:  {'starts@': 33, 'fields': ['GM', 'CM'], 'floating': ['"ALPHA"', 'ALPHA']}
        # SPC1:  {'starts@': 4, 'fields': ['G']}
        # =====================================================================
        if not kwargs:
            return
        repeated_fields_specs = set(self.fields_info()["repeated"])
        # ---------------------------------------------------------------------
        # ensure we do not have floating repetitions
        # (for now, only simple repetitions are handled)
        if self.repeated and "floating" in self.repeated:
            raise NotImplementedError("floating repetitions are not handled for now")
        # ---------------------------------------------------------------------
        # repeated fields MUST end with 'i'.
        # TODO: discard this test?
        for repeated_input in kwargs:
            if not repeated_input.endswith("i"):
                raise ValueError(f"repeated field {repeated_input} not understood")
        # ---------------------------------------------------------------------
        # get rid of trailing "i"
        data = {k[:-1]: v for k, v in kwargs.items()}  # 'Gi' -> 'G'
        # and transform a dict of lists into a list of dicts
        data = transform_dict_of_list(data)
        # ---------------------------------------------------------------------
        # append data
        self.carddata["main"][self.REPEATED_DATA_NAME + "ID"].append(
            self.nb_items() - 1
        )
        self.carddata[self.REPEATED_DATA_NAME].append(data)
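
    # Usage sketch for a SPC1-like card (repeated field 'Gi', cf. the summary
    # above; field names and values are illustrative):
    #   card.append_sparams({"SID": 1, "C": 123456}, Gi=[10, 20, 30])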

    def nb_items(self):
        try:
            return len(self.carddata["main"][next(iter(self.carddata["main"].keys()))])
        except StopIteration:
            return 0

    def append_fields_list(self, fields):
        """fields are provided as text, without the card name"""
        # insert TWO dummy fields so that indices in the fields list match
        # NASTRAN field numbering
        fields = ["_", "_"] + fields
        # ==================================================================
        # read fixed fields
        # ==================================================================
        if not hasattr(self.fields, "items"):
            raise AttributeError("card %s has no fields items" % self)
        kwargs = {}
        for ix, header in self.fields.items():
            try:
                value = fields[ix]
            except IndexError:
                value = None
            kwargs[header] = value
        self.append_sparams(kwargs)
        return fields

    def append_legacy(self, fields):
        """fields are provided as text, without the card name"""
        # insert TWO dummy fields so that indices in the fields list match
        # NASTRAN field numbering
        fields = ["_", "_"] + fields
        # ==================================================================
        # read fixed fields
        # ==================================================================
        if not hasattr(self.fields, "items"):
            raise AttributeError("card %s has no fields items" % self)
        for ix, header in self.fields.items():
            try:
                value = fields[ix]
            except IndexError:
                value = None
            if value is None:
                value = self.DEFAULTS.get(header, value)
            self.carddata["main"][header].append(value)
            # route multiple types fields
            if header in self.MULT_TYPES_FIELDS:
                alternatives = self.MULT_TYPES_FIELDS[header].copy()
                routed_header = alternatives.pop(type(value))
                self.carddata["main"][routed_header].append(value)
                # only the alternate fields remain
                for typ, routed_header in alternatives.items():
                    self.carddata["main"][routed_header].append(None)
        return fields

    def parse(self, txt, debug=False):
        """simple parser for NASTRAN small-fields bulk entries.

        ⚠ Only to be used for testing purposes! ⚠
        """
        lines = [l.strip() for l in txt.split("\n") if l.strip()]
        if debug:
            import pprint

            pprint.pprint(lines)
        fields = []
        for l in lines:
            if l.startswith("$"):
                continue
            for fieldID in range(10):
                sub = slice(fieldID * 8, fieldID * 8 + 8)
                fields.append(autoconvert(l[sub].strip()))
        # trim trailing None fields
        while fields and fields[-1] is None:
            fields.pop(-1)
        if debug:
            print("fields:\n", fields)
        self.append_fields_list(fields[1:])
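
    # Usage sketch (testing only; `GRID` stands for any subclass whose TABLE
    # defines the fields, and the 8-chars bulk line is illustrative):
    #   card = GRID()
    #   card.parse("GRID           1       0     0.0     0.0     0.0")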

    def export_data(self):
        """export data for serialization"""
        res = deepcopy(self.carddata)
        res["main"] = dict(res["main"])  # defaultdict -> dict
        res["card"] = self.name
        return res

    def resume(self, data):
        """resume a card from exported data,
        such that `card.resume(card.export_data())` leaves the card unchanged
        """
        self._reset()
        # data = deepcopy(data)
        cardname = data.pop("card")
        if cardname != self.card:
            raise ValueError(
                f'provided cardname "{cardname}" differs from "{self.card}"'
            )
        for ksrc, vsrc in data.items():
            # keep defaultdict for 'main' data
            if ksrc in self.carddata and hasattr(vsrc, "update"):
                self.carddata[ksrc].update(vsrc)
            else:
                self.carddata[ksrc] = vsrc
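
    # Serialization round-trip sketch: rebuilding a card from its exported
    # data should yield an equal card:
    #   data = card.export_data()
    #   clone = card.__class__(data=data)
    #   assert clone == card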

    def __getstate__(self):
        return self.__dict__
        # return {k: v for k, v in self.__dict__.items() if not k.startswith('_cached_')}

    @property
    def array(self):
        """return carddata['main'] as a numpy structured array"""
        return dic2array(self.carddata["main"])

    def ids(self):
        return set(self.carddata["main"][self.XID_FIELDNAME])

    def query_id(self, value, asview=False, with_loc=False):
        """
        return the matching rows; if `with_loc` is True,
        return a tuple (rows, row numbers)
        """
        if isinstance(value, int):
            loc = np.where(self.array[self.XID_FIELDNAME] == value)
            value = self.array[loc]
        else:
            # assume a list has been passed
            mask = np.isin(self.array[self.XID_FIELDNAME], list(value))
            value = self.array[np.where(mask)]
            if with_loc:
                loc = [i for i, v in enumerate(mask) if v]
        if asview:
            value = value.view(asview)
        if with_loc:
            return value, loc
        return value

    def query_id_fast(self, value, columns=None, asview=False):
        """
        fast version of `query_id`, working on plain lists;
        return a dict {column: values} (a bare numpy array if `asview`)
        """
        loc = self.query_loc(value)
        if not columns:
            subset = {k: [v[i] for i in loc] for k, v in self.carddata["main"].items()}
        else:
            subset = {k: [self.carddata["main"][k][i] for i in loc] for k in columns}
        if asview:
            subset = np.array(list(subset.values())).T
            if len(loc) == 1:
                return subset[0]
        elif len(loc) == 1:
            # single ID requested: return scalars instead of one-item lists
            subset = {k: v[0] for k, v in subset.items()}
        return subset

    def query_loc(self, value):
        """
        return locations (row numbers) in carddata lists
        """
        ids = self.carddata["main"][self.XID_FIELDNAME]
        if isinstance(value, int):
            return [ids.index(value)]
        # assume a list has been passed
        return [ids.index(v) for v in value]
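
    # Usage sketch (IDs and row numbers are illustrative):
    #   card.query_loc(1001)          # -> [0]
    #   card.query_loc([1001, 1005])  # -> [0, 4]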

    def _extract_array(self, index, columns, values_type="float64", index_type="int"):
        """transform a numpy structured array into
        {'data': values, 'index': index, 'columns': columns}
        """
        sarr = self.array
        values = rfn.structured_to_unstructured(sarr[columns], dtype=values_type)
        index = rfn.structured_to_unstructured(
            sarr[[index]], dtype=index_type
        ).flatten()
        return {"data": values, "index": index, "columns": columns}

    def subset(self, eids=None):
        """return a card object restricted to the selected elements"""
        if self.type != ELEMENT:
            raise AttributeError("subset")
        array = self.array
        if eids is None:
            eids = array[self.EID_FIELDNAME].tolist()
        # --------------------------------------------------------------------
        # convert eids to numpy indices
        ixs = np.where(
            np.isin(np.array(self.carddata["main"][self.EID_FIELDNAME]), eids)
        )[0]
        _array = array[ixs]
        newdata = {"main": array2dic(_array, astype=list), "card": self.name}
        for k, vs in self.carddata.items():
            if k == "main":
                continue
            # ----------------------------------------------------------------
            # restrict additional data to relevant eids
            ix = newdata["main"][f"{k}ID"]
            vs = [vs[i] for i in ix]
            newdata[k] = vs
            newdata["main"][f"{k}ID"] = list(range(len(ix)))
        obj = self.__class__(data=newdata)
        return obj
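
    # Usage sketch (element IDs are illustrative):
    #   sub = card.subset(eids=[1001, 1002])  # new card restricted to two elements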

    # ========================================================================
    # cachable attributes previously set by decorators
    # ========================================================================
    @cached_property
    def thk(self):
        """return elements' thicknesses as
        {'data': mean thicknesses, 'index': eids, 'name': 'thk'}
        """
        if self.type != ELEMENT or self.dim != "2d" or self.THK_PATTERN is None:
            raise AttributeError("thk")
        # get thicknesses columns
        thk_cols = [
            col for col in self.carddata["main"] if self.THK_PATTERN.match(col)
        ]
        _data = self._extract_array("EID", thk_cols)
        _data["data"] = _data["data"].mean(axis=1)
        _data["name"] = "thk"
        _data.pop("columns")
        return _data

    @cached_property
    def gids_header(self):
        """return a list of Grid IDs headers (eg. ['G1', 'G2', 'G3'])"""
        return self._gids_header()

    @classmethod
    @lru_cache
    def _gids_header(cls):
        """return a list of Grid IDs headers (eg. ['G1', 'G2', 'G3'])"""
        if cls.type != ELEMENT:
            raise AttributeError("gids_header")
        gids_cols = [
            col for col in cls.fields_info()["mandatory"] if cls.GIDS_PATTERN.match(col)
        ]
        return gids_cols

    @cached_property
    def mids_header(self):
        """return a list of Material IDs headers (eg. ['MID1', 'MID2'])"""
        if self.type != PROPERTY:
            raise AttributeError("mids_header")
        mids_cols = [
            col for col in self.carddata["main"] if self.MATS_PATTERN.match(col)
        ]
        return mids_cols

    @cached_property
    def pid2mids(self):
        """return a mapping {PID: frozenset((MIDS))}

        this basic property is OK for basic properties like PROD, PSHELL, *etc.*
        but is not aware of PCOMP, for example. It therefore needs to be
        overridden in those latter cases
        """
        if self.type != PROPERTY:
            raise AttributeError("pid2mids")
        ret = {}
        for ix, pid in enumerate(self.carddata["main"]["PID"]):
            ret[pid] = set()
            for midh in self.mids_header:
                mid = self.carddata["main"][midh][ix]
                if mid:  # mid can be `None`; skip this case
                    ret[pid].add(mid)
            ret[pid] = frozenset(ret[pid])
        return ret

    @cached_property
    def _eid2gids_ordered(self):
        if self.type != ELEMENT:
            raise AttributeError("_eid2gids_ordered")
        gids_cols = self.gids_header
        eid2gids = {}
        for ix, eid in enumerate(self.carddata["main"][self.EID_FIELDNAME]):
            eid2gids[eid] = [self.carddata["main"][c][ix] for c in gids_cols]
            if hasattr(self, "eid2gids_complement"):
                nb_meaningful = len(self.gids_header)
                if nb_meaningful == 1:
                    msg = "Only the first ID is meaningful"
                else:
                    msg = f"Only the first {nb_meaningful} IDs are meaningful"
                logging.info(msg)
                eid2gids[eid] += list(self.eid2gids_complement(eid=eid, ix=ix))
        return eid2gids

    @cached_property
    def _eid2gids(self):
        if self.type != ELEMENT:
            raise AttributeError("_eid2gids")
        gids_cols = self.gids_header
        eid2gids = defaultdict(set)
        for ix, eid in enumerate(self.carddata["main"][self.EID_FIELDNAME]):
            eid2gids[eid] = set(self.carddata["main"][c][ix] for c in gids_cols)
            if hasattr(self, "eid2gids_complement"):
                eid2gids[eid] |= self.eid2gids_complement(eid=eid, ix=ix)
        return dict(eid2gids)


class RepeatedRowsCard(SimpleCard):
    """Mother Card for cards defining **ONE SINGLE** block of repeated rows
    (eg. PBEAM stations)
    """

    # ------------------------------------------------------------------------
    # tables
    TABLE = None
    REPEATED_ROWS_TABLE = None
    TRAILING_ROWS_TABLE = None
    # ------------------------------------------------------------------------
    # more stuff
    REPEATED_ROWS_NAME = None  # eg. 'stations' for PBEAM
    TRIGGER_REPEATED_ON = str
    SKIP_NEXT_ROW_ON = ()  # e.g. for PBEAM: ('SO', ('YESA', 'NO'))

    def __init__(self, name=None, data=None):
        super().__init__(name=name, data=data)
        self.REPEATED_ROWS_NAME = "{}_{}".format(
            self.name.lower(), self.REPEATED_ROWS_NAME.lower()
        )
        # assert self.repeated is not None
        # set-up an additional container for repeated rows
        self.carddata[self.REPEATED_ROWS_NAME] = []

    def append_fields_list(self, fields):
        fields = super().append_fields_list(fields)  # append regular fields
        # ==================================================================
        # read repeated rows
        # ==================================================================
        # card_subset: one single PER card
        # example for PBEAM:
        #   {'SO': 'YES', 'X/XB': 0.3, etc...}
        card_subset = dict()
        # card_set: several (at least one) PER card
        # example for PBEAM:
        #   [{'SO': 'YES', 'X/XB': 0.3, etc...},
        #    {'SO': 'YES', 'X/XB': 0.7, etc...}]
        card_set = []
        # --------------------------------------------------------------------
        # analyse REPEATED_ROWS_TABLE and TRAILING_ROWS_TABLE
        _, repfields, _ = parse_table(self.REPEATED_ROWS_TABLE)
        _, trailfields, _ = parse_table(self.TRAILING_ROWS_TABLE)
        # ====================================================================
        # assuming that at least ONE block of repeated rows exists
        # this is True for PBEAM, see [ref.] remark #4
        # ====================================================================
        remaining_fields = fields[get_field(max(self.fields.keys()) + 1) :]
        remaining_fields = ["_", "_"] + remaining_fields
        # nbrepeated_rows = nbrows_by_fields(repfields)
        # offset = nbrepeated_rows * 10  # offset to apply each time we parse a block
        # --------------------------------------------------------------------
        # if some rows need to be skipped...
        _rev_repfields = {
            fieldname: fieldID for fieldID, fieldname in repfields.items()
        }
        try:
            _skipped_fieldID = _rev_repfields[self.SKIP_NEXT_ROW_ON[0]]
            _skipped_on = self.SKIP_NEXT_ROW_ON[1]
        except (IndexError, KeyError):
            _skipped_fieldID, _skipped_on = None, None
        while isinstance(remaining_fields[2], self.TRIGGER_REPEATED_ON):
            card_subset = dict()
            # number of rows to parse using repfields
            _skipped_field = remaining_fields[_skipped_fieldID]
            if _skipped_field in _skipped_on:  # eg. if SO in ('YESA', 'NO'):
                block_length = 10
            else:
                block_length = 20
            block = remaining_fields[:block_length]
            remaining_fields = remaining_fields[block_length:]
            # build card_subset
            for ix, header in repfields.items():
                try:
                    card_subset[header] = block[ix]
                except IndexError:
                    card_subset[header] = None
            card_set.append(card_subset)
        card_set_ix = len(self.carddata[self.REPEATED_ROWS_NAME])
        self.carddata["main"][self.REPEATED_ROWS_NAME + "ID"].append(card_set_ix)
        self.carddata[self.REPEATED_ROWS_NAME].append(card_set)
        # ====================================================================
        # trailing fields are for the TRAILING_ROWS_TABLE
        # ====================================================================
        if remaining_fields:
            for ix, header in trailfields.items():
                try:
                    self.carddata["main"][header].append(remaining_fields[ix])
                except IndexError:
                    self.carddata["main"][header].append(None)
        return fields

    def to_nastran(
        self, ids=frozenset(), ruler=False, with_default=True, comments=None
    ):
        """
        Convert a collection of cards to 8-chars NASTRAN format.
        RepeatedRowsCard to_nastran()
        """
        ffieldsmap, lines, ixid = self._to_nastran(
            ids=ids, ruler=ruler, additional_ruler_lines=["$ + repeated fields..."]
        )
        for ix, id in ixid:
            # initiate a new line and populate a data dict with fixed fields
            try:
                data = self._newline(ffieldsmap, ix, with_default)
            except ValueError:
                logging.critical(f"cannot make new line for {self.name} {ix}")
                raise
            # ----------------------------------------------------------------
            # repeated rows
            _skipped_fieldname, _skipped_on = self.SKIP_NEXT_ROW_ON
            card_set = self.carddata[self.REPEATED_ROWS_NAME][ix]
            _offset = get_field(max([int(k[2:]) for k in data.keys()]) + 1) - 2
            for i, subset in enumerate(card_set):
                if (
                    subset[_skipped_fieldname] in _skipped_on
                ):  # eg. if subset['SO'] in ('YESA', 'NO'):
                    _, repfields, _ = parse_table(self.REPEATED_ROWS_TABLE, linesno=[0])
                else:
                    _, repfields, _ = parse_table(self.REPEATED_ROWS_TABLE)
                offset = _offset + i * _offset
                fieldsmap = DefaultDict()
                fieldsmap.update(
                    {"fn%d" % (k + offset): v for k, v in repfields.items()}
                )
                for fieldcode, fieldname in fieldsmap.items():
                    if fieldcode == "+":
                        continue
                    data[fieldcode] = trans(subset[fieldname])
            # ----------------------------------------------------------------
            # trailing rows
            _, trailfields, _ = parse_table(self.TRAILING_ROWS_TABLE)
            nextfield = get_field(max([int(k[2:]) for k in data.keys()]) + 1) - 2
            trailfields = {k + nextfield: v for k, v in trailfields.items()}
            for fieldcode, fieldname in trailfields.items():
                if fieldcode == "+":
                    continue
                data["fn%d" % fieldcode] = trans(self.carddata["main"][fieldname][ix])
            lines += fields_to_card(data)
        return lines


class SimpleCyclingCard(SimpleCard):
    """Mother Card for cards defining **ONE SINGLE** '-etc.-'"""

    REPEATED_DATA_NAME = None  # eg. 'LAYUP'
    TABLE = None
    REPEATED_DEFAULTS = {}

    def __init__(self, name=None, data=None):
        super().__init__(name=name, data=data)
        if data is None:
            self.REPEATED_DATA_NAME = "{}_{}".format(
                self.name.lower(), self.REPEATED_DATA_NAME.lower()
            )
            # assert self.repeated is not None
            # set-up an additional container for cycling data
            self.carddata[self.REPEATED_DATA_NAME] = []

    def to_nastran(
        self, ids=frozenset(), ruler=False, with_default=True, comments=None
    ):
        """
        Convert a collection of cards to 8-chars NASTRAN format.
        SimpleCyclingCard to_nastran()
        """
        fieldsmap, lines, ixid = self._to_nastran(ids=ids, ruler=ruler)
        for ix, id in ixid:
            # initiate a new line and populate a data dict with fixed fields
            try:
                data = self._newline(fieldsmap, ix, with_default)
            except ValueError:
                logging.critical(
                    f"cannot make new line for {self.name} {ix} {id=} {fieldsmap=} {with_default=}"
                )
                raise
            # ----------------------------------------------------------------
            # repeated data
            rdata_id = self.carddata["main"][self.REPEATED_DATA_NAME + "ID"][ix]
            rdata = self.carddata[self.REPEATED_DATA_NAME][rdata_id]
            # reset field pointer
            field_pnt = self.repeated["starts@"]
            # iterate over repeated data for the current line
            for id, dataset in enumerate(rdata):
                for fieldname in self.repeated["fields"]:
                    field_pnt = get_field(field_pnt)
                    fieldcode = "fn%d" % field_pnt
                    value = dataset[fieldname]
                    if not with_default:
                        value = self._check_default(
                            fieldname, value, default=self.REPEATED_DEFAULTS
                        )
                    data[fieldcode] = trans(value)
                    field_pnt = field_pnt + 1
            # ----------------------------------------------------------------
            # floating (trailing) data
            floating_fields = self.repeated.get("floating", [])
            for fieldname in floating_fields:
                field_pnt = get_field(field_pnt)
                fieldcode = "fn%d" % field_pnt
                value = self.carddata["main"][fieldname][ix]
                if not with_default:
                    value = self._check_default(fieldname, value, default=self.DEFAULTS)
                data[fieldcode] = trans(value)
                field_pnt = field_pnt + 1
            lines += fields_to_card(data)
        return lines

    def append_fields_list(self, fields):
        fields = super().append_fields_list(fields)  # append regular fields
        # ==================================================================
        # read cycling fields, if any...
        # ==================================================================
        # card_subset: one single PER card
        # example for PCOMP:
        #   {'MID': 2, 'SOUT': 'YES', 'T': 0.023622, 'THETA': 0.0}
        card_subset = dict()
        # card_set: several (at least one) PER card
        # example for PCOMP:
        #   [{'MID': 2, 'SOUT': 'YES', 'T': 0.023622, 'THETA': 0.0},
        #    {'MID': 3, 'SOUT': 'YES', 'T': 0.452756, 'THETA': 0.0},
        #    {'MID': 2, 'SOUT': 'YES', 'T': 0.023622, 'THETA': 0.0}]
        card_set = []
        # --------------------------------------------------------------------
        # some useful intermediate variables
        field_nb = self.repeated["starts@"] - 1  # starting field index
        parsed_fields_counter = 0
        # --------------------------------------------------------------
        # we should stop on float if and only if -etc.- is not the last
        # field
        stop_cycling_on_float = "floating" in self.repeated
        while True:
            # ----------------------------------------------------------
            # check if end of data to parse is reached
            try:
                # parse next field (if any)
                field_nb += 1
                value = fields[field_nb]
            except IndexError:
                # end of job. Well done!
                break
            if stop_cycling_on_float and isinstance(value, float):
                break
            # ----------------------------------------------------------
            # skip continuation fields:
            if field_nb % 10 == 0 or (field_nb - 1) % 10 == 0:
                continue
            # ----------------------------------------------------------
            # get the field name that will become the key for the `value`
            # already collected
            ix = parsed_fields_counter % len(self.repeated["fields"])
            if ix == 0 and value is None:
                break
            fieldname = self.repeated["fields"][ix]
            parsed_fields_counter += 1
            # ----------------------------------------------------------
            # check default value for repeated stuff
            if value is None:
                value = self.REPEATED_DEFAULTS.get(fieldname, value)
            if fieldname in card_subset:
                # start a new `card_subset`, since the same key showed up twice
                card_set.append(card_subset.copy())
                card_subset = {fieldname: value}
            else:
                card_subset[fieldname] = value
        # --------------------------------------------------------------------
        # append last card_subset to set container
        card_set.append(card_subset)
        # check if `card_set` already exists
        container = self.carddata[self.REPEATED_DATA_NAME]
        for i, data in enumerate(container):
            if data == card_set:
                self.carddata["main"][self.REPEATED_DATA_NAME + "ID"].append(i)
                break
        else:
            # nothing existing found
            container.append(card_set)
            self.carddata["main"][self.REPEATED_DATA_NAME + "ID"].append(
                len(container) - 1
            )
        # --------------------------------------------------------------------
        # trailing floating fields (if any)
        floating_fields = self.repeated.get("floating", [])
        for i, fieldname in enumerate(floating_fields):
            try:
                self.carddata["main"][fieldname].append(fields[field_nb + i])
            except IndexError:
                # the field is absent: it MUST have a default
                self.carddata["main"][fieldname].append(self.DEFAULTS[fieldname])


class ComplexCard(SimpleCard):
    """Mother Card for specific processing (eg. "PBUSH" and its optional flags)"""

    TABLE = None

    def __init__(self, name=None, data=None):
        super().__init__(name=name, data=data)

    def append_checkin(self, fields):
        """hook triggered right before `append`"""
        return fields

    def append_checkout(self, fields):
        """hook triggered right after `append`"""
        return fields

    def append_fields_list(self, fields):
        fields = self.append_checkin(fields)
        super().append_fields_list(fields)
        # keep the parent contract: hand the (possibly amended) fields back
        return self.append_checkout(fields)