def load_hdf5_file(self, hdf5_file, encoding):
    from pyNastran.utils.dict_to_h5py import _cast
    keys = list(hdf5_file.keys())
    for key in keys:
        if key in ['_begin_count', 'debug', 'write_begin_bulk']:
            # scalars
            value = _cast(hdf5_file[key])
            setattr(self, key, value)
        elif key in ['reject_lines', 'begin_bulk', 'lines', 'output_lines']:
            # lists of strings; decoded here but not stored on the object
            value_bytes = _cast(hdf5_file[key]).tolist()
            value_str = [line.decode(encoding) for line in value_bytes]
        elif key == 'subcases':
            subcase_group = hdf5_file[key]
            # use separate names so the outer loop's key/keys are not shadowed
            subcase_keys = list(subcase_group.keys())
            subcase_keys.remove('keys')
            subcases = {}
            for subcase_key in subcase_keys:
                sub_group = subcase_group[subcase_key]
                ikey = int(subcase_key)
                subcase = Subcase(id=ikey)
                subcase.log = self.log
                subcase.load_hdf5_file(sub_group, encoding)
                subcases[ikey] = subcase
                str(subcase)
            self.subcases = subcases
            #print(value_bytes)
        else:  # pragma: no cover
            self.log.warning('skipping CaseControlDeck/%s' % key)
            raise RuntimeError('error loading hdf5 CaseControlDeck/%s' % key)
def load_hdf5_file(self, hdf5_file, encoding: str) -> None:
    """loads the case control deck section from a hdf5 file"""
    from pyNastran.utils.dict_to_h5py import _cast
    keys = list(hdf5_file.keys())
    for key in keys:
        if key in ['_begin_count', 'debug', 'write_begin_bulk']:
            # scalars
            value = _cast(hdf5_file[key])
            setattr(self, key, value)
        elif key in ['reject_lines', 'begin_bulk', 'lines', 'output_lines']:
            # lists of strings
            unused_lines_str = decode_lines(_cast(hdf5_file[key]), encoding)
        elif key == 'subcases':
            subcase_group = hdf5_file[key]
            keys = list(subcase_group.keys())
            keys.remove('keys')
            subcases = {}
            for key2 in keys:
                sub_group = subcase_group[key2]
                ikey2 = int(key2)
                subcase = Subcase(id=ikey2)
                subcase.log = self.log
                subcase.load_hdf5_file(sub_group, encoding)
                subcases[ikey2] = subcase
                str(subcase)
            self.subcases = subcases
            #print(value_bytes)
        else:  # pragma: no cover
            self.log.warning('skipping CaseControlDeck/%s' % key)
            raise RuntimeError('error loading hdf5 CaseControlDeck/%s' % key)
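# NOTE: decode_lines() is called above but not defined in this excerpt.  A
# minimal sketch of what such a helper presumably does (turn an ndarray/list of
# bytes rows into a list of str) is given below; the real pyNastran helper may
# differ in name, location, and edge-case handling.
import numpy as np

def decode_lines(lines_bytes, encoding):
    """hypothetical helper: converts an ndarray/list of bytes into a list of str"""
    if isinstance(lines_bytes, np.ndarray):
        lines_bytes = lines_bytes.tolist()
    return [line.decode(encoding) if isinstance(line, bytes) else line
            for line in lines_bytes]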
def load_hdf5(cls, h5_file, encoding):
    from pyNastran.utils.dict_to_h5py import _cast
    value = h5_file['value']
    try:
        # the stored value is expected to be bytes -> decode to str
        value2 = _cast(value).decode(encoding)
    except AttributeError:
        # not bytes (no .decode); report what was actually stored and re-raise
        print(cls.type, _cast(value))
        raise
    return cls(value2), []
def load_hdf5(cls, h5_file, encoding):
    from pyNastran.utils.dict_to_h5py import _cast
    value = h5_file['value']
    casted_value = _cast(value)
    if isinstance(casted_value, int):
        # already a native integer; nothing to decode
        value2 = casted_value
    else:
        # bytes -> str
        value2 = casted_value.decode(encoding)  # if isinstance(value, bytes) else value
    return cls(value2), []
def load_hdf5(self, subgroup, encoding):
    """loads EXTSEOUT from an h5py HDF5 file"""
    from pyNastran.utils.dict_to_h5py import _cast
    for key in subgroup.keys():
        subgroupi = subgroup[key]
        if key == 'data':
            data_keys = _cast(subgroupi['keys'])
            data_keys = decode_bytes_list(data_keys, encoding)

            keys_none = subgroupi['keys_none']
            keys_none = decode_bytes_list(keys_none, encoding)

            data_values = _cast(subgroupi['values'])
            data_values = decode_bytes_list(data_values, encoding)

            # keys whose value is None are stored separately (HDF5 cannot
            # store None); re-attach them here with None values
            keys = data_keys + keys_none
            values = data_values + [None] * len(keys_none)
            data = [(key, value) for (key, value) in zip(keys, values)]
        else:
            raise NotImplementedError(key)
    return EXTSEOUT(data), []
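# NOTE: a small illustration (with made-up describer names and values) of the
# keys/keys_none/values layout handled above.  Entries whose value is None go
# into a separate 'keys_none' dataset and are re-attached on load:
data_keys = ['EXTID', 'MATOP4']     # would come decoded from subgroupi['keys']
data_values = ['42', 'MATRICES']    # would come decoded from subgroupi['values']
keys_none = ['STIFFNESS']           # would come decoded from subgroupi['keys_none']

keys = data_keys + keys_none
values = data_values + [None] * len(keys_none)
data = list(zip(keys, values))
# data -> [('EXTID', '42'), ('MATOP4', 'MATRICES'), ('STIFFNESS', None)]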
def load_hdf5(cls, h5_file, encoding):
    from pyNastran.utils.dict_to_h5py import _cast
    value = h5_file['value']
    # the stored value is already a usable scalar (e.g. an integer), so the
    # encoding argument is not needed here
    value2 = _cast(value)
    return cls(value2), []
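# NOTE: the variants above differ only in how the stored scalar is converted
# back to Python.  A minimal, standalone h5py round-trip (not pyNastran code;
# the file name and values are made up) showing why string data needs the
# .decode(encoding) step while integers do not:
import h5py

with h5py.File('scratch.h5', 'w') as h5_file:
    h5_file['value'] = 'SORT1'   # stored as a variable-length string
    h5_file['count'] = 3         # stored as an integer scalar

with h5py.File('scratch.h5', 'r') as h5_file:
    raw_str = h5_file['value'][()]   # typically read back as bytes, e.g. b'SORT1'
    raw_int = h5_file['count'][()]   # read back as a numpy integer
    print(raw_str.decode('utf-8'), int(raw_int))
# pyNastran's _cast() presumably performs a similar dataset[()]-style extraction.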