Example #1
import numpy as np
from pyNastran.utils import object_attributes
def _eigenvalue_header(obj, header, itime, ntimes, dt):
    if obj.nonlinear_factor not in (None, np.nan):
        name = obj.data_code['name']
        if isinstance(dt, int):
            dt_line = ' %14s = %i\n' % (name.upper(), dt)
        elif isinstance(dt, (float, np.float32)):
            dt_line = ' %14s = %12.5E\n' % (name, dt)
        else:
            dt_line = ' %14s = %12.5E %12.5Ej\n' % (name, dt.real, dt.imag)
        header[1] = dt_line
        codes = getattr(obj, name + 's')
        if not len(codes) == ntimes:
            msg = '%ss in %s the wrong size; ntimes=%s; %ss=%s\n' % (
                name, obj.__class__.__name__, ntimes, name, codes)
            atts = object_attributes(obj)
            msg += 'names=%s\n' % atts
            msg += 'data_names=%s\n' % obj.data_names
            raise IndexError(msg)

        if hasattr(obj, 'eigr'):
            try:
                eigenvalue_real = obj.eigrs[itime]
            except IndexError:
                msg = 'eigrs[%s] not found; ntimes=%s; eigrs=%s' % (itime, ntimes, obj.eigrs)
                msg += 'names=%s' % object_attributes(obj)
                raise IndexError(msg)
            eigr_line = ' %14s = %12.6E\n' % ('EIGENVALUE', eigenvalue_real)
            header[2] = eigr_line
    return header
Example #2
from numpy import float32
from pyNastran.utils import object_attributes
def _eigenvalue_header(obj, header, itime, ntimes, dt):
    if obj.nonlinear_factor is not None:
        name = obj.data_code['name']
        if isinstance(dt, int):
            dt_line = ' %14s = %i\n' % (name.upper(), dt)
        elif isinstance(dt, (float, float32)):
            dt_line = ' %14s = %12.5E\n' % (name, dt)
        else:
            dt_line = ' %14s = %12.5E %12.5Ej\n' % (name, dt.real, dt.imag)
        header[1] = dt_line
        codes = getattr(obj, name + 's')
        if not len(codes) == ntimes:
            msg = '%ss in %s the wrong size; ntimes=%s; %ss=%s\n' % (name,
                obj.__class__.__name__, ntimes, name, codes)
            atts = object_attributes(obj)
            msg += 'names=%s\n' % atts
            msg += 'data_names=%s\n' % obj.data_names
            raise IndexError(msg)

        if hasattr(obj, 'eigr'):
            try:
                eigenvalue_real = obj.eigrs[itime]
            except IndexError:
                msg = 'eigrs[%s] not found; ntimes=%s; eigrs=%s' % (itime, ntimes, obj.eigrs)
                msg += 'names=%s' % object_attributes(obj)
                raise IndexError(msg)
            eigr_line = ' %14s = %12.6E\n' % ('EIGENVALUE', eigenvalue_real)
            header[2] = eigr_line
    return header
Example #3
import numpy as np
from pyNastran.utils import object_attributes
def _eigenvalue_header(obj, header, itime: int, ntimes: int, dt):
    if obj.nonlinear_factor not in (None, np.nan):
        name = obj.data_code['name']
        if isinstance(dt, (int, np.int32, np.int64)):
            dt_line = ' %14s = %i\n' % (name.upper(), dt)
        elif isinstance(dt, (float, np.float32, np.float64)):
            dt_line = ' %14s = %12.5E\n' % (name, dt)
        #elif isinstance(dt, np.complex):
        #dt_line = ' %14s = %12.5E %12.5Ej\n' % (name, dt.real, dt.imag)
        else:
            raise NotImplementedError(type(dt))
        header[1] = dt_line
        codes = getattr(obj, name + 's')
        if not len(codes) == ntimes:
            msg = (f'{name}s in {obj.__class__.__name__} the wrong size; '
                   f'ntimes={ntimes}; {name}s={codes}\n')
            atts = object_attributes(obj)
            msg += f'names={atts}\n'
            msg += f'data_names={obj.data_names}\n'
            raise IndexError(msg)

        if hasattr(obj, 'eigr'):
            try:
                eigenvalue_real = obj.eigrs[itime]
            except IndexError:
                msg = 'eigrs[%s] not found; ntimes=%s; eigrs=%s' % (
                    itime, ntimes, obj.eigrs)
                msg += 'names=%s' % object_attributes(obj)
                raise IndexError(msg)
            eigr_line = ' %14s = %12.6E\n' % ('EIGENVALUE', eigenvalue_real)
            header[2] = eigr_line
    return header
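The header-building logic above is easier to see with a tiny driver. Below is a minimal, hypothetical sketch: FakeModes and its attribute values are assumptions chosen only to satisfy what _eigenvalue_header reads (nonlinear_factor, data_code['name'], a '<name>s' list, eigr/eigrs); it is not a real pyNastran type.

import numpy as np

class FakeModes:
    """stand-in object; attributes mirror what _eigenvalue_header expects"""
    def __init__(self):
        self.nonlinear_factor = 1           # not None/NaN -> header is rewritten
        self.data_code = {'name': 'mode'}   # so codes = obj.modes below
        self.modes = [1, 2, 3]              # must have len == ntimes
        self.eigr = None                    # hasattr(obj, 'eigr') must be True
        self.eigrs = [1.5e3, 2.5e3, 4.0e3]  # one eigenvalue per time step
        self.data_names = ['mode']

header = ['SUBCASE 1\n', '', '']            # slots 1 and 2 get overwritten
header = _eigenvalue_header(FakeModes(), header, itime=0, ntimes=3, dt=1)
print(''.join(header))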
Example #4
    def test_object_attributes_introspection(self):
        attributes = object_attributes(self.b)
        self.assertEqual(attributes, ['a', 'b', 'c'])

        attributes = object_attributes(self.b, "private")
        self.assertEqual(attributes, ['_a', '_b'])

        attributes = object_attributes(self.b, "both")
        self.assertEqual(attributes, ['_a', '_b', 'a', 'b', 'c'])
Example #5
    def test_object_attributes_introspection(self):
        attributes = object_attributes(self.b)
        self.assertEqual(attributes, ['a', 'b', 'c'])

        attributes = object_attributes(self.b, "private")
        self.assertEqual(attributes, ['_a', '_b'])

        attributes = object_attributes(self.b, "both")
        self.assertEqual(attributes, ['_a', '_b', 'a', 'b', 'c'])
Example #6
    def test_object_attributes_introspection(self):
        """object methods determines the public/private attributes of a class"""
        b = B1(7)
        attributes = object_attributes(b)
        self.assertEqual(attributes, ['a', 'b', 'c'])

        attributes = object_attributes(b, "private")
        self.assertEqual(attributes, ['_a', '_b'])

        attributes = object_attributes(b, "both")
        self.assertEqual(attributes, ['_a', '_b', 'a', 'b', 'c'])
Example #7
    def test_object_attributes_introspection(self):
        """object methods determines the public/private attributes of a class"""
        b = B1(7)
        attributes = object_attributes(b)
        self.assertEqual(attributes, ["a", "b", "c"])

        attributes = object_attributes(b, "private")
        self.assertEqual(attributes, ["_a", "_b"])

        attributes = object_attributes(b, "both")
        self.assertEqual(attributes, ["_a", "_b", "a", "b", "c"])
Example #8
    def object_attributes(self, mode='public', keys_to_skip=None):
        if keys_to_skip is None:
            keys_to_skip = []

        my_keys_to_skip = [
        ]
        return object_attributes(self, mode=mode, keys_to_skip=keys_to_skip+my_keys_to_skip)
Example #9
 def test_object_attributes_introspection_2(self):
     attributes = object_attributes(self.b, "all")
     self.assertEqual(attributes, ['__class__', '__delattr__', '__dict__',
             '__doc__', '__format__', '__getattribute__', '__hash__',
             '__module__', '__new__', '__reduce__', '__reduce_ex__',
             '__repr__', '__setattr__', '__sizeof__', '__str__',
             '__subclasshook__', '__weakref__', '_a', '_b', 'a', 'b', 'c'])
Example #10
    def load(self, obj_filename: str='model.obj') -> None:
        """Loads a pickleable object"""
        with open(obj_filename, 'rb') as obj_file:
            obj = load(obj_file)
        keys_to_skip = ['ask', 'binary_debug', '_close_op2', '_count',
                        '_results', '_table_mapper', 'additional_matrices',
                        'apply_symmetry', 'debug_file', 'expected_times',
                        'f', 'generalized_tables', 'is_all_subcases',
                        'is_debug_file', 'is_geometry', 'is_vectorized',
                        'isubcase', 'log', 'matrix_tables', 'mode', 'n',
                        'ntotal', 'num_wide', 'op2_reader', 'table_name',
                        'use_vector', 'words']

        keys = object_attributes(self, mode="all", keys_to_skip=keys_to_skip,
                                 filter_properties=True)
        for key in keys:
            if key.startswith('__') and key.endswith('__'):
                continue
            try:
                val = getattr(obj, key)
            except NameError:
                # 'val' was never assigned if getattr failed, so don't log it
                self.log.warning(f'failed to read key={key!r}')
                continue

            try:
                setattr(self, key, val)
            except AttributeError:
                print(f'key={key!r} val={val}')
                raise
        self.log.debug('done loading!')
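For reference, the matching save() side only needs to pickle the object. This is a hedged sketch; the real OP2.save() may first strip unpicklable members such as log and open file handles.

from pickle import dump

def save(self, obj_filename: str = 'model.obj') -> None:
    """Saves a pickleable object (illustrative counterpart to load())"""
    with open(obj_filename, 'wb') as obj_file:
        dump(self, obj_file)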
Example #11
    def object_attributes(self, mode='public', keys_to_skip=None):
        """
        List the names of attributes of a class as strings. Returns public
        attributes as default.

        Parameters
        ----------
        obj : instance
            the object for checking
        mode : str
            defines what kind of attributes will be listed
            * 'public' - names that do not begin with underscore
            * 'private' - names that begin with single underscore
            * 'both' - private and public
            * 'all' - all attributes that are defined for the object
        keys_to_skip : List[str]; default=None -> []
            names to not consider to avoid deprecation warnings

        Returns
        -------
        attribute_names : List[str]
            sorted list of the names of attributes of a given type or None
            if the mode is wrong
        """
        return object_attributes(self, mode=mode, keys_to_skip=keys_to_skip)
Example #12
    def object_attributes(self, mode='public', keys_to_skip=None):
        """
        List the names of attributes of a class as strings. Returns public
        attributes as default.

        Parameters
        ----------
        obj : instance
            the object for checking
        mode : str
            defines what kind of attributes will be listed
            * 'public' - names that do not begin with underscore
            * 'private' - names that begin with single underscore
            * 'both' - private and public
            * 'all' - all attributes that are defined for the object
        keys_to_skip : List[str]; default=None -> []
            names to not consider to avoid deprecation warnings

        Returns
        -------
        attribute_names : List[str]
            sorted list of the names of attributes of a given type or None
            if the mode is wrong
        """
        return object_attributes(self, mode=mode, keys_to_skip=keys_to_skip)
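Calling the wrapper looks like this on any object that exposes it, using a pyNastran BDF purely as an illustration:

from pyNastran.bdf.bdf import BDF

model = BDF()
public = model.object_attributes()            # default: no leading underscore
private = model.object_attributes('private')  # single leading underscore
both = model.object_attributes('both')        # public + private
everything = model.object_attributes('all')   # includes dunder names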
Example #13
 def test_object_attributes_introspection_3(self):
     """object methods determines the public/private attributes of a class"""
     b = B1(7)
     attributes = object_attributes(b, "all")
     version_info = sys.version_info
     if version_info < (3, 0):
         self.assertEqual(attributes, [
             '__class__', '__delattr__', '__dict__',
             '__doc__', '__format__', '__getattribute__', '__hash__',
             '__module__', '__new__', '__reduce__', '__reduce_ex__',
             '__repr__', '__setattr__', '__sizeof__', '__str__',
             '__subclasshook__', '__weakref__', '_a', '_b', 'a', 'b', 'c'])
     else:
         expected = [
             '__class__', '__delattr__', '__dict__',
             '__doc__', '__eq__', '__format__', '__ge__', '__getattribute__',
             '__gt__', '__hash__', '__le__', '__lt__', '__module__',
             '__ne__', '__new__', '__reduce__', '__reduce_ex__', '__repr__',
             '__setattr__', '__sizeof__', '__str__', '__subclasshook__',
             '__weakref__', '_a', '_b', 'a', 'b', 'c']
         if version_info > (3, 3): # inclusive
             expected.append('__dir__')
         if version_info > (3, 6):
             expected.append('__init_subclass__')
             #print('\nactual   = %s' % ','.join(list(sorted(attributes))))
             #print('expected = %s' % ','.join(list(sorted(expected))))
         self.assertEqual(sorted(attributes), sorted(expected))
Example #14
 def test_object_attributes_introspection_2(self):
     attributes = object_attributes(self.b, "all")
     self.assertEqual(attributes, ['__class__', '__delattr__', '__dict__',
             '__doc__', '__format__', '__getattribute__', '__hash__',
             '__module__', '__new__', '__reduce__', '__reduce_ex__',
             '__repr__', '__setattr__', '__sizeof__', '__str__',
             '__subclasshook__', '__weakref__', '_a', '_b', 'a', 'b', 'c'])
Example #15
    def resize(self, n, refcheck=True):
        names = object_attributes(self, mode="public")
        for name in names:
            attr = getattr(self, name)
            if isinstance(attr, ndarray):
                #self.model.log.info('resizing %r; shape=%s; size=%s' % (name, attr.shape, attr.size))
                # resize the array
                shape2 = list(attr.shape)
                shape2[0] = n
                attr.resize(tuple(shape2), refcheck=refcheck)

                if n > self.n:
                    # TODO: fill the data with nan values ideally, but it's not working
                    if attr.ndim == 1:
                        attr[self.n:] = 0
                    elif attr.ndim == 2:
                        attr[self.n:, :] = 0
                    elif attr.ndim == 3:
                        attr[self.n:, :, :] = 0
                    else:
                        raise NotImplementedError(attr.shape)
                    #print(attr)
            else:
                # metadata
                pass
        if self.i >= n:
            self.i = n
        self.n = n
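The core of the loop is ndarray.resize, which grows the array in place and zero-fills the new entries. A standalone demonstration:

import numpy as np

xyz = np.zeros((3, 3))
xyz.resize((5, 3), refcheck=False)  # in place; new rows are zero-filled
assert xyz.shape == (5, 3)
assert (xyz[3:] == 0).all()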
Example #16
def write_class(name, obj, nspaces=0, nbase=0):
    objectType = obj.__class__.__name__
    obj_attrs = object_attributes(obj, 'both')
    if not obj_attrs:
        return "%s()" % objectType

    spaces = ' ' * nspaces
    nspaces2 = nspaces + 4
    #spaces2 = nspaces2 * ' '
    msg = "%s(\n" % objectType
    for attr in obj_attrs[:-1]:
        value = getattr(obj, attr)
        #msg += '?'
        msg += write_object_attributes(attr,
                                       value,
                                       nspaces2,
                                       nbase,
                                       isClass=True)
    attr = obj_attrs[-1]
    value = getattr(obj, attr)
    msg += write_object_attributes(attr, value, nspaces2, nbase, isClass=True)
    msg += '%s)' % spaces

    #print "dir(obj) =", dir(obj)
    #print "obj_attrs =", obj_attrs
    return msg
Example #17
    def object_attributes(self, mode="public", keys_to_skip=None):
        """..see:: `pyNastran.utils.object_attributes(...)`"""
        if keys_to_skip is None:
            keys_to_skip = []

        my_keys_to_skip = []
        return object_attributes(self, mode=mode, keys_to_skip=keys_to_skip + my_keys_to_skip)
Example #18
    def resize(self, n, refcheck=True):
        names = object_attributes(self, mode="public")
        for name in names:
            attr = getattr(self, name)
            if isinstance(attr, ndarray):
                #self.model.log.info('resizing %r; shape=%s; size=%s' % (
                #name, attr.shape, attr.size))
                # resize the array
                shape2 = list(attr.shape)
                shape2[0] = n
                attr.resize(tuple(shape2), refcheck=refcheck)

                if n > self.n:
                    # TODO: fill the data with nan values ideally, but it's not working
                    if attr.ndim == 1:
                        attr[self.n:] = 0
                    elif attr.ndim == 2:
                        attr[self.n:, :] = 0
                    elif attr.ndim == 3:
                        attr[self.n:, :, :] = 0
                    else:
                        raise NotImplementedError(attr.shape)
                    #print(attr)
            else:
                # metadata
                pass
        if self.i >= n:
            self.i = n
        self.n = n
Example #19
    def object_attributes(self,
                          mode: str = 'public',
                          keys_to_skip: Optional[List[str]] = None,
                          filter_properties: bool = False) -> List[str]:
        """
        List the names of attributes of a class as strings. Returns public
        attributes as default.

        Parameters
        ----------
        mode : str
            defines what kind of attributes will be listed
            * 'public' - names that do not begin with underscore
            * 'private' - names that begin with single underscore
            * 'both' - private and public
            * 'all' - all attributes that are defined for the object
        keys_to_skip : List[str]; default=None -> []
            names to not consider to avoid deprecation warnings
        filter_properties : bool; default=False
            filters the @property objects

        Returns
        -------
        attribute_names : List[str]
            sorted list of the names of attributes of a given type or None
            if the mode is wrong
        """
        if keys_to_skip is None:
            keys_to_skip = []

        my_keys_to_skip = [
            #'case_control_deck',
            'log',
            'node_ids',
            'coord_ids',
            'element_ids',
            'property_ids',
            'material_ids',
            'caero_ids',
            'is_long_ids',
            'nnodes',
            'ncoords',
            'nelements',
            'nproperties',
            'nmaterials',
            'ncaeros',
            'npoints',
            'point_ids',
            'subcases',
            '_card_parser',
            '_card_parser_b',
            '_card_parser_prepare',
            'object_methods',
            'object_attributes',
        ]
        return object_attributes(self,
                                 mode=mode,
                                 keys_to_skip=keys_to_skip + my_keys_to_skip,
                                 filter_properties=filter_properties)
Example #20
 def object_attributes(self, mode='public', keys_to_skip=None):
     """.. seealso:: `pyNastran.utils.object_attributes(...)`"""
     if keys_to_skip is None:
         keys_to_skip = []
     my_keys_to_skip = []
     return object_attributes(self,
                              mode=mode,
                              keys_to_skip=keys_to_skip + my_keys_to_skip)
Example #21
    def object_attributes(self, mode='public', keys_to_skip=None):
        if keys_to_skip is None:
            keys_to_skip = []

        my_keys_to_skip = [
            'object_methods', 'object_attributes',
        ]
        return object_attributes(self, mode=mode, keys_to_skip=keys_to_skip+my_keys_to_skip)
Example #22
 def __repr__(self):
     msg = '<Settings>\n'
     for key in object_attributes(self, mode='public', keys_to_skip=None):
         value = getattr(self, key)
         if isinstance(value, tuple):
             value = str(value)
         msg += '  %r = %r\n' % (key, value)
     return msg
Example #23
 def object_attributes(self, mode: str='public',
                       keys_to_skip: Optional[List[str]]=None,
                       filter_properties: bool=False) -> List[str]:
     """.. seealso:: `pyNastran.utils.object_attributes(...)`"""
     if keys_to_skip is None:
         keys_to_skip = []
     my_keys_to_skip = []  # type: List[str]
     return object_attributes(self, mode=mode, keys_to_skip=keys_to_skip+my_keys_to_skip,
                              filter_properties=filter_properties)
Example #24
 def object_attributes(self, mode='public', keys_to_skip=None):
     # type: (str, Optional[List[str]]) -> List[str]
     """.. seealso:: `pyNastran.utils.object_attributes(...)`"""
     if keys_to_skip is None:
         keys_to_skip = []
     my_keys_to_skip = []
     return object_attributes(self,
                              mode=mode,
                              keys_to_skip=keys_to_skip + my_keys_to_skip)
Example #25
    def on_reload_usm3d(self):
        """
        For USM3D, we dynamically load the latest CFD results time step,
        which is really handy when you're running a job.
        """
        # minimum is 1
        nstep = 100

        if self.gui.out_filename is None:
            msg = 'usm3d_filename=%r must not be None\n' % self.gui.out_filename
            dir_gui = []
            for key in object_attributes(self.gui):
                try:
                    value = getattr(self.gui, key)
                except KeyError:
                    # self.edge_actor is a
                    if key not in ['edge_actor']:
                        self.gui.log.warning('key=%s is undefined...' % key)
                    continue  # 'value' was never assigned here

                if isinstance(value, (integer_float_types, string_types)):
                    dir_gui.append(key)
            dir_gui.sort()
            msg += 'dir(gui) = [%s]' % ', '.join(dir_gui)
            raise RuntimeError(msg)
        flo_filename = self.gui.out_filename
        dirname = os.path.dirname(flo_filename)
        if dirname == '':
            dirname = os.getcwd()
        basename = os.path.basename(flo_filename)
        base = os.path.splitext(basename)[0]


        # box.flo -> box_100.flo
        if '_' in base:
            model_name, n = base.rsplit('_', 1)
            #print("model_name=%r n=%r" % (model_name, n))
            n = int(n)
            n_list = get_n_list(dirname, model_name)
            inn = n_list.index(n)
            if inn+nstep < len(n_list):
                nnew = n_list[inn+nstep]
            else:
                nnew = max(n_list)
                if nnew == n:
                    raise RuntimeError('%r is the last file' % self.gui.out_filename)
            #print("inn=%r nnew=%r" % (inn, nnew))
            flo_filename = model_name + '_%s.flo' % nnew
        else:
            flo_filename = self.gui.out_filename
            #msg = (
                #'The current file is must have the format of '
                #'xxx_%%i.flo, not %r' % self.out_filename)
            #raise RuntimeError(msg)
        #print("loading %r" % flo_filename)
        self.load_usm3d_results(flo_filename)
        self.gui.out_filename = os.path.join(dirname, flo_filename)
Example #26
    def object_attributes(self, mode='public', keys_to_skip=None):
        if keys_to_skip is None:
            keys_to_skip = []
        elif isinstance(keys_to_skip, string_types):
            keys_to_skip = [keys_to_skip]

        my_keys_to_skip = [
            'object_methods', 'object_attributes',
        ]
        return object_attributes(self, mode=mode, keys_to_skip=keys_to_skip+my_keys_to_skip)
Example #27
    def object_attributes(self, mode='public', keys_to_skip=None):
        if keys_to_skip is None:
            keys_to_skip = []
        elif isinstance(keys_to_skip, string_types):
            keys_to_skip = [keys_to_skip]

        my_keys_to_skip = [
            'object_methods', 'object_attributes',
        ]
        return object_attributes(self, mode=mode, keys_to_skip=keys_to_skip+my_keys_to_skip)
Example #28
    def object_attributes(self, mode='public', keys_to_skip=None,
                          filter_properties=False):
        if keys_to_skip is None:
            keys_to_skip = []

        my_keys_to_skip = [
            'object_methods', 'object_attributes',
        ]
        return object_attributes(self, mode=mode,
                                 keys_to_skip=keys_to_skip+my_keys_to_skip,
                                 filter_properties=filter_properties)
Example #29
    def export_to_hdf5(self, hdf5_file, model: BDF, encoding: str) -> None:
        """exports the case control deck section to an hdf5 file"""
        keys_to_skip = [
            'type', 'log', 'sol_200_map', 'rsolmap_to_str', 'solmap_to_value'
        ]

        # scalars----
        #_begin_count
        #debug
        #write_begin_bulk

        # lines----
        #begin_bulk
        #lines
        #output_lines
        #reject_lines

        # subcases-----
        #subcases

        h5attrs = object_attributes(self,
                                    mode='both',
                                    keys_to_skip=keys_to_skip)
        for h5attr in h5attrs:
            value = getattr(self, h5attr)
            if h5attr in [
                    '_begin_count', 'debug', 'write_begin_bulk',
                    'use_card_dict'
            ]:  # scalars
                # simple export
                hdf5_file.create_dataset(h5attr, data=value)
            elif h5attr in [
                    'reject_lines', 'begin_bulk', 'lines', 'output_lines'
            ]:
                # lists of strings
                if len(value) == 0:
                    continue
                value_bytes = [line.encode(encoding) for line in value]
                #print(value_bytes)
                hdf5_file.create_dataset(h5attr, data=value_bytes)
            elif h5attr == 'subcases':
                keys = list(self.subcases.keys())
                subcase_group = hdf5_file.create_group('subcases')
                subcase_group.create_dataset('keys', data=keys)
                for key, subcase in self.subcases.items():
                    #print('***key =', key)
                    sub_group = subcase_group.create_group(str(key))
                    subcase.export_to_hdf5(sub_group, encoding)
            else:
                self.log.warning('skipping CaseControlDeck/%s' % h5attr)
                raise RuntimeError('error exporting hdf5 CaseControlDeck/%s' %
                                   h5attr)
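Reading the string datasets back inverts the encode step. A sketch assuming h5py; the filename and encoding are placeholders:

import h5py

with h5py.File('case_control.h5', 'r') as hdf5_file:
    if 'lines' in hdf5_file:
        # datasets were written as encoded bytes, so decode on the way out
        lines = [line.decode('utf8') for line in hdf5_file['lines'][()]]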
Example #30
 def _get_table_types_testing(self):
     """
     testing method...don't use
     """
     table_types = self.get_table_types()
     tables = object_attributes(self, 'public')
     tables = [table for table in tables
               if isinstance(getattr(self, table), dict)
               and table not in ['card_count', 'data_code', 'element_mapper', 'iSubcaseNameMap',
               'labels', 'subtitles', 'additional_matrices', 'matrices']]
     for table in tables:
         assert table in table_types, table
     return table_types
Example #31
 def _get_table_types_testing(self):
     """testing method...don't use"""
     table_types = self.get_table_types()
     tables = object_attributes(self, 'public')
     tables = [table for table in tables
               if isinstance(getattr(self, table), dict)
               and table not in [
                   'card_count', 'data_code', 'element_mapper', 'isubcase_name_map',
                   'labels', 'subtitles', 'additional_matrices', 'matrices', 'subcase_key',
                   'end_options', 'expected_times', 'generalized_tables']]
     for table in tables:
         if self.make_geom:
             break
         assert table in table_types, table
     return table_types
Example #32
    def object_attributes(self, mode='public', keys_to_skip=None):
        if keys_to_skip is None:
            keys_to_skip = []

        my_keys_to_skip = [
            #'case_control_deck',
            'log', 'mpcObject', 'spcObject',
            'node_ids', 'coord_ids', 'element_ids', 'property_ids',
            'material_ids', 'caero_ids', 'is_long_ids',
            'nnodes', 'ncoords', 'nelements', 'nproperties',
            'nmaterials', 'ncaeros',

            'point_ids', 'subcases',
            '_card_parser', '_card_parser_b',
            'object_methods', 'object_attributes',
        ]
        return object_attributes(self, mode=mode, keys_to_skip=keys_to_skip+my_keys_to_skip)
Example #33
    def _cleanup_data_members(self):
        """deletes variables from previous tables"""
        del_words = [
            'words',
            #'Title',
            #'ID',
            'analysis_code',
            #'result_names',
            #'labels',
            #'data_names',
        ]
        msg = ''
        if hasattr(self, 'words'):
            if not len(self.words) in [0, 28]:
                msg = 'table_name=%r len(self.words)=%s words=%s' % (
                    self.table_name, len(self.words), self.words)
                raise RuntimeError(msg)

            for word in self.words:
                if word in ['???', 'Title']:
                    continue
                if not hasattr(self, word):
                    continue
                delattr(self, word)
            self.words = []
        if hasattr(self, 'analysis_code'):
            del self.analysis_code
        #if hasattr(self, 'data_names') and self.data_names is not None:
        #print(object_attributes(self))

        if hasattr(self, 'data_code'):
            del self.data_code
        if hasattr(self, 'mode'):
            del self.mode

        for word in del_words:
            if hasattr(self, word):
                val = getattr(self, word)
                if isinstance(val, list) and len(val) == 0:
                    continue
                msg += '  %s=%s\n' % (word, val)
        if msg:
            print(object_attributes(self))
            print(msg)
Example #34
 def test_object_attributes_introspection_3(self):
     attributes = object_attributes(self.b, "all")
     version_info = sys.version_info
     if sys.version_info < (3,0):
         self.assertEqual(attributes, [
             '__class__', '__delattr__', '__dict__',
             '__doc__', '__format__', '__getattribute__', '__hash__',
             '__module__', '__new__', '__reduce__', '__reduce_ex__',
             '__repr__', '__setattr__', '__sizeof__', '__str__',
             '__subclasshook__', '__weakref__', '_a', '_b', 'a', 'b', 'c'])
     else:
         expected = [
             '__class__', '__delattr__', '__dict__',
             '__doc__', '__eq__', '__format__', '__ge__', '__getattribute__',
             '__gt__', '__hash__', '__le__', '__lt__', '__module__',
             '__ne__', '__new__', '__reduce__', '__reduce_ex__', '__repr__',
             '__setattr__', '__sizeof__', '__str__', '__subclasshook__',
             '__weakref__', '_a', '_b', 'a', 'b', 'c']
         if sys.version_info > (3, 3):
             expected.append('__dir__')
         self.assertEqual(sorted(attributes), sorted(expected))
Example #35
 def test_object_attributes_introspection_3(self):
     """object methods determines the public/private attributes of a class"""
     b = B1(7)
     attributes = object_attributes(b, "all")
     #version_info = sys.version_info
     expected = [
         '__class__',
         '__delattr__',
         '__dict__',
         '__doc__',
         '__eq__',
         '__format__',
         '__ge__',
         '__getattribute__',
         '__gt__',
         '__hash__',
         '__le__',
         '__lt__',
         '__module__',
         '__ne__',
         '__new__',
         '__reduce__',
         '__reduce_ex__',
         '__repr__',
         '__setattr__',
         '__sizeof__',
         '__str__',
         '__subclasshook__',
         '__weakref__',
         '__dir__',
         '__init_subclass__',
         '_a',
         '_b',
         'a',
         'b',
         'c',
     ]
     #print('\nactual   = %s' % ','.join(list(sorted(attributes))))
     #print('expected = %s' % ','.join(list(sorted(expected))))
     self.assertEqual(list(sorted(attributes)), list(sorted(expected)))
Example #36
File: dev.py  Project: dsegroup22/pyNastran
def write_class(obj, nspaces=0):
    """write a class' public and private members"""
    #print('write_class')
    object_type = obj.__class__.__name__
    obj_attrs = object_attributes(obj, 'both')
    if not obj_attrs:
        return "%s()" % object_type

    spaces = ' ' * nspaces
    nspaces2 = nspaces + 4
    if nspaces == 0:
        nspaces2 = 4
    else:
        nspaces2 = nspaces + 4
    spaces2 = ' ' * nspaces2
    msg = "%s(\n" % object_type
    for attr in obj_attrs:
        value = getattr(obj, attr)
        msg += '%s%s = ' % (spaces2, attr)
        msg += write_value(value, nspaces2) + ',\n'
    msg += '%s)' % (spaces)
    return msg
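A quick illustration of the formatter, stubbing the undefined write_value() with repr(); write_class and object_attributes are assumed to be in scope:

def write_value(value, nspaces):  # stand-in for the real value formatter
    return repr(value)

class Point:
    def __init__(self):
        self.x = 1.0
        self.y = 2.0

print(write_class(Point()))
# Point(
#     x = 1.0,
#     y = 2.0,
# )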
Example #37
File: dev.py  Project: hurlei/pyNastran
def write_class(obj, nspaces=0):
    """write a class' public and private members"""
    # print('write_class')
    object_type = obj.__class__.__name__
    obj_attrs = object_attributes(obj, "both")
    if not obj_attrs:
        return "%s()" % object_type

    spaces = " " * nspaces
    nspaces2 = nspaces + 4
    if nspaces == 0:
        nspaces2 = 4
    else:
        nspaces2 = nspaces + 4
    spaces2 = " " * nspaces2
    msg = "%s(\n" % object_type
    for attr in obj_attrs:
        value = getattr(obj, attr)
        msg += "%s%s = " % (spaces2, attr)
        msg += write_value(value, nspaces2) + ",\n"
    msg += "%s)" % (spaces)
    return msg
Example #38
File: dev.py  Project: FrankNaets/pyNastran
def write_class(name, obj, nspaces=0, nbase=0):
    objectType = obj.__class__.__name__
    obj_attrs = object_attributes(obj, 'both')
    if not obj_attrs:
        return "%s()" % objectType

    spaces = ' ' * nspaces
    nspaces2 = nspaces + 4
    #spaces2 = nspaces2 * ' '
    msg = "%s(\n" % objectType
    for attr in obj_attrs[:-1]:
        value = getattr(obj, attr)
        #msg += '?'
        msg += write_object_attributes(attr, value, nspaces2, nbase, isClass=True)
    attr = obj_attrs[-1]
    value = getattr(obj, attr)
    msg += write_object_attributes(attr, value, nspaces2, nbase, isClass=True)
    msg += '%s)' % spaces

    #print "dir(obj) =", dir(obj)
    #print "obj_attrs =", obj_attrs
    return msg
Example #39
    def object_attributes(self, mode='public', keys_to_skip=None):
        """
        List the names of attributes of a class as strings. Returns public
        attributes as default.

        Parameters
        ----------
        mode : str
            defines what kind of attributes will be listed
            * 'public' - names that do not begin with underscore
            * 'private' - names that begin with single underscore
            * 'both' - private and public
            * 'all' - all attributes that are defined for the object
        keys_to_skip : List[str]; default=None -> []
            names to not consider to avoid deprecation warnings

        Returns
        -------
        attribute_names : List[str]
            sorted list of the names of attributes of a given type or None
            if the mode is wrong
        """
        if keys_to_skip is None:
            keys_to_skip = []

        my_keys_to_skip = [
            #'case_control_deck',
            'log', 'mpcObject', 'spcObject',
            'node_ids', 'coord_ids', 'element_ids', 'property_ids',
            'material_ids', 'caero_ids', 'is_long_ids',
            'nnodes', 'ncoords', 'nelements', 'nproperties',
            'nmaterials', 'ncaeros',

            'point_ids', 'subcases',
            '_card_parser', '_card_parser_b',
            'object_methods', 'object_attributes',
        ]
        return object_attributes(self, mode=mode, keys_to_skip=keys_to_skip+my_keys_to_skip)
Example #40
 def test_object_attributes_introspection_3(self):
     """object methods determines the public/private attributes of a class"""
     b = B1(7)
     attributes = object_attributes(b, "all")
     version_info = sys.version_info
     if sys.version_info < (3, 0):
         self.assertEqual(
             attributes,
             [
                 "__class__",
                 "__delattr__",
                 "__dict__",
                 "__doc__",
                 "__format__",
                 "__getattribute__",
                 "__hash__",
                 "__module__",
                 "__new__",
                 "__reduce__",
                 "__reduce_ex__",
                 "__repr__",
                 "__setattr__",
                 "__sizeof__",
                 "__str__",
                 "__subclasshook__",
                 "__weakref__",
                 "_a",
                 "_b",
                 "a",
                 "b",
                 "c",
             ],
         )
     else:
         expected = [
             "__class__",
             "__delattr__",
             "__dict__",
             "__doc__",
             "__eq__",
             "__format__",
             "__ge__",
             "__getattribute__",
             "__gt__",
             "__hash__",
             "__le__",
             "__lt__",
             "__module__",
             "__ne__",
             "__new__",
             "__reduce__",
             "__reduce_ex__",
             "__repr__",
             "__setattr__",
             "__sizeof__",
             "__str__",
             "__subclasshook__",
             "__weakref__",
             "_a",
             "_b",
             "a",
             "b",
             "c",
         ]
         if sys.version_info > (3, 3):
             expected.append("__dir__")
         self.assertEqual(sorted(attributes), sorted(expected))
Example #41
bdf_filename = r'C:\Users\Steve\Dropbox\pyNastran_examples\iSat\ISat_Launch_Sm_Rgd.dat'
bdf.read_bdf(bdf_filename, xref=False)

# <codecell>

#bdf_filename = r'D:\work\pynastran_0.7.0_py27\models\iSat\ISat_Launch_Sm_Rgd.dat'
bdf_filename = r'C:\Users\Steve\Dropbox\pyNastran_examples\iSat\ISat_Launch_Sm_Rgd.dat'

# read the file as a path
bdf2 = BDF()
bdf2.read_bdf(bdf_filename, xref=True)

# <codecell>

print "attributes =", object_attributes(bdf)
print ""
print "methods =",object_methods(bdf)
print bdf.card_stats()
print bdf.card_count

# <codecell>

# explanation of cross-referencing

# no cross referencing (xref=False)
cquad = bdf.elements[1]
nid1 = cquad.nodes[0]
n1 = bdf.nodes[nid1]
cd4 = n1.cd
c4 = bdf.coords[cd4]
Example #42
    def _read_subtable_results(self, table4_parser, record_len):
        """
        # if reading the data
        # 0 - non-vectorized
        # 1 - 1st pass to size the array (vectorized)
        # 2 - 2nd pass to read the data  (vectorized)

        Parameters
        ----------
        self : OP2
            the OP2 object pointer
        table4_parser : function
            the parser function for table 4
        record_len : int
            the length of the record block
        """
        datai = b''
        n = 0
        if self.read_mode in [0, 2]:
            self.ntotal = 0

            if 0:
                # we stream the record because we get it in partial blocks
                for data in self._stream_record():
                    data = datai + data
                    ndata = len(data)
                    n = table4_parser(data, ndata)
                    assert isinstance(n, int), self.table_name
                    datai = data[n:]
            else:
                data, ndata = self._read_record_ndata()
                n = table4_parser(data, ndata)
                assert isinstance(n, int), self.table_name

            # PCOMPs are stupid, so we need an element flag
            if hasattr(self, 'eid_old'):
                del self.eid_old

            # if reading the data
            # 0 - non-vectorized
            # 1 - 1st pass to size the array (vectorized)
            # 2 - 2nd pass to read the data  (vectorized)
            if self.read_mode == 2:
                # vectorized objects are stored as self.obj
                # they have obj.itime which is their table3 counter
                if hasattr(self, 'obj') and hasattr(self.obj, 'itime'):
                    #ntotal = record_len // (self.num_wide * 4) * self._data_factor

                    # we reset the itime counter when we fill up the
                    # total number of nodes/elements/layers in the
                    # result, where ntotal is the critical length of
                    # interest.  This lets us start back at the correct
                    # spot the next time we read table3
                    #
                    # For displacements, ntotal=nnodes
                    #
                    # For a CBAR, it's ntotal=nelements*2, where 2 is
                    # the number of nodes; points A/B
                    #
                    # For a CTRIA3 / linear CQUAD4, it's
                    # ntotal=nelements*2, where 2 is the number of
                    # layers (top/btm) and we only get a centroidal
                    # result.
                    #
                    # For a CQUAD4 bilinear, it's
                    # ntotal=nelements*(nnodes+1)*2, where 2 is the
                    # number of layers and nnodes is 4 (we get an extra
                    # result at the centroid).
                    #
                    # For a PCOMP, it's ntotal=sum(nelements*nlayers),
                    # where each element can have a different number
                    # of layers
                    if self.obj.ntotal == self.obj.data.shape[1]:
                        self.obj._reset_indices()
                        self.obj.words = self.words
                        self.obj.itime += 1
                    else:
                        self.log.debug('self.obj.name=%r has itime' % self.obj.__class__.__name__)
                        self.log.debug('ntotal=%s shape=%s shape[1]=%s _data_factor=%s' % (
                            self.obj.ntotal, str(self.obj.data.shape),
                            self.obj.data.shape[1], self._data_factor))
                #else:
                    #print('self.obj.name=%r doesnt have itime' % self.obj.__class__.__name__)

        elif self.read_mode == 1:
            # if we're checking the array size

            #n = self._skip_record()
            #n = table4_parser(datai, 300000)
            if 0:
                self.ntotal = 0
                #n = self.n
                n = 0
                for i, data in enumerate(self._stream_record()):
                    data = datai + data
                    ndata = len(data)
                    n = table4_parser(data, ndata)
                    assert isinstance(n, int), self.table_name
                    datai = data[n:]
                assert len(datai) == 0, len(datai)
                #n = record_len
                #break
            else:
                data, ndata = self._skip_record_ndata()
                n = table4_parser(data, ndata)
                assert isinstance(n, int), self.table_name

            #self.goto(n)
            #n = self._skip_record()

            if hasattr(self, 'obj') and self.obj is not None:
                if hasattr(self.obj, 'ntimes'):
                    if not hasattr(self.obj, '_reset_indices'):
                        #methods = '\ndir(obj)=%s' % ', '.join(sorted(dir(self.obj)))
                        #msg = 'is %s vectorized because its missing _reset_indices...%s' % (
                            #self.obj.__class__.__name__, methods)
                        return None
                        #raise RuntimeError(msg)
                    self.obj._reset_indices()
                    self.obj.ntimes += 1
                    self.obj.ntotal = record_len // (self.num_wide * 4) * self._data_factor
                    assert isinstance(self.obj.ntotal, int), type(self.obj.ntotal)
                else:
                    print('obj=%s doesnt have ntimes' % self.obj.__class__.__name__)
        else:
            raise RuntimeError(self.read_mode)
        del_words = [
            'words',
            #'Title',
            #'ID',
            'analysis_code',
            #'result_names',
            #'labels',
            #'dataNames',
        ]
        msg = ''
        if hasattr(self, 'words'):
            for word in self.words:
                if word in ['???', 'Title']:
                    continue
                if not hasattr(self, word):
                    continue
                delattr(self, word)
            self.words = []
        if hasattr(self, 'analysis_code'):
            del self.analysis_code
        #if hasattr(self, 'dataNames') and self.dataNames is not None:
            #print(object_attributes(self))

        if hasattr(self, 'data_code'):
            del self.data_code

        for word in del_words:
            if hasattr(self, word):
                val = getattr(self, word)
                if isinstance(val, list) and len(val) == 0:
                    continue
                msg += '  %s=%s\n' % (word, val)
        if msg:
            print(object_attributes(self))
            print(msg)
        return n
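The long ntotal comment above is concrete enough to turn into numbers; the element counts here are illustrative only:

nelements = 10
ntotal_cbar = nelements * 2                     # CBAR: points A and B
ntotal_ctria3 = nelements * 2                   # CTRIA3: 2 layers, centroid only
nnodes_per_quad = 4
ntotal_cquad4 = nelements * (nnodes_per_quad + 1) * 2  # corners + centroid, 2 layers
ntotal_pcomp = sum([3, 5, 4])                   # PCOMP: layer count varies per element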
Example #43
File: op2.py  Project: hurlei/pyNastran
    def object_attributes(self, mode="public", keys_to_skip=None):
        if keys_to_skip is None:
            keys_to_skip = []

        my_keys_to_skip = ["object_methods", "object_attributes"]
        return object_attributes(self, mode=mode, keys_to_skip=keys_to_skip + my_keys_to_skip)
Example #44
def bdf_renumber(bdf_filename, bdf_filename_out, size=8, is_double=False,
                 starting_id_dict=None):
    """
    Renumbers a BDF

    Parameters
    ----------
    bdf_filename : str
        a bdf_filename (string; supported) or a BDF model (BDF)
        that has been cross referenced and is fully valid (an equivalenced deck is not valid)
    bdf_filename_out : str
        a bdf_filename to write
    size : int; {8, 16}; default=8
        the bdf write precision
    is_double : bool; default=False
        the field precision to write

    .. todo:: bdf_model option for bdf_filename hasn't been tested
    .. todo:: add support for subsets (e.g. renumber only a subset of nodes/elements)
    .. warning:: spoints might be problematic...check
    .. warning:: still in development, but it usually brutally crashes if it's not supported
    .. warning:: be careful of unsupported cards

    Supports
    ========
     - GRIDs
       - no superelements
     - COORDx

     - elements
        - CELASx/CONROD/CBAR/CBEAM/CQUAD4/CTRIA3/CTETRA/CPENTA/CHEXA
        - RBAR/RBAR1/RBE1/RBE2/RBE3

     - properties
        - PSHELL/PCOMP/PCOMPG/PSOLID/PSHEAR/PBAR/PBARL
          PROD/PTUBE/PBEAM
     - mass
        - CMASSx/CONMx/PMASS

     - aero
       - FLFACT
       - SPLINEx
       - FLUTTER

     - partial case control
       - METHOD/CMETHOD/FREQUENCY
       - LOAD/DLOAD/LSEQ/LOADSET...LOADSET/LSEQ is iffy
       - SET cards
         - nodes
         - elements
       - SPC/MPC/FLUTTER/FLFACT

    - constraints
       - SPC/SPCADD/SPCAX/SPCD
       - MPC/MPCADD
       - SUPORT/SUPORT1

    - solution control/methods
       - TSTEP/TSTEPNL
       - NLPARM
       - EIGB/EIGC/EIGRL/EIGR

    - sets
       - USET

    - other
      - tables
      - materials
      - loads/dloads


    Not Done
    ========
     - SPOINT
     - any cards with SPOINTs
       - DMIG/DMI/DMIJ/DMIJI/DMIK/etc.
       - CELASx
       - CDAMPx
     - superelements
     - aero cards
       - CAEROx
       - PAEROx
     - thermal cards?
     - optimization cards
     - SETx
     - PARAM,GRDPNT,x; where x>0
     - GRID SEID
     - case control
       - STATSUB
       - SUBCASE
       - global SET cards won't be renumbered properly
    """
    starting_id_dict_default = {
        'cid' : 50,
        'nid' : 101,
        'eid' : 301,
        'pid' : 401,
        'mid' : 501,
        'spc_id' : 501,
        'mpc_id' : 601,
        'load_id' : 701,
        'dload_id' : 801,

        'method_id' : 901,
        'cmethod_id' : 1001,
        'spline_id' : 1101,
        'table_id' : 1201,
        'flfact_id' : 1301,
        'flutter_id' : 1401,
        'freq_id' : 1501,
        'tstep_id' : 1601,
        'tstepnl_id' : 1701,
        'suport_id' : 1801,
        'suport1_id' : 1901,
        'tf_id' : 2001,
    }
    if starting_id_dict is None:
        starting_id_dict = starting_id_dict_default
    else:
        for key, value in iteritems(starting_id_dict_default):
            if key not in starting_id_dict:
                starting_id_dict[key] = value

    for key, value in sorted(iteritems(starting_id_dict)):
        assert isinstance(key, string_types), key
        assert key in starting_id_dict_default, 'key=%s is invalid' % (key)
        assert isidentifier(key), 'key=%s is invalid' % key
        assert isinstance(value, integer_types), 'value=%s must be an integer; type(value)=%s' % (value, type(value))
        call = '%s = %s' % (key, value)

        # this exec is safe because we checked the identifier
        exec(call)


    eid_map = {}
    nid_map = {}
    reverse_nid_map = {}
    mid_map = {}
    cid_map = {}
    mpc_map = {}
    spc_map = {}
    dload_map = {}
    load_map = {}

    cmethod_map = {}
    method_map = {}
    flfact_map = {}
    flutter_map = {}
    freq_map = {}
    tstep_map = {}
    tstepnl_map = {}
    suport_map = {}
    suport1_map = {}

    if isinstance(bdf_filename, string_types):
        model = BDF()
        model.read_bdf(bdf_filename)
    else:
        model = bdf_filename

    if model.spoints is None:
        spoints = []
    else:
        spoints = list(model.spoints.points)
    if model.epoints is None:
        epoints = []
    else:
        epoints = list(model.epoints.points)

    nids = model.nodes.keys()

    spoints_nids = spoints + nids
    spoints_nids.sort()
    i = 1
    nnodes = len(spoints_nids)

    i = 1
    #j = 1
    #print(spoints_nids)
    #k = 0

    i = nid
    #banned_nodes = spoints
    for nidi in spoints_nids:
        if nidi in spoints:
            pass
            #print('sid=%s -> %s' % (nid, i))
            #i += 1
        else:
            while i in spoints:
                #print('*bump')
                i += 1
            #print('nid=%s -> %s' % (nid, i))
            nid_map[nidi] = i
            reverse_nid_map[i] = nidi
            i += 1
    #for nid in sorted(nids):
        #nid_map[nid] = i
        #reverse_nid_map[i] = nid
        #i += 1
    #print(nid_map)
    #print(reverse_nid_map)

    all_materials = (
        model.materials,
        model.creepMaterials,
        model.thermalMaterials,
        model.hyperelasticMaterials,
        model.MATT1,
        model.MATT2,
        model.MATT3,
        model.MATT4,
        model.MATT5,
        #model.MATT6,
        #model.MATT7,
        model.MATT8,
        model.MATT9,
        model.MATS1,
        model.MATS3,
        model.MATS8,
    )
    mids = []
    for materials in all_materials:
        mids += materials.keys()
    mids = unique(mids)
    mids.sort()
    nmaterials = len(mids)

    for i in range(nmaterials):
        midi = mids[i]
        mid_map[midi] = mid + i

    #spoints2 = arange(1, len(spoints) + 1)
    for nid, node in sorted(iteritems(model.nodes)):
        nid_new = nid_map[nid]
        node.nid = nid_new

    # properties
    for pidi, prop in sorted(iteritems(model.properties)):
        prop.pid = pid
        pid += 1
    for pidi, prop in sorted(iteritems(model.properties_mass)):
        # PMASS
        prop.pid = pid
        pid += 1
    for pidi, prop in sorted(iteritems(model.convectionProperties)):
        # PCONV
        prop.pid = pid
        pid += 1
    for pidi, prop in sorted(iteritems(model.phbdys)):
        # PHBDY
        prop.pid = pid
        pid += 1

    # elements
    for eidi, element in sorted(iteritems(model.elements)):
        element.eid = eid
        eid_map[eidi] = eid
        eid += 1
    for eidi, element in sorted(iteritems(model.masses)):
        # CONM1, CONM2, CMASSx
        element.eid = eid
        eid_map[eidi] = eid
        eid += 1
    for eidi, elem in sorted(iteritems(model.rigidElements)):
        # RBAR/RBAR1/RBE1/RBE2/RBE3
        elem.eid = eid
        eid_map[eidi] = eid
        eid += 1
    #for eidi, elem in iteritems(model.caeros):
        #pass

    #mid = 1
    for materials in all_materials:
        for midi, material in iteritems(materials):
            mid = mid_map[midi]
            assert hasattr(material, 'mid')
            material.mid = mid

    # spc
    for spc_idi, spc_group in sorted(iteritems(model.spcs)):
        for i, spc in enumerate(spc_group):
            assert hasattr(spc, 'conid')
            spc.conid = spc_id
        spc_map[spc_idi] = spc_id
        spc_id += 1
    for spc_idi, spcadd in sorted(iteritems(model.spcadds)):
        assert hasattr(spcadd, 'conid')
        spcadd.conid = spc_id
        spc_map[spc_idi] = spc_id
        spc_id += 1

    # mpc
    for mpc_idi, mpc_group in sorted(iteritems(model.mpcs)):
        for i, mpc in enumerate(mpc_group):
            assert hasattr(mpc, 'conid')
            mpc.conid = mpc_id
        mpc_map[mpc_idi] = mpc_id
        mpc_id += 1
    for mpc_idi, mpcadd in sorted(iteritems(model.mpcadds)):
        assert hasattr(mpcadd, 'conid')
        mpcadd.conid = mpc_id
        mpc_map[mpc_idi] = mpc_id
        mpc_id += 1

    # coords
    for cidi, coord in sorted(iteritems(model.coords)):
        if cidi == 0:
            cid_map[0] = 0
            continue
        coord.cid = cid
        cid_map[cidi] = cid
        cid += 1

    nlparm_map = {}
    nlpci_map = {}
    table_sdamping_map = {}
    dconstr_map = {}
    dconadd_map = {}
    dresp_map = {}
    gust_map = {}
    trim_map = {}
    tic_map = {}
    csschd_map = {}
    tranfer_function_map = {}
    data = (
        (model.methods, 'sid', method_map),
        (model.cMethods, 'sid', cmethod_map),
        (model.flfacts, 'sid', flfact_map),
        (model.flutters, 'sid', flutter_map),
        (model.frequencies, 'sid', freq_map),
        (model.tsteps, 'sid', tstep_map),
        (model.tstepnls, 'sid', tstepnl_map),
        (model.splines, 'eid', None),
        (model.suport1, 'conid', suport1_map),
        (model.nlparms, 'nlparm_id', nlparm_map),
        (model.nlpcis, 'nlpci_id', nlpci_map),
        (model.tables_sdamping, 'tid', table_sdamping_map),
        (model.dconadds, 'dcid', dconadd_map),
        (model.dconstrs, 'oid', dconstr_map),
        (model.dresps, 'oid', dresp_map),
        (model.gusts, 'sid', gust_map),
        (model.trims, 'sid', trim_map),
        (model.tics, 'sid', tic_map),
        (model.csschds, 'sid', csschd_map),
        (model.aefacts, 'sid', None),
        (model.aelinks, 'sid', None),
        (model.aelists, 'sid', None),
        (model.paeros, 'pid', None),

        (model.sets, 'sid', None),
        #(model.asets, 'sid', None),
        (model.dareas, 'sid', None),
        (model.transfer_functions, 'sid', tranfer_function_map)
        #(model.bsets, 'sid', None),
        #(model.csets, 'sid', None),
        #(model.qsets, 'sid', None),
        #(model.usets, 'sid', None),

        #(model.se_sets, 'sid', None),
        #(model.se_asets, 'sid', None),
        #(model.se_bsets, 'sid', None),
        #(model.se_csets, 'sid', None),
        #(model.se_qsets, 'sid', None),
        #(model.se_usets, 'sid', None),
    )
    param_id = 9999
    for (dict_obj, param_name, mmap) in sorted(data):
        param_id = _roundup(param_id, 1000) + 1
        for idi, param in sorted(iteritems(dict_obj)):
            msg = '%s has no %r; use %s' % (param.type, param_name, object_attributes(param))
            assert hasattr(param, param_name), msg
            setattr(param, param_name, param_id)
            if mmap is not None:
                mmap[idi] = param_id
            param_id += 1

    dessub_map = dconadd_map
    for key, value in iteritems(dconstr_map):
        if key in dessub_map:
            raise NotImplementedError()
        dessub_map[key] = value

    # tables
    for table_idi, table in sorted(sorted(iteritems(model.tables))):
        assert hasattr(table, 'tid')
        table.tid = table_id
        table_id += 1
    for table_idi, table in sorted(sorted(iteritems(model.randomTables))):
        assert hasattr(table, 'tid')
        table.tid = table_id
        table_id += 1

    # dloads
    for dload_idi, dloads in sorted(iteritems(model.dloads)):
        for dload in dloads:
            assert hasattr(dload, 'sid')
            dload.sid = dload_id
        dload_map[dload_idi] = dload_id
        dload_id += 1
    for dload_idi, dloads in sorted(iteritems(model.dload_entries)):
        for dload in dloads:
            assert hasattr(dload, 'sid')
            dload.sid = dload_id
        dload_map[dload_idi] = dload_id
        dload_id += 1


    # loads
    for load_idi, loads in sorted(iteritems(model.loads)):
        for load in loads:
            assert hasattr(load, 'sid')
            load.sid = load_id
        load_map[load_idi] = load_id
        load_id += 1

    # transfer_functions
    for tf_idi, tfs in sorted(iteritems(model.transfer_functions)):
        for tf in tfs:
            assert hasattr(tf, 'sid')
            tf.sid = tf_id
        tranfer_function_map[tf_idi] = tf_id
        tf_id += 1

    lseq_map = load_map # wrong???
    temp_map = load_map # wrong???
    mapper = {
        'elements' : eid_map,
        'nodes' : nid_map,
        'coords' : cid_map,
        'materials' : mid_map,
        'SPC' : spc_map,
        'MPC' : mpc_map,
        'METHOD' : method_map,
        'CMETHOD' : cmethod_map,
        'FLFACT' : flfact_map,
        'FMETHOD' : flutter_map,
        'FREQUENCY' : freq_map,

        'DLOAD' : dload_map,
        'LOAD' : load_map,
        'LOADSET' : lseq_map,
        'TSTEP' : tstep_map,
        'TSTEPNL' : tstepnl_map,
        'SUPORT1' : suport1_map,
        'NLPARM' : nlparm_map,
        'SDAMPING' : table_sdamping_map,
        'DESSUB' : dessub_map,
        'DESOBJ' : dresp_map,
        'GUST' : gust_map,
        'TRIM' : trim_map,
        'IC' : tic_map,
        'CSSCHD' : csschd_map,
        'TFL' : tranfer_function_map,
        #'DESSUB' : dessub_map,
        # bad...
        'TEMPERATURE(LOAD)' : temp_map,
        'TEMPERATURE(INITIAL)' : temp_map,
        #'DATAREC' : datarec_map,
        #'ADAPT' : adapt_map,
        #'SUPER' : super_map,
        #'BOUTPUT' : boutput_map,
        #'OUTRCV' : outrcv_map,
    }
    #print('****suport1_map', suport1_map)
    #print('****dessub_map', dessub_map)
    #print('****dresp_map', dresp_map)
    _update_case_control(model, mapper)
    if bdf_filename_out is not None:
        model.write_bdf(bdf_filename_out, size=size, is_double=is_double,
                        interspersed=False)
    return model
Example #45
    def _add_dataset(self, hdf5_file, key, value, user_custom_types, nlevels):
        #print(key, type(key))
        #print(key, type(key), value, type(value))
        if value is None:
            # Nones can't be stored, so we create a custom type
            sub_group = hdf5_file.create_group(key)
            sub_group.attrs['type'] = 'None'
            return

        if isinstance(key, (integer_types, float_types)):
            raise TypeError(
                'key=%r; key must be a string, not %s\nvalue:\n%r' %
                (key, type(key), value))

        custom_types_list = user_custom_types + [
            'BDF', 'OP2', 'OP2Geom', 'StandardScaler', 'lil_matrix'
        ]
        class_name = value.__class__.__name__

        if isinstance(value, dict):
            try:
                sub_group = hdf5_file.create_group(key)
            except Exception:
                print('key = %s; type=%s' % (key, type(key)))
                raise
            sub_group.attrs['type'] = 'dict'
            self._create_dict_group(sub_group, value, user_custom_types,
                                    nlevels + 1)

        elif isinstance(
                value, (integer_types, float_types, string_types, np.ndarray)):
            try:
                hdf5_file.create_dataset(key, data=value)
            except (TypeError, RuntimeError):
                print('key=%r value=%s type=%s' %
                      (key, str(value), type(value)))
                raise

        elif isinstance(value, tuple):
            self._add_list_tuple(hdf5_file, key, value, 'tuple')
        elif isinstance(value, list):
            self._add_list_tuple(hdf5_file, key, value, 'list')
        elif isinstance(value, set):
            self._add_list_tuple(hdf5_file, key, value, 'set')
        elif hasattr(value, 'export_hdf5_file'):
            #print('export_hdf5_file', key)
            sub_group = hdf5_file.create_group(key)
            sub_group.attrs['type'] = value.__class__.__name__
            value.export_hdf5_file(sub_group, exporter=self)
        elif hasattr(value, 'get_h5attrs'):
            h5attrs = value.get_h5attrs()
            sub_group = hdf5_file.create_group(key)
            sub_group.attrs['type'] = value.__class__.__name__
            self._add_attrs(sub_group, value, h5attrs, user_custom_types,
                            nlevels + 1)
        elif hasattr(value, 'object_attributes'):
            h5attrs = value.object_attributes(mode='both')
            sub_group = hdf5_file.create_group(key)
            sub_group.attrs['type'] = value.__class__.__name__
            self._add_attrs(sub_group, value, h5attrs, user_custom_types,
                            nlevels + 1)
        elif class_name == 'lil_matrix':
            h5attrs = ['dtype', 'shape', 'ndim', 'nnz']  # 'data', 'rows'
            sub_group = hdf5_file.create_group(key)
            sub_group.attrs['type'] = value.__class__.__name__
            self._add_attrs(sub_group, value, h5attrs, user_custom_types,
                            nlevels + 1)
        elif class_name == 'dtype':
            h5attrs = ['dtype']
            sub_group = hdf5_file.create_group(key)
            sub_group.attrs['type'] = value.__class__.__name__
            self._add_attrs(sub_group, value, h5attrs, user_custom_types,
                            nlevels + 1)
        elif class_name in custom_types_list:
            attrs = object_attributes(value, mode='both', keys_to_skip=None)
            sub_group = hdf5_file.create_group(key)
            sub_group.attrs['type'] = class_name
            self._add_attrs(sub_group, value, attrs, user_custom_types,
                            nlevels + 1)
        else:
            print('string_types =', string_types)
            print('value =', value)
            msg = (
                'key=%r Type=%r is not in custom_types=%s and does not have:\n'
                ' - export_hdf5_file(h5_file)\n'
                ' - object_attributes()\n'
                ' - get_h5attrs(self)' % (key, class_name, custom_types_list))
            raise TypeError(msg)
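The method above only makes sense inside its exporter class. As a minimal standalone sketch of the same type-dispatch idea, assuming only h5py and numpy (the file name and demo dict are invented, and non-string keys are simply coerced rather than rejected):

import h5py
import numpy as np

def add_dataset(h5_group, key, value):
    """store scalars/strings/arrays as datasets; tag dicts and Nones via attrs"""
    if value is None:
        # Nones can't be stored, so mark an empty group instead
        sub_group = h5_group.create_group(key)
        sub_group.attrs['type'] = 'None'
    elif isinstance(value, dict):
        sub_group = h5_group.create_group(key)
        sub_group.attrs['type'] = 'dict'
        for keyi, valuei in value.items():
            add_dataset(sub_group, str(keyi), valuei)
    elif isinstance(value, (int, float, str, np.ndarray)):
        h5_group.create_dataset(key, data=value)
    else:
        raise TypeError('key=%r; type=%s is not handled' % (key, type(value)))

with h5py.File('demo.h5', 'w') as hdf5_file:
    add_dataset(hdf5_file, 'model', {'nid': 42, 'xyz': np.zeros(3), 'log': None})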
Example #46
    def _add_dataset(self, hdf5_file, key, value, user_custom_types, nlevels):
        # print(key, type(key))
        # print(key, type(key), value, type(value))
        if value is None:
            # Nones can't be stored, so we create a custom type
            sub_group = hdf5_file.create_group(key)
            sub_group.attrs['type'] = 'None'
            return

        if isinstance(key, (integer_types, float_types)):
            msg = (f'key={key!r}; key must be a string, not {type(key)}\n'
                   f'value:\n{value!r}')
            raise TypeError(msg)

        custom_types_list = user_custom_types + [
            'BDF', 'OP2', 'OP2Geom', 'StandardScaler', 'lil_matrix'
        ]
        class_name = value.__class__.__name__
        if isinstance(value, dict):
            try:
                sub_group = hdf5_file.create_group(key)
            except Exception:
                print(f'key = {key}; type = {type(key)}')
                raise
            sub_group.attrs['type'] = 'dict'
            self._create_dict_group(sub_group, value, user_custom_types,
                                    nlevels + 1)
        elif isinstance(value, (integer_types, float_types, str, np.ndarray)):
            try:
                hdf5_file.create_dataset(key, data=value)
            except (TypeError, RuntimeError):
                print(f'key={key!r} value={str(value)} type={type(value)}')
                raise
        elif isinstance(value, bytes):
            # https://docs.h5py.org/en/stable/strings.html
            # this is incomplete; we need to flag it as binary
            #
            # value_bytes = np.void(value)
            # hdf5_file.create_dataset(key, data=value_bytes)
            msg = f'bytes is not supported (key={key}; value={value})'
            raise NotImplementedError(msg)
        elif isinstance(value, tuple):
            add_list_tuple(hdf5_file, key, value, 'tuple', self.log)
        elif isinstance(value, list):
            add_list_tuple(hdf5_file, key, value, 'list', self.log)
        elif isinstance(value, set):
            add_list_tuple(hdf5_file, key, value, 'set', self.log)
        elif hasattr(value, 'export_hdf5_file'):
            # print('export_hdf5_file', key)
            sub_group = hdf5_file.create_group(key)
            sub_group.attrs['type'] = value.__class__.__name__
            value.export_hdf5_file(sub_group, exporter=self)
        elif hasattr(value, 'get_h5attrs'):
            h5attrs = value.get_h5attrs()
            sub_group = hdf5_file.create_group(key)
            sub_group.attrs['type'] = value.__class__.__name__
            self._add_attrs(sub_group, value, h5attrs, user_custom_types,
                            nlevels + 1)
        elif hasattr(value, 'object_attributes'):
            h5attrs = value.object_attributes(mode='both')
            sub_group = hdf5_file.create_group(key)
            sub_group.attrs['type'] = value.__class__.__name__
            self._add_attrs(sub_group, value, h5attrs, user_custom_types,
                            nlevels + 1)
        elif class_name == 'lil_matrix':
            h5attrs = ['dtype', 'shape', 'ndim', 'nnz']  # 'data', 'rows'
            sub_group = hdf5_file.create_group(key)
            sub_group.attrs['type'] = value.__class__.__name__
            self._add_attrs(sub_group, value, h5attrs, user_custom_types,
                            nlevels + 1)
        elif class_name == 'dtype':
            h5attrs = ['dtype']
            sub_group = hdf5_file.create_group(key)
            sub_group.attrs['type'] = value.__class__.__name__
            self._add_attrs(sub_group, value, h5attrs, user_custom_types,
                            nlevels + 1)
        elif class_name in custom_types_list:
            attrs = object_attributes(value, mode='both', keys_to_skip=None)
            sub_group = hdf5_file.create_group(key)
            sub_group.attrs['type'] = class_name
            self._add_attrs(sub_group, value, attrs, user_custom_types,
                            nlevels + 1)
        else:
            print('value =', value)
            msg = (f'key={key!r} Type={class_name!r} is not in custom_types='
                   f'{custom_types_list} and does not have:\n'
                   f' - export_hdf5_file(h5_file)\n'
                   f' - object_attributes()\n'
                   f' - get_h5attrs(self)')
            raise TypeError(msg)
Example #47
 def object_attributes(self):
     return object_attributes(self, keys_to_skip=['object_attributes', 'object_methods'])
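For context, object_attributes() is an introspection helper. A rough standalone sketch of what such a helper typically does (an assumption for illustration, not pyNastran's actual implementation):

def object_attributes_sketch(obj, mode='public', keys_to_skip=None):
    """list non-callable attribute names of obj, filtered by visibility"""
    keys_to_skip = keys_to_skip or []
    names = []
    for name in dir(obj):
        if name in keys_to_skip or (mode == 'public' and name.startswith('_')):
            continue
        if not callable(getattr(obj, name, None)):
            names.append(name)
    return sorted(names)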
Example #48
def bdf_renumber(bdf_filename,
                 bdf_filename_out,
                 size=8,
                 is_double=False,
                 starting_id_dict=None,
                 round_ids=False,
                 cards_to_skip=None,
                 log=None,
                 debug=False):
    """
    Renumbers a BDF

    Parameters
    ----------
    bdf_filename : str
        a bdf_filename (string; supported) or a BDF model (BDF)
        that has been cross referenced and is fully valid (an
        equivalenced deck is not valid)
    bdf_filename_out : str
        a bdf_filename to write
    size : int; {8, 16}; default=8
        the bdf write precision
    is_double : bool; default=False
        the field precision to write
    starting_id_dict : dict, None (default=None)
        None : renumber everything starting from 1
        dict : {key : starting_id}
            key : str
                the key (e.g. eid, nid, cid, ...)
            starting_id : int, None
                int : the value to start from
                None : don't renumber this key
    round_ids : bool; default=False
        Should a rounding up be applied for each variable?
        This makes it easier to read a deck and verify that it's been
        renumbered properly.
        This only really applies when starting_id_dict is None
    cards_to_skip : List[str]; (default=None -> don't skip any cards)
        There are edge cases (e.g. FLUTTER analysis) where things can
        break due to uncross-referenced cards.  You need to disable
        entire classes of cards in that case (e.g. all aero cards).

    .. todo:: bdf_model option for bdf_filename hasn't been tested
    .. todo:: add support for subsets (e.g. renumber only a subset of nodes/elements)
    .. todo:: doesn't support partial renumbering
    .. todo:: doesn't support element material coordinate systems

    .. warning:: spoints might be problematic...check
    .. warning:: still in development, but it usually brutally crashes
                 if it's not supported
    .. warning:: be careful of unsupported cards (e.g. ones not read in)

    Supports
    ========
     - GRIDs
       - no superelements
     - COORDx

     - elements
        - CELASx/CONROD/CBAR/CBEAM/CQUAD4/CTRIA3/CTETRA/CPENTA/CHEXA
        - RBAR/RBAR1/RBE1/RBE2/RBE3/RSPLINE

     - properties
        - PSHELL/PCOMP/PCOMPG/PSOLID/PSHEAR/PBAR/PBARL
          PROD/PTUBE/PBEAM
     - mass
        - CMASSx/CONMx/PMASS

     - aero
       - FLFACT
       - SPLINEx
       - FLUTTER

     - partial case control
       - METHOD/CMETHOD/FREQUENCY
       - LOAD/DLOAD/LSEQ/LOADSET...LOADSET/LSEQ is iffy
       - SET cards
         - nodes
         - elements
       - SPC/MPC/FLUTTER/FLFACT

    - constraints
       - SPC/SPCADD/SPCAX/SPCD
       - MPC/MPCADD
       - SUPORT/SUPORT1

    - solution control/methods
       - TSTEP/TSTEPNL
       - NLPARM
       - EIGB/EIGC/EIGRL/EIGR

    - sets
       - USET

    - other
      - tables
      - materials
      - loads/dloads


    Not Done
    ========
     - SPOINT
     - any cards with SPOINTs
       - DMIG/DMI/DMIJ/DMIJI/DMIK/etc.
       - CELASx
       - CDAMPx
     - superelements
     - aero cards
       - CAEROx
       - PAEROx
     - thermal cards?
     - optimization cards
     - SETx
     - PARAM,GRDPNT,x; where x>0
     - GRID SEID
     - case control
       - STATSUB
       - SUBCASE
       - global SET cards won't be renumbered properly

    Example 1 - Renumber Everything; Start from 1
    ---------------------------------------------
    bdf_renumber(bdf_filename, bdf_filename_out, size=8, is_double=False,
                 round_ids=False)

    Example 2 - Renumber Everything; Start Material IDs from 100
    ------------------------------------------------------------
    starting_id_dict = {
        'mid' : 100,
    }
    bdf_renumber(bdf_filename, bdf_filename_out, size=8, is_double=False,
                 starting_id_dict=starting_id_dict, round_ids=False)

    Example 3 - Only Renumber Material IDs
    --------------------------------------
    starting_id_dict = {
        'cid' : None,
        'nid' : None,
        'eid' : None,
        'pid' : None,
        'mid' : 1,
        'spc_id' : None,
        'mpc_id' : None,
        'load_id' : None,
        'dload_id' : None,

        'method_id' : None,
        'cmethod_id' : None,
        'spline_id' : None,
        'table_id' : None,
        'flfact_id' : None,
        'flutter_id' : None,
        'freq_id' : None,
        'tstep_id' : None,
        'tstepnl_id' : None,
        'suport_id' : None,
        'suport1_id' : None,
        'tf_id' : None,
    }
    bdf_renumber(bdf_filename, bdf_filename_out, size=8, is_double=False,
                 starting_id_dict=starting_id_dict, round_ids=False)
    """
    starting_id_dict_default = {
        'cid': 1,
        'nid': 1,
        'eid': 1,
        'pid': 1,
        'mid': 1,
        'spc_id': 1,
        'mpc_id': 1,
        'load_id': 1,
        'set_id': 1,
        'dload_id': 1,
        'method_id': 1,
        'cmethod_id': 1,
        'spline_id': 1,
        'table_id': 1,
        'flfact_id': 1,
        'flutter_id': 1,
        'freq_id': 1,
        'tstep_id': 1,
        'tstepnl_id': 1,
        'suport_id': 1,
        'suport1_id': 1,
        'tf_id': 1,
    }
    # fill up starting_id_dict
    if starting_id_dict is None:
        starting_id_dict = starting_id_dict_default
    else:
        for key, value in iteritems(starting_id_dict_default):
            if key not in starting_id_dict:
                starting_id_dict[key] = value

    nid = None
    cid = None
    eid = None
    pid = None
    mid = None
    nsm_id = None
    set_id = None
    spc_id = None
    mpc_id = None
    load_id = None
    dload_id = None
    method_id = None
    cmethod_id = None
    spline_id = None
    table_id = None
    flfact_id = None
    flutter_id = None
    freq_id = None
    tstep_id = None
    tstepnl_id = None
    suport_id = None
    suport1_id = None
    tf_id = None

    # turn them into variables
    for key, value in sorted(iteritems(starting_id_dict)):
        #assert isinstance(key, string_types), key
        assert key in starting_id_dict_default, 'key=%r is invalid' % (key)
        #assert isidentifier(key), 'key=%s is invalid' % key
        if value is not None and not isinstance(value, integer_types):
            msg = 'key=%r value=%r must be an integer; type(value)=%s' % (
                key, value, type(value))
            raise TypeError(msg)
        # None means "don't renumber this key"
        value = int(value) if value is not None else None

        if key == 'nid':
            nid = value
        elif key == 'cid':
            cid = value
        elif key == 'set_id':
            set_id = value
        elif key == 'eid':
            eid = value
        elif key == 'pid':
            pid = value
        elif key == 'mid':
            mid = value
        elif key == 'spc_id':
            spc_id = value
        elif key == 'mpc_id':
            mpc_id = value
        elif key == 'load_id':
            load_id = value
        elif key == 'dload_id':
            dload_id = value
        elif key == 'method_id':
            method_id = value
        elif key == 'cmethod_id':
            cmethod_id = value
        elif key == 'spline_id':
            spline_id = value
        elif key == 'table_id':
            table_id = value
        elif key == 'flfact_id':
            flfact_id = value
        elif key == 'flutter_id':
            flutter_id = value
        elif key == 'freq_id':
            freq_id = value
        elif key == 'tstep_id':
            tstep_id = value
        elif key == 'tstepnl_id':
            tstepnl_id = value
        elif key == 'suport_id':
            suport_id = value
        elif key == 'suport1_id':
            suport1_id = value
        elif key == 'tf_id':
            tf_id = value
        else:
            raise NotImplementedError('key=%r' % key)

    # build the maps
    mass_id_map = {}
    nid_map = {}
    properties_map = {}
    properties_mass_map = {}
    reverse_nid_map = {}
    eid_map = {}
    rigid_elements_map = {}
    nsm_map = {}
    mid_map = {}
    cid_map = {}
    mpc_map = {}
    spc_map = {}
    dload_map = {}
    load_map = {}

    cmethod_map = {}
    method_map = {}
    flfact_map = {}
    flutter_map = {}
    freq_map = {}
    tstep_map = {}
    tstepnl_map = {}
    #suport_map = {}
    suport1_map = {}

    if isinstance(bdf_filename, string_types):
        model = BDF(log=log, debug=debug)
        model.disable_cards(cards_to_skip)
        model.read_bdf(bdf_filename)
    else:
        model = bdf_filename

    spoints = list(model.spoints.keys())
    epoints = list(model.epoints.keys())

    nids = model.nodes.keys()

    nids_spoints_epoints = sorted(chain(nids, spoints, epoints))
    i = 1
    #model.log.debug(starting_id_dict)
    if 'nid' in starting_id_dict and nid is not None:
        i = nid
        #banned_nodes = spoints
        for nidi in nids_spoints_epoints:
            if nidi in spoints or nidi in epoints:
                pass
                #print('sid=%s -> %s' % (nid, i))
                #i += 1
            else:
                while i in spoints or i in epoints:
                    #print('*bump')
                    i += 1
                #print('nid=%s -> %s' % (nid, i))
                nid_map[nidi] = i
                reverse_nid_map[i] = nidi
                i += 1
        #for nid in sorted(nids):
        #nid_map[nid] = i
        #reverse_nid_map[i] = nid
        #i += 1
        #print(nid_map)
        #print(reverse_nid_map)
    else:
        for nid in nids_spoints_epoints:
            nid_map[nid] = nid
            reverse_nid_map[nid] = nid

    all_materials = (
        model.materials,
        model.creep_materials,
        model.thermal_materials,
        model.hyperelastic_materials,
        model.MATT1,
        model.MATT2,
        model.MATT3,
        model.MATT4,
        model.MATT5,
        #model.MATT6,
        #model.MATT7,
        model.MATT8,
        model.MATT9,
        model.MATS1,
        model.MATS3,
        model.MATS8,
    )

    if mid is not None:
        mids = []
        for materials in all_materials:
            mids += materials.keys()
        mids = np.unique(mids)
        mids.sort()
        nmaterials = len(mids)

        for i in range(nmaterials):
            midi = mids[i]
            mid_map[midi] = mid + i

    if 'nid' in starting_id_dict and nid is not None:
        #spoints2 = arange(1, len(spoints) + 1)
        for nid, node in sorted(iteritems(model.nodes)):
            nid_new = nid_map[nid]
            #print('nid=%s -> %s' % (nid,nid_new))
            node.nid = nid_new

    if 'pid' in starting_id_dict and pid is not None:
        # properties
        for pidi, prop in sorted(iteritems(model.properties)):
            prop.pid = pid
            properties_map[pidi] = pid
            pid += 1
        for pidi, prop in sorted(iteritems(model.properties_mass)):
            # PMASS
            prop.pid = pid
            properties_mass_map[pidi] = pid
            pid += 1
        for pidi, prop in sorted(iteritems(model.convection_properties)):
            # PCONV
            prop.pid = pid
            pid += 1
        for pidi, prop in sorted(iteritems(model.phbdys)):
            # PHBDY
            prop.pid = pid
            pid += 1

    if 'eid' in starting_id_dict and eid is not None:
        # elements
        for eidi, element in sorted(iteritems(model.elements)):
            element.eid = eid
            eid_map[eidi] = eid
            eid += 1
        for eidi, element in sorted(iteritems(model.masses)):
            # CONM1, CONM2, CMASSx
            element.eid = eid
            eid_map[eidi] = eid
            mass_id_map[eidi] = eid
            eid += 1
        for eidi, elem in sorted(iteritems(model.rigid_elements)):
            # RBAR/RBAR1/RBE1/RBE2/RBE3/RSPLINE
            elem.eid = eid
            eid_map[eidi] = eid
            rigid_elements_map[eidi] = eid
            eid += 1
        #for eidi, elem in iteritems(model.caeros):
        #pass

    if 'mid' in starting_id_dict and mid is not None:
        #mid = 1
        for materials in all_materials:
            for midi, material in iteritems(materials):
                mid = mid_map[midi]
                assert hasattr(material, 'mid')
                material.mid = mid

    if 'spc_id' in starting_id_dict and spc_id is not None:
        # spc
        for spc_idi, spc_group in sorted(iteritems(model.spcs)):
            for i, spc in enumerate(spc_group):
                assert hasattr(spc, 'conid')
                spc.conid = spc_id
            spc_map[spc_idi] = spc_id
            spc_id += 1
    else:
        for spc_id in model.spcs:
            spc_map[spc_id] = spc_id

    if 'mpc_id' in starting_id_dict and mpc_id is not None:
        # mpc
        for mpc_idi, mpc_group in sorted(iteritems(model.mpcs)):
            for i, mpc in enumerate(mpc_group):
                assert hasattr(mpc, 'conid')
                mpc.conid = mpc_id
            mpc_map[mpc_idi] = mpc_id
            mpc_id += 1
    else:
        for mpc_id in model.mpcs:
            mpc_map[mpc_id] = mpc_id

    if 'cid' in starting_id_dict and cid is not None:
        # coords
        for cidi, coord in sorted(iteritems(model.coords)):
            if cidi == 0:
                cid_map[0] = 0
                continue
            coord.cid = cid
            cid_map[cidi] = cid
            cid += 1

    if 'freq_id' in starting_id_dict and freq_id is not None:
        # frequencies
        for freqi, freqs in sorted(iteritems(model.frequencies)):
            freq_map[freqi] = freq_id
            for freq in freqs:
                freq.sid = freq_id
            freq_id += 1
    set_map = {}
    if 'set_id' in starting_id_dict and set_id is not None:
        # sets
        for sidi, set_ in sorted(iteritems(model.sets)):
            set_.sid = set_id
            set_map[sidi] = set_id
            set_id += 1

    spline_id_map = {}
    if 'spline_id' in starting_id_dict and spline_id is not None:
        # splines
        for sidi, spline in sorted(iteritems(model.splines)):
            spline.eid = spline_id
            spline_id_map[sidi] = spline_id
            spline_id += 1

    nlparm_map = {}
    nlpci_map = {}
    table_sdamping_map = {}
    dconstr_map = {}
    dconadd_map = {}
    dresp_map = {}
    gust_map = {}
    trim_map = {}
    tic_map = {}
    csschd_map = {}
    tranfer_function_map = {}
    data = (
        (model.methods, 'sid', method_map),
        (model.cMethods, 'sid', cmethod_map),
        (model.flfacts, 'sid', flfact_map),
        (model.flutters, 'sid', flutter_map),
        (model.tsteps, 'sid', tstep_map),
        (model.tstepnls, 'sid', tstepnl_map),
        (model.suport1, 'conid', suport1_map),
        (model.nlparms, 'nlparm_id', nlparm_map),
        (model.nlpcis, 'nlpci_id', nlpci_map),
        (model.tables_sdamping, 'tid', table_sdamping_map),
        (model.dconadds, 'dcid', dconadd_map),
        #(model.dconstrs, 'oid', dconstr_map),
        (model.dresps, 'dresp_id', dresp_map),
        (model.gusts, 'sid', gust_map),
        (model.trims, 'sid', trim_map),
        (model.tics, 'sid', tic_map),
        (model.csschds, 'sid', csschd_map),
        (model.aefacts, 'sid', None),
        (model.aelinks, 'sid', None),
        (model.aelists, 'sid', None),
        (model.paeros, 'pid', None),

        #(model.asets, 'sid', None),
        (model.dareas, 'sid', None),
        (model.transfer_functions, 'sid', tranfer_function_map)
        #(model.bsets, 'sid', None),
        #(model.csets, 'sid', None),
        #(model.qsets, 'sid', None),
        #(model.usets, 'sid', None),

        #(model.se_sets, 'sid', None),
        #(model.se_asets, 'sid', None),
        #(model.se_bsets, 'sid', None),
        #(model.se_csets, 'sid', None),
        #(model.se_qsets, 'sid', None),
        #(model.se_usets, 'sid', None),
    )

    # apply the simple to update parameters
    param_id = 9999
    for (dict_obj, param_name, mmap) in data:
        if round_ids:
            param_id = roundup(param_id, 1000) + 1
        else:
            param_id = 1
        for idi, param in sorted(iteritems(dict_obj)):
            try:
                msg = '%s has no %r; use %s' % (param.type, param_name,
                                                object_attributes(param))
            except AttributeError:
                model.log.error('param = %r' % param)
                raise
            assert hasattr(param, param_name), msg
            setattr(param, param_name, param_id)
            if mmap is not None:
                mmap[idi] = param_id
            param_id += 1

    # start the complicated set
    # dconstr
    dessub_map = dconadd_map
    for key, value in iteritems(dconstr_map):
        if key in dessub_map:
            raise NotImplementedError()
        dessub_map[key] = value

    # tables
    for table_idi, table in sorted(iteritems(model.tables)):
        assert hasattr(table, 'tid')
        table.tid = table_id
        table_id += 1
    for table_idi, table in sorted(iteritems(model.random_tables)):
        assert hasattr(table, 'tid')
        table.tid = table_id
        table_id += 1

    # dloads
    for dload_idi, dloads in sorted(iteritems(model.dloads)):
        for dload in dloads:
            assert hasattr(dload, 'sid')
            dload.sid = dload_id
        dload_map[dload_idi] = dload_id
        dload_id += 1
    for dload_idi, dloads in sorted(iteritems(model.dload_entries)):
        for dload in dloads:
            assert hasattr(dload, 'sid')
            dload.sid = dload_id
        dload_map[dload_idi] = dload_id
        dload_id += 1

    # loads
    for load_idi, loads in sorted(iteritems(model.loads)):
        for load in loads:
            assert hasattr(load, 'sid')
            load.sid = load_id
        load_map[load_idi] = load_id
        load_id += 1

    # transfer_functions
    for tf_idi, tfs in sorted(iteritems(model.transfer_functions)):
        for tf in tfs:
            assert hasattr(tf, 'sid')
            tf.sid = tf_id
        tranfer_function_map[tf_idi] = tf_id
        tf_id += 1

    lseq_map = load_map  # wrong???
    temp_map = load_map  # wrong???
    mapper = {
        'elements': eid_map,
        'masses': mass_id_map,
        'rigid_elements': rigid_elements_map,
        'nodes': nid_map,
        'coords': cid_map,
        'materials': mid_map,
        'properties': properties_map,
        'properties_mass': properties_mass_map,
        'SPC': spc_map,
        'MPC': mpc_map,   # TODO: come up with a unified system that uses the same key
        'mpcs': mpc_map,  #       for bdf_merge and _update_case_control
        'METHOD': method_map,
        'CMETHOD': cmethod_map,
        'FLFACT': flfact_map,
        'FMETHOD': flutter_map,
        'FREQUENCY': freq_map,
        'sets': set_map,
        'splines': spline_id_map,
        'DLOAD': dload_map,
        'LOAD': load_map,
        'LOADSET': lseq_map,
        'TSTEP': tstep_map,
        'TSTEPNL': tstepnl_map,
        'SUPORT1': suport1_map,
        'NLPARM': nlparm_map,
        'SDAMPING': table_sdamping_map,
        'DESSUB': dessub_map,
        'DESOBJ': dresp_map,
        'GUST': gust_map,
        'TRIM': trim_map,
        'IC': tic_map,
        'CSSCHD': csschd_map,
        'TFL': tranfer_function_map,
        #'DESSUB' : dessub_map,
        # bad...
        'TEMPERATURE(LOAD)': temp_map,
        'TEMPERATURE(INITIAL)': temp_map,
        #'DATAREC' : datarec_map,
        #'ADAPT' : adapt_map,
        #'SUPER' : super_map,
        #'BOUTPUT' : boutput_map,
        #'OUTRCV' : outrcv_map,
    }
    #print('****suport1_map', suport1_map)
    #print('****dessub_map', dessub_map)
    #print('****dresp_map', dresp_map)
    _update_case_control(model, mapper)
    if bdf_filename_out is not None:
        model.write_bdf(bdf_filename_out,
                        size=size,
                        is_double=is_double,
                        interspersed=False)
    return model, mapper
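A usage sketch for the function above; the file names are hypothetical:

model, mapper = bdf_renumber(
    'model_in.bdf', 'model_out.bdf', size=8, is_double=False,
    starting_id_dict={'mid': 100},  # materials start at 100; everything else at 1
    round_ids=False)
print(mapper['materials'])  # old material id -> new material id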
Example #49
    def execute(self):
        """Runs the NastranComponent.

        We are overriding ExternalCode's execute function.
        The steps are:
           1. Get a list of the input variables
           2. Read the BDF file into pyNastran's BDF object
           3. Using the info collected in Step #1, replace some
                of the values in the BDF object
           4. Call the update_hook method. Subclasses can override that
               to do processing of the BDF file before it is written out
               again
           5. Write the modified BDF file
           6. Run Nastran
           7. Read the OP2 file (and the F06 if needed to get Nastran run error info)
           8. Read the results from the OP2 file and set output variables for this
                Component

        RuntimeError
            The component relies on ExternalCode which can throw all
            sorts of RuntimeError-like exceptions (RunStopped,
            RunInterrupted also included).

        Filesystem-type Errors
            NastranComponent makes a temporary directory with mkdtemp
            in the temp module. If that fails, the error just
            propagates up.


        While there are no explicit parameters or return values for this
        function, it gets all the input it needs from the design
        variables that are connected to the subclass of NastranComponent.
        This should be described pretty well in the :ref:`documentation<NastranComponent>`.

        """

        # all of these are {"traitname" : trait}
        smart_replacements = {}
        output_variables = {}
        grid_outputs = {}

        for name, trait in iteritems(self.traits()):
            if trait.iotype == "in":
                if trait.nastran_card and trait.nastran_id and trait.nastran_field:
                    smart_replacements[name] = trait
                elif trait.nastran_card or trait.nastran_id or trait.nastran_field:
                    raise RuntimeError("You specified at least one of " + \
                                    "nastran_card, nastran_id, and " + \
                                    "nastran_field, but you did " + \
                                    "not specify all of them. You " + \
                                    "most probably mistyped.")

            elif trait.iotype == "out":

                # if we want to supply a function that will parse
                # out the wanted information from the output object
                if trait.nastran_func:
                    output_variables[name] = trait

                # this is the grid method of accessing. We have to
                # specify a header, id, and column and
                # the output variable will be set to that value
                elif trait.nastran_header and trait.nastran_constraints:
                    grid_outputs[name] = trait
                elif trait.nastran_header or trait.nastran_constraints:
                    raise RuntimeError("You specified at least one of " + \
                                    "nastran_header and nastran_constraints"+\
                                    ", but you " + \
                                    "did not specify all them. You " + \
                                    "most probably mistyped")

        # do our work in a tmp dir
        tmpdir = mkdtemp(dir=self.output_tempdir_dir)
        tmppath = os.path.join(tmpdir, "input.bdf")

        pyNastran_get_card_methods = {
            'PSHELL': 'Property',
            'PROD': 'Property',
            'FORCE': 'Load',
            'MAT1': 'Material',
            }

        ########## Read BDF ##########
        self.timing_section( "Read BDF" )

        self.bdf = BDF(debug=False,log=logging.getLogger() )
        self.bdf.readBDF(self.nastran_filename,xref=True)

        ########## Modify BDF ##########
        self.timing_section( "Modify BDF" )
        for name, trait in iteritems(smart_replacements):
            value = getattr(self, name)
            nastran_id = int( trait.nastran_id )
            get_method = getattr( self.bdf, pyNastran_get_card_methods[ trait.nastran_card ] )
            # some of these methods have an extra arg for error reporting
            args = inspect.getargspec(get_method).args
            if 'msg' in args:
                nastran_item = get_method( nastran_id, 'dummy msg' )
            else:
                nastran_item = get_method( nastran_id )

            if trait.nastran_card == 'FORCE' :
                nastran_item = nastran_item[0]

            setattr(nastran_item, trait.nastran_field, value)

        ########## update hook ##########
        self.update_hook()

        ########## write modified BDF ##########
        self.timing_section( "Write modified BDF" )
         #self.bdf.write_bdf(tmppath)
        self.bdf.write_bdf(tmppath,precision='double',size=16)

        ########## Run Nastran via subprocess ##########
        self.timing_section( "Run Nastran" )
        self.output_filename = os.path.join(tmpdir, "input.out")
        print(self.output_filename)  # perhaps this should be logged, or something

        # Then we run the nastran file
        if self.nastran_command == 'python':  # True when using fake_nastran.py
            self.command = [self.nastran_command,
                            self.nastran_command_args[0], tmppath]
            self.command.extend(self.nastran_command_args[1:])
        else:
            self.command = [self.nastran_command, tmppath]
            self.command.extend(self.nastran_command_args)
        self.command.extend(["batch=no", "out=" + tmpdir, "dbs=" + tmpdir])

        # This calls ExternalCode's execute which will run
        super(NastranComponent, self).execute()

        ########## read OP2 ##########
        self.timing_section( "Read OP2" )
        op2_filename = self.output_filename[:-4] + '.op2'
        f06_filename = self.output_filename
        self.op2 = OP2(op2_filename, debug=False,log=None)
        #self.op2.make_op2_debug = True   # can create a HUGE file that slows things down a lot

        if os.path.exists(op2_filename):
            try:
                self.op2.read_op2()  # doesn't tell you what the error message is
            except FatalError:
                try:
                    self.f06 = F06(f06_filename, debug=False)  # debug True makes it slow
                    self.f06.read_f06()
                except FatalError as err:
                    raise RuntimeError('Nastran fatal error: ' + str(err))
        elif os.path.exists(f06_filename):
            try:
                self.f06 = F06(f06_filename, debug=False)  # debug True makes it slow
                self.f06.read_f06()  # this will stop with a FatalError with the proper FATAL message
            except FatalError as err:
                raise RuntimeError('Nastran fatal error: ' + str(err))
        else:
            raise RuntimeError('nastran fatal error')

        ########## get the outputs using pyNastran ##########
        self.timing_section( "Set outputs using data from OP2" )

        from pyNastran.utils import object_attributes
        for name, trait in iteritems(grid_outputs):
            if trait.nastran_header == 'displacements':
                var = getattr(self.op2, trait.nastran_header)
                case = var[trait.nastran_subcase]
                for key, eid in iteritems(trait.nastran_constraints):  # e.g., "translations", 18
                    if not hasattr(case, key):
                        #op2.displacements[isubcase=1] doesn't have an attribute "translation".
                        #valid attributes are: ["translations", "rotations"]
                        msg = "op2.%s[isubcase=%i] does not have an attribute '%s'. " % (
                            trait.nastran_header, trait.nastran_subcase, key)
                        msg += "Valid attributes are: %s" % str(['%s' % att for att in object_attributes(case)])
                        raise KeyError(msg)
                    results_data = getattr(case, key)  # get the translations
                    if trait.nastran_time_step_freq_mode:
                        disp = results_data[trait.nastran_time_step_freq_mode][eid]
                    else:  # "transient" result
                        disp = results_data[eid]  # get the specific ID
                    setattr(self, name, disp[trait.nastran_dof])
            else:
                raise RuntimeError("The Nastran header, %s, is not supported yet" % trait.nastran_header)

        # displacement_columns = ['T1','T2','T3']
        # for name, trait in iteritems(grid_outputs):
        #     table = trait.nastran_table
        #     subcase = trait.nastran_subcase
        #     nastran_id = trait.nastran_id
        #     column = trait.nastran_column
        #     if table == "displacement vector" :
        #         ixyz = displacement_columns.index( column )
        #         setattr(self, name, self.op2.displacements[subcase].translations[nastran_id][ixyz])

        for output_name, output_trait in iteritems(output_variables):
            # We run trait.nastran_func on op2 to get output values
            if output_trait.nastran_args:
                setattr(self, output_name,
                        output_trait.nastran_func(self.op2, **output_trait.nastran_args))
            else:
                setattr(self, output_name,
                        output_trait.nastran_func(self.op2))

        self.timing_section( "" )

        # get rid of our tmp dir
        tmpdir_to_delete = ""
        if self.delete_tmp_files:
            if self.keep_first_iteration:
                if not self._seen_first_iteration:
                    self._seen_first_iteration = True
                else:
                    if self.keep_last_iteration: # keep both
                        tmpdir_to_delete = self._last_seen_iteration
                        self._last_seen_iteration = tmpdir
                    else: # just keep first
                        tmpdir_to_delete = tmpdir
            else:
                if self.keep_last_iteration: # only keep last
                    tmpdir_to_delete = self._last_seen_iteration
                    self._last_seen_iteration = tmpdir
                else: # don't keep anything
                    tmpdir_to_delete = tmpdir

            if tmpdir_to_delete:
                rmtree(tmpdir_to_delete)
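The "smart replacement" step above boils down to looking a card up by its id and calling setattr() on one of its fields. A minimal sketch with pyNastran's BDF, assuming a deck containing a MAT1 with mid=1 (file names and values are hypothetical):

from pyNastran.bdf.bdf import BDF

model = BDF(debug=False)
model.read_bdf('input.bdf', xref=True)
mat = model.materials[1]   # the MAT1 card with mid=1
setattr(mat, 'e', 2.0e7)   # the same mechanism as trait.nastran_field above
model.write_bdf('modified.bdf')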
Example #50
def export_bdf_to_hdf5_file(hdf5_file, model, exporter=None):
    """
    Converts the BDF object into an hdf5 object

    Parameters
    ----------
    hdf5_file : H5File()
        an h5py object
    model : BDF()
        the BDF model to export
    exporter : HDF5Exporter; default=None
        unused

    """
    unused_attrs = object_attributes(model, mode='both', keys_to_skip=None)
    encoding = model.get_encoding()

    if 'GRID' in model.card_count:
        model.log.debug('exporting nodes')
        node_group = hdf5_file.create_group('nodes')
        grid_group = node_group.create_group('GRID')
        nids = model._type_to_id_map['GRID']
        if len(nids) == 0:
            assert len(model.nodes) == 0, len(model.nodes)
        CARD_MAP['GRID'].export_to_hdf5(grid_group, model, nids)

    _hdf5_export_group(hdf5_file, model, 'coords', encoding, debug=False)
    _hdf5_export_elements(hdf5_file, model, encoding)

    # explicit groups
    #
    # these are broken down by card type
    # they came from dict_int_obj_attrs
    groups_to_export = [
        'properties',
        'masses',
        'rigid_elements',
        'plotels',

        # materials
        'materials',
        'thermal_materials',
        'creep_materials',
        'hyperelastic_materials',
        #'MATS1',
        #'MATT1', 'MATT2', 'MATT3', 'MATT4', 'MATT5', 'MATT8', 'MATT9',

        # aero
        'caeros',
        'splines',
        'flutters',
        'trims',
        'csschds',
        'gusts',

        # other
        'methods',
        'tables',
        'desvars',
    ]
    for group_name in groups_to_export:
        _hdf5_export_group(hdf5_file, model, group_name, encoding)

    unused_dict_int_attrs = [  # TODO: not used...
        # required
        '_dmig_temp',
        'include_filenames',
        'superelement_models',
        'values_to_skip',

        # removed
        #'rsolmap_to_str',
        #'nid_map',
        #'subcases',
    ]

    _export_dict_int_obj_attrs(model, hdf5_file, encoding)
    _export_dict_int_list_obj_attrs(model, hdf5_file, encoding)
    _export_dconstrs(hdf5_file, model, encoding)

    #for key in scalar_obj_keys:
    #value = getattr(model, key)
    #hdf5_file.create_dataset(key, value)
    if model.params:
        model.log.debug('exporting params')
        skip_attrs = ['comment', '_field_map']
        group = hdf5_file.create_group('params')
        for key, param in model.params.items():
            _h5_export_class(group,
                             model,
                             key,
                             param,
                             skip_attrs,
                             encoding,
                             debug=False)

    if model.aelinks:
        model.log.debug('exporting aelinks')
        skip_attrs = ['comment', '_field_map']
        group = hdf5_file.create_group('aelinks')
        for aelink_id, aelinks in model.aelinks.items():
            groupi = group.create_group(str(aelink_id))
            for j, aelinki in enumerate(aelinks):
                key = str(j)
                _h5_export_class(groupi,
                                 model,
                                 key,
                                 aelinki,
                                 skip_attrs,
                                 encoding,
                                 debug=False)

    if model.usets:
        model.log.debug('exporting usets')
        skip_attrs = ['comment', '_field_map']
        group = hdf5_file.create_group('usets')
        for name, usets in model.usets.items():
            groupi = group.create_group(name)
            #print(usets)
            for i, uset in enumerate(usets):
                #print(uset.get_stats())
                key = str(i)
                _h5_export_class(groupi,
                                 model,
                                 key,
                                 uset,
                                 skip_attrs,
                                 encoding,
                                 debug=False)

    _export_scalar_group(hdf5_file, model, encoding)

    skip_attrs = ['comment', '_field_map']
    for key in scalar_obj_keys:
        value = getattr(model, key)
        if value is None:
            #print('None: %s %s' % (key, value))
            pass
        else:
            model.log.debug('exporting %s' % key)
            _h5_export_class(hdf5_file,
                             model,
                             key,
                             value,
                             skip_attrs,
                             encoding,
                             debug=False)

    _export_list_keys(model, hdf5_file, LIST_KEYS)
    _export_list_obj_keys(model, hdf5_file, LIST_OBJ_KEYS, encoding)

    cards_to_read = sorted(key.encode(encoding) for key in model.cards_to_read)
    hdf5_file.create_dataset('cards_to_read', data=cards_to_read)
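A usage sketch for export_bdf_to_hdf5_file as defined above; the file names are hypothetical:

import h5py
from pyNastran.bdf.bdf import BDF

model = BDF()
model.read_bdf('model.bdf')
with h5py.File('model.h5', 'w') as hdf5_file:
    export_bdf_to_hdf5_file(hdf5_file, model)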
Example #51
class OP2(OP2_Scalar, OP2Writer):
    _properties = ['is_real', 'is_complex', 'is_random',
                   '_sort_method', 'is_sort1', 'is_sort2',
                   'matrix_tables', 'table_name_str']

    def __init__(self, debug: Optional[bool]=True, log: Any=None,
                 debug_file: Optional[str]=None, mode: Optional[str]=None) -> None:
        """Initializes the OP2 object

        Parameters
        ----------
        debug : bool/None; default=True
            used to set the logger if no logger is passed in
                True:  logs debug/info/warning/error messages
                False: logs info/warning/error messages
                None:  logs warning/error messages
        log : Log()
            a logging object to write debug messages to
            (.. seealso:: import logging)
        debug_file : str; default=None (No debug)
            sets the filename that will be written to
        mode : str; default=None -> 'msc'
            {msc, nx}

        """
        self.encoding = None
        self.mode = mode
        if mode is not None:
            self.set_mode(mode)
        make_geom = False
        assert make_geom is False, make_geom
        OP2_Scalar.__init__(self, debug=debug, log=log, debug_file=debug_file)
        self.ask = False
        self.post = None
        self.table_count = defaultdict(int)
        self._set_mode(mode)

    def __del__(self) -> None:
        if hasattr(self, 'h5_file') and self.h5_file is not None:
            self.h5_file.close()

    def object_attributes(self, mode: str='public',
                          keys_to_skip: Optional[List[str]]=None,
                          filter_properties: bool=False) -> List[str]:
        """List the names of attributes of a class as strings.

        Returns public attributes as default.

        Parameters
        ----------
        mode : str
            defines what kind of attributes will be listed
            * 'public' - names that do not begin with underscore
            * 'private' - names that begin with single underscore
            * 'both' - private and public
            * 'all' - all attributes that are defined for the object
        keys_to_skip : List[str]; default=None -> []
            names to not consider to avoid deprecation warnings

        Returns
        -------
        attribute_names : List[str]
            sorted list of the names of attributes of a given type or None
            if the mode is wrong

        """
        if keys_to_skip is None:
            keys_to_skip = []
        my_keys_to_skip = ['object_methods', 'object_attributes',]
        keys_to_skip = keys_to_skip + my_keys_to_skip  # don't mutate the caller's list

        # TODO: kwargs support
        kw = {'mode': mode,
              'keys_to_skip': keys_to_skip,
              'filter_properties': filter_properties}
        return object_attributes(self, **kw)
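A quick sketch of the method above in use:

op2 = OP2(debug=False)
public_attrs = op2.object_attributes(mode='public', filter_properties=True)
print(public_attrs[:5])  # the first few public attribute names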
Example #52
 def test_object_attributes_02(self):
     """tests getting object attributes with key skipping"""
     model = BDF(debug=False)
     keys = []
     object_attributes(model, mode='public', keys_to_skip=keys)
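Passing a non-empty skip list filters those names from the result; 'nodes' below is just an illustrative key:

object_attributes(model, mode='public', keys_to_skip=['nodes'])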
Example #55
def bdf_renumber(bdf_filename, bdf_filename_out, size=8, is_double=False,
                 starting_id_dict=None, round_ids=False, cards_to_skip=None):
    """
    Renumbers a BDF

    Parameters
    ----------
    bdf_filename : str
        a bdf_filename (string; supported) or a BDF model (BDF)
        that has been cross referenced and is fully valid (an equivalenced deck is not valid)
    bdf_filename_out : str
        a bdf_filename to write
    size : int; {8, 16}; default=8
        the bdf write precision
    is_double : bool; default=False
        the field precision to write
    starting_id_dict : dict, None (default=None)
        None : renumber everything starting from 1
        dict : {key : starting_id}
            key : str
                the key (e.g. eid, nid, cid, ...)
            starting_id : int, None
                int : the value to start from
                None : don't renumber this key
    round_ids : bool; default=False
        Should a rounding up be applied for each variable?
        This makes it easier to read a deck and verify that it's been renumbered properly.
        This only really applies when starting_id_dict is None
    cards_to_skip : List[str]; (default=None -> don't skip any cards)
        There are edge cases (e.g. FLUTTER analysis) where things can break due to
        uncross-referenced cards.  You need to disable entire classes of cards in
        that case (e.g. all aero cards).

    .. todo:: bdf_model option for bdf_filename hasn't been tested
    .. todo:: add support for subsets (e.g. renumber only a subset of nodes/elements)
    .. todo:: doesn't support partial renumbering
    .. todo:: doesn't support element material coordinate systems

    .. warning:: spoints might be problematic...check
    .. warning:: still in development, but it usually brutally crashes if it's not supported
    .. warning:: be careful of unsupported cards (e.g. ones not read in)

    Supports
    ========
     - GRIDs
       - no superelements
     - COORDx

     - elements
        - CELASx/CONROD/CBAR/CBEAM/CQUAD4/CTRIA3/CTETRA/CPENTA/CHEXA
        - RBAR/RBAR1/RBE1/RBE2/RBE3/RSPLINE

     - properties
        - PSHELL/PCOMP/PCOMPG/PSOLID/PSHEAR/PBAR/PBARL
          PROD/PTUBE/PBEAM
     - mass
        - CMASSx/CONMx/PMASS

     - aero
       - FLFACT
       - SPLINEx
       - FLUTTER

     - partial case control
       - METHOD/CMETHOD/FREQUENCY
       - LOAD/DLOAD/LSEQ/LOADSET...LOADSET/LSEQ is iffy
       - SET cards
         - nodes
         - elements
       - SPC/MPC/FLUTTER/FLFACT

    - constraints
       - SPC/SPCADD/SPCAX/SPCD
       - MPC/MPCADD
       - SUPORT/SUPORT1

    - solution control/methods
       - TSTEP/TSTEPNL
       - NLPARM
       - EIGB/EIGC/EIGRL/EIGR

    - sets
       - USET

    - other
      - tables
      - materials
      - loads/dloads


    Not Done
    ========
     - SPOINT
     - any cards with SPOINTs
       - DMIG/DMI/DMIJ/DMIJI/DMIK/etc.
       - CELASx
       - CDAMPx
     - superelements
     - aero cards
       - CAEROx
       - PAEROx
     - thermal cards?
     - optimization cards
     - SETx
     - PARAM,GRDPNT,x; where x>0
     - GRID SEID
     - case control
       - STATSUB
       - SUBCASE
       - global SET cards won't be renumbered properly

    Example 1 - Renumber Everything; Start from 1
    ---------------------------------------------
    bdf_renumber(bdf_filename, bdf_filename_out, size=8, is_double=False,
                 round_ids=False)

    Example 2 - Renumber Everything; Start Material IDs from 100
    ------------------------------------------------------------
    starting_id_dict = {
        'mid' : 100,
    }
    bdf_renumber(bdf_filename, bdf_filename_out, size=8, is_double=False,
                 starting_id_dict=starting_id_dict, round_ids=False)

    Example 3 - Only Renumber Material IDs
    --------------------------------------
    starting_id_dict = {
        'cid' : None,
        'nid' : None,
        'eid' : None,
        'pid' : None,
        'mid' : 1,
        'spc_id' : None,
        'mpc_id' : None,
        'load_id' : None,
        'dload_id' : None,

        'method_id' : None,
        'cmethod_id' : None,
        'spline_id' : None,
        'table_id' : None,
        'flfact_id' : None,
        'flutter_id' : None,
        'freq_id' : None,
        'tstep_id' : None,
        'tstepnl_id' : None,
        'suport_id' : None,
        'suport1_id' : None,
        'tf_id' : None,
    }
    bdf_renumber(bdf_filename, bdf_filename_out, size=8, is_double=False,
                 starting_id_dict=starting_id_dict, round_ids=False)
    """
    starting_id_dict_default = {
        'cid' : 1,
        'nid' : 1,
        'eid' : 1,
        'pid' : 1,
        'mid' : 1,
        'spc_id' : 1,
        'mpc_id' : 1,
        'load_id' : 1,
        'dload_id' : 1,

        'method_id' : 1,
        'cmethod_id' : 1,
        'spline_id' : 1,
        'table_id' : 1,
        'flfact_id' : 1,
        'flutter_id' : 1,
        'freq_id' : 1,
        'tstep_id' : 1,
        'tstepnl_id' : 1,
        'suport_id' : 1,
        'suport1_id' : 1,
        'tf_id' : 1,
    }
    # fill up starting_id_dict
    if starting_id_dict is None:
        starting_id_dict = starting_id_dict_default
    else:
        for key, value in iteritems(starting_id_dict_default):
            if key not in starting_id_dict:
                starting_id_dict[key] = value

    nid = None
    cid = None
    eid = None
    pid = None
    mid = None
    spc_id = None
    mpc_id = None
    load_id = None
    dload_id = None
    method_id = None
    cmethod_id = None
    spline_id = None
    table_id = None
    flfact_id = None
    flutter_id = None
    freq_id = None
    tstep_id = None
    tstepnl_id = None
    suport_id = None
    suport1_id = None
    tf_id = None

    # turn them into variables
    for key, value in sorted(iteritems(starting_id_dict)):
        #assert isinstance(key, string_types), key
        assert key in starting_id_dict_default, 'key=%r is invalid' % (key)
        #assert isidentifier(key), 'key=%s is invalid' % key
        if value is not None and not isinstance(value, integer_types):
            msg = 'key=%r value=%r must be an integer; type(value)=%s' % (
                key, value, type(value))
            raise TypeError(msg)
        # None means "don't renumber this key"
        value = int(value) if value is not None else None

        if key == 'nid':
            nid = value
        elif key == 'cid':
            cid = value
        elif key == 'eid':
            eid = value
        elif key == 'pid':
            pid = value
        elif key == 'mid':
            mid = value
        elif key == 'spc_id':
            spc_id = value
        elif key == 'mpc_id':
            mpc_id = value
        elif key == 'load_id':
            load_id = value
        elif key == 'dload_id':
            dload_id = value
        elif key == 'method_id':
            method_id = value
        elif key == 'cmethod_id':
            cmethod_id = value
        elif key == 'spline_id':
            spline_id = value
        elif key == 'table_id':
            table_id = value
        elif key == 'flfact_id':
            flfact_id = value
        elif key == 'flutter_id':
            flutter_id = value
        elif key == 'freq_id':
            freq_id = value
        elif key == 'tstep_id':
            tstep_id = value
        elif key == 'tstepnl_id':
            tstepnl_id = value
        elif key == 'suport_id':
            suport_id = value
        elif key == 'suport1_id':
            suport1_id = value
        elif key == 'tf_id':
            tf_id = value
        else:
            raise NotImplementedError('key=%r' % key)

    # build the maps
    eid_map = {}
    nid_map = {}
    reverse_nid_map = {}
    mid_map = {}
    cid_map = {}
    mpc_map = {}
    spc_map = {}
    dload_map = {}
    load_map = {}

    cmethod_map = {}
    method_map = {}
    flfact_map = {}
    flutter_map = {}
    freq_map = {}
    tstep_map = {}
    tstepnl_map = {}
    suport_map = {}
    suport1_map = {}

    if isinstance(bdf_filename, string_types):
        model = BDF(debug=False)
        model.disable_cards(cards_to_skip)
        model.read_bdf(bdf_filename)
    else:
        model = bdf_filename

    if model.spoints is None:
        spoints = []
    else:
        spoints = list(model.spoints.points)
    if model.epoints is None:
        epoints = []
    else:
        epoints = list(model.epoints.points)

    from itertools import chain

    # GRIDs and SPOINTs share the node id space, so they are renumbered
    # together; EPOINTs are collected above but are not yet folded into the map
    nids = model.nodes.keys()
    spoints_nids = sorted(chain(spoints, nids))
    if 'nid' in starting_id_dict and nid is not None:
        spoint_set = set(spoints)
        i = nid
        for nidi in spoints_nids:
            if nidi in spoint_set:
                # SPOINT ids are never renumbered
                continue
            # skip over any target id already taken by an SPOINT
            while i in spoint_set:
                i += 1
            nid_map[nidi] = i
            reverse_nid_map[i] = nidi
            i += 1
    else:
        # no node renumbering requested: identity maps
        for nidi in spoints_nids:
            nid_map[nidi] = nidi
            reverse_nid_map[nidi] = nidi
    all_materials = (
        model.materials,
        model.creep_materials,
        model.thermal_materials,
        model.hyperelastic_materials,
        model.MATT1,
        model.MATT2,
        model.MATT3,
        model.MATT4,
        model.MATT5,
        #model.MATT6,
        #model.MATT7,
        model.MATT8,
        model.MATT9,
        model.MATS1,
        model.MATS3,
        model.MATS8,
    )

    if mid is not None:
        mids = []
        for materials in all_materials:
            mids += materials.keys()
        # np.unique also sorts, so new mids are assigned in ascending order
        mids = np.unique(mids)
        for i, midi in enumerate(mids):
            mid_map[midi] = mid + i
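    # e.g. mids of {10, 200, 3000} with a starting mid of 1 map to
    # {10: 1, 200: 2, 3000: 3}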

    if 'nid' in starting_id_dict and nid is not None:
        # push the new ids onto the GRID cards themselves
        for nidi, node in sorted(iteritems(model.nodes)):
            node.nid = nid_map[nidi]

    if 'pid' in starting_id_dict and pid is not None:
        # properties: structural (PSHELL, ...), PMASS, PCONV, PHBDY
        for prop_dict in (model.properties, model.properties_mass,
                          model.convection_properties, model.phbdys):
            for pidi, prop in sorted(iteritems(prop_dict)):
                prop.pid = pid
                pid += 1

    if 'eid' in starting_id_dict and eid is not None:
        # elements
        for eidi, element in sorted(iteritems(model.elements)):
            element.eid = eid
            eid_map[eidi] = eid
            eid += 1
        for eidi, element in sorted(iteritems(model.masses)):
            # CONM1, CONM2, CMASSx
            element.eid = eid
            eid_map[eidi] = eid
            eid += 1
        for eidi, elem in sorted(iteritems(model.rigid_elements)):
            # RBAR/RBAR1/RBE1/RBE2/RBE3/RSPLINE
            elem.eid = eid
            eid_map[eidi] = eid
            eid += 1
        # TODO: model.caeros are not renumbered yet

    if 'mid' in starting_id_dict and mid is not None:
        #mid = 1
        for materials in all_materials:
            for midi, material in iteritems(materials):
                mid = mid_map[midi]
                assert hasattr(material, 'mid')
                material.mid = mid

    if 'spc_id' in starting_id_dict and spc_id is not None:
        # spc
        for spc_idi, spc_group in sorted(iteritems(model.spcs)):
            for spc in spc_group:
                assert hasattr(spc, 'conid')
                spc.conid = spc_id
            spc_map[spc_idi] = spc_id
            spc_id += 1
        for spc_idi, spcadd in sorted(iteritems(model.spcadds)):
            assert hasattr(spcadd, 'conid')
            spcadd.conid = spc_id
            spc_map[spc_idi] = spc_id
            spc_id += 1
    else:
        # identity maps so the case control update still resolves every id
        for spc_idi in model.spcs:
            spc_map[spc_idi] = spc_idi
        for spc_idi in model.spcadds:
            spc_map[spc_idi] = spc_idi

    if 'mpc_id' in starting_id_dict and mpc_id is not None:
        # mpc
        for mpc_idi, mpc_group in sorted(iteritems(model.mpcs)):
            for mpc in mpc_group:
                assert hasattr(mpc, 'conid')
                mpc.conid = mpc_id
            mpc_map[mpc_idi] = mpc_id
            mpc_id += 1
        for mpc_idi, mpcadd in sorted(iteritems(model.mpcadds)):
            assert hasattr(mpcadd, 'conid')
            mpcadd.conid = mpc_id
            mpc_map[mpc_idi] = mpc_id
            mpc_id += 1
    else:
        # identity maps, mirroring the SPC case above
        for mpc_idi in model.mpcs:
            mpc_map[mpc_idi] = mpc_idi
        for mpc_idi in model.mpcadds:
            mpc_map[mpc_idi] = mpc_idi

    if 'cid' in starting_id_dict and cid is not None:
        # coords
        for cidi, coord in sorted(iteritems(model.coords)):
            if cidi == 0:
                cid_map[0] = 0
                continue
            coord.cid = cid
            cid_map[cidi] = cid
            cid += 1

    nlparm_map = {}
    nlpci_map = {}
    table_sdamping_map = {}
    dconstr_map = {}
    dconadd_map = {}
    dresp_map = {}
    gust_map = {}
    trim_map = {}
    tic_map = {}
    csschd_map = {}
    transfer_function_map = {}
    data = (
        (model.methods, 'sid', method_map),
        (model.cMethods, 'sid', cmethod_map),
        (model.flfacts, 'sid', flfact_map),
        (model.flutters, 'sid', flutter_map),
        (model.frequencies, 'sid', freq_map),
        (model.tsteps, 'sid', tstep_map),
        (model.tstepnls, 'sid', tstepnl_map),
        (model.splines, 'eid', None),
        (model.suport1, 'conid', suport1_map),
        (model.nlparms, 'nlparm_id', nlparm_map),
        (model.nlpcis, 'nlpci_id', nlpci_map),
        (model.tables_sdamping, 'tid', table_sdamping_map),
        (model.dconadds, 'dcid', dconadd_map),
        #(model.dconstrs, 'oid', dconstr_map),
        (model.dresps, 'dresp_id', dresp_map),
        (model.gusts, 'sid', gust_map),
        (model.trims, 'sid', trim_map),
        (model.tics, 'sid', tic_map),
        (model.csschds, 'sid', csschd_map),
        (model.aefacts, 'sid', None),
        (model.aelinks, 'sid', None),
        (model.aelists, 'sid', None),
        (model.paeros, 'pid', None),

        (model.sets, 'sid', None),
        #(model.asets, 'sid', None),
        (model.dareas, 'sid', None),
        # transfer_functions hold a list of TF cards per sid, so they are
        # renumbered in a dedicated loop below rather than here
        #(model.transfer_functions, 'sid', transfer_function_map),
        #(model.bsets, 'sid', None),
        #(model.csets, 'sid', None),
        #(model.qsets, 'sid', None),
        #(model.usets, 'sid', None),

        #(model.se_sets, 'sid', None),
        #(model.se_asets, 'sid', None),
        #(model.se_bsets, 'sid', None),
        #(model.se_csets, 'sid', None),
        #(model.se_qsets, 'sid', None),
        #(model.se_usets, 'sid', None),
    )
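
    # each entry of `data` is (card_dict, id_attribute_name, forward_map);
    # a map of None means the new ids are not needed for the case control
    # update at the end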

    # apply the simple-to-update parameters
    param_id = 9999
    for (dict_obj, param_name, mmap) in data:
        if round_ids:
            # start each card family just past the next multiple of 1000
            param_id = _roundup(param_id, 1000) + 1
        else:
            param_id = 1
        for idi, param in sorted(iteritems(dict_obj)):
            try:
                msg = '%s has no %r; use %s' % (param.type, param_name, object_attributes(param))
            except AttributeError:
                model.log.error('param = %r' % param)
                raise
            assert hasattr(param, param_name), msg
            setattr(param, param_name, param_id)
            if mmap is not None:
                mmap[idi] = param_id
            param_id += 1
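    # e.g. with round_ids=True the first family starts at 10001 and each later
    # family starts just past the next multiple-of-1000 boundary (assuming
    # _roundup rounds its first argument up to a multiple of the second)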

    # start the complicated set
    # dconstr
    dessub_map = dconadd_map
    for key, value in iteritems(dconstr_map):
        if key in dessub_map:
            raise NotImplementedError()
        dessub_map[key] = value

    # tables
    for table_idi, table in sorted(iteritems(model.tables)):
        assert hasattr(table, 'tid')
        table.tid = table_id
        table_id += 1
    for table_idi, table in sorted(iteritems(model.random_tables)):
        assert hasattr(table, 'tid')
        table.tid = table_id
        table_id += 1

    # dloads
    for dload_idi, dloads in sorted(iteritems(model.dloads)):
        for dload in dloads:
            assert hasattr(dload, 'sid')
            dload.sid = dload_id
        dload_map[dload_idi] = dload_id
        dload_id += 1
    for dload_idi, dloads in sorted(iteritems(model.dload_entries)):
        for dload in dloads:
            assert hasattr(dload, 'sid')
            dload.sid = dload_id
        dload_map[dload_idi] = dload_id
        dload_id += 1

    # loads
    for load_idi, loads in sorted(iteritems(model.loads)):
        for load in loads:
            assert hasattr(load, 'sid')
            load.sid = load_id
        load_map[load_idi] = load_id
        load_id += 1

    # transfer_functions
    for tf_idi, tfs in sorted(iteritems(model.transfer_functions)):
        for tf in tfs:
            assert hasattr(tf, 'sid')
            tf.sid = tf_id
        transfer_function_map[tf_idi] = tf_id
        tf_id += 1

    lseq_map = load_map  # wrong??? LSEQ may need its own map
    temp_map = load_map  # wrong??? TEMP(LOAD) may need its own map
    mapper = {
        'elements' : eid_map,
        'nodes' : nid_map,
        'coords' : cid_map,
        'materials' : mid_map,
        'SPC' : spc_map,
        'MPC' : mpc_map,
        'METHOD' : method_map,
        'CMETHOD' : cmethod_map,
        'FLFACT' : flfact_map,
        'FMETHOD' : flutter_map,
        'FREQUENCY' : freq_map,

        'DLOAD' : dload_map,
        'LOAD' : load_map,
        'LOADSET' : lseq_map,
        'TSTEP' : tstep_map,
        'TSTEPNL' : tstepnl_map,
        'SUPORT1' : suport1_map,
        'NLPARM' : nlparm_map,
        'SDAMPING' : table_sdamping_map,
        'DESSUB' : dessub_map,
        'DESOBJ' : dresp_map,
        'GUST' : gust_map,
        'TRIM' : trim_map,
        'IC' : tic_map,
        'CSSCHD' : csschd_map,
        'TFL' : transfer_function_map,
        # questionable: these reuse load_map (see the "wrong???" notes above)
        'TEMPERATURE(LOAD)' : temp_map,
        'TEMPERATURE(INITIAL)' : temp_map,
        #'DATAREC' : datarec_map,
        #'ADAPT' : adapt_map,
        #'SUPER' : super_map,
        #'BOUTPUT' : boutput_map,
        #'OUTRCV' : outrcv_map,
    }
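    # the mapper keys are case control deck names; _update_case_control
    # walks the deck and remaps each referenced id through these maps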
    _update_case_control(model, mapper)
    if bdf_filename_out is not None:
        model.write_bdf(bdf_filename_out, size=size, is_double=is_double,
                        interspersed=False)
    return model
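
A minimal usage sketch. The surrounding def is not shown in this excerpt, so the
function name and import path below are assumptions (they match pyNastran's
bdf_renumber utility; the path varies by version). It renumbers a deck so grids
start at 101 and elements at 5000, while mid=None leaves materials untouched:

# assumed import path; older pyNastran versions expose this elsewhere
from pyNastran.bdf.mesh_utils.bdf_renumber import bdf_renumber

model = bdf_renumber(
    'model_in.bdf', 'model_out.bdf',
    starting_id_dict={'nid': 101, 'eid': 5000, 'mid': None},
    round_ids=False,
)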