Example #1
    def test_renumber_01(self):
        """renumbers a deck in a couple ways"""
        bdf_filename = os.path.abspath(
            os.path.join(pkg_path, '..', 'models', 'bwb', 'BWB_saero.bdf'))
        bdf_filename_out1 = os.path.abspath(
            os.path.join(pkg_path, '..', 'models', 'bwb', 'BWB_saero1.out'))
        bdf_filename_out2 = os.path.abspath(
            os.path.join(pkg_path, '..', 'models', 'bwb', 'BWB_saero2.out'))
        bdf_filename_out3 = os.path.abspath(
            os.path.join(pkg_path, '..', 'models', 'bwb', 'BWB_saero3.out'))
        model = bdf_renumber(bdf_filename, bdf_filename_out1, size=8,
                             is_double=False, starting_id_dict=None,
                             round_ids=False, cards_to_skip=None, debug=False)

        model = read_bdf(bdf_filename, log=log)
        bdf_renumber(model, bdf_filename_out2, size=16, is_double=False,
                     starting_id_dict={
                         'eid': 1000, 'pid': 2000, 'mid': 3000,
                         'spc_id': 4000},
                     round_ids=False, cards_to_skip=None)
        bdf_renumber(bdf_filename, bdf_filename_out3, size=8,
                     is_double=False, starting_id_dict=None,
                     round_ids=True, cards_to_skip=None)
        read_bdf(bdf_filename_out1, log=log)
        read_bdf(bdf_filename_out2, log=log)
        read_bdf(bdf_filename_out3, log=log)
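For quick reference, a minimal sketch of the call pattern the test above exercises; the file names are placeholders and the import paths are assumed for recent pyNastran versions.

from pyNastran.bdf.bdf import read_bdf
from pyNastran.bdf.mesh_utils.bdf_renumber import bdf_renumber  # assumed module path

# renumber straight from a file on disk
bdf_renumber('model_in.bdf', 'model_renumbered.bdf', size=8, is_double=False)

# or renumber an already-loaded model and pick the starting ids
model = read_bdf('model_in.bdf')
bdf_renumber(model, 'model_renumbered16.bdf', size=16, is_double=False,
             starting_id_dict={'nid': 1, 'eid': 1000, 'pid': 2000, 'mid': 3000})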
Example #2
def check_renumber(bdf_filename, bdf_filename_renumber, bdf_filename_check):
    bdf_renumber(bdf_filename, bdf_filename_renumber)

    model = BDF(debug=False)
    model.read_bdf(bdf_filename)
    model.write_bdf(bdf_filename_check, interspersed=False)

    model = BDF(debug=False)
    model.read_bdf(bdf_filename_renumber)
Example #3
def renumber(bdf_filename, log):
    bdf_filename_out = 'junk.bdf'
    #model3_copy = deepcopy(model3)
    #model3.cross_reference()
    bdf_renumber(bdf_filename, bdf_filename_out, size=8, is_double=False,
                 starting_id_dict=None, round_ids=False, cards_to_skip=None, log=None,
                 debug=False)
    model4 = BDF(debug=False, log=log)
    model4.read_bdf(bdf_filename_out)
Example #4
def cmd_line_renumber():  # pragma: no cover
    """command line interface to bdf_renumber"""
    from docopt import docopt
    import pyNastran
    msg = (
        "Usage:\n"
        '  bdf renumber IN_BDF_FILENAME OUT_BDF_FILENAME [--superelement] [--size SIZE]\n'
        '  bdf renumber IN_BDF_FILENAME                  [--superelement] [--size SIZE]\n'
        '  bdf renumber -h | --help\n'
        '  bdf renumber -v | --version\n'
        '\n'

        'Positional Arguments:\n'
        '  IN_BDF_FILENAME    path to input BDF/DAT/NAS file\n'
        '  OUT_BDF_FILENAME   path to output BDF/DAT/NAS file\n'
        '\n'

        'Options:\n'
        '--superelement  calls superelement_renumber\n'
        '--size SIZE     set the field size (default=16)\n\n'

        'Info:\n'
        '  -h, --help      show this help message and exit\n'
        "  -v, --version   show program's version number and exit\n"
    )
    if len(sys.argv) == 1:
        sys.exit(msg)

    ver = str(pyNastran.__version__)
    #type_defaults = {
    #    '--nerrors' : [int, 100],
    #}
    data = docopt(msg, version=ver)
    print(data)
    bdf_filename = data['IN_BDF_FILENAME']
    bdf_filename_out = data['OUT_BDF_FILENAME']
    if bdf_filename_out is None:
        bdf_filename_out = 'renumber.bdf'

    size = 16
    if data['--size']:
        size = int(data['--size'])

    #cards_to_skip = [
        #'AEFACT', 'CAERO1', 'CAERO2', 'SPLINE1', 'SPLINE2',
        #'AERO', 'AEROS', 'PAERO1', 'PAERO2', 'MKAERO1']
    cards_to_skip = []
    if data['--superelement']:
        superelement_renumber(bdf_filename, bdf_filename_out, size=size, is_double=False,
                              starting_id_dict=None, #round_ids=False,
                              cards_to_skip=cards_to_skip)
    else:
        bdf_renumber(bdf_filename, bdf_filename_out, size=size, is_double=False,
                     starting_id_dict=None, round_ids=False,
                     cards_to_skip=cards_to_skip)
Example #5
def check_renumber(bdf_filename, bdf_filename_renumber, bdf_filename_check,
                   log=None):
    """renumbers the file, then reloads both it and the renumbered deck"""
    bdf_renumber(bdf_filename, bdf_filename_renumber)

    model = BDF(debug=False, log=log)
    model.read_bdf(bdf_filename)
    model.write_bdf(bdf_filename_check, interspersed=False)

    model = BDF(debug=False, log=log)
    model.read_bdf(bdf_filename_renumber)

    os.remove(bdf_filename_renumber)
    os.remove(bdf_filename_check)
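A hypothetical driver for the helper above; the three file names are placeholders and the input deck is assumed to already exist on disk.

# assumed usage of the check_renumber() helper defined above
check_renumber('wing.bdf',           # existing input deck (placeholder)
               'wing_renumber.bdf',  # renumbered output (deleted by the helper)
               'wing_check.bdf')     # rewritten copy of the input (also deleted)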
Example #6
def cmd_line_renumber():  # pragma: no cover
    """command line interface to bdf_renumber"""
    import sys
    from docopt import docopt
    import pyNastran
    msg = "Usage:\n"
    msg += "  bdf renumber IN_BDF_FILENAME [-o OUT_BDF_FILENAME]\n"
    msg += '  bdf renumber -h | --help\n'
    msg += '  bdf renumber -v | --version\n'
    msg += '\n'

    msg += "Positional Arguments:\n"
    msg += "  IN_BDF_FILENAME    path to input BDF/DAT/NAS file\n"
    #msg += "  OUT_BDF_FILENAME   path to output BDF/DAT/NAS file\n"
    msg += '\n'

    msg += 'Options:\n'
    msg += "  -o OUT, --output  OUT_BDF_FILENAME  path to output BDF/DAT/NAS file\n\n"

    msg += 'Info:\n'
    msg += '  -h, --help      show this help message and exit\n'
    msg += "  -v, --version   show program's version number and exit\n"

    if len(sys.argv) == 1:
        sys.exit(msg)

    ver = str(pyNastran.__version__)
    #type_defaults = {
    #    '--nerrors' : [int, 100],
    #}
    data = docopt(msg, version=ver)
    print(data)
    size = 16
    bdf_filename = data['IN_BDF_FILENAME']
    bdf_filename_out = data['--output']
    if bdf_filename_out is None:
        bdf_filename_out = 'renumber.bdf'

    cards_to_skip = [
        'AEFACT', 'CAERO1', 'CAERO2', 'SPLINE1', 'SPLINE2', 'AERO', 'AEROS',
        'PAERO1', 'PAERO2', 'MKAERO1'
    ]
    bdf_renumber(bdf_filename,
                 bdf_filename_out,
                 size=size,
                 is_double=False,
                 starting_id_dict=None,
                 round_ids=False,
                 cards_to_skip=cards_to_skip)
Example #7
def remove_unassociated_nodes(bdf_filename, bdf_filename_out, renumber=False,
                              size=8, is_double=False):
    """
    Removes nodes from a model that are not referenced.

    Parameters
    ----------
    bdf_filename : str
        the path to the bdf input file
    bdf_filename_out : str
        the path to the bdf output file
    renumber : bool
        should the model be renumbered
    size : int; {8, 16}; default=8
        the bdf write precision
    is_double : bool; default=False
        the field precision to write

    .. warning:: only considers elements
    .. warning:: renumber=False is not supported
    """
    model = BDF(debug=False)
    model.read_bdf(bdf_filename, xref=True)

    nids_used = set([])
    for element in itervalues(model.elements):
        nids_used.update(element.node_ids)
    #for element in itervalues(model.masses):
        #nids_used.update(element.node_ids)
    all_nids = set(model.nodes.keys())

    nodes_to_remove = all_nids - nids_used
    for nid in nodes_to_remove:
        del model.nodes[nid]

    if renumber:
        starting_id_dict = {
            'nid' : 1,
            'eid' : 1,
            'pid' : 1,
            'mid' : 1,
        }
        bdf_renumber(model, bdf_filename_out, size=size, is_double=is_double,
                     starting_id_dict=starting_id_dict)
    else:
        model.write_bdf(bdf_filename_out, size=size, is_double=is_double)
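A minimal usage sketch for the function above; the paths are placeholders and the exact import location of remove_unassociated_nodes depends on the pyNastran version.

# from pyNastran.bdf... import remove_unassociated_nodes  # module path varies by version

# drop nodes that no element references, then renumber everything from 1
remove_unassociated_nodes('full_model.bdf', 'trimmed_model.bdf',
                          renumber=True, size=8, is_double=False)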
Example #8
def cmd_line_renumber():  # pragma: no cover
    """command line interface to bdf_renumber"""
    import sys
    from docopt import docopt
    import pyNastran
    msg = "Usage:\n"
    msg += "  bdf renumber IN_BDF_FILENAME [-o OUT_BDF_FILENAME]\n"
    msg += '  bdf renumber -h | --help\n'
    msg += '  bdf renumber -v | --version\n'
    msg += '\n'

    msg += "Positional Arguments:\n"
    msg += "  IN_BDF_FILENAME    path to input BDF/DAT/NAS file\n"
    #msg += "  OUT_BDF_FILENAME   path to output BDF/DAT/NAS file\n"
    msg += '\n'

    msg += 'Options:\n'
    msg += "  -o OUT, --output  OUT_BDF_FILENAME  path to output BDF/DAT/NAS file\n\n"

    msg += 'Info:\n'
    msg += '  -h, --help      show this help message and exit\n'
    msg += "  -v, --version   show program's version number and exit\n"

    if len(sys.argv) == 1:
        sys.exit(msg)

    ver = str(pyNastran.__version__)
    #type_defaults = {
    #    '--nerrors' : [int, 100],
    #}
    data = docopt(msg, version=ver)
    print(data)
    size = 16
    bdf_filename = data['IN_BDF_FILENAME']
    bdf_filename_out = data['--output']
    if bdf_filename_out is None:
        bdf_filename_out = 'renumber.bdf'

    cards_to_skip = ['AEFACT', 'CAERO1', 'CAERO2', 'SPLINE1', 'SPLINE2', 'AERO', 'AEROS', 'PAERO1', 'PAERO2', 'MKAERO1']
    bdf_renumber(bdf_filename, bdf_filename_out, size=size, is_double=False,
                 starting_id_dict=None, round_ids=False,
                 cards_to_skip=cards_to_skip)
Example #9
    def test_renumber_05(self):
        """renumbers a deck in a couple ways"""
        log = SimpleLogger(level='error')
        bdf_filename = os.path.join(MODEL_PATH, 'bwb', 'bwb_saero.bdf')
        bdf_filename_out1 = os.path.join(MODEL_PATH, 'bwb', 'bwb_saero1.out')
        bdf_filename_out2 = os.path.join(MODEL_PATH, 'bwb', 'bwb_saero2.out')
        bdf_filename_out3 = os.path.join(MODEL_PATH, 'bwb', 'bwb_saero3.out')
        model = bdf_renumber(bdf_filename,
                             bdf_filename_out1,
                             size=8,
                             is_double=False,
                             starting_id_dict=None,
                             round_ids=False,
                             cards_to_skip=None,
                             debug=False)

        model = read_bdf(bdf_filename, log=log)
        bdf_renumber(model,
                     bdf_filename_out2,
                     size=16,
                     is_double=False,
                     starting_id_dict={
                         'eid': 1000,
                         'pid': 2000,
                         'mid': 3000,
                         'spc_id': 4000,
                     },
                     round_ids=False,
                     cards_to_skip=None)
        bdf_renumber(bdf_filename,
                     bdf_filename_out3,
                     size=8,
                     is_double=False,
                     starting_id_dict=None,
                     round_ids=True,
                     cards_to_skip=None)
        read_bdf(bdf_filename_out1, log=log)
        read_bdf(bdf_filename_out2, log=log)
        read_bdf(bdf_filename_out3, log=log)
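Depending on the pyNastran version, bdf_renumber returns either the model or a (model, mapper) tuple (Examples #13 and #16 use the tuple form). A hedged sketch of the tuple form, with the mapper keys assumed to follow the layout documented in Example #16:

# assumes a recent pyNastran where bdf_renumber returns (model, mapper)
model_renumbered, mapper = bdf_renumber('bwb_saero.bdf', 'bwb_saero_renumbered.out', size=8)
nid_map = mapper['nodes']  # assumed key; {old_id: new_id} per the mapper layout in Example #16
print('old nid 10 ->', nid_map.get(10))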
Example #10
    def test_renumber_01(self):
        msg = 'CEND\n'
        msg += 'BEGIN BULK\n'
        msg += 'GRID,10,,1.0,1.0\n'
        msg += 'GRID,30,,3.0,2.0\n'
        msg += 'GRID,20,,2.0,3.0\n'
        msg += 'GRID,33,,3.3,4.0\n'
        msg += 'GRID,34,,3.4,5.0\n'
        msg += 'GRID,35,,3.5,6.0\n'
        msg += 'GRID,36,,3.6,7.0\n'
        msg += 'SPOINT,4,THRU,8\n'
        msg += 'SPOINT,11\n'
        msg += 'CTRIA3,10,8,30,20,10\n'
        msg += 'PSHELL,8,4,0.1\n'
        msg += 'MAT1,4,3.0e7,,0.3\n'

        msg += 'MPC,10,20,1,1.0,10,2,1.0\n'
        msg += 'SPC,2,30,3,-2.6\n'
        msg += 'SPC1,313,12456,33,THRU,34\n'
        msg += 'SPC,314,30,3,-2.6,36,3,-2.6\n'

        msg += '$SPCD,SID,G1,C1, D1,  G2,C2,D2\n'
        msg += 'SPCD, 100,33,436,-2.6,10, 2,.9\n'
        msg += 'SPCD, 101,34,436,-2.6\n'

        msg += '$RBAR, EID, GA, GB, CNA\n'
        msg += 'RBAR,    5, 10, 20, 123456\n'

        msg += '$RBAR1, EID, GA, GB, CB, ALPHA\n'
        msg += 'RBAR1,    9, 20, 10, 123, 6.5-7\n'

        msg += 'RBE1        1001    33    123456\n'
        msg += '              UM    20       123    35       123    34       123\n'
        msg += '                    10       123\n'
        msg += '$[RBE3, eid, None, refgrid, refc]\n'
        msg += 'RBE3       12225           33     123456      1.     123    34      36\n'
        msg += '            20      10\n'
        msg += 'ENDDATA\n'

        bdf_filename = 'renumber_in.bdf'
        with open(bdf_filename, 'w') as bdf_file:
            bdf_file.write(msg)

        msg_expected = 'CEND\n'
        msg_expected += 'BEGIN BULK\n'
        msg_expected += 'GRID,6\n'
        msg_expected += 'GRID,7\n'
        msg_expected += 'GRID,8\n'
        msg_expected += 'SPOINT,1,THRU,5\n'
        msg_expected += 'CTRIA3,1,1,8,7,6\n'
        msg_expected += 'PSHELL,1,1,0.1\n'
        msg_expected += 'MAT1,1,3.0e7,,0.3\n'
        msg_expected += 'ENDDATA\n'

        # for now we're testing things don't crash
        bdf_filename_renumber = 'renumber_out.bdf'
        bdf_renumber(bdf_filename, bdf_filename_renumber)

        #model = BDF(debug=False)
        #model.read_bdf(bdf_filename)
        #model.write_bdf(bdf_filename_check)
        model = BDF(debug=False)
        model.read_bdf(bdf_filename_renumber)

        os.remove(bdf_filename)
        os.remove(bdf_filename_renumber)
Example #11
def equivalence_ugrid3d_and_bdf_to_bdf(ugrid_filename: str,
                                       bdf_filename: str,
                                       pshell_pids_to_remove: List[int],
                                       tol: float = 0.01,
                                       renumber: bool = True,
                                       log: Optional[SimpleLogger] = None):
    """
    Merges a UGRID3D (*.ugrid) with a BDF and exports a BDF that is
    equivalenced and renumbered.

    Parameters
    ----------
    ugrid_filename : str
        the AFLR3/UGrid3d filename
    bdf_filename : str
        the BDF filename
    pshell_pids_to_remove : List[int, ...]
        the PSHELL property ids to exclude from the merged model
    tol : float; default=0.01
        the equivalence tolerance
    renumber : bool; default=True
        calls ``bdf_renumber`` to renumber the output BDF model

    Returns
    -------
    out_bdf_filename : str
        the output BDF filename
    """
    log = get_logger2(log, debug=True)
    log.info(
        f'equivalence_ugrid3d_and_bdf_to_bdf - bdf_filename={bdf_filename}')
    log.info(
        f'equivalence_ugrid3d_and_bdf_to_bdf - ugrid_filename={ugrid_filename}'
    )
    check_path(ugrid_filename, 'ugrid_filename')

    base = os.path.splitext(bdf_filename)[0]
    #bdf_merged_filename = base + '_merged.bdf'
    bdf_equivalence_filename = base + '_equivalence.bdf'
    bdf_renumber_filename = base + '_renumber.bdf'

    update_merge = True
    if update_merge:
        bdf_model = _update_merge(ugrid_filename,
                                  bdf_filename,
                                  pshell_pids_to_remove,
                                  tol=tol,
                                  log=log)
        bdf_equivalence_nodes(bdf_model,
                              bdf_equivalence_filename,
                              tol,
                              renumber_nodes=False,
                              neq_max=10,
                              xref=False,
                              log=log)

    if renumber:
        starting_ids_dict = {
            'cid': 1,
            'nid': 1,
            'eid': 1,
            'pid': 1,
            'mid': 1,
        }
        bdf_renumber(bdf_equivalence_filename,
                     bdf_renumber_filename,
                     size=16,
                     is_double=False,
                     starting_id_dict=starting_ids_dict,
                     log=log)
        #os.remove(bdf_equivalence_filename)
        out_bdf_filename = bdf_renumber_filename
    else:
        out_bdf_filename = bdf_equivalence_filename

    #os.remove(bdf_merged_filename)
    #os.remove(bdf_renumber_filename)
    os.remove('model_join.bdf')
    return out_bdf_filename
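A hypothetical call to the merge-and-equivalence helper above; the file names, PSHELL ids, and tolerance are placeholders.

# assumed usage of equivalence_ugrid3d_and_bdf_to_bdf() as defined above
out_bdf = equivalence_ugrid3d_and_bdf_to_bdf(
    'volume_mesh.ugrid',              # AFLR3/UGrid3d file (placeholder)
    'structure.bdf',                  # BDF to merge with (placeholder)
    pshell_pids_to_remove=[100, 101],
    tol=0.01,
    renumber=True)
print('wrote', out_bdf)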
Example #12
def equivalence_ugrid3d_and_bdf_to_bdf(ugrid_filename,
                                       bdf_filename,
                                       pshell_pids_to_remove,
                                       tol=0.01,
                                       renumber=True):
    """
    Merges a UGRID3D (*.ugrid) with a BDF and exports a BDF that is
    equivalenced and renumbered.

    Parameters
    ----------
    ugrid_filename : str
        the AFLR3/UGrid3d filename
    bdf_filename : str
        the BDF filename
    pshell_pids_to_remove : List[int, ...]
        the PSHELL property ids to exclude from the merged model
    tol : float; default=0.01
        the equivalence tolerance
    renumber : bool; default=True
        calls ``bdf_renumber`` to renumber the output BDF model

    Returns
    -------
    out_bdf_filename : str
        the output BDF filename
    """
    print('equivalence_ugrid3d_and_bdf_to_bdf - bdf_filename=%s' %
          bdf_filename)
    print('equivalence_ugrid3d_and_bdf_to_bdf - ugrid_filename=%s' %
          ugrid_filename)
    check_path(ugrid_filename, 'ugrid_filename')

    base = os.path.splitext(bdf_filename)[0]
    #bdf_merged_filename = base + '_merged.bdf'
    bdf_equivalence_filename = base + '_equivalence.bdf'
    bdf_renumber_filename = base + '_renumber.bdf'

    update_merge = True
    if update_merge:
        ugrid_model = UGRID(log=None, debug=False)
        ugrid_model.read_ugrid(ugrid_filename)

        bdf_model = read_bdf(bdf_filename, xref=False)
        #bdf_model.write_bdf(bdf_merged_filename, interspersed=False, enddata=False)

        tol = 0.01
        nid0 = max(bdf_model.nodes) + 1  # new node ids start at max+1
        nid_offset = nid0 - 1  # node_ids are 1-based, so we must offset them
        eid = max(bdf_model.elements) + 1

        cp = None
        for nid, node in enumerate(ugrid_model.nodes):
            #assert len(node) == 3, node
            card = ['GRID', nid + nid0, cp] + list(node)
            bdf_model.add_card(card, 'GRID', is_list=True)
            #f.write(print_card_double(card))

        pid_solid = 100
        mid = 1

        pids = unique(ugrid_model.pids)
        for pidi in pids:
            if pidi not in pshell_pids_to_remove:
                card = ['PSHELL', pidi, mid, 0.1]
                bdf_model.add_card(card, 'PSHELL', is_list=True)

        card = ['PSOLID', pid_solid, mid]
        bdf_model.add_card(card, 'PSOLID', is_list=True)

        card = ['MAT1', mid, 3.0e7, None, 0.3]
        bdf_model.add_card(card, 'MAT1', is_list=True)

        shells = [
            ('CQUAD4', ugrid_model.quads),
            ('CTRIA3', ugrid_model.tris),
        ]
        for card_type, card_nodes in shells:
            if card_nodes.shape[0]:
                for pid, nodes in zip(ugrid_model.pids,
                                      card_nodes + nid_offset):
                    if pid not in pshell_pids_to_remove:
                        card = [
                            card_type,
                            eid,
                            pid,
                        ] + list(nodes)
                        bdf_model.add_card(card, card_type, is_list=True)
                        eid += 1

        solids = [
            ('CTETRA', ugrid_model.tets),
            ('CPYRAM', ugrid_model.penta5s),
            ('CPENTA', ugrid_model.penta6s),
            ('CHEXA', ugrid_model.hexas),
        ]
        for card_type, card_nodes in solids:
            if card_nodes.shape[0]:
                for nodes in card_nodes + nid_offset:
                    card = [
                        card_type,
                        eid,
                        pid_solid,
                    ] + list(nodes)
                    bdf_model.add_card(card, card_type, is_list=True)
                    eid += 1

        # tol = min_edge_length / 2.0
        # TODO:  remove this...
        bdf_model.write_bdf('model_join.bdf', interspersed=False)
        bdf_model.cross_reference()
        bdf_equivalence_nodes(bdf_model,
                              bdf_equivalence_filename,
                              tol,
                              renumber_nodes=False,
                              neq_max=10,
                              xref=False)

    if renumber:
        starting_ids_dict = {
            'cid': 1,
            'nid': 1,
            'eid': 1,
            'pid': 1,
            'mid': 1,
        }
        bdf_renumber(bdf_equivalence_filename,
                     bdf_renumber_filename,
                     size=16,
                     is_double=False,
                     starting_id_dict=starting_ids_dict)
        #os.remove(bdf_equivalence_filename)
        out_bdf_filename = bdf_renumber_filename
    else:
        out_bdf_filename = bdf_equivalence_filename

    #os.remove(bdf_merged_filename)
    #os.remove(bdf_renumber_filename)
    return out_bdf_filename
Example #13
    def test_surf_01(self):
        """tests two_blade_wake_sym_extended.surf"""
        MODEL_PATH = os.path.join(PKG_PATH, '..', 'models')
        bdf_filename = os.path.join(MODEL_PATH, 'iSat', 'ISat_Launch_Sm_Rgd.dat')
        surf_filename = os.path.join(MODEL_PATH, 'iSat', 'ISat_Launch_Sm_Rgd.surf')
        bdf_model = read_bdf(bdf_filename)

        #ugrid_filename = os.path.join(PKG_PATH, 'converters', 'aflr', 'ugrid', 'models',
                                      #'two_blade_wake_sym_extended.surf')
        #log = get_logger(level='warning')

        pid_to_element_flags = {}
        for pid, prop in bdf_model.properties.items():
            if prop.type in ['PSHELL', 'PCOMP']:
                # name, initial_normal_spacing, bl_thickness, grid_bc
                pid_to_element_flags[pid] = ['na;me', 0.01, 0.1, 1]

        with self.assertRaises(RuntimeError):
            nastran_to_surf(bdf_model, pid_to_element_flags, surf_filename,
                            renumber_pids=None,
                            line_map=None, scale=1.0,
                            tol=1e-10, xref=True)

        delete_elements(
            bdf_model,
            element_types_to_save=['CTRIA3', 'CQUAD4'])
        delete_properties(
            bdf_model,
            property_types_to_save=['PSHELL', 'PCOMP', 'PCOMPG', 'PLPLANE'])
        #print(bdf_model.properties)

        bdf_model.uncross_reference()
        remove_unused(bdf_model, remove_nids=True, remove_cids=True,
                      remove_pids=True, remove_mids=True)
        #delete_forces(bdf_model)
        bdf_model.case_control_deck = None

        #bdf_filename_re = os.path.join(MODEL_PATH, 'iSat', 'ISat_Launch_Sm_Rgd_re.dat')
        bdf_filename_re = None
        bdf_model.cross_reference()
        bdf_model_re = bdf_renumber(
            bdf_model, bdf_filename_re,
            #size=8, is_double=False,
            #starting_id_dict=None,
            round_ids=False, cards_to_skip=None,
            log=bdf_model.log, debug=False)[0]

        remap_cards(bdf_model_re)
        #print(bdf_model_re.properties)
        #print(bdf_model_re.elements)
        #aaa

        #bdf_model_re = read_bdf(bdf_filename_re)
        #print(bdf_model_re.get_bdf_stats())

        pid_to_element_flags = {}
        for pid, prop in bdf_model_re.properties.items():
            if prop.type in ['PSHELL', 'PCOMP']:
                # name, initial_normal_spacing, bl_thickness, grid_bc
                pid_to_element_flags[pid] = ['na;me', 0.01, 0.1, 1]

        nastran_to_surf(bdf_model_re,
                        pid_to_element_flags, surf_filename,
                        renumber_pids=None,
                        line_map=None, scale=1.0,
                        tol=1e-10, xref=False)
Example #14
def cmd_line_renumber(argv=None, quiet=False):
    """command line interface to bdf_renumber"""
    if argv is None:
        argv = sys.argv

    from docopt import docopt
    import pyNastran
    msg = (
        "Usage:\n"
        '  bdf renumber IN_BDF_FILENAME OUT_BDF_FILENAME [--superelement] [--size SIZE]\n'
        '  bdf renumber IN_BDF_FILENAME                  [--superelement] [--size SIZE]\n'
        '  bdf renumber -h | --help\n'
        '  bdf renumber -v | --version\n'
        '\n'
        'Positional Arguments:\n'
        '  IN_BDF_FILENAME    path to input BDF/DAT/NAS file\n'
        '  OUT_BDF_FILENAME   path to output BDF/DAT/NAS file\n'
        '\n'
        'Options:\n'
        '--superelement  calls superelement_renumber\n'
        '--size SIZE     set the field size (default=16)\n\n'
        'Info:\n'
        '  -h, --help      show this help message and exit\n'
        "  -v, --version   show program's version number and exit\n")
    if len(argv) == 1:
        sys.exit(msg)

    ver = str(pyNastran.__version__)
    #type_defaults = {
    #    '--nerrors' : [int, 100],
    #}
    data = docopt(msg, version=ver, argv=argv[1:])
    if not quiet:  # pragma: no cover
        print(data)
    bdf_filename = data['IN_BDF_FILENAME']
    bdf_filename_out = data['OUT_BDF_FILENAME']
    if bdf_filename_out is None:
        bdf_filename_out = 'renumber.bdf'

    size = 16
    if data['--size']:
        size = int(data['--size'])

    assert size in [8, 16], f'size={size} args={argv}'
    #cards_to_skip = [
    #'AEFACT', 'CAERO1', 'CAERO2', 'SPLINE1', 'SPLINE2',
    #'AERO', 'AEROS', 'PAERO1', 'PAERO2', 'MKAERO1']
    cards_to_skip = []

    level = 'debug' if not quiet else 'warning'
    log = SimpleLogger(level=level, encoding='utf-8', log_func=None)
    if data['--superelement']:
        superelement_renumber(
            bdf_filename,
            bdf_filename_out,
            size=size,
            is_double=False,
            starting_id_dict=None,  #round_ids=False,
            cards_to_skip=cards_to_skip,
            log=log)
    else:
        bdf_renumber(bdf_filename,
                     bdf_filename_out,
                     size=size,
                     is_double=False,
                     starting_id_dict=None,
                     round_ids=False,
                     cards_to_skip=cards_to_skip,
                     log=log)
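Because this variant accepts argv and quiet, it can also be driven from Python instead of the shell; a sketch, with placeholder file names.

# the function drops argv[0] before handing the rest to docopt, so this mimics
# a shell call to:  bdf renumber model_in.bdf model_out.bdf --size 8
cmd_line_renumber(argv=['bdf', 'renumber', 'model_in.bdf', 'model_out.bdf', '--size', '8'],
                  quiet=True)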
Example #15
def bdf_merge(
    bdf_filenames,
    bdf_filename_out=None,
    renumber=True,
    encoding=None,
    size=8,
    is_double=False,
    cards_to_skip=None,
    log=None,
):
    """
    Merges multiple BDF files into one file

    Parameters
    ----------
    bdf_filenames : List[str]
        list of bdf filenames
    bdf_filename_out : str / None
        the output bdf filename (default=None; None -> no writing)
    renumber : bool
        should the bdf be renumbered (default=True)
    encoding : str
        the unicode encoding (default=None; system default)
    size : int; {8, 16}; default=8
        the bdf write precision
    is_double : bool; default=False
        the field precision to write
    cards_to_skip : List[str]; (default=None -> don't skip any cards)
        There are edge cases (e.g. FLUTTER analysis) where things can break due to
        uncross-referenced cards.  You need to disable entire classes of cards in
        that case (e.g. all aero cards).

    Supports
    --------
      nodes:      GRID
      coords:     CORDx
      elements:   CQUAD4, CTRIA3, CTETRA, CPENTA, CHEXA, CELASx, CBAR, CBEAM
                  CONM1, CONM2, CMASS
      properties: PSHELL, PCOMP, PSOLID, PMASS
      materials:  MAT1, MAT8

    .. todo:: doesn't support SPOINTs/EPOINTs
    .. warning:: still very preliminary
    """
    if not isinstance(bdf_filenames, (list, tuple)):
        raise TypeError("bdf_filenames is not a list/tuple...%s" % str(bdf_filenames))

    if not len(bdf_filenames) > 1:
        raise RuntimeError("You can't merge one BDF...bdf_filenames=%s" % str(bdf_filenames))
    for bdf_filename in bdf_filenames:
        if not isinstance(bdf_filename, string_types):
            raise TypeError("bdf_filenames is not a string...%s" % bdf_filename)
        # bdf_filenames = [bdf_filenames]

    # starting_id_dict_default = {
    #'cid' : max(model.coords.keys()),
    #'nid' : max(model.nodes.keys()),
    #'eid' : max([
    # max(model.elements.keys()),
    # max(model.masses.keys()),
    # ]),
    #'pid' : max([
    # max(model.properties.keys()),
    # max(model.properties_mass.keys()),
    # ]),
    #'mid' : max(model.material_ids),
    # }
    model = BDF(debug=False, log=log)
    model.disable_cards(cards_to_skip)
    bdf_filename0 = bdf_filenames[0]
    model.read_bdf(bdf_filename0, encoding=encoding)
    model.log.info("primary=%s" % bdf_filename0)

    data_members = ["coords", "nodes", "elements", "masses", "properties", "properties_mass", "materials"]
    for bdf_filename in bdf_filenames[1:]:
        # model.log.info('model.masses = %s' % model.masses)
        starting_id_dict = {
            "cid": max(model.coords.keys()) + 1,
            "nid": max(model.nodes.keys()) + 1,
            "eid": max([
                max(model.elements.keys()),
                0 if len(model.masses) == 0 else max(model.masses.keys()),
            ]) + 1,
            "pid": max([
                max(model.properties.keys()),
                0 if len(model.properties_mass) == 0 else max(model.properties_mass.keys()),
            ]) + 1,
            "mid": max(model.material_ids) + 1,
        }
        # for param, val in sorted(iteritems(starting_id_dict)):
        # print('  %-3s %s' % (param, val))

        model.log.info("secondary=%s" % bdf_filename)
        model2 = BDF(debug=False)
        model2.disable_cards(cards_to_skip)
        bdf_dump = "bdf_merge_temp.bdf"
        # model2.read_bdf(bdf_filename, xref=False)

        bdf_renumber(
            bdf_filename,
            bdf_dump,
            starting_id_dict=starting_id_dict,
            size=size,
            is_double=is_double,
            cards_to_skip=cards_to_skip,
        )
        model2 = BDF(debug=False)
        model2.disable_cards(cards_to_skip)
        model2.read_bdf(bdf_dump)
        os.remove(bdf_dump)

        # model.log.info('model2.node_ids = %s' % np.array(model2.node_ids))
        for data_member in data_members:
            data1 = getattr(model, data_member)
            data2 = getattr(model2, data_member)
            if isinstance(data1, dict):
                # model.log.info('  working on %s' % (data_member))
                for key, value in iteritems(data2):
                    if data_member in "coords" and key == 0:
                        continue
                    if isinstance(value, list):
                        raise NotImplementedError(type(value))
                    else:
                        assert key not in data1, key
                        data1[key] = value
                        # print('   %s' % key)
            else:
                raise NotImplementedError(type(data1))
    # if bdf_filenames_out:
    # model.write_bdf(bdf_filenames_out, size=size)

    if renumber:
        model.log.info("final renumber...")
        starting_id_dict = {"cid": 1, "nid": 1, "eid": 1, "pid": 1, "mid": 1}
        bdf_renumber(
            model,
            bdf_filename_out,
            starting_id_dict=starting_id_dict,
            size=size,
            is_double=is_double,
            cards_to_skip=cards_to_skip,
        )
    elif bdf_filename_out:
        model.write_bdf(
            out_filename=bdf_filename_out,
            encoding=None,
            size=size,
            is_double=is_double,
            interspersed=True,
            enddata=None,
        )
    return model
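A minimal sketch of driving the merge above; the deck names are placeholders and the import path is assumed for recent pyNastran versions.

from pyNastran.bdf.mesh_utils.bdf_merge import bdf_merge  # assumed module path

# merge two decks, renumber everything from 1, and write a single 8-column file
merged_model = bdf_merge(['wing.bdf', 'fuselage.bdf'],
                         bdf_filename_out='merged.bdf',
                         renumber=True, size=8)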
Example #16
def bdf_merge(bdf_filenames,
              bdf_filename_out=None,
              renumber=True,
              encoding=None,
              size=8,
              is_double=False,
              cards_to_skip=None,
              log=None,
              skip_case_control_deck=False):
    """
    Merges multiple BDF files into one file

    Parameters
    ----------
    bdf_filenames : List[str]
        list of bdf filenames
    bdf_filename_out : str / None
        the output bdf filename (default=None; None -> no writing)
    renumber : bool
        should the bdf be renumbered (default=True)
    encoding : str
        the unicode encoding (default=None; system default)
    size : int; {8, 16}; default=8
        the bdf write precision
    is_double : bool; default=False
        the field precision to write
    cards_to_skip : List[str]; (default=None -> don't skip any cards)
        There are edge cases (e.g. FLUTTER analysis) where things can break due to
        uncross-referenced cards.  You need to disable entire classes of cards in
        that case (e.g. all aero cards).
    skip_case_control_deck : bool, optional, default : False
        If true, don't consider the case control deck while merging.

    Returns
    -------
    model : BDF
        Merged model.
    mappers_all : List[mapper]
        list of mapper dictionaries that map the original ids of each
        input deck to the ids in the merged model

        mapper : Dict[bdf_attribute] : old_id_to_new_id_dict
            bdf_attribute : str
                a BDF attribute (e.g., 'nodes', 'elements')
            old_id_to_new_id_dict : dict[id_old] : id_new
                a sub-dictionary that maps the node/element/etc. ids
            mapper = {
                'elements' : eid_map,
                'nodes' : nid_map,
                'coords' : cid_map,
                ...
            }

    Supports
    --------
      nodes:      GRID
      coords:     CORDx
      elements:   CQUAD4, CTRIA3, CTETRA, CPENTA, CHEXA, CELASx, CBAR, CBEAM
                  CONM1, CONM2, CMASS
      properties: PSHELL, PCOMP, PSOLID, PMASS
      materials:  MAT1, MAT8

    .. todo:: doesn't support SPOINTs/EPOINTs
    .. warning:: still very preliminary

    """
    if not isinstance(bdf_filenames, (list, tuple)):
        raise TypeError('bdf_filenames is not a list/tuple...%s' %
                        str(bdf_filenames))

    if not len(bdf_filenames) > 1:
        raise RuntimeError("You can't merge one BDF...bdf_filenames=%s" %
                           str(bdf_filenames))
    for bdf_filename in bdf_filenames:
        if not isinstance(bdf_filename, (string_types, BDF, StringIO)):
            raise TypeError('bdf_filenames is not a string/BDF...%s' %
                            bdf_filename)

        #bdf_filenames = [bdf_filenames]

    #starting_id_dict_default = {
    #'cid' : max(model.coords.keys()),
    #'nid' : max(model.nodes.keys()),
    #'eid' : max([
    #max(model.elements.keys()),
    #max(model.masses.keys()),
    #]),
    #'pid' : max([
    #max(model.properties.keys()),
    #max(model.properties_mass.keys()),
    #]),
    #'mid' : max(model.material_ids),
    #}
    bdf_filename0 = bdf_filenames[0]
    if isinstance(bdf_filename0, BDF):
        model = bdf_filename0
    else:
        model = BDF(debug=False, log=log)
        model.disable_cards(cards_to_skip)
        model.read_bdf(bdf_filename0, encoding=encoding, validate=False)

    if skip_case_control_deck:
        model.case_control_deck = CaseControlDeck([], log=None)
    model.log.info('primary=%s' % bdf_filename0)

    _mapper_0 = _get_mapper_0(model)  # mapper for first model

    data_members = [
        'coords',
        'nodes',
        'elements',
        'masses',
        'properties',
        'properties_mass',
        'materials',
        'sets',
        'rigid_elements',
        'mpcs',
        'caeros',
        'splines',
    ]
    mappers = []
    for bdf_filename in bdf_filenames[1:]:
        starting_id_dict = get_renumber_starting_ids_from_model(model)
        #for param, val in sorted(starting_id_dict.items()):
        #print('  %-3s %s' % (param, val))

        model.log.info('secondary=%s' % bdf_filename)
        if isinstance(bdf_filename, BDF):
            model2_renumber = bdf_filename
        else:
            model2_renumber = BDF(debug=False, log=log)
            model2_renumber.disable_cards(cards_to_skip)
            model2_renumber.read_bdf(bdf_filename)

        _apply_scalar_cards(model, model2_renumber)

        bdf_dump = StringIO()  # 'bdf_merge_temp.bdf'
        _, mapperi = bdf_renumber(model2_renumber,
                                  bdf_dump,
                                  starting_id_dict=starting_id_dict,
                                  size=size,
                                  is_double=is_double,
                                  cards_to_skip=cards_to_skip)
        bdf_dump.seek(0)

        mappers.append(mapperi)
        model2 = BDF(debug=False, log=log)
        model2.disable_cards(cards_to_skip)
        model2.read_bdf(bdf_dump)

        #model.log.info('model2.node_ids = %s' % np.array(model2.node_ids))
        for data_member in data_members:
            data1 = getattr(model, data_member)
            data2 = getattr(model2, data_member)
            if isinstance(data1, dict):
                #model.log.info('  working on %s' % (data_member))
                for key, value in data2.items():
                    if data_member in 'coords' and key == 0:
                        continue
                    # list and non-list values are merged the same way
                    assert key not in data1, key
                    data1[key] = value
                    #print('   %s' % key)
            else:
                raise NotImplementedError(type(data1))
    #if bdf_filenames_out:
    #model.write_bdf(bdf_filenames_out, size=size)

    mapper_renumber = None
    if renumber:
        model.log.info('final renumber...')

        starting_id_dict = {
            'cid': 1,
            'nid': 1,
            'eid': 1,
            'pid': 1,
            'mid': 1,
        }
        _, mapper_renumber = bdf_renumber(model,
                                          bdf_filename_out,
                                          starting_id_dict=starting_id_dict,
                                          size=size,
                                          is_double=is_double,
                                          cards_to_skip=cards_to_skip)
        bdf_filename_temp = StringIO()
        model.write_bdf(bdf_filename_temp,
                        size=size,
                        is_double=False,
                        interspersed=False,
                        enddata=None,
                        close=False)
        bdf_filename_temp.seek(0)
        model = read_bdf(bdf_filename_temp,
                         validate=False,
                         xref=model._xref,
                         punch=False,
                         log=model.log,
                         debug=True,
                         mode=model._nastran_format)

    elif bdf_filename_out:
        model.write_bdf(out_filename=bdf_filename_out,
                        encoding=None,
                        size=size,
                        is_double=is_double,
                        interspersed=True,
                        enddata=None)

    mappers_final = _assemble_mapper(mappers,
                                     _mapper_0,
                                     data_members,
                                     mapper_renumber=mapper_renumber)
    return model, mappers_final
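This newer variant also returns the id mappers, which can be used to trace an original id from one of the input decks into the merged model; a hedged sketch, assuming the returned list holds one mapper per input deck, in input order.

# merge and keep the per-deck id maps (keys follow the Returns section above)
merged_model, mappers = bdf_merge(['primary.bdf', 'secondary.bdf'],
                                  bdf_filename_out='merged.bdf', renumber=True)
nid_map_secondary = mappers[1]['nodes']  # assumed: 'nodes' -> {old_id: new_id} for secondary.bdf
print('node 42 of secondary.bdf is now', nid_map_secondary.get(42))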
Example #17
def clear_out_solids(bdf_filename,
                     bdf_filename_out=None,
                     equivalence=True,
                     renumber=True,
                     equivalence_tol=0.01):
    """removes solid elements"""
    if bdf_filename_out is None:
        if renumber or equivalence:
            msg = ('bdf_filename_out=%s must be specified if renumber=%s '
                   'or equivalence=%s are True' %
                   (bdf_filename_out, renumber, equivalence))
            raise RuntimeError(msg)

    print('clearing out solids from %s' % bdf_filename)
    model = read_bdf(bdf_filename, xref=False)
    #nodes2    = {nid, node for nid, node in iteritems(model.nodes)}
    #elements2 = {eid, element for eid, element in iteritems(model.elements)
    #if element.type in ['CTRIA3', 'CQUAD4']}

    out_dict = model.get_card_ids_by_card_types(
        card_types=['CTRIA3', 'CQUAD4'])
    save_eids = set(out_dict['CTRIA3'] + out_dict['CQUAD4'])
    all_eids = set(model.element_ids)
    print('all_eids =', all_eids)
    print('save_eids =', save_eids)
    remove_eids = all_eids - save_eids
    print('remove_eids =', remove_eids)

    for eid in remove_eids:
        print('eid =', eid)
        del model.elements[eid]

    # TODO: seems like we could be more efficient...
    #nids = unique(hstack([model.elements[eid].node_ids for eid in save_eids]))

    nids = set([])
    elements2 = {}
    print(model.elements)
    for eid, element in iteritems(model.elements):
        #if element.type not in ['CTRIA3', 'CQUAD4']:
        #continue
        #elements2[eid] = element
        nids.update(element.node_ids)
    nids = list(nids)
    nids.sort()
    #print('nids = ', nids)
    #print('eids = ', eids)
    nodes2 = {nid: node for nid, node in iteritems(model.nodes) if nid in nids}
    properties2 = {
        pid: prop
        for pid, prop in iteritems(model.properties) if prop.type == 'PSHELL'
    }

    model.nodes = nodes2
    #model.elements = elements2
    model.properties = properties2

    # already equivalenced?
    #remove_unassociated_nodes(bdf_filename, bdf_filename_out, renumber=False)

    #bdf_filename_out = 'equivalence.bdf'
    starting_id_dict = {
        'cid': 1,
        'nid': 1,
        'eid': 1,
        'pid': 1,
        'mid': 1,
    }
    if equivalence:
        if renumber:
            bdf_equivalenced_filename = 'equivalence.bdf'
        else:
            bdf_equivalenced_filename = bdf_filename_out

        model.write_bdf('remove_unused_nodes.bdf')
        bdf_equivalence_nodes(model,
                              bdf_equivalenced_filename,
                              equivalence_tol,
                              renumber_nodes=False,
                              neq_max=4,
                              xref=True)
        if renumber:
            bdf_renumber(bdf_equivalenced_filename,
                         bdf_filename_out,
                         size=8,
                         is_double=False,
                         starting_id_dict=starting_id_dict)
    elif renumber:
        bdf_renumber(model,
                     bdf_filename_out,
                     size=8,
                     is_double=False,
                     starting_id_dict=starting_id_dict)

    return model
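A hypothetical invocation of the helper above; the file names are placeholders.

# keep only the CTRIA3/CQUAD4 shells, equivalence the remaining nodes, then renumber
shell_model = clear_out_solids('solid_and_shell.bdf',
                               bdf_filename_out='shells_only.bdf',
                               equivalence=True, renumber=True,
                               equivalence_tol=0.01)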
Example #18
def bdf_merge(bdf_filenames,
              bdf_filename_out=None,
              renumber=True,
              encoding=None,
              size=8,
              is_double=False,
              cards_to_skip=None,
              log=None):
    """
    Merges multiple BDF files into one file

    Parameters
    ----------
    bdf_filenames : List[str]
        list of bdf filenames
    bdf_filename_out : str / None
        the output bdf filename (default=None; None -> no writing)
    renumber : bool
        should the bdf be renumbered (default=True)
    encoding : str
        the unicode encoding (default=None; system default)
    size : int; {8, 16}; default=8
        the bdf write precision
    is_double : bool; default=False
        the field precision to write
    cards_to_skip : List[str]; (default=None -> don't skip any cards)
        There are edge cases (e.g. FLUTTER analysis) where things can break due to
        uncross-referenced cards.  You need to disable entire classes of cards in
        that case (e.g. all aero cards).

    Supports
    --------
      nodes:      GRID
      coords:     CORDx
      elements:   CQUAD4, CTRIA3, CTETRA, CPENTA, CHEXA, CELASx, CBAR, CBEAM
                  CONM1, CONM2, CMASS
      properties: PSHELL, PCOMP, PSOLID, PMASS
      materials:  MAT1, MAT8

    .. todo:: doesn't support SPOINTs/EPOINTs
    .. warning:: still very preliminary
    """
    if not isinstance(bdf_filenames, (list, tuple)):
        raise TypeError('bdf_filenames is not a list/tuple...%s' %
                        str(bdf_filenames))

    if not len(bdf_filenames) > 1:
        raise RuntimeError("You can't merge one BDF...bdf_filenames=%s" %
                           str(bdf_filenames))
    for bdf_filename in bdf_filenames:
        if not isinstance(bdf_filename, string_types):
            raise TypeError('bdf_filenames is not a string...%s' %
                            bdf_filename)
        #bdf_filenames = [bdf_filenames]

    #starting_id_dict_default = {
    #'cid' : max(model.coords.keys()),
    #'nid' : max(model.nodes.keys()),
    #'eid' : max([
    #max(model.elements.keys()),
    #max(model.masses.keys()),
    #]),
    #'pid' : max([
    #max(model.properties.keys()),
    #max(model.properties_mass.keys()),
    #]),
    #'mid' : max(model.material_ids),
    #}
    model = BDF(debug=False, log=log)
    model.disable_cards(cards_to_skip)
    bdf_filename0 = bdf_filenames[0]
    model.read_bdf(bdf_filename0, encoding=encoding)
    model.log.info('primary=%s' % bdf_filename0)

    data_members = [
        'coords',
        'nodes',
        'elements',
        'masses',
        'properties',
        'properties_mass',
        'materials',
    ]
    for bdf_filename in bdf_filenames[1:]:
        #model.log.info('model.masses = %s' % model.masses)
        starting_id_dict = {
            'cid': max(model.coords.keys()) + 1,
            'nid': max(model.nodes.keys()) + 1,
            'eid': max([
                max(model.elements.keys()),
                0 if len(model.masses) == 0 else max(model.masses.keys()),
            ]) + 1,
            'pid': max([
                max(model.properties.keys()),
                0 if len(model.properties_mass) == 0 else max(model.properties_mass.keys()),
            ]) + 1,
            'mid': max(model.material_ids) + 1,
        }
        #for param, val in sorted(iteritems(starting_id_dict)):
        #print('  %-3s %s' % (param, val))

        model.log.info('secondary=%s' % bdf_filename)
        model2 = BDF(debug=False)
        model2.disable_cards(cards_to_skip)
        bdf_dump = 'bdf_merge_temp.bdf'
        #model2.read_bdf(bdf_filename, xref=False)

        bdf_renumber(bdf_filename,
                     bdf_dump,
                     starting_id_dict=starting_id_dict,
                     size=size,
                     is_double=is_double,
                     cards_to_skip=cards_to_skip)
        model2 = BDF(debug=False)
        model2.disable_cards(cards_to_skip)
        model2.read_bdf(bdf_dump)
        os.remove(bdf_dump)

        #model.log.info('model2.node_ids = %s' % np.array(model2.node_ids))
        for data_member in data_members:
            data1 = getattr(model, data_member)
            data2 = getattr(model2, data_member)
            if isinstance(data1, dict):
                #model.log.info('  working on %s' % (data_member))
                for key, value in iteritems(data2):
                    if data_member in 'coords' and key == 0:
                        continue
                    if isinstance(value, list):
                        raise NotImplementedError(type(value))
                    else:
                        assert key not in data1, key
                        data1[key] = value
                        #print('   %s' % key)
            else:
                raise NotImplementedError(type(data1))
    #if bdf_filenames_out:
    #model.write_bdf(bdf_filenames_out, size=size)

    if renumber:
        model.log.info('final renumber...')
        starting_id_dict = {
            'cid': 1,
            'nid': 1,
            'eid': 1,
            'pid': 1,
            'mid': 1,
        }
        bdf_renumber(model,
                     bdf_filename_out,
                     starting_id_dict=starting_id_dict,
                     size=size,
                     is_double=is_double,
                     cards_to_skip=cards_to_skip)
    elif bdf_filename_out:
        model.write_bdf(out_filename=bdf_filename_out,
                        encoding=None,
                        size=size,
                        is_double=is_double,
                        interspersed=True,
                        enddata=None)
    return model