Example #1
def _assign_loaded_columns(self):
    """Attach loaded section data to each column of the frame."""
    for ss in range(self.n_storeys):
        for bb in range(self.n_bays):
            sect_is = self._loaded_column_section_ids[ss][bb]
            if hasattr(sect_is, "__len__"):  # current format: a list of section ids
                n_sections = len(sect_is)
                self.columns[ss][bb].split_into_multiple(
                    [1] * n_sections)  # TODO: should be lengths
                for sect_i in range(n_sections):
                    column_sect_id = str(sect_is[sect_i])
                    sect_dictionary = self._loaded_column_sections[column_sect_id]
                    sf.add_to_obj(self.columns[ss][bb].sections[sect_i],
                                  sect_dictionary)
            else:  # deprecated format: a single bare section id per column
                deprecation(
                    "Frame data structure is out-of-date, "
                    "run sfsimodels.migrate_ecp(<file-path>, <out-file-path>)."
                )
                column_sect_id = str(sect_is)
                sect_dictionary = self._loaded_column_sections[column_sect_id]
                sf.add_to_obj(self.columns[ss][bb].sections[0], sect_dictionary)
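For orientation, the data this method consumes plausibly looks like the sketch below. This is a hypothetical shape inferred from the loops above, not taken from the sfsimodels source: the string keys of `_loaded_column_sections` follow from the `str(...)` conversion of each id, while the section property names are purely illustrative.

# Hypothetical shapes, inferred from the method above (illustrative only).
_loaded_column_section_ids = [
    [[1, 2], [1, 2]],  # storey 0: two columns, two section ids each (current format)
    [3, 3],            # storey 1: bare ids per column (deprecated format, no __len__)
]
_loaded_column_sections = {
    "1": {"depth": 0.5, "width": 0.4},  # section property dicts, keyed by str(id)
    "2": {"depth": 0.5, "width": 0.4},
    "3": {"depth": 0.4, "width": 0.4},
}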
Example #2
def load_last_objects(objs, load_later, ll_type, now_loaded):
    """Load deferred objects of type `ll_type`, recursively loading any
    prerequisite types first."""
    # Mark this type as loaded up front so mutually dependent prereqs
    # cannot recurse into each other forever.
    now_loaded.append(ll_type)
    for obj_pms in load_later[ll_type]:  # obj_pms is [instance, source_dict, verbose]
        for pre_req in obj_pms[0].loading_pre_reqs:
            if pre_req in load_later and pre_req not in now_loaded:
                load_last_objects(objs, load_later, pre_req, now_loaded)
        add_to_obj(obj_pms[0], obj_pms[1], objs=objs, verbose=obj_pms[2])
        objs[ll_type][int(obj_pms[1]["id"])] = obj_pms[0]
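A self-contained toy run of this prerequisite-first ordering, assuming it executes in the same module as `load_last_objects` above. `Foundation`, `Building`, and the no-op `add_to_obj` stub are hypothetical stand-ins (the real `add_to_obj` comes from sfsimodels); each `load_later` entry is an `[instance, source_dict, verbose]` triple, matching how `ecp_dict_to_objects` below queues deferred objects.

from collections import OrderedDict

load_order = []

def add_to_obj(obj, dictionary, objs=None, verbose=0):
    # No-op stub standing in for sfsimodels' add_to_obj; records call order.
    load_order.append(type(obj).__name__)

class Foundation:
    loading_pre_reqs = ()

class Building:
    loading_pre_reqs = ("foundation",)  # buildings need foundations loaded first

objs = OrderedDict(foundation=OrderedDict(), building=OrderedDict())
load_later = {
    "building": [[Building(), {"id": 1}, 0]],
    "foundation": [[Foundation(), {"id": 1}, 0]],
}
now_loaded = []
for ll_type in list(load_later):
    if ll_type not in now_loaded:
        load_last_objects(objs, load_later, ll_type, now_loaded)
print(load_order)  # ['Foundation', 'Building'] -- the prerequisite loads first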
Example #3
def _assign_loaded_walls(self):  # deprecated in favour of element-based loading
    for ss in range(self.n_storeys):
        sect_is = self._loaded_wall_section_ids[ss]
        if hasattr(sect_is, "__len__"):  # current format: a list of section ids
            n_sections = len(sect_is)
            self.elements[ss].split_into_multiple(
                [1] * n_sections)  # TODO: should be lengths
            for sect_i in range(n_sections):
                wall_sect_id = str(sect_is[sect_i])
                sect_dictionary = self._loaded_wall_sections[wall_sect_id]
                sf.add_to_obj(self.elements[ss].sections[sect_i],
                              sect_dictionary)
        else:  # deprecated format: a single bare section id per storey
            deprecation(
                "Frame data structure is out-of-date, please load and save the file to update."
            )
            wall_sect_id = str(sect_is)
            sect_dictionary = self._loaded_wall_sections[wall_sect_id]
            sf.add_to_obj(self.elements[ss].sections[0], sect_dictionary)
Example #4
def ecp_dict_to_objects(ecp_dict,
                        custom_map=None,
                        default_to_base=False,
                        verbose=0):
    """
    Given an ecp dictionary, build a dictionary of sfsi objects

    :param ecp_dict: dict, engineering consistency project dictionary
    :param custom: dict, used to load custom objects, {model type: custom object}
    :param verbose: int, console output
    :return: dict
    """
    if custom_map is None:
        custom_map = {}

    obj_map = get_std_obj_map()
    # merge and overwrite the object map with custom maps
    obj_map = {**obj_map, **custom_map}

    data_models = ecp_dict["models"]

    exception_list = []
    objs = OrderedDict()
    collected = set()
    # Set base type properly
    mtypes = list(data_models)
    for mtype in mtypes:
        base_type = mtype
        if base_type[:-1] in standard_types:  # support loading of old plural-based ecp files
            base_type = base_type[:-1]
            data_models[base_type] = data_models[mtype]
            del data_models[mtype]
        for m_id in data_models[base_type]:
            data_models[base_type][m_id]["base_type"] = base_type
    load_later = {}
    for mtype in data_models:
        base_type = mtype
        if base_type in exception_list:
            continue
        collected.add(base_type)
        objs[base_type] = OrderedDict()
        for m_id in data_models[mtype]:
            obj = data_models[mtype][m_id]
            if "type" not in obj:
                obj["type"] = base_type
            try:
                obj_class = obj_map["%s-%s" % (base_type, obj["type"])]
            except KeyError:
                if default_to_base and f'{base_type}-{base_type}' in obj_map:
                    obj_class = obj_map[f'{base_type}-{base_type}']
                elif obj["type"] in deprecated_types:
                    try:
                        obj_class = obj_map["%s-%s" %
                                            (base_type,
                                             deprecated_types[obj["type"]])]
                    except KeyError:
                        raise KeyError(
                            "Map for Model: '%s' index: '%s' and type: '%s' not available, "
                            "add '%s-%s' to custom dict" %
                            (base_type, m_id, obj["type"], base_type,
                             obj["type"]))
                else:
                    raise KeyError(
                        "Map for Model: '%s' index: '%s' and type: '%s' not available, "
                        "add '%s-%s' to custom dict" %
                        (base_type, m_id, obj["type"], base_type, obj["type"]))
            args, kwargs, missing = get_matching_args_and_kwargs(
                data_models[mtype][m_id], obj_class)
            if missing:  # derive required constructor args that are not stored directly
                for m_item in missing:
                    name = m_item[0]
                    m_indy = m_item[1]
                    if name == 'n_storeys':
                        args[m_indy] = len(
                            data_models[mtype][m_id]["storey_masses"])
                    elif name == 'n_bays':
                        args[m_indy] = len(
                            data_models[mtype][m_id]["bay_lengths"])
            new_instance = obj_class(*args, **kwargs)
            try:
                add_to_obj(new_instance,
                           data_models[mtype][m_id],
                           objs=objs,
                           verbose=verbose)
            except KeyError:
                # The object references ids that have not been loaded yet,
                # so defer it until its prerequisite types are available.
                if hasattr(new_instance, 'loading_pre_reqs'):
                    if new_instance.base_type not in load_later:
                        load_later[new_instance.base_type] = []
                    load_later[new_instance.base_type].append(
                        [new_instance, data_models[mtype][m_id], verbose])
                    continue
                else:
                    raise
            objs[base_type][int(data_models[mtype][m_id]["id"])] = new_instance
    ll_types = list(load_later)
    now_loaded = []
    for ll_type in ll_types:
        if ll_type not in now_loaded:
            load_last_objects(objs, load_later, ll_type, now_loaded)

    all_bts = list(objs)
    for base_type in all_bts:  # support old-style (plural) ecp files
        if base_type in standard_types:
            objs[base_type + "s"] = objs[base_type]
    return objs
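A minimal usage sketch, assuming `sfsimodels` is installed, that this function is exposed as `sfsimodels.files.ecp_dict_to_objects`, and that "soil" is a standard type that maps to the standard `Soil` object. The field names are illustrative; the exact required fields depend on the sfsimodels version.

import sfsimodels as sm

ecp_dict = {
    "models": {
        "soil": {  # base type -> {id -> model dict}
            "1": {"id": 1, "name": "soil_1", "unit_dry_weight": 16000.0}
        }
    }
}
objs = sm.files.ecp_dict_to_objects(ecp_dict)
soil = objs["soil"][1]  # objects are keyed by base type, then integer id
# If "soil" is among standard_types, the plural alias from the final loop exists too:
assert objs["soils"] is objs["soil"]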