Example #1
def get_processors(glb_idx: PartialRetrievalDictionary) -> pd.DataFrame:
    procs = set(glb_idx.get(Processor.partial_key()))  # Unique processors
    d = {}
    for p in procs:
        parent_relations = glb_idx.get(
            ProcessorsRelationPartOfObservation.partial_key(child=p))
        d[p.ident] = {rel.parent_processor.ident for rel in parent_relations}  # "rel" avoids shadowing the outer "p"

    lst = [[
        "ProcessorGroup", "Processor", "ParentProcessor", "SubsystemType",
        "System", "FunctionalOrStructural", "Accounted", "Stock",
        "Description", "GeolocationRef", "GeolocationCode",
        "GeolocationLatLong", "Attributes"
    ]]
    # Elaborate a DAG, then iterate over it
    for ident in list(toposort.toposort_flatten(d)):
        p = glb_idx.get(Processor.partial_key(ident=ident))[0]
        gref = p.geolocation.reference if p.geolocation else ""
        gcode = p.geolocation.code if p.geolocation else ""
        pgroup = p.attributes.get("processor_group")
        t = [
            pgroup if pgroup else "", p.name, "",
            p.attributes.get("subsystem_type", ""),
            p.attributes.get("processor_system", ""),
            p.attributes.get("functional_or_structural", ""),
            p.attributes.get("instance_or_archetype", ""),
            p.attributes.get("stock"), "", gref, gcode, "", ""
        ]
        parent_relations = glb_idx.get(
            ProcessorsRelationPartOfObservation.partial_key(child=p))
        if len(parent_relations) == 0:
            lst.append(t)
        else:
            first = True
            for rel in parent_relations:
                t[2] = rel.parent_processor.full_hierarchy_names(glb_idx)[0]
                lst.append(t.copy())
                if first:
                    first = False
                    # After the first row, switch the Processor column to the
                    # full hierarchical name containing that first parent,
                    # so subsequent rows are unambiguous
                    proper = None
                    for n in p.full_hierarchy_names(glb_idx):
                        if t[2] in n:
                            proper = n
                            break
                    t[1] = proper if proper else t[1]

    return list_to_dataframe(lst)
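The child-to-parents mapping "d" is handed to toposort.toposort_flatten, which always yields a node's dependencies (here, its parents) before the node itself, so ancestor processors are written out first. A minimal, self-contained sketch with invented identifiers:

import toposort

dag = {
    "grandchild": {"child"},  # each entry maps a node to the set it depends on
    "child": {"root"},
    "root": set(),
}
print(toposort.toposort_flatten(dag))  # ['root', 'child', 'grandchild']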
Example #2
def get_interface_type(attribute, value, prd: PartialRetrievalDictionary = None):
    """
    Obtain the name of an InterfaceType given the value of an attribute
    (Obtain the registry of objects)

    :param attribute:
    :param value:
    :param prd: A PartialRetrievalDictionary, passed in State "_glb_idx" to the AST evaluator by
    :return:
    """

    if not prd:
        raise Exception("No Global-Index parameter passed to InterfaceType function")
    else:
        # Obtain ALL InterfaceTypes, then ONE having attribute "attribute" with value <value>
        its = prd.get(FactorType.partial_key())
        ret = None
        for it in its:
            v = vars(it).get(attribute)
            if not v:
                v = it.attributes.get(attribute)
            if v and (strcmp(v, str(value)) or (is_float(value) and float(v) == float(value))):
                ret = it.name
                break
        if ret:
            return ret
        else:
            raise Exception(f"No InterfaceType found having attribute '{attribute}' with value '{value}'")
Example #3
def get_processor(attribute, value, prd: PartialRetrievalDictionary = None):
    """
    Obtain the name of a Processor given the value of an attribute
    (Obtain the registry of objects)

    :param attribute:
    :param value:
    :param prd: A PartialRetrievalDictionary, passed in State "_glb_idx" to the AST evaluator by
    :return:
    """

    if not prd:
        raise Exception("No Global-Index parameter passed to Processor function")
    else:
        # Obtain ALL Processors, then ONE having attribute "attribute" with value <value>
        procs = prd.get(Processor.partial_key())
        ret = None
        for proc in procs:
            v = vars(proc).get(attribute)
            if not v:
                v = proc.attributes.get(attribute)
            if v and (strcmp(v, str(value)) or (is_float(value) and float(v) == float(value))):
                ret = proc.name
                break
        if ret:
            return ret
        else:
            raise Exception(f"No Processor found having attribute '{attribute}' with value '{value}'")
Example #4
def get_code_hierarchies(glb_idx: PartialRetrievalDictionary) -> pd.DataFrame:
    def hierarchy_to_list(nodes: List[HierarchyNode]) -> List[HierarchyNode]:
        nodes_list = []
        if nodes:
            for node in sorted(nodes, key=lambda n: n.name):
                nodes_list.append(node)
                nodes_list.extend(hierarchy_to_list(list(node.get_children())))
        return nodes_list

    lst = [[
        "Source", "HierarchyGroup", "Hierarchy", "Level", "ReferredHierarchy",
        "Code", "ParentCode", "Label", "Description", "Expression",
        "GeolocationRef", "GeolocationCode", "Attributes"
    ]]
    for hh in glb_idx.get(Hierarchy.partial_key()):
        if hh.hierarchy_type == Taxon:
            for c in hierarchy_to_list(hh.roots):
                lst.append([
                    "", "", c.hierarchy.name,
                    ifnull(c.level, ""),
                    c.referred_node.hierarchy.name if c.referred_node else "",
                    c.name, c.parent.name if c.parent else "", c.label,
                    c.description, "", "", "", ""
                ])

    return list_to_dataframe(lst)
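hierarchy_to_list is a pre-order, depth-first traversal: each node is emitted before its children, siblings sorted by name. A toy, self-contained sketch of the same traversal (the Node class is invented; the real HierarchyNode exposes get_children() instead):

from dataclasses import dataclass, field
from typing import List

@dataclass
class Node:
    name: str
    children: List["Node"] = field(default_factory=list)

def flatten(nodes: List[Node]) -> List[Node]:
    out = []
    for n in sorted(nodes, key=lambda n: n.name):
        out.append(n)                    # parent first...
        out.extend(flatten(n.children))  # ...then its subtree
    return out

roots = [Node("b"), Node("a", [Node("a2"), Node("a1")])]
print([n.name for n in flatten(roots)])  # ['a', 'a1', 'a2', 'b']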
Example #5
def deserialize_state(st: str, state_version: int = MODEL_VERSION):
    """
    Deserializes an object previously serialized using "serialize_state"

    It can receive also a "State" modified for the serialization to restore it

    :param state_version: version number of the internal models
    :param st:
    :return:
    """
    def deserialize_dataframe(t):
        if t:
            dtypes = {col: np.dtype(dt) for col, dt in json.loads(t[1]).items()}
            df = pd.read_json(t[0], orient="split", dtype=dtypes)
            return df
        else:
            return pd.DataFrame()  # Return an empty pd.DataFrame

    print("  deserialize_state")
    if isinstance(st, bytes):
        st = blosc.decompress(st).decode("utf-8")
    if isinstance(st, str):
        # TODO: use state_version to convert a previous version to the latest one
        #  This means transforming the old json to the latest json
        if state_version == MODEL_VERSION:
            state = deserialize_to_object(st)
        else:
            raise Exception(
                f"The model version {state_version} is not supported. Current version is {MODEL_VERSION}."
            )
    else:
        raise Exception(
            f"Serialized state must be a string: it is currently of type {type(st)}"
        )

    # Iterate all namespaces
    for ns in state.list_namespaces():
        glb_idx = state.get("_glb_idx", ns)
        if isinstance(glb_idx, dict):
            glb_idx = PartialRetrievalDictionary().from_pickable(glb_idx)
            state.set("_glb_idx", glb_idx)
        glb_idx, p_sets, hh, datasets, mappings = get_case_study_registry_objects(
            state, ns)
        if isinstance(glb_idx, dict):
            print("glb_idx is DICT, after deserialization!!!")
        # TODO Deserialize DataFrames
        # In datasets
        for ds_name in datasets:
            lst = datasets[ds_name]
            ds = deserialize(lst[:-1])[0]
            ds.data = deserialize_dataframe(lst[-1])
            datasets[ds_name] = ds

    return state
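deserialize_dataframe expects each dataframe as a pair (data json, dtypes json), so column dtypes survive the round trip. A sketch of the writing side this format implies; the actual serialize_state may differ in details:

import json
import numpy as np
import pandas as pd

def serialize_dataframe(df: pd.DataFrame):
    # Store the dtypes next to the data, since JSON alone loses them
    dtypes = {col: str(dtype) for col, dtype in df.dtypes.items()}
    return df.to_json(orient="split"), json.dumps(dtypes)

df = pd.DataFrame({"a": [1, 2], "b": [0.5, 1.5]})
t = serialize_dataframe(df)
restored = pd.read_json(t[0], orient="split",
                        dtype={k: np.dtype(v) for k, v in json.loads(t[1]).items()})
print(restored.dtypes)  # a: int64, b: float64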
Example #6
    def _process_row(self, fields: Dict[str, Any], subrow=None) -> None:
        """
        :param fields:
        :param subrow:
        :return:
        """
        # InterfaceType must exist
        try:
            self._get_factor_type_from_field(None, "interface")
        except CommandExecutionError as e:
            self._add_issue(IType.ERROR, str(e))

        # (LCIA) Indicator must exist
        indicator = self._glb_idx.get(
            Indicator.partial_key(fields["lcia_indicator"]))
        if len(indicator) == 1:
            pass
        elif len(indicator) == 0:
            self._add_issue(
                IType.ERROR,
                f"Indicator with name '{fields['lcia_indicator']}' not found" +
                subrow_issue_message(subrow))
            return
        else:
            self._add_issue(
                IType.WARNING,
                f"Indicator with name '{fields['lcia_indicator']}' found {len(indicator)} times"
                + subrow_issue_message(subrow))
            return

        # Store LCIA Methods as a new variable.
        # TODO Use it to prepare a pd.DataFrame previous to calculating Indicators (after solving). Use "to_pickable"
        lcia_methods = self._state.get("_lcia_methods")
        if not lcia_methods:
            lcia_methods = PartialRetrievalDictionary()
            self._state.set("_lcia_methods", lcia_methods)
        key = dict(m=fields["lcia_method"],
                   d=fields["lcia_indicator"],
                   h=fields["lcia_horizon"],
                   i=fields["interface"])
        lcia_methods.put(
            key, (fields["interface_unit"], fields["lcia_coefficient"]))
Example #7
def get_problem_statement(glb_idx: PartialRetrievalDictionary) -> pd.DataFrame:
    ps = glb_idx.get(ProblemStatement.partial_key())
    if len(ps) == 0:
        ps = [ProblemStatement()]

    lst = [["Scenario", "Parameter", "Value", "Description"]]
    for param_name, param_value in ps[0].solving_parameters.items():
        lst.append(["", param_name, param_value, ""])

    for scenario, params in ps[0].scenarios.items():
        for k, v in params.items():
            lst.append([scenario, k, v, ""])

    return list_to_dataframe(lst)
Example #8
def lcia_method(indicator: str, method: str = None, horizon: str = None,
                state: State = None, lcia_methods: PartialRetrievalDictionary = None):
    """

    :param indicator: Indicator name
    :param method: LCIA method weighting
    :param horizon: Time horizon
    :param state: Current values of processor plus parameters
    :param lcia_methods: Where LCIA data is collected
    :return: A dictionary with the
    """
    if indicator is None or indicator.strip() == "":
        return None

    k = dict(d=indicator)
    if method:
        k["m"] = method
    if horizon:
        k["h"] = horizon
    ms = lcia_methods.get(key=k, key_and_value=True)
    indices = create_dictionary()
    for key, value in ms:  # "key"/"value" avoid shadowing the query key "k"
        idx_name = f'{key["d"]}_{key["m"]}_{key["h"]}'
        if idx_name in indices:
            lst = indices[idx_name]
        else:
            lst = []
            indices[idx_name] = lst
        lst.append((key["i"], value[0], float(value[1])))

    ifaces = create_dictionary()
    for t in state.list_namespace_variables():
        if not t[0].startswith("_"):
            p = t[1]  # * ureg(iface_unit)
            ifaces[t[0]] = p

    res = dict()
    for name, lst in indices.items():
        interfaces = []
        weights = []  # LCIA coefficients, from the registered LCIA methods
        for t in lst:
            if t[0] in ifaces:
                v = ifaces[t[0]]  # TODO .to(t[1])
                interfaces.append(v)
                weights.append(t[2])
        # Calculate the value
        ind = np.sum(np.multiply(interfaces, weights))  # * ureg(indicator_unit)
        res[name] = ind

    return res
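The final aggregation is a plain dot product: each indicator value is the sum of interface amounts times their LCIA coefficients. A numeric sketch with invented values:

import numpy as np

interfaces = [10.0, 5.0, 2.0]  # interface amounts taken from the processor state
weights = [0.8, 1.2, 0.0]      # LCIA coefficients for those interfaces
print(np.sum(np.multiply(interfaces, weights)))  # 14.0 = 10*0.8 + 5*1.2 + 2*0.0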
Example #9
def get_parameters(glb_idx: PartialRetrievalDictionary) -> pd.DataFrame:
    lst = [[
        "Parameter", "Type", "Domain", "Value", "Group", "Description",
        "Attributes"
    ]]
    for p in glb_idx.get(Parameter.partial_key()):
        lst.append([
            p.name, p.type, p._range if p._range else "",
            ifnull(p.default_value, ""),
            ifnull(p.group, ""),
            ifnull(p._description, ""), ""
        ])

    return list_to_dataframe(lst)
Example #10
def get_scale_relationships(
        glb_idx: PartialRetrievalDictionary) -> pd.DataFrame:
    lst = [[
        "OriginProcessors", "OriginInterface", "DestinationProcessors",
        "DestinationInterface", "BackInterface", "RelationType", "Weight",
        "ChangeOfTypeScale", "OriginCardinality", "DestinationCardinality",
        "Attributes"
    ]]
    for rel in glb_idx.get(FactorsRelationScaleObservation.partial_key()):
        lst.append([
            rel.origin.processor.name, rel.origin.name,
            rel.destination.processor.name, rel.destination.name, "", "Scale",
            rel.quantity, "", "", "", ""
        ])

    return list_to_dataframe(lst)
Example #11
def get_interface_types(glb_idx: PartialRetrievalDictionary) -> pd.DataFrame:
    lst = [[
        "InterfaceTypeHierarchy", "InterfaceType", "Sphere", "RoegenType",
        "ParentInterfaceType", "Formula", "Description", "Unit",
        "OppositeSubsystemType", "Attributes"
    ]]
    for itype in glb_idx.get(FactorType.partial_key()):
        lst.append([
            itype.hierarchy.name, itype.name, itype.sphere,
            itype.roegen_type.name, itype.parent.name if itype.parent else "",
            "",
            itype.attributes.get("description", ""),
            itype.attributes.get("unit",
                                 ""), itype._opposite_processor_type, ""
        ])
    return list_to_dataframe(lst)
Example #12
def get_processor_names_to_processors_dictionary(
        state: PartialRetrievalDictionary):
    """
    Obtain a dictionary with all processor names (a processor may have multiple names) and
    the corresponding Processor object

    :param state:
    :return:
    """
    ps = state.get(Processor.partial_key())
    ps = set(ps)  # Avoid repeating Processor objects
    d = create_dictionary()
    for p in ps:
        for n in p.full_hierarchy_names(state):
            d[n] = p
    return d
Example #13
def get_exchange_relationships(
        glb_idx: PartialRetrievalDictionary) -> pd.DataFrame:
    lst = [[
        "OriginProcessors", "OriginInterface", "DestinationProcessors",
        "DestinationInterface", "BackInterface", "RelationType", "Weight",
        "ChangeOfTypeScale", "OriginCardinality", "DestinationCardinality",
        "Attributes"
    ]]
    for rel in glb_idx.get(
            FactorsRelationDirectedFlowObservation.partial_key()):
        lst.append([
            rel.source_factor.processor.name, rel.source_factor.name,
            rel.target_factor.processor.name, rel.target_factor.name,
            rel.back_factor.name if rel.back_factor else "", ">", rel.weight,
            "", "", "", ""
        ])

    return list_to_dataframe(lst)
Example #14
    def test_002_serialization_deserialization(self):
        prd = prepare_partial_key_dictionary()
        prd2 = prd.to_pickable()
        s = serialize_from_object(prd2)
        prd2 = deserialize_to_object(s)
        prd = PartialRetrievalDictionary().from_pickable(prd2)
        # Tests
        res = prd.get({"_type": "PartOf"})
        self.assertEqual(len(res), 3)
        res = prd.get({"_type": "PartOf", "_child": "B"})
        self.assertEqual(len(res), 2)
        res = prd.get({"_type": "PartOf", "_parent": "A1"})
        self.assertEqual(len(res), 1)
        res = prd.get({"_type": "PartOf", "_parent": "C"})
        self.assertEqual(len(res), 0)
        res = prd.get({"_typer": "PartOf", "_parent": "C"})  # unknown key field matches nothing
        self.assertEqual(len(res), 0)
Example #15
def get_scale_change_map(glb_idx: PartialRetrievalDictionary) -> pd.DataFrame:
    lst = [[
        "OriginHierarchy", "OriginInterfaceType", "DestinationHierarchy",
        "DestinationInterfaceType", "OriginContext", "DestinationContext",
        "Scale", "OriginUnit", "DestinationUnit"
    ]]
    for sc in glb_idx.get(
            FactorTypesRelationUnidirectionalLinearTransformObservation.
            partial_key()):
        lst.append([
            sc.origin.hierarchy.name if sc.origin.hierarchy else "",
            sc.origin.name,
            sc.destination.hierarchy.name if sc.destination.hierarchy else "",
            sc.destination.name,
            sc._origin_context.name if sc._origin_context else "",
            sc._destination_context.name if sc._destination_context else "",
            sc._weight, sc._origin_unit, sc._destination_unit
        ])

    return list_to_dataframe(lst)
Example #16
def get_case_study_registry_objects(state, namespace=None):
    """
    Obtain the main entries of the state

    :param state: Input state (modified also)
    :param namespace: State supports several namespaces. This one serves to specify which one. Default=None
    :return: Tuple: (global index, processor sets, hierarchies, datasets, mappings)
    """
    # Index of ALL objects
    glb_idx = state.get("_glb_idx", namespace)
    if not glb_idx:
        glb_idx = PartialRetrievalDictionary()
        state.set("_glb_idx", glb_idx, namespace)

    # ProcessorSet dict (dict of sets)
    p_sets = state.get("_processor_sets", namespace)
    if not p_sets:
        p_sets = create_dictionary()
        state.set("_processor_sets", p_sets, namespace)

    # Hierarchies Dict
    hh = state.get("_hierarchies", namespace)
    if not hh:
        hh = create_dictionary()
        state.set("_hierarchies", hh, namespace)
    # Datasets Dict
    datasets = state.get("_datasets", namespace)
    if not datasets:
        datasets = create_dictionary()
        state.set("_datasets", datasets, namespace)
    # Mappings Dict
    mappings = state.get("_mappings", namespace)
    if not mappings:
        mappings = create_dictionary()
        state.set("_mappings", mappings, namespace)

    return glb_idx, p_sets, hh, datasets, mappings
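Each registry entry follows the same get-or-create idiom: read the variable from the State and, if absent, create it and store it back. A self-contained sketch of the idiom with a plain dict standing in for State:

store = {}

def get_or_create(key, factory):
    obj = store.get(key)
    if obj is None:  # note: the function above tests "if not obj", which would
                     # also re-create falsy (e.g. empty dict-like) entries
        obj = factory()   # create on first access...
        store[key] = obj  # ...and remember it for next time
    return obj

datasets = get_or_create("_datasets", dict)
assert get_or_create("_datasets", dict) is datasets  # same object on reuse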
Example #17
def get_interfaces(glb_idx: PartialRetrievalDictionary) -> pd.DataFrame:
    # Used to examine "value" as expression, and find variables that are interface names vs parameter names
    params = create_dictionary(
        data={p.name: None
              for p in glb_idx.get(Parameter.partial_key())})
    s = State()
    procs = glb_idx.get(Processor.partial_key())
    d = {}
    for p in procs:
        parent_relations = glb_idx.get(
            ProcessorsRelationPartOfObservation.partial_key(child=p))
        d[p.ident] = {rel.parent_processor.ident for rel in parent_relations}  # "rel" avoids shadowing the outer "p"

    lst = [[
        "Processor", "InterfaceType", "Interface", "Sphere", "RoegenType",
        "Orientation", "OppositeSubsystemType", "GeolocationRef",
        "GeolocationCode", "InterfaceAttributes", "Value", "Unit",
        "RelativeTo", "Uncertainty", "Assessment", "PedigreeMatrix",
        "Pedigree", "Time", "Source", "NumberAttributes", "Comments"
    ]]
    # Elaborate a DAG, then iterate over it
    for ident in list(toposort.toposort_flatten(d)):
        p = glb_idx.get(Processor.partial_key(ident=ident))[0]
        ifaces = glb_idx.get(Factor.partial_key(processor=p))
        iface_names = create_dictionary(
            data={iface.name: iface
                  for iface in ifaces})
        # Elaborate DAG of Interfaces because of Observations
        iface_deps = {}  # Separate dict, so the processor DAG "d" above is not clobbered
        for iface in ifaces:
            if iface.ident not in iface_deps:
                iface_deps[iface.ident] = set()
            for obs in iface.quantitative_observations:
                if obs.relative_factor:
                    iface_deps[iface.ident].add(obs.relative_factor.ident)
                # Consider obs.value and non-linear dependencies
                if isinstance(obs.value, str):
                    ast = string_to_ast(expression_with_parameters, obs.value)
                    evaluation_issues = []
                    value, unresolved_vars = ast_evaluator(
                        exp=ast,
                        state=s,
                        obj=None,
                        issue_lst=evaluation_issues)
                    for unresolved in unresolved_vars:
                        if unresolved not in params:
                            iface_deps[iface.ident].add(iface_names[unresolved].ident)

        for ident2 in list(toposort.toposort_flatten(iface_deps)):
            iface = glb_idx.get(Factor.partial_key(ident=ident2))[0]
            lst1 = [
                iface.processor.name, iface.taxon.name, iface.name,
                iface.sphere, iface.roegen_type.name, iface.orientation,
                iface.opposite_processor_type, "", "", ""
            ]
            observations = iface.quantitative_observations
            if len(observations) > 0:
                for obs in observations:
                    lst2 = [
                        obs.value,
                        obs.attributes.get("unit", ""),
                        obs.relative_factor.name if obs.relative_factor else "",
                        obs.attributes.get("spread", ""),
                        obs.attributes.get("assessment", ""),
                        obs.attributes.get("pedigree_template", ""),
                        obs.attributes.get("pedigree", ""),
                        obs.attributes.get("time", ""),
                        obs.observer.name if obs.observer else "", "",
                        obs.attributes.get("comments", "")
                    ]
                    lst.append(lst1 + lst2)
            else:
                lst.append(lst1 + [""] * 11)  # 11 empty observation columns, matching the header

    return list_to_dataframe(lst)
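The dependency rule above: any variable in an observation's value expression that is not a parameter is taken to be another interface of the same processor, and becomes an edge in the interface DAG. A sketch of that rule with a naive regex standing in for string_to_ast/ast_evaluator (the expression and names are invented):

import re

params = {"share"}                # known parameter names
expr = "share * WaterWithdrawal"  # an observation's value expression
names = set(re.findall(r"[A-Za-z_]\w*", expr))
print(names - params)  # {'WaterWithdrawal'} -> an interface dependency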
Example #18
def export_model_to_xml(
        registry: PartialRetrievalDictionary
) -> Tuple[str, Dict[str, Processor]]:
    """
    Elaborate an XML string containing the nested processors and their attributes.
    Also the interfaces inside processors
    <processors>
      <root_p1 fullname="" level="" system="" subsystem="" functional="true|false">
        <interfaces>
          <i1 type="" sphere="" roegen_type="" orientation="" opposite_processor_type="" />
          ...
        </interfaces>
        <child_p2>
          ...
        </child_p2>
      </root_p1>
      ...
    </processors>

    Example (abstract):

    '/processors//[level="n"]'

    :param registry:
    :return:
    """
    def xml_processor(p: Processor, registry: PartialRetrievalDictionary,
                      p_map: Dict[str, Processor]):
        """
        Return the XML of a processor
        Recursive into children

        :param p:
        :return:
        """
        def xml_interface(iface: Factor):
            """

            :param iface:
            :return:
            """
            s = f'<{iface.name} type="{iface.taxon.name}" sphere="{iface.sphere}" ' \
                f'roegen_type="{iface.roegen_type}" orientation="{iface.orientation}" ' \
                f'opposite_processor_type="{iface.opposite_processor_type}" />'
            if case_sensitive:
                return s
            else:
                return s.lower()

        children = p.children(registry)
        full_name = p.full_hierarchy_names(registry)[0]
        if case_sensitive:
            p_map[full_name] = p
        else:
            p_map[full_name.lower()] = p

        s = f"""
    <{p.name} fullname="{full_name}" level="{p.level}" system="{p.processor_system}" subsystem="{p.subsystem_type}" functional="{"true" if strcmp(p.functional_or_structural, "Functional") else "false"}" >
        <interfaces>
        {chr(10).join([xml_interface(f) for f in p.factors])}
        </interfaces>
        {chr(10).join([xml_processor(c, registry, p_map) for c in children])}    
    </{p.name}>"""
        if case_sensitive:
            return s
        else:
            return s.lower()

    # Part of relationships
    por = registry.get(ProcessorsRelationPartOfObservation.partial_key())

    # Set of all instance processors NOT touched by part-of relationships
    unaffected_procs = {
        p for p in registry.get(Processor.partial_key())
        if strcmp(p.instance_or_archetype, "Instance")
    }
    for po in por:
        try:
            unaffected_procs.remove(po.parent_processor)
        except KeyError:
            pass
        try:
            unaffected_procs.remove(po.child_processor)
        except KeyError:
            pass

    # Keep those affecting Instance processors
    por = [
        po for po in por
        if strcmp(po.parent_processor.instance_or_archetype, "Instance")
    ]

    # Get root processors (set of processors not appearing as child_processor)
    parents = {po.parent_processor for po in por}
    children = {po.child_processor for po in por}
    roots = parents.difference(children).union(unaffected_procs)
    # leaves = children.difference(parents)
    result = '<processors>'  # <?xml version="1.0" encoding="utf-8"?>\n
    p_map = {}
    for p in roots:
        result += xml_processor(p, registry, p_map)
    result += "\n</processors>"

    return result, p_map
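A hedged usage sketch of the docstring's abstract query '/processors//[level="n"]': with lxml, processors at a given level can be selected by attribute, since every processor element carries a "level" attribute. The toy document below mimics the exporter's output shape (names invented):

from lxml import etree

xml_string = (
    '<processors>'
    '<farm fullname="farm" level="n" system="s1" subsystem="local" functional="true">'
    '<interfaces>'
    '<water type="WaterWithdrawal" sphere="biosphere" roegen_type="flow" '
    'orientation="input" opposite_processor_type="" />'
    '</interfaces>'
    '</farm>'
    '</processors>'
)
root = etree.fromstring(xml_string)
print([e.tag for e in root.xpath('//*[@level="n"]')])  # ['farm']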
Example #19
def prepare_partial_key_dictionary():
    prd = PartialRetrievalDictionary()
    oer = Observer("tester")
    p0 = Processor("A1")
    p1 = Processor("A2")
    p2 = Processor("B")
    p3 = Processor("C")
    prd.put({"_type": "Processor", "_name": "A1"}, p0)
    prd.put({"_type": "Processor", "_name": "A2"}, p1)
    prd.put({"_type": "Processor", "_name": "B"}, p2)
    prd.put({"_type": "Processor", "_name": "C"}, p3)
    prd.put({
        "_type": "PartOf",
        "_parent": "A1",
        "_child": "B"
    }, ProcessorsRelationPartOfObservation(p0, p2, oer))
    prd.put({
        "_type": "PartOf",
        "_parent": "A2",
        "_child": "B"
    }, ProcessorsRelationPartOfObservation(p1, p2, oer))
    prd.put({
        "_type": "PartOf",
        "_parent": "B",
        "_child": "C"
    }, ProcessorsRelationPartOfObservation(p2, p3, oer))
    return prd