def prepare_partial_key_dictionary():
    # Build a PartialRetrievalDictionary holding four Processors and three
    # "PartOf" relations; used by the serialization test further down
    prd = PartialRetrievalDictionary()
    oer = Observer("tester")
    p0 = Processor("A1")
    p1 = Processor("A2")
    p2 = Processor("B")
    p3 = Processor("C")
    prd.put({"_type": "Processor", "_name": "A1"}, p0)
    prd.put({"_type": "Processor", "_name": "A2"}, p1)
    prd.put({"_type": "Processor", "_name": "B"}, p2)
    prd.put({"_type": "Processor", "_name": "C"}, p3)
    prd.put({
        "_type": "PartOf",
        "_parent": "A1",
        "_child": "B"
    }, ProcessorsRelationPartOfObservation(p0, p2, oer))
    prd.put({
        "_type": "PartOf",
        "_parent": "A2",
        "_child": "B"
    }, ProcessorsRelationPartOfObservation(p1, p2, oer))
    prd.put({
        "_type": "PartOf",
        "_parent": "B",
        "_child": "C"
    }, ProcessorsRelationPartOfObservation(p2, p3, oer))
    return prd
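The dictionary built above can be queried with partial keys. A minimal usage sketch, relying only on the query semantics exercised by the test further down:

# Minimal usage sketch (query semantics as exercised by test_002 below)
prd = prepare_partial_key_dictionary()
all_part_of = prd.get({"_type": "PartOf"})                       # 3 relation observations
children_named_b = prd.get({"_type": "PartOf", "_child": "B"})   # A1->B and A2->B
below_a1 = prd.get({"_type": "PartOf", "_parent": "A1"})         # only A1->B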
Example #2
def deserialize_state(st: str, state_version: int = MODEL_VERSION):
    """
    Deserializes an object previously serialized using "serialize_state"

    It can receive also a "State" modified for the serialization to restore it

    :param state_version: version number of the internal models
    :param st:
    :return:
    """
    def deserialize_dataframe(t):
        # "t" is a pair: (DataFrame as JSON in "split" orientation, column dtypes as JSON)
        if t:
            dtypes = {col: np.dtype(dtype) for col, dtype in json.loads(t[1]).items()}
            df = pd.read_json(t[0], orient="split", dtype=dtypes)
            return df
        else:
            return pd.DataFrame()  # Return an empty pd.DataFrame

    print("  deserialize_state")
    if isinstance(st, bytes):
        st = blosc.decompress(st).decode("utf-8")
    if isinstance(st, str):
        # TODO: use state_version to convert a previous version to the latest one
        #  This means transforming the old json to the latest json
        if state_version == MODEL_VERSION:
            state = deserialize_to_object(st)
        else:
            raise Exception(
                f"The model version {state_version} is not supported. Current version is {MODEL_VERSION}."
            )
    else:
        raise Exception(
            f"Serialized state must be a string; it is currently of type {type(st)}"
        )

    # Iterate all namespaces
    for ns in state.list_namespaces():
        glb_idx = state.get("_glb_idx", ns)
        if isinstance(glb_idx, dict):
            glb_idx = PartialRetrievalDictionary().from_pickable(glb_idx)
            state.set("_glb_idx", glb_idx)
        glb_idx, p_sets, hh, datasets, mappings = get_case_study_registry_objects(
            state, ns)
        if isinstance(glb_idx, dict):
            print("glb_idx is DICT, after deserialization!!!")
        # TODO Deserialize DataFrames
        # In datasets
        for ds_name in datasets:
            lst = datasets[ds_name]
            ds = deserialize(lst[:-1])[0]
            ds.data = deserialize_dataframe(lst[-1])
            datasets[ds_name] = ds

    return state
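A hedged round-trip sketch: it assumes serialize_state(state) is the counterpart named in the docstring and that it produces the string or blosc-compressed bytes accepted here.

# Round-trip sketch (serialize_state's exact signature is an assumption)
serialized = serialize_state(state)       # str or blosc-compressed bytes
restored = deserialize_state(serialized)  # state_version defaults to MODEL_VERSION
for ns in restored.list_namespaces():
    glb_idx, p_sets, hh, datasets, mappings = get_case_study_registry_objects(restored, ns)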
Example #3
    def test_002_serialization_deserialization(self):
        prd = prepare_partial_key_dictionary()
        prd2 = prd.to_pickable()
        s = serialize_from_object(prd2)
        prd2 = deserialize_to_object(s)
        prd = PartialRetrievalDictionary().from_pickable(prd2)
        # Tests
        res = prd.get({"_type": "PartOf"})
        self.assertEqual(len(res), 3)
        res = prd.get({"_type": "PartOf", "_child": "B"})
        self.assertEqual(len(res), 2)
        res = prd.get({"_type": "PartOf", "_parent": "A1"})
        self.assertEqual(len(res), 1)
        res = prd.get({"_type": "PartOf", "_parent": "C"})
        self.assertEqual(len(res), 0)
        # An unknown key field ("_typer") also yields an empty result
        res = prd.get({"_typer": "PartOf", "_parent": "C"})
        self.assertEqual(len(res), 0)
Example #4
    def _process_row(self, fields: Dict[str, Any], subrow=None) -> None:
        """
        :param fields: parsed fields of the command row being processed
        :param subrow: optional sub-row identifier, appended to issue messages
        :return: None
        """
        # InterfaceType must exist
        try:
            self._get_factor_type_from_field(None, "interface")
        except CommandExecutionError as e:
            self._add_issue(IType.ERROR, str(e))

        # (LCIA) Indicator must exist
        indicator = self._glb_idx.get(
            Indicator.partial_key(fields["lcia_indicator"]))
        if len(indicator) == 1:
            pass
        elif len(indicator) == 0:
            self._add_issue(
                IType.ERROR,
                f"Indicator with name '{fields['lcia_indicator']}' not found" +
                subrow_issue_message(subrow))
            return
        else:
            self._add_issue(
                IType.WARNING,
                f"Indicator with name '{fields['lcia_indicator']}' found {len(indicator)} times"
                + subrow_issue_message(subrow))
            return

        # Store LCIA Methods as a new variable.
        # TODO Use it to prepare a pd.DataFrame previous to calculating Indicators (after solving). Use "to_pickable"
        lcia_methods = self._state.get("_lcia_methods")
        if not lcia_methods:
            lcia_methods = PartialRetrievalDictionary()
            self._state.set("_lcia_methods", lcia_methods)
        key = dict(m=fields["lcia_method"],
                   d=fields["lcia_indicator"],
                   h=fields["lcia_horizon"],
                   i=fields["interface"])
        lcia_methods.put(
            key, (fields["interface_unit"], fields["lcia_coefficient"]))
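The "_lcia_methods" registry filled above maps a key of (method, indicator, horizon, interface) to an (interface unit, coefficient) tuple. A sketch of how it could later be read back, assuming get() returns the stored values for a partial key as in the test above; the method and indicator names are purely illustrative:

# Illustrative read-back of "_lcia_methods" (method/indicator names are made up)
lcia_methods = state.get("_lcia_methods")  # "state" stands for the State behind self._state
if lcia_methods:
    # Partial key: all coefficients registered for one method and one indicator
    entries = lcia_methods.get(dict(m="SomeLCIAMethod", d="SomeIndicator"))
    for interface_unit, lcia_coefficient in entries:
        pass  # e.g. assemble the pd.DataFrame mentioned in the TODO above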
Example #5
def get_case_study_registry_objects(state, namespace=None):
    """
    Obtain the main entries of the state

    :param state: Input state (also modified by this function)
    :param namespace: State supports several namespaces; this selects which one to use. Default=None
    :return: Tuple: (global index, processor sets, hierarchies, datasets, mappings)
    """
    # Index of ALL objects
    glb_idx = state.get("_glb_idx", namespace)
    if not glb_idx:
        glb_idx = PartialRetrievalDictionary()
        state.set("_glb_idx", glb_idx, namespace)

    # ProcessorSet dict (dict of sets)
    p_sets = state.get("_processor_sets", namespace)
    if not p_sets:
        p_sets = create_dictionary()
        state.set("_processor_sets", p_sets, namespace)

    # Hierarchies Dict
    hh = state.get("_hierarchies", namespace)
    if not hh:
        hh = create_dictionary()
        state.set("_hierarchies", hh, namespace)
    # Datasets Dict
    datasets = state.get("_datasets", namespace)
    if not datasets:
        datasets = create_dictionary()
        state.set("_datasets", datasets, namespace)
    # Mappings Dict
    mappings = state.get("_mappings", namespace)
    if not mappings:
        mappings = create_dictionary()
        state.set("_mappings", mappings, namespace)

    return glb_idx, p_sets, hh, datasets, mappings
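A short sketch tying this back to Example #1: obtain the registry objects of a State and register a Processor in the global index. The bare State() construction is an assumption; the put() key convention comes from Example #1.

# Sketch: register a Processor in the global index of a State
state = State()  # assumption: a State can be created empty like this
glb_idx, p_sets, hh, datasets, mappings = get_case_study_registry_objects(state)
p = Processor("Farm")
glb_idx.put({"_type": "Processor", "_name": "Farm"}, p)
assert len(glb_idx.get({"_type": "Processor"})) == 1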