def get_scaled(scenarios, scenario_params, relations_scale, observations_by_time):
    # Obtain an i2i (interface-to-interface) Scales Graph
    graph = create_scales_graph(relations_scale)

    # Compute the scales for the different scenarios and time periods, and store the results
    # in another partial retrieval dictionary
    scale_beginning_interfaces = get_scale_beginning_interfaces(graph)

    scales_prd = PartialRetrievalDictionary()
    for scenario_idx, scenario_name in enumerate(scenarios):
        for time_period, observations in observations_by_time.items():
            # Filter the observations, and prepare the dictionary used to update the scaling
            beginning_values: Dict[Factor, Tuple[Any, FactorQuantitativeObservation]] = \
                {obs.factor: (value, obs)
                 for value, obs in observations
                 if obs.factor in scale_beginning_interfaces}

            # Evaluate expressions
            set_update_scales_graph(graph, scenario_params[scenario_name], beginning_values)

            # Write the resulting values to the PartialRetrievalDictionary
            for interface, data in graph.nodes(data=True):  # type: Factor, Dict
                key = dict(__i=interface, __t=time_period, __s=scenario_idx)
                scales_prd.put(key, (interface, data["value"]))  # ERROR: Observation not hashable

    return scales_prd
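
# A minimal, hypothetical usage sketch for get_scaled (not part of the original module). It
# assumes "scenarios", "scenario_params", "relations_scale" and "observations_by_time" were
# produced by the surrounding solver code, and shows how scaled values could be retrieved
# back from the returned dictionary with a partial key, as done elsewhere in this module.
def example_get_scaled_usage(scenarios, scenario_params, relations_scale, observations_by_time, an_interface):
    scales_prd = get_scaled(scenarios, scenario_params, relations_scale, observations_by_time)
    # All scaled values computed for "an_interface", across every scenario and time period
    return scales_prd.get(dict(__i=an_interface))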
def prepare_partial_key_dictionary():
    prd = PartialRetrievalDictionary()
    oer = Observer("tester")
    p0 = Processor("A1")
    p1 = Processor("A2")
    p2 = Processor("B")
    p3 = Processor("C")
    prd.put({"_type": "Processor", "_name": "A1"}, p0)
    prd.put({"_type": "Processor", "_name": "A2"}, p1)
    prd.put({"_type": "Processor", "_name": "B"}, p2)
    prd.put({"_type": "Processor", "_name": "C"}, p3)
    prd.put({"_type": "PartOf", "_parent": "A1", "_child": "B"},
            ProcessorsRelationPartOfObservation(p0, p2, oer))
    prd.put({"_type": "PartOf", "_parent": "A2", "_child": "B"},
            ProcessorsRelationPartOfObservation(p1, p2, oer))
    prd.put({"_type": "PartOf", "_parent": "B", "_child": "C"},
            ProcessorsRelationPartOfObservation(p2, p3, oer))
    return prd
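
# A short, hypothetical demonstration of partial-key retrieval against the dictionary built
# by prepare_partial_key_dictionary (the authoritative assertions live in
# test_002_serialization_deserialization below): querying with a subset of the key fields
# returns every object whose full key matches the given fields.
def example_partial_key_queries():
    prd = prepare_partial_key_dictionary()
    all_processors = prd.get({"_type": "Processor"})            # 4 objects: A1, A2, B, C
    parents_of_b = prd.get({"_type": "PartOf", "_child": "B"})  # 2 relations: A1->B and A2->B
    return all_processors, parents_of_b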
def deserialize_state(st: str, state_version: int = MODEL_VERSION):
    """
    Deserializes an object previously serialized using "serialize_state"

    It can also receive a "State" that was modified for serialization, and restores it

    :param state_version: version number of the internal models
    :param st: the serialized state, as a string
    :return: the deserialized State
    """
    def deserialize_dataframe(t):
        df = pd.read_json(t, orient="split")  # pd.DataFrame(t[1])
        # df.index.names = t[0]
        return df

    print(" deserialize_state")
    if isinstance(st, str):
        # TODO: use state_version to convert a previous version to the latest one.
        #  This means transforming the old JSON into the latest JSON.
        if state_version == MODEL_VERSION:
            state = deserialize_to_object(st)
        else:
            raise Exception(f"The model version {state_version} is not supported. "
                            f"Current version is {MODEL_VERSION}.")
    else:
        raise Exception(f"Serialized state must be a string: currently it is of type {type(st)}")

    # Iterate over all namespaces
    for ns in state.list_namespaces():
        glb_idx = state.get("_glb_idx", ns)
        if isinstance(glb_idx, dict):
            glb_idx = PartialRetrievalDictionary().from_pickable(glb_idx)
            state.set("_glb_idx", glb_idx)
        glb_idx, p_sets, hh, datasets, mappings = get_case_study_registry_objects(state, ns)
        if isinstance(glb_idx, dict):
            print("glb_idx is DICT, after deserialization!!!")
        # Deserialize the DataFrames stored in the datasets
        for ds_name in datasets:
            lst = datasets[ds_name]
            ds = deserialize(lst[:-1])[0]
            ds.data = deserialize_dataframe(lst[-1])
            datasets[ds_name] = ds

    return state
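
# Hypothetical round-trip sketch (it assumes the matching "serialize_state" mentioned in the
# docstring above is available in this module): serialize a State to a string, then rebuild
# it, including its PartialRetrievalDictionary indexes and dataset DataFrames.
def example_state_round_trip(state):
    st = serialize_state(state)       # JSON string at the current MODEL_VERSION
    restored = deserialize_state(st)  # raises if "st" is not a string or versions mismatch
    return restored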
def get_observations_OLD(prd: PartialRetrievalDictionary) \
        -> Tuple[PartialRetrievalDictionary, PartialRetrievalDictionary, Dict[str, int]]:
    """
    Process all QQ observations (intensive or extensive):
    * Store them in a compact way, by Time-period, by Interface, by Observer
    * Convert each value to float, or prepare an AST if it cannot be evaluated yet
    * Store as value the result plus the QQ observation (in a tuple)

    :param prd:
    :return: two PartialRetrievalDictionaries (absolute observations, and observations relative
             to the value of another interface) and the Time Periods (indexed)
    """
    observations_prd = PartialRetrievalDictionary()
    relative_observations_prd = PartialRetrievalDictionary()
    time_periods: Dict[str, int] = create_dictionary()  # Time periods and their associated index
    state = State()

    next_time_period_idx = 0
    for observation in find_quantitative_observations(prd, processor_instances_only=True):
        # Obtain the time period index
        time = observation.attributes["time"]
        if time not in time_periods:
            time_periods[time] = next_time_period_idx
            next_time_period_idx += 1

        # Elaborate Key: Interface, Time, Observer
        key = dict(__i=observation.factor, __t=time_periods[time], __o=observation.observer)

        value, ast, _, issues = evaluate_numeric_expression_with_parameters(observation.value, state)
        if not value:
            value = ast  # The expression could not be evaluated yet: keep the AST

        # Store Key: (Value, FactorQuantitativeObservation)
        if observation.is_relative:
            relative_observations_prd.put(key, (value, observation))
        else:
            observations_prd.put(key, (value, observation))

    return observations_prd, relative_observations_prd, time_periods
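
# Hypothetical usage sketch for get_observations_OLD: split the QQ observations of a registry
# into absolute and relative dictionaries, then look up the observations of one interface
# ("a_factor") in the first indexed time period. The partial-key lookup mirrors the key
# layout (__i, __t, __o) used when the observations were stored.
def example_get_observations(prd, a_factor):
    observations_prd, relative_observations_prd, time_periods = get_observations_OLD(prd)
    first_period_idx = min(time_periods.values()) if time_periods else 0
    return observations_prd.get(dict(__i=a_factor, __t=first_period_idx))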
def get_processor_names_to_processors_dictionary(state: PartialRetrievalDictionary):
    """
    Obtain a dictionary with all processor names (a processor may have multiple names)
    and the corresponding Processor object

    :param state:
    :return:
    """
    ps = state.get(Processor.partial_key())
    ps = set(ps)  # Avoid repeated Processor objects
    d = create_dictionary()
    for p in ps:
        for n in p.full_hierarchy_names(state):
            d[n] = p
    return d
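
# Hypothetical usage sketch: resolve a Processor by any of its (possibly multiple) full
# hierarchy names, using the name-to-object dictionary built above.
def example_lookup_processor_by_name(glb_idx, name):
    names_to_processors = get_processor_names_to_processors_dictionary(glb_idx)
    return names_to_processors.get(name)  # None if the name is not registered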
def test_002_serialization_deserialization(self):
    prd = prepare_partial_key_dictionary()
    prd2 = prd.to_pickable()
    s = serialize_from_object(prd2)
    prd2 = deserialize_to_object(s)
    prd = PartialRetrievalDictionary().from_pickable(prd2)
    # Tests
    res = prd.get({"_type": "PartOf"})
    self.assertEqual(len(res), 3)
    res = prd.get({"_type": "PartOf", "_child": "B"})
    self.assertEqual(len(res), 2)
    res = prd.get({"_type": "PartOf", "_parent": "A1"})
    self.assertEqual(len(res), 1)
    res = prd.get({"_type": "PartOf", "_parent": "C"})
    self.assertEqual(len(res), 0)
    res = prd.get({"_typer": "PartOf", "_parent": "C"})  # "_typer": an unknown key field also yields no results
    self.assertEqual(len(res), 0)
def get_case_study_registry_objects(state, namespace=None):
    """
    Obtain the main entries of the state

    :param state: Input state (also modified)
    :param namespace: State supports several namespaces. This parameter specifies which one to use. Default=None
    :return: Tuple: (global index, processor sets, hierarchies, datasets, mappings)
    """
    # Index of ALL objects
    glb_idx = state.get("_glb_idx", namespace)
    if not glb_idx:
        glb_idx = PartialRetrievalDictionary()
        state.set("_glb_idx", glb_idx, namespace)

    # ProcessorSet dict (a dict of sets)
    p_sets = state.get("_processor_sets", namespace)
    if not p_sets:
        p_sets = create_dictionary()
        state.set("_processor_sets", p_sets, namespace)

    # Hierarchies dict
    hh = state.get("_hierarchies", namespace)
    if not hh:
        hh = create_dictionary()
        state.set("_hierarchies", hh, namespace)

    # Datasets dict
    datasets = state.get("_datasets", namespace)
    if not datasets:
        datasets = create_dictionary()
        state.set("_datasets", datasets, namespace)

    # Mappings dict
    mappings = state.get("_mappings", namespace)
    if not mappings:
        mappings = create_dictionary()
        state.set("_mappings", mappings, namespace)

    return glb_idx, p_sets, hh, datasets, mappings
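
# Hypothetical usage sketch for get_case_study_registry_objects: the function is idempotent,
# creating missing entries on first call and returning the existing ones afterwards, so it
# can both initialize an empty State and fetch the registry entries later.
def example_registry_access(state):
    glb_idx, p_sets, hh, datasets, mappings = get_case_study_registry_objects(state)
    # The global index can then be populated as in prepare_partial_key_dictionary above, e.g.:
    # glb_idx.put({"_type": "Processor", "_name": "P"}, Processor("P"))
    return glb_idx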