Example #1
def serialize_state(state: State):
    """
    Serialization prepared for a given organization of the state

    :return:
    """
    def serialize_dataframe(df):
        return df.to_json(orient="split")  # list(df.index.names), df.to_dict()

    print("  serialize_state IN")

    import copy
    import sys  # used below to raise the recursion limit temporarily
    import pandas as pd
    # "_datasets"
    ns_ds = {}
    # Save and nullify before deep copy
    for ns in state.list_namespaces():
        _, _, _, datasets, _ = get_case_study_registry_objects(state, ns)
        ns_ds[ns] = datasets
        state.set("_datasets", create_dictionary(), ns)  # Nullify datasets

    # !!! WARNING: serialization destroys the State it works on, so a DEEP COPY is made !!!
    tmp = sys.getrecursionlimit()
    sys.setrecursionlimit(10000)
    state2 = copy.deepcopy(state)
    sys.setrecursionlimit(tmp)

    # Iterate all namespaces
    for ns in state2.list_namespaces():
        glb_idx, p_sets, hh, _, mappings = get_case_study_registry_objects(
            state2, ns)
        if glb_idx:
            tmp = glb_idx.to_pickable()
            state2.set("_glb_idx", tmp, ns)
        datasets = ns_ds[ns]
        # TODO Serialize other DataFrames.
        # Process Datasets
        for ds_name in datasets:
            ds = datasets[ds_name]
            if isinstance(ds.data, pd.DataFrame):
                tmp = serialize_dataframe(ds.data)
            else:
                tmp = None
                # ds.data = None
            # DB serialize the datasets
            lst2 = serialize(ds.get_objects_list())
            lst2.append(tmp)  # Append the serialized DataFrame
            datasets[ds_name] = lst2
        state2.set("_datasets", datasets, ns)
    tmp = serialize_from_object(
        state2)  # <<<<<<<< SLOWEST !!!! (when debugging)
    print("  serialize_state length: " + str(len(tmp)) + " OUT")

    return tmp
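
A minimal round-trip sketch, with hypothetical variable names ("deserialize_state" is shown below in Example #12):

# "state" is assumed to be a populated State instance, e.g. isess.state after executing a file
s = serialize_state(state)
state2 = deserialize_state(s)  # restores an equivalent State from the string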
Example #2
    def test_025(self):
        file_path = os.path.dirname(os.path.abspath(
            __file__)) + "/../../nis-undisclosed-tests/utest1.xlsx"
        isess, issues = execute_file_return_issues(
            file_path, generator_type="spreadsheet")
        # Check State of things
        self.assertEqual(len(issues), 1)  # Just one issue
        glb_idx, p_sets, hh, datasets, mappings = get_case_study_registry_objects(
            isess.state)
        p = glb_idx.get(Processor.partial_key("Society"))
        self.assertEqual(len(p), 1)
        p = p[0]
        self.assertEqual(len(p.factors), 4)
        f = glb_idx.get(Factor.partial_key(p, None, name="Bioethanol"))
        self.assertEqual(len(f), 1)
        self.assertEqual(len(f[0].observations), 4)
        # TODO These Observations are not registered; uncomment if they are
        # obs = glb_idx.get(FactorQuantitativeObservation.partial_key(f[0]))
        # self.assertEqual(len(obs), 2)
        f = glb_idx.get(Factor.partial_key(p, None, name="BioethIn2"))
        self.assertEqual(len(f), 1)
        self.assertEqual(len(f[0].observations), 0)
        # TODO Check things!!!
        # self.assertEqual(len(p_sets), 3)
        # Close interactive session
        isess.close_db_session()
Example #3
    def execute(self, state: "State"):
        """
        Create the PedigreeMatrix object to which QQ observations may refer

        """
        some_error = False
        issues = []

        glb_idx, _, _, _, _ = get_case_study_registry_objects(state)

        # Prepare PedigreeMatrix object instance
        phases = self._content["phases"]
        codes = self._content["codes"]
        name = self._content["name"]

        pm = PedigreeMatrix(name)
        pm.set_phases(codes, phases)

        # Insert the PedigreeMatrix object into the state
        glb_idx.put(pm.key(), pm)
        # Round-trip through jsonpickle (serialization sanity check)
        import jsonpickle
        s = jsonpickle.encode(pm)
        pm2 = jsonpickle.decode(s)

        return issues, None
Example #4
    def execute(self, state: "State"):
        issues = []
        sheet_name = self._content["command_name"]
        # Obtain global variables in state
        glb_idx, p_sets, hh, datasets, mappings = get_case_study_registry_objects(
            state)

        for r, param in enumerate(self._content["items"]):
            name = param["name"]
            p = glb_idx.get(Parameter.partial_key(name))
            if len(p) > 0:
                issues.append(
                    Issue(itype=2,
                          description="The parameter '" + name +
                          "' has been declared previously. Skipped.",
                          location=IssueLocation(sheet_name=sheet_name,
                                                 row=r,
                                                 column=None)))
                continue
            p = Parameter(name)
            if "value" in param:
                p._default_value = p._current_value = param["value"]
            if "type" in param:
                p._type = param["type"]
            if "range" in param:
                p._range = param["range"]
            if "description" in param:
                p._description = param["description"]
            if "group" in param:
                p._group = param["group"]
            glb_idx.put(p.key(), p)
        return issues, None
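
A hypothetical parsed item for this command, inferred from the keys the loop reads above; only "name" is required, the remaining keys are optional, and every value here is made up:

param = {
    "name": "p1",                 # mandatory
    "value": "10",                # becomes both default and current value
    "type": "Number",             # assumed type label
    "range": "[0, 100]",          # assumed range syntax
    "description": "An illustrative parameter",
    "group": "g1",
}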
Example #5
    def execute(self, state: "State"):
        """
        Process each of the references, simply storing them as Reference objects
        """
        glb_idx, _, _, _, _ = get_case_study_registry_objects(state)

        issues = []

        # Receive a list of validated references
        # Store them as independent objects
        for ref in self._content["references"]:
            if "ref_id" not in ref:
                issues.append((3, "'ref_id' field not found: " + str(ref)))
            else:
                ref_id = ref["ref_id"]
                ref_type = ref["type"]
                existing = glb_idx.get(Reference.partial_key(ref_id, ref_type))
                if len(existing) == 1:
                    issues.append((2, "Overwriting reference '" + ref_id +
                                   "' of type '" + ref_type + "'"))
                elif len(existing) > 1:
                    issues.append(
                        (3, "The reference '" + ref_id + "' of type '" +
                         ref_type + "' is defined more than one time (" +
                         str(len(existing)) + ")"))

                reference = Reference(ref_id, ref_type, ref)
                glb_idx.put(reference.key(), reference)

        return issues, None
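
A hypothetical validated reference, inferred from the fields read above; "ref_id" and "type" are the only keys the command inspects, and the whole dict is stored inside the Reference:

ref = {
    "ref_id": "cita1",          # identifier (made-up value)
    "type": "bibliographic",    # assumed type label
    "title": "Some cited work"  # extra fields simply travel along inside the Reference
}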
Example #6
    def execute(self, state: "State"):
        """
        Process each of the references, simply storing them as Reference objects
        """
        glb_idx, p_sets, hh, datasets, mappings = get_case_study_registry_objects(
            state)
        name = self._content["command_name"]
        issues = []

        # Receive a list of validated references
        # Store them as objects, which can be referred to later
        for ref in self._content["items"]:
            r = ref["_row"]

            if "ref_id" not in ref:
                issues.append(
                    Issue(itype=3,
                          description="'ref_id' field not found: " + str(ref),
                          location=IssueLocation(sheet_name=name,
                                                 row=r,
                                                 column=None)))
                continue
            else:
                ref_id = ref["ref_id"]
                existing = glb_idx.get(self.ref_type.partial_key(ref_id))
                if len(existing) == 1:
                    issues.append(
                        Issue(itype=3,
                              description="Reference '" + ref_id +
                              "' of type '" + str(self.ref_type) +
                              "' is already defined. Not allowed",
                              location=IssueLocation(sheet_name=name,
                                                     row=r,
                                                     column=None)))
                    continue
                elif len(existing) > 1:  # This condition should not occur...
                    issues.append(
                        Issue(itype=3,
                              description="The reference '" + ref_id +
                              "' of type '" + str(self.ref_type) +
                              "' is defined more than one time (" +
                              str(len(existing)) + ")",
                              location=IssueLocation(sheet_name=name,
                                                     row=r,
                                                     column=None)))
                    continue

                # Create and store the Reference
                reference = self.ref_type(ref_id, ref)
                glb_idx.put(reference.key(), reference)

                # BibliographicReference and ProvenanceReference are also Observers
                if isinstance(reference, Observer):
                    glb_idx.put(Observer.key(reference), reference)

        return issues, None
Example #7
    def test_024_maddalena_dataset(self):
        file_path = os.path.dirname(os.path.abspath(
            __file__)) + "/z_input_files/v2/MAGIC_n_1_CC_Spain.xlsx"
        isess = execute_file(file_path, generator_type="spreadsheet")
        # Check State of things
        glb_idx, p_sets, hh, datasets, mappings = get_case_study_registry_objects(
            isess.state)
        # TODO Check things!!!
        # self.assertEqual(len(p_sets), 3)
        # Close interactive session
        isess.close_db_session()
Example #8
    def execute(self, state: State) -> IssuesOutputPairType:
        self._issues = []

        self._glb_idx, _, _, _, _ = get_case_study_registry_objects(state)

        for row in self._content["items"]:
            try:
                self._process_row(row)
            except CommandExecutionError as e:
                self._add_issue(IType.error(), str(e))

        return self._issues, None
Example #9
    def test_022_processor_scalings(self):
        file_path = os.path.dirname(os.path.abspath(
            __file__)) + "/z_input_files/v2/14_processor_scalings_example.xlsx"
        isess = execute_file(file_path, generator_type="spreadsheet")
        json_string = export_model_to_json(isess.state)
        print(json_string)
        # Check State of things
        glb_idx, p_sets, hh, datasets, mappings = get_case_study_registry_objects(
            isess.state)
        # TODO Check things!!!
        # self.assertEqual(len(p_sets), 3)
        # Close interactive session
        isess.close_db_session()
Example #10
    def test_023_solving(self):
        file_path = os.path.dirname(os.path.abspath(
            __file__)) + "/z_input_files/v2/06_upscale_almeria_NEW.xlsx"

        isess = execute_file(file_path, generator_type="spreadsheet")

        issues = prepare_and_solve_model(isess.state)

        # Check State of things
        glb_idx, p_sets, hh, datasets, mappings = get_case_study_registry_objects(
            isess.state)

        # Close interactive session
        isess.close_db_session()
Example #11
    def execute(self, state: "State"):
        any_error = False
        issues = []
        sheet_name = self._content["command_name"]
        # Obtain global variables in state
        glb_idx, p_sets, hh, datasets, mappings = get_case_study_registry_objects(
            state)

        scenarios = create_dictionary()
        solver_parameters = create_dictionary()

        for r, param in enumerate(self._content["items"]):
            parameter = param["parameter"]
            scenario = param["scenario_name"]
            p = glb_idx.get(Parameter.partial_key(parameter))
            if scenario:
                if len(p) == 0:
                    issues.append(
                        Issue(itype=3,
                              description="The parameter '" + parameter +
                              "' has not been declared previously.",
                              location=IssueLocation(sheet_name=sheet_name,
                                                     row=r,
                                                     column=None)))
                    any_error = True
                    continue
                p = p[0]
                name = p.name
            else:
                name = parameter
            value = param["parameter_value"]
            description = param.get(
                "description",
                None)  # For readability of the workbook. Not used for solving
            if scenario:
                if scenario in scenarios:
                    sp = scenarios[scenario]
                else:
                    sp = create_dictionary()
                    scenarios[scenario] = sp
                sp[name] = value
            else:
                solver_parameters[name] = value

        if not any_error:
            ps = ProblemStatement(solver_parameters, scenarios)
            glb_idx.put(ps.key(), ps)

        return issues, None
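
A hypothetical parsed item, inferred from the keys read in the loop above: a row either assigns a value to a previously declared Parameter inside a scenario or, with an empty "scenario_name", sets a solver parameter:

item = {
    "scenario_name": "s1",    # empty/None would make this a solver parameter instead
    "parameter": "p1",        # must have been declared by the Parameters command
    "parameter_value": "25",
    "description": "Optional; for workbook readability only",
}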
Example #12
def deserialize_state(st: str, state_version: int = MODEL_VERSION):
    """
    Deserializes an object previously serialized using "serialize_state"

    It can receive also a "State" modified for the serialization to restore it

    :param state_version: version number of the internal models
    :param st:
    :return:
    """
    def deserialize_dataframe(t):
        df = pd.read_json(t, orient="split")  # pd.DataFrame(t[1])
        # df.index.names = t[0]
        return df

    print("  deserialize_state")
    if isinstance(st, str):
        # TODO: use state_version to convert a previous version to the latest one
        #  This means transforming the old json to the latest json
        if state_version == MODEL_VERSION:
            state = deserialize_to_object(st)
        else:
            raise Exception(
                f"The model version {state_version} is not supported. Current version is {MODEL_VERSION}."
            )
    else:
        raise Exception(
            f"Serialized state must be a string: currently is of type {type(st)}"
        )

    # Iterate all namespaces
    for ns in state.list_namespaces():
        glb_idx = state.get("_glb_idx", ns)
        if isinstance(glb_idx, dict):
            glb_idx = PartialRetrievalDictionary().from_pickable(glb_idx)
            state.set("_glb_idx", glb_idx)
        glb_idx, p_sets, hh, datasets, mappings = get_case_study_registry_objects(
            state, ns)
        if isinstance(glb_idx, dict):
            print("glb_idx is DICT, after deserialization!!!")
        # TODO Deserialize DataFrames
        # In datasets
        for ds_name in datasets:
            lst = datasets[ds_name]
            ds = deserialize(lst[:-1])[0]
            ds.data = deserialize_dataframe(lst[-1])
            datasets[ds_name] = ds

    return state
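
Both helpers rely on pandas' "split" orientation, which stores columns, index and data separately; a small self-contained check of that round trip:

from io import StringIO

import pandas as pd

df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
s = df.to_json(orient="split")  # '{"columns":["a","b"],"index":[0,1],"data":[[1,3],[2,4]]}'
df2 = pd.read_json(StringIO(s), orient="split")
assert df.equals(df2)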
Example #13
    def execute(self, state: "State"):
        """
        Create a set of linear scale conversions, from factor type to factor type
        """
        some_error = False
        issues = []

        glb_idx, p_sets, hh, datasets, mappings = get_case_study_registry_objects(
            state)

        origin_factor_types = self._content["origin_factor_types"]
        destination_factor_types = self._content["destination_factor_types"]
        scales = self._content["scales"]

        # Check that we have valid factor type names
        fts = create_dictionary()
        for ft_name in origin_factor_types + destination_factor_types:
            # Obtain (maybe Create) the mentioned Factor Types
            p, ft, f = find_or_create_observable(
                glb_idx,
                ft_name,
                Observer.no_observer_specified,
                None,
                proc_external=None,
                proc_attributes=None,
                proc_location=None,
                fact_roegen_type=None,
                fact_attributes=None,
                fact_incoming=None,
                fact_external=None,
                fact_location=None)
            if not ft:
                some_error = True
                issues.append((3, "Could not obtain/create the Factor Type '" +
                               ft_name + "'"))
            fts[ft_name] = ft

        if some_error:
            return issues, None

        for sc in scales:
            origin = fts[sc["origin"]]
            destination = fts[sc["destination"]]
            scale = sc["scale"]
            FactorTypesRelationUnidirectionalLinearTransformObservation.create_and_append(
                origin, destination, scale, Observer.no_observer_specified)

        return issues, None
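
A hypothetical _content payload for this command, inferred from the keys read above (the factor type names and the scale value are made up):

content = {
    "origin_factor_types": ["Energy"],
    "destination_factor_types": ["Money"],
    "scales": [
        {"origin": "Energy", "destination": "Money", "scale": 0.05}
    ],
}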
Example #14
    def test_013_execute_file_v2_seven(self):
        """
        Parsing of Custom datasets

        :return:
        """
        file_path = os.path.dirname(os.path.abspath(
            __file__)) + "/z_input_files/v2/07_custom_datasets.xlsx"
        isess = execute_file(file_path, generator_type="spreadsheet")
        # Check State of things
        glb_idx, p_sets, hh, datasets, mappings = get_case_study_registry_objects(
            isess.state)
        # TODO Check things!!!
        # self.assertEqual(len(p_sets), 3)
        # Close interactive session
        isess.close_db_session()
Example #15
    def test_019_import_commands(self):
        """
        Testing import commands

        :return:
        """
        file_path = os.path.dirname(os.path.abspath(
            __file__)) + "/z_input_files/v2/12_import_commands_example.xlsx"
        isess = execute_file(file_path, generator_type="spreadsheet")
        # Check State of things
        glb_idx, p_sets, hh, datasets, mappings = get_case_study_registry_objects(
            isess.state)
        # TODO Check things!!!
        # self.assertEqual(len(p_sets), 3)
        # Close interactive session
        isess.close_db_session()
Example #16
    def test_009_execute_file_v2_three(self):
        """
        Soslaires, without parameters
        Compared with the two previous tests, it introduces the syntax of a Selector of many Processors

        :return:
        """
        file_path = os.path.dirname(os.path.abspath(
            __file__)) + "/z_input_files/v2/03_Soslaires_no_parameters.xlsx"
        isess = execute_file(file_path, generator_type="spreadsheet")
        # Check State of things
        glb_idx, p_sets, hh, datasets, mappings = get_case_study_registry_objects(
            isess.state)
        # TODO Check things!!!
        # self.assertEqual(len(p_sets), 3)
        # Close interactive session
        isess.close_db_session()
Example #17
    def test_001_execute_file_one(self):
        """
        A file containing QQs for three different sets of processors: Crop, Farm, AgrarianRegion
        (extracted from the Almeria case study)
        Test the number of processors read for each category, using processor sets and PartialRetrievalDictionary
        :return:
        """
        file_path = os.path.dirname(os.path.abspath(
            __file__)) + "/z_input_files/test_spreadsheet_1.xlsx"
        isess = execute_file(file_path, generator_type="spreadsheet")
        # Check State of things
        glb_idx, p_sets, hh, datasets, mappings = get_case_study_registry_objects(
            isess.state)
        # Three processor sets
        self.assertEqual(len(p_sets), 3)
        # Close interactive session
        isess.close_db_session()
Example #18
    def test_011_execute_file_v2_five(self):
        """
        Dataset processing using old commands

        :return:
        """
        file_path = os.path.dirname(
            os.path.abspath(__file__)
        ) + "/z_input_files/v2/05_caso_energia_eu_old_commands.xlsx"
        isess = execute_file(file_path, generator_type="spreadsheet")
        # Check State of things
        glb_idx, p_sets, hh, datasets, mappings = get_case_study_registry_objects(
            isess.state)
        # TODO Check things!!!
        # self.assertEqual(len(p_sets), 3)
        # Close interactive session
        isess.close_db_session()
Example #19
    def test_016_execute_file_v2_ten(self):
        """
        Upscaling using Instantiations. Translation of Louisa's file "Electricity state of the play 16.03.xlsm"

        :return:
        """
        file_path = os.path.dirname(
            os.path.abspath(__file__)
        ) + "/z_input_files/v2/10_electricity_state_of_the_play.xlsx"
        isess = execute_file(file_path, generator_type="spreadsheet")
        # Check State of things
        glb_idx, p_sets, hh, datasets, mappings = get_case_study_registry_objects(
            isess.state)
        # TODO Check things!!!
        # self.assertEqual(len(p_sets), 3)
        # Close interactive session
        isess.close_db_session()
Example #20
    def test_017_execute_file_v2_eleven(self):
        """
        Dataset queries using Mappings, then use of resulting Datasets to create Processors and Interfaces

        :return:
        """
        file_path = os.path.dirname(
            os.path.abspath(__file__)
        ) + "/z_input_files/v2/11_dataset_to_musiasem_maddalena.xlsx"
        isess = execute_file(file_path, generator_type="spreadsheet")
        # Check State of things
        glb_idx, p_sets, hh, datasets, mappings = get_case_study_registry_objects(
            isess.state)
        # TODO Check things!!!
        # self.assertEqual(len(p_sets), 3)
        # Close interactive session
        isess.close_db_session()
Example #21
    def execute(self, state: "State"):
        """

        :param state:
        :return:
        """
        # TODO At SOLVE time (not here, which is only in charge of creating structure, Indicator objects and Benchmark objects)

        # TODO For each indicator
        # TODO Parse it, and return the number of referenced Processors:
        # TODO If referenced Factors are all inside a Processor, it returns ONE -> IndicatorCategories.factors_expression
        # TODO If Factors are from different Processors, it returns > ONE -> IndicatorCategories.case_study
        # TODO If everything is FactorTypes, it returns ZERO -> IndicatorCategories.factor_types_expression
        # TODO This last case will serve at solve time to potentially generate an Indicator per Processor matching the FactorTypes mentioned in the indicator (first the FactorTypes have to be resolved)

        # TODO Only the factor_types_expression can be validated at this EXECUTION time, all FactorTypes MUST exist
        # TODO The other two types "factors_expression" and "case_study" can mention FactorTypes which are not currently expanded (they are expanded at reasoning/solving time)
        # Obtain global variables in state
        issues = []
        glb_idx, p_sets, hh, datasets, mappings = get_case_study_registry_objects(
            state)
        name = self._content["command_name"]

        # Process parsed information
        for line in self._content["items"]:
            r = line["_row"]
            i_name = line.get("indicator_name", None)
            i_local = line.get("local", None)
            i_formula = line.get("expression", None)
            i_benchmark = line.get("benchmark", None)
            i_description = line.get("description", None)
            b = None
            # if i_benchmark:
            #     b = Benchmark(i_benchmark)
            # else:
            #     b = None
            indi = Indicator(i_name,
                             i_formula,
                             from_indicator=None,
                             benchmark=b,
                             indicator_category=None,
                             description=i_description)
            glb_idx.put(indi.key(), indi)

        return issues, None
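
A hypothetical parsed line, inferred from the keys read in the loop above (all values are made up; benchmarks are currently ignored, as the commented block shows):

line = {
    "_row": 2,
    "indicator_name": "EnergyIntensity",
    "local": "Yes",                        # assumed flag value
    "expression": "Energy / MoneyFlow",    # made-up expression
    "benchmark": None,
    "description": "An illustrative indicator",
}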
Example #22
    def test_006_execute_file_five(self):
        """

        Parameters
        Simple Expression evaluation in QQs

        :return:
        """
        file_path = os.path.dirname(os.path.abspath(
            __file__)) + "/z_input_files/mapping_example_maddalena.xlsx"
        isess = execute_file(file_path, generator_type="spreadsheet")
        # Check State of things
        glb_idx, p_sets, hh, datasets, mappings = get_case_study_registry_objects(
            isess.state)
        # One processor set
        self.assertEqual(len(p_sets), 1)
        # Close interactive session
        isess.close_db_session()
Example #23
    def test_018_many_to_many_mappings(self):
        """
        Testing many to many mappings

        :return:
        """
        file_path = os.path.dirname(os.path.abspath(
            __file__)) + "/z_input_files/v2/09_many_to_many_mapping.xlsx"
        isess = execute_file(file_path, generator_type="spreadsheet")
        # Check State of things
        glb_idx, p_sets, hh, datasets, mappings = get_case_study_registry_objects(
            isess.state)
        # TODO Check things!!!
        # self.assertEqual(len(p_sets), 3)
        datasets["ds1"].data.to_csv(
            "/tmp/09_many_to_many_mapping_ds1_results.csv", index=False)
        # Close interactive session
        isess.close_db_session()
Example #24
    def test_007_execute_file_v2_one(self):
        """
        Two connected Processors
        Test parsing and execution of a file with basic commands, and only literals (very basic syntax)

        :return:
        """
        file_path = os.path.dirname(
            os.path.abspath(__file__)
        ) + "/z_input_files/v2/01_declare_two_connected_processors.xlsx"
        isess = execute_file(file_path, generator_type="spreadsheet")
        # Check State of things
        glb_idx, p_sets, hh, datasets, mappings = get_case_study_registry_objects(
            isess.state)
        # TODO Check things!!!
        # self.assertEqual(len(p_sets), 3)
        # Close interactive session
        isess.close_db_session()
Example #25
    def test_008_execute_file_v2_two(self):
        """
        Processors from Soslaires
        Test parsing and execution of a file with basic commands, and only literals (very basic syntax)

        :return:
        """
        file_path = os.path.dirname(
            os.path.abspath(__file__)
        ) + "/z_input_files/v2/02_declare_hierarchies_and_cloning_and_scaling.xlsx"
        isess = execute_file(file_path, generator_type="spreadsheet")
        # Check State of things
        glb_idx, p_sets, hh, datasets, mappings = get_case_study_registry_objects(
            isess.state)
        processor_dict = get_processor_names_to_processors_dictionary(glb_idx)
        for p in processor_dict:
            print(p)
        # TODO Check things!!!
        # self.assertEqual(len(p_sets), 3)
        # Close interactive session
        isess.close_db_session()
Example #26
    def test_014_execute_file_v2_eight(self):
        """
        Dataset queries using Mappings

        :return:
        """
        file_path = os.path.dirname(
            os.path.abspath(__file__)
        ) + "/z_input_files/v2/08_caso_energia_eu_new_commands_CASE_SENSITIVE.xlsx"
        isess = execute_file(file_path, generator_type="spreadsheet")
        # Check State of things
        glb_idx, p_sets, hh, datasets, mappings = get_case_study_registry_objects(
            isess.state)
        # TODO Check things!!!
        # self.assertEqual(len(p_sets), 3)
        datasets["ds1"].data.to_csv(
            "/tmp/08_caso_energia_eu_new_commands_ds1_results.csv")
        datasets["ds2"].data.to_csv(
            "/tmp/08_caso_energia_eu_new_commands_ds2_results.csv")
        # Close interactive session
        isess.close_db_session()
Example #27
    def test_012_execute_file_v2_six(self):
        """
        Almeria upscaling with new syntax
        * References
        * InterfaceTypes
        * BareProcessors
          * Dynamic attribute columns
        * Interfaces
        * Old Upscale (really efficient)

        :return:
        """
        file_path = os.path.dirname(os.path.abspath(
            __file__)) + "/z_input_files/v2/06_upscale_almeria.xlsx"
        isess = execute_file(file_path, generator_type="spreadsheet")
        # Check State of things
        glb_idx, p_sets, hh, datasets, mappings = get_case_study_registry_objects(
            isess.state)
        # TODO Check things!!!
        # self.assertEqual(len(p_sets), 3)
        # Close interactive session
        isess.close_db_session()
Example #28
    def execute(self, state: "State"):
        """
        Create a Hierarchy. The exact form of this hierarchy is different depending on the concept:
        * FactorTypes and Categories use Hierarchies, which are intrinsic.
            The hierarchy name is passed to the containing Hierarchy object
        * Processors use Part-Of Relations. In this case, the hierarchy name is lost
        Names of Processor and FactorTypes are built both in hierarchical and simple form
        The hierarchical is all the ancestors from root down to the current node, separated by "."
        The simple name is just the current node. If there is already another concept with that name, the simple name
        is not stored (STORE BOTH CONCEPTS by the same name, and design some tie breaking mechanism??)
        """
        glb_idx, p_sets, hh, datasets, mappings = get_case_study_registry_objects(
            state)
        # Extract base information
        name = self._content["name"]
        type_name = self._content["type"]
        first_level = self._content["h"]

        # Build the hierarchy
        build_hierarchy(name, type_name, registry=glb_idx, h=first_level)

        return None, None
Example #29
def call_solver(state: State, systems: Dict[str, Set[Processor]]):
    """
    Solve the problem
    """
    def obtain_problem_statement() -> ProblemStatement:
        """
        Obtain a ProblemStatement instance
        Obtain the solver parameters plus a list of scenarios
        :param state:
        :return:
        """
        ps_list: List[ProblemStatement] = glb_idx.get(
            ProblemStatement.partial_key())
        if len(ps_list) == 0:
            # No scenarios (dummy), and use the default solver
            scenarios = create_dictionary()
            scenarios["default"] = create_dictionary()
            return ProblemStatement(scenarios=scenarios)
        else:
            return ps_list[0]

    # Registry and the other objects also
    glb_idx, _, _, _, _ = get_case_study_registry_objects(state)

    global_parameters: List[Parameter] = glb_idx.get(Parameter.partial_key())

    problem_statement = obtain_problem_statement()

    solver_type = problem_statement.solving_parameters.get(
        "solver", "flow_graph").lower()

    issues = []
    if solver_type == "flow_graph":
        pass
        # issues = flow_graph_solver(global_parameters, problem_statement, systems, state)

    return issues
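
"obtain_problem_statement" falls back to a dummy default scenario when nothing was registered. A sketch of registering a ProblemStatement explicitly, reusing the constructor seen in Example #11 (the chosen values are illustrative):

glb_idx, _, _, _, _ = get_case_study_registry_objects(state)
solver_parameters = create_dictionary()
solver_parameters["solver"] = "flow_graph"  # read back via problem_statement.solving_parameters
scenarios = create_dictionary()
scenarios["default"] = create_dictionary()
ps = ProblemStatement(solver_parameters, scenarios)
glb_idx.put(ps.key(), ps)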
Example #30
    def execute(self, state: "State"):
        issues = []

        glb_idx, p_sets, hh, datasets, mappings = get_case_study_registry_objects(state)
        name = self._content["command_name"]

        # List of declared dataset names. Each dataset processed here must already be among them
        ds_names = [ds.code for ds in datasets.values()]

        # Process parsed information
        for r, line in enumerate(self._content["items"]):
            # A dataset
            dataset_name = line["name"]
            # Find it in the already available datasets. MUST EXIST
            for n in ds_names:
                if strcmp(dataset_name, n):
                    df = pd.read_json(StringIO(line["values"]), orient="split")
                    # Check columns
                    ds = datasets[n]
                    iss = prepare_dataframe_after_external_read(ds, df)
                    for issue in iss:
                        issues.append(
                            Issue(itype=3,
                                  description=issue,
                                  location=IssueLocation(sheet_name=name, row=-1, column=-1)))
                    # Everything ok? Store the dataframe!
                    if len(iss) == 0:
                        ds.data = df
                    break
            else:
                issues.append(
                    Issue(itype=3,
                          description="Metadata for the dataset '"+dataset_name+"' must be defined previously",
                          location=IssueLocation(sheet_name=name, row=-1, column=-1)))

        return issues, None
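
The "values" field of each line is expected to hold a DataFrame serialized with the same "split" orientation used in Examples #1 and #12; a hypothetical way to produce such a cell:

import pandas as pd

# Columns should match the metadata declared previously for the dataset
df = pd.DataFrame({"Country": ["ES", "IT"], "Value": [3.2, 2.9]})
values_cell = df.to_json(orient="split")  # what line["values"] would contain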