Example #1
def prepare_partial_key_dictionary():
    prd = PartialRetrievalDictionary()
    oer = Observer("tester")
    p0 = Processor("A1")
    p1 = Processor("A2")
    p2 = Processor("B")
    p3 = Processor("C")
    prd.put({"_type": "Processor", "_name": "A1"}, p0)
    prd.put({"_type": "Processor", "_name": "A2"}, p1)
    prd.put({"_type": "Processor", "_name": "B"}, p2)
    prd.put({"_type": "Processor", "_name": "C"}, p3)
    prd.put({"_type": "PartOf", "_parent": "A1", "_child": "B"}, ProcessorsRelationPartOfObservation(p0, p2, oer))
    prd.put({"_type": "PartOf", "_parent": "A2", "_child": "B"}, ProcessorsRelationPartOfObservation(p1, p2, oer))
    prd.put({"_type": "PartOf", "_parent": "B", "_child": "C"}, ProcessorsRelationPartOfObservation(p2, p3, oer))
    return prd
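A hedged usage sketch for the fixture above (it assumes PartialRetrievalDictionary.get accepts the same partial key dicts as put, which the glb_idx.get(...partial_key()) calls in the later examples suggest):

# Sketch, not from the source: querying the fixture with increasingly partial keys
prd = prepare_partial_key_dictionary()

# Full key: the single Processor named "A1"
a1 = prd.get({"_type": "Processor", "_name": "A1"})

# Partial key: every PartOf relation whose parent is "A1"
a1_children = prd.get({"_type": "PartOf", "_parent": "A1"})

# Type only: all three PartOf relations in the fixture
all_part_of = prd.get({"_type": "PartOf"})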
Example #2
    def _clone_processor_as_child(self,
                                  processor: Processor,
                                  parent_processor: Processor,
                                  name: str = None) -> Processor:
        # Clone inherits some attributes from parent
        inherited_attributes = dict(
            subsystem_type=parent_processor.subsystem_type,
            processor_system=parent_processor.processor_system,
            instance_or_archetype=parent_processor.instance_or_archetype)

        processor_clone = processor.clone(
            state=self._glb_idx,
            name=name,
            inherited_attributes=inherited_attributes)

        # Create PART-OF relation
        relationship = ProcessorsRelationPartOfObservation.create_and_append(
            parent=parent_processor, child=processor_clone)
        self._glb_idx.put(relationship.key(), relationship)

        # Add cloned processor hierarchical names to global index
        for hierarchical_name in processor_clone.full_hierarchy_names(
                self._glb_idx):
            self._glb_idx.put(
                Processor.partial_key(name=hierarchical_name,
                                      ident=processor_clone.ident),
                processor_clone)

        return processor_clone
Example #3
def flow_graph_solver(global_parameters: List[Parameter],
                      problem_statement: ProblemStatement,
                      input_systems: Dict[str, Set[Processor]], state: State):
    """
    * First scales have to be solved
    * Second direct flows
    * Third conversions of flows

    Once flows have been found, Indicators have to be gathered.

    :param global_parameters: Parameters including the default value (if defined)
    :param problem_statement: ProblemStatement object, with scenarios (parameters changing the default)
                              and parameters for the solver
    :param input_systems: A dictionary of the different systems to be solved
    :param state: State with everything
    :return: Issue[]
    """
    class Edge(NamedTuple):
        src: Factor
        dst: Factor
        weight: Optional[str]

    def add_edges(edges: List[Edge]):
        for src, dst, weight in edges:
            src_name = get_interface_name(src, glb_idx)
            dst_name = get_interface_name(dst, glb_idx)
            if "Archetype" in [
                    src.processor.instance_or_archetype,
                    dst.processor.instance_or_archetype
            ]:
                print(
                    f"WARNING: excluding relation from '{src_name}' to '{dst_name}' because of Archetype processor"
                )
            else:
                relations.add_edge(src_name, dst_name, weight=weight)

    glb_idx, _, _, _, _ = get_case_study_registry_objects(state)

    # Get all interface observations. Also resolve expressions without parameters. Cannot resolve expressions
    # depending only on global parameters because some of them can be overridden by scenario parameters.
    time_observations_absolute, time_observations_relative = get_observations_by_time(
        glb_idx)

    if len(time_observations_absolute) == 0:
        raise Exception(
            "No absolute observations have been found. The solver has nothing to solve."
        )

    relations = nx.DiGraph()

    # Add Interfaces -Flow- relations (time independent)
    add_edges([
        Edge(r.source_factor, r.target_factor, r.weight) for r in glb_idx.get(
            FactorsRelationDirectedFlowObservation.partial_key())
    ])

    # Add Processors -Scale- relations (time independent)
    add_edges([
        Edge(r.origin, r.destination, r.quantity)
        for r in glb_idx.get(FactorsRelationScaleObservation.partial_key())
    ])

    # TODO Expand flow graph with it2it transforms
    # relations_scale_it2it = glb_idx.get(FactorTypesRelationUnidirectionalLinearTransformObservation.partial_key())

    # First pass to resolve weight expressions: only expressions without parameters can be solved
    for _, _, data in relations.edges(data=True):
        expression = data["weight"]
        if expression:
            value, ast, _, _ = evaluate_numeric_expression_with_parameters(
                expression, state)
            data["weight"] = ifnull(value, ast)

    for scenario_idx, (scenario_name, scenario_params) in enumerate(
            problem_statement.scenarios.items()):

        print(f"********************* SCENARIO: {scenario_name}")

        scenario_state = State()
        scenario_combined_params = evaluate_parameters_for_scenario(
            global_parameters, scenario_params)
        scenario_state.update(scenario_combined_params)

        for time_period, observations in time_observations_absolute.items():

            print(f"********************* TIME PERIOD: {time_period}")

            # Final values are taken from "observations" that need to be computed
            graph_params = {}

            # Second and last pass to resolve observation expressions with parameters
            for expression, obs in observations:
                interface_name = get_interface_name(obs.factor, glb_idx)
                if interface_name not in relations.nodes:
                    print(
                        f"WARNING: observation at interface '{interface_name}' is not taken into account."
                    )
                else:
                    value, ast, _, issues = evaluate_numeric_expression_with_parameters(
                        expression, scenario_state)
                    if value is None:
                        raise Exception(
                            f"Cannot evaluate expression '{expression}' for observation at "
                            f"interface '{interface_name}'. Issues: {', '.join(issues)}"
                        )
                    graph_params[interface_name] = value

            assert (graph_params is not None)

            # Add Processors internal -RelativeTo- relations (time dependent)
            # Transform relative observations into graph edges
            for expression, obs in time_observations_relative[time_period]:
                relations.add_edge(get_interface_name(obs.relative_factor,
                                                      glb_idx),
                                   get_interface_name(obs.factor, glb_idx),
                                   weight=expression)

            # Second and last pass to resolve weight expressions: expressions with parameters can be solved
            for u, v, data in relations.edges(data=True):
                expression = data["weight"]
                if expression:
                    value, ast, _, issues = evaluate_numeric_expression_with_parameters(
                        expression, scenario_state)
                    if value is None:
                        raise Exception(
                            f"Cannot evaluate expression '{expression}' for weight "
                            f"from interface '{u}' to interface '{v}'. Issues: {', '.join(issues)}"
                        )
                    data["weight"] = value

            # ----------------------------------------------------

            if time_period == '2008':
                for component in nx.weakly_connected_components(relations):
                    nx.draw_kamada_kawai(relations.subgraph(component),
                                         with_labels=True)
                    plt.show()

            flow_graph = FlowGraph(relations)
            comp_graph, issues = flow_graph.get_computation_graph()

            for issue in issues:
                print(issue)

            print(f"****** NODES: {comp_graph.nodes}")

            # ----------------------------------------------------

            # Obtain nodes without a value
            compute_nodes = [
                n for n in comp_graph.nodes if graph_params.get(n) is None
            ]

            # Compute the missing information with the computation graph
            if len(compute_nodes) == 0:
                print("All nodes have a value. Nothing to solve.")
                return []

            print(f"****** UNKNOWN NODES: {compute_nodes}")
            print(f"****** PARAMS: {graph_params}")

            conflicts = comp_graph.compute_param_conflicts(
                set(graph_params.keys()))

            for s, (param, values) in enumerate(conflicts.items()):
                print(f"Conflict {s + 1}: {param} -> {values}")

            combinations = ComputationGraph.compute_param_combinations(
                conflicts)

            for s, combination in enumerate(combinations):
                print(f"Combination {s}: {combination}")

                filtered_params = {
                    k: v
                    for k, v in graph_params.items() if k in combination
                }
                results, _ = comp_graph.compute_values(compute_nodes,
                                                       filtered_params)

                results_with_values = {k: v for k, v in results.items() if v}
                print(f'  results_with_values={results_with_values}')

                # TODO: work with "part_of_graph"
                #  - Params: graph_params + results
                #  - Compute conflicts, combinations
                #  - For each combination "compute_values"

        # TODO INDICATORS

    # ----------------------------------------------------
    # ACCOUNTING PER SYSTEM

    for system in input_systems:

        # Handle Processors -PartOf- relations
        proc_hierarchy = nx.DiGraph()
        for relation in glb_idx.get(
                ProcessorsRelationPartOfObservation.partial_key(
                )):  # type: ProcessorsRelationPartOfObservation
            if relation.parent_processor.instance_or_archetype == "Instance":
                proc_hierarchy.add_edge(
                    get_processor_name(relation.child_processor, glb_idx),
                    get_processor_name(relation.parent_processor, glb_idx))

        part_of_graph = ComputationGraph()

        # for relation in system_flows[system]:  # type: FactorsRelationDirectedFlowObservation
        #
        #     # We create another graph only with interfaces in processors with parents
        #     for interface in [relation.source_factor, relation.target_factor]:
        #
        #         processor_name = get_processor_name(interface.processor, glb_idx)
        #         interface_full_name = processor_name+":"+interface.name
        #
        #         # If "processor" is in the "PartOf" hierarchy AND the "processor:interface" is not being handled yet
        #         if processor_name in proc_hierarchy and interface_full_name not in part_of_graph.nodes:
        #             # Insert into the Computation Graph a copy of the "PartOf" hierarchy of processors
        #             # for the specific interface
        #             new_edges = [(u+":"+interface.name, v+":"+interface.name)
        #                          for u, v in weakly_connected_subgraph(proc_hierarchy, processor_name).edges]
        #             part_of_graph.add_edges(new_edges, 1.0, None)

        # for component in nx.weakly_connected_components(part_of_graph.graph):
        #     nx.draw_kamada_kawai(part_of_graph.graph.subgraph(component), with_labels=True)
        #     plt.show()

    return []
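The solver resolves weight expressions in two passes: parameter-free expressions before the scenario loop, parameterized ones inside it once scenario parameters are known. A minimal stand-alone sketch of that pattern follows; evaluate() is a hypothetical stand-in for evaluate_numeric_expression_with_parameters, not the project's implementation:

import networkx as nx

def evaluate(expression, params):
    # Toy stand-in (sketch only, unsafe eval): returns (value, None) on success,
    # or (None, expression) when the expression needs parameters not yet known
    try:
        return eval(expression, {}, dict(params)), None
    except NameError:
        return None, expression

relations = nx.DiGraph()
relations.add_edge("P1:I1", "P2:I1", weight="2 * 3")    # parameter-free
relations.add_edge("P2:I1", "P3:I1", weight="k * 0.5")  # needs parameter "k"

# First pass: only parameter-free weights resolve
for _, _, data in relations.edges(data=True):
    value, pending = evaluate(data["weight"], {})
    data["weight"] = value if value is not None else pending

# Second pass, per scenario: parameterized weights resolve as well
for _, _, data in relations.edges(data=True):
    if isinstance(data["weight"], str):
        value, _ = evaluate(data["weight"], {"k": 4.0})
        data["weight"] = value

assert relations["P1:I1"]["P2:I1"]["weight"] == 6
assert relations["P2:I1"]["P3:I1"]["weight"] == 2.0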
Example #4
def flow_graph_solver(global_parameters: List[Parameter],
                      problem_statement: ProblemStatement,
                      input_systems: Dict[str, Set[Processor]], state: State):
    """
    * First scales have to be solved
    * Second direct flows
    * Third conversions of flows

    Once flows have been found, Indicators have to be gathered.

    :param global_parameters: Parameters including the default value (if defined)
    :param problem_statement: ProblemStatement object, with scenarios (parameters changing the default)
                              and parameters for the solver
    :param input_systems: A dictionary of the different systems to be solved
    :param state: State with everything
    :return: Issue[]
    """

    glb_idx, _, _, _, _ = get_case_study_registry_objects(state)

    # Initialize dictionaries
    system_flows: Dict[str,
                       Set[FactorsRelationDirectedFlowObservation]] = dict()
    system_scales: Dict[str, Set[FactorsRelationScaleObservation]] = dict()
    system_processor_hierarchies: Dict[str, nx.DiGraph] = dict()
    for s in input_systems:
        system_flows[s] = set()
        system_scales[s] = set()
        system_processor_hierarchies[s] = nx.DiGraph()

    # Handle Interface Types -Scale- relations
    relations_scale_it2it = glb_idx.get(
        FactorTypesRelationUnidirectionalLinearTransformObservation.
        partial_key())

    # Handle Interfaces -Flow- relations
    relations_flow = glb_idx.get(
        FactorsRelationDirectedFlowObservation.partial_key())

    for relation in relations_flow:  # type: FactorsRelationDirectedFlowObservation
        system_flows[relation.source_factor.processor.processor_system].add(
            relation)
        system_flows[relation.target_factor.processor.processor_system].add(
            relation)

    relations_scale = glb_idx.get(
        FactorsRelationScaleObservation.partial_key())

    for relation in relations_scale:  # type: FactorsRelationScaleObservation
        system_scales[relation.origin.processor.processor_system].add(relation)
        system_scales[relation.destination.processor.processor_system].add(
            relation)

    # Handle Processors -PartOf- relations
    relations_part_of = glb_idx.get(
        ProcessorsRelationPartOfObservation.partial_key())

    for relation in relations_part_of:  # type: ProcessorsRelationPartOfObservation
        if relation.parent_processor.instance_or_archetype.lower(
        ) == "instance":
            graph = system_processor_hierarchies[
                relation.parent_processor.processor_system]

            graph.add_edge(
                get_processor_name(relation.child_processor, glb_idx),
                get_processor_name(relation.parent_processor, glb_idx))

    # Get all interface observations. Also resolve expressions without parameters. Cannot resolve expressions
    # depending only on global parameters because some of them can be overridden by scenario parameters.
    observations_by_time = get_observations_by_time(glb_idx)

    if len(observations_by_time) == 0:
        raise Exception(
            "No observations have been found. The solver has nothing to solve."
        )

    # Split observations into relative and not relative
    observations_by_time_norelative, observations_by_time_relative = \
        split_observations_by_relativeness(observations_by_time)

    # Combine scenario parameters with the global parameters
    scenario_parameters: Dict[str, Dict[str, str]] = \
        {scenario_name: evaluate_parameters_for_scenario(global_parameters, scenario_params)
         for scenario_name, scenario_params in problem_statement.scenarios.items()}

    # SCALES --------------------------

    # Obtain the scale VALUES
    # scales_prd = get_scaled(scenarios=problem_statement.scenarios,
    #                         scenario_params=scenario_parameters,
    #                         relations_scale=glb_idx.get(FactorsRelationScaleObservation.partial_key()),
    #                         observations_by_time=observations_by_time_norelative)

    # FLOWS --------------------------
    for system in input_systems:
        # From Factors IN the context (LOCAL, ENVIRONMENT or OUTSIDE)
        # obtain a basic graph. Signal each Factor as LOCAL or EXTERNAL, and SOCIETY or ENVIRONMENT
        # basic_graph = prepare_interfaces_graph(systems[s][Factor])

        print(f"********************* SYSTEM: {system}")

        # Obtain a flow graph
        flow_graph = FlowGraph()
        part_of_graph = ComputationGraph()

        for relation in system_flows[
                system]:  # type: FactorsRelationDirectedFlowObservation
            flow_graph.add_edge(get_interface_name(relation.source_factor,
                                                   glb_idx),
                                get_interface_name(relation.target_factor,
                                                   glb_idx),
                                weight=relation.weight,
                                reverse_weight=None)

            assert (relation.source_factor.name == relation.target_factor.name)

            # We create another graph only with interfaces in processors with parents
            proc_hierarchy = system_processor_hierarchies[system]

            for interface in [relation.source_factor, relation.target_factor]:

                processor_name = get_processor_name(interface.processor,
                                                    glb_idx)
                interface_full_name = processor_name + ":" + interface.name

                # If "processor" is in the "PartOf" hierarchy AND the "processor:interface" is not being handled yet
                if processor_name in proc_hierarchy and interface_full_name not in part_of_graph.nodes:
                    # Insert into the Computation Graph a copy of the "PartOf" hierarchy of processors
                    # for the specific interface
                    new_edges = [(u + ":" + interface.name,
                                  v + ":" + interface.name)
                                 for u, v in weakly_connected_subgraph(
                                     proc_hierarchy, processor_name).edges]
                    part_of_graph.add_edges(new_edges, 1.0, None)

        comp_graph, issues = flow_graph.get_computation_graph()

        for relation in system_scales[
                system]:  # type: FactorsRelationScaleObservation
            comp_graph.add_edge(get_interface_name(relation.origin, glb_idx),
                                get_interface_name(relation.destination,
                                                   glb_idx),
                                weight=relation.quantity,
                                reverse_weight=None)

        for issue in issues:
            print(issue)

        print(f"****** NODES: {comp_graph.nodes}")

        # for component in nx.weakly_connected_components(part_of_graph.graph):
        #     nx.draw_kamada_kawai(part_of_graph.graph.subgraph(component), with_labels=True)
        #     plt.show()

        # TODO Expand flow graph with it2it transforms

        # Split flow graphs
        for scenario_idx, (scenario_name, scenario) in enumerate(
                problem_statement.scenarios.items()):

            print(f"********************* SCENARIO: {scenario_name}")

            scenario_state = State()
            scenario_state.update(scenario_parameters[scenario_name])

            for time_period, observations in observations_by_time_norelative.items(
            ):

                print(f"********************* TIME PERIOD: {time_period}")

                # {fact: val for fact, val in scales_prd.get(dict(__t=time_period, __s=scenario_idx))}
                scales = {}

                # Final values are taken from "scales" or from "observations" that need to be computed
                graph_params = {}
                for expression, obs in observations:
                    interface_name = get_interface_name(obs.factor, glb_idx)
                    if interface_name not in comp_graph.nodes:
                        print(
                            f"WARNING: observation at interface '{interface_name}' is not taken into account."
                        )
                    else:
                        if scales.get(obs.factor):
                            graph_params[interface_name] = scales[obs.factor]
                        else:
                            value, ast, _, issues = evaluate_numeric_expression_with_parameters(
                                expression, scenario_state)
                            if value is None:
                                raise Exception(
                                    f"Cannot evaluate expression '{expression}' for observation at interface '{interface_name}'"
                                )

                            graph_params[interface_name] = value

                # ----------------------------------------------------

                compute_nodes = [
                    n for n in comp_graph.nodes if graph_params.get(n) is None
                ]

                # Compute the missing information with the computation graph
                if len(compute_nodes) > 0:

                    print(f"****** UNKNOWN NODES: {compute_nodes}")
                    print(f"****** PARAMS: {graph_params}")

                    conflicts = comp_graph.compute_param_conflicts(
                        set(graph_params.keys()))

                    for s, (param, values) in enumerate(conflicts.items()):
                        print(f"Conflict {s + 1}: {param} -> {values}")

                    combinations = ComputationGraph.compute_param_combinations(
                        conflicts)

                    for s, combination in enumerate(combinations):
                        print(f"Combination {s}: {combination}")

                        filtered_params = {
                            k: v
                            for k, v in graph_params.items()
                            if k in combination
                        }
                        results, _ = comp_graph.compute_values(
                            compute_nodes, filtered_params)

                        results_with_values = {
                            k: v
                            for k, v in results.items() if v
                        }
                        print(f'  results_with_values={results_with_values}')

                        # TODO: work with "part_of_graph"
                        #  - Params: graph_params + results
                        #  - Compute conflicts, combinations
                        #  - For each combination "compute_values"
                else:
                    print(
                        "There are no nodes with unknown values. Nothing to solve."
                    )

                # TODO Overwrite "obs" with "scales" results
                # TODO Put observations into the flow-graph

                # TODO Put processors into scale (intensive to extensive conversion)
                # scale_unit_processors(flow_graph, params, relative_observations_prd)

                # for sub_fg in nx.weakly_connected_component_subgraphs(flow_graph):
                # TODO Elaborate information flow graph
                #      Cycles allowed?
                # ifg = get_information_flow_graph(sub_fg)
                # TODO Solve information flow graph. From all possible combinations:
                #  bottom-up if top-down USE
                #  bottom-up if top-down DO NOT USE
                #  top-down  if bottom-up USE
                #  top-down  if bottom-up DO NOT USE
                # solve_flow_graph(sub_fg, ifg)  # Each value: Interface, Scenario, Time, Given/Computed -> VALUE (or UNDEFINED)
                # TODO Put results back

        # TODO INDICATORS --- (INSIDE FLOWS)

    return []
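The per-system bookkeeping at the top of this example (one set of flow relations per system, with cross-system flows registered on both sides) is a plain partitioning pattern. A self-contained sketch, with hypothetical (source_system, target_system, relation_id) triples standing in for FactorsRelationDirectedFlowObservation objects:

from collections import defaultdict

flows = [("S1", "S1", "f1"), ("S1", "S2", "f2"), ("S2", "S2", "f3")]

system_flows = defaultdict(set)
for source_system, target_system, relation_id in flows:
    # Each relation is visible from both its source and its target system,
    # mirroring the two add() calls on system_flows in the solver above
    system_flows[source_system].add(relation_id)
    system_flows[target_system].add(relation_id)

assert system_flows["S1"] == {"f1", "f2"}
assert system_flows["S2"] == {"f2", "f3"}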
Example #5
    def execute(self, state: "State"):
        """
        For each parent processor clone all the child processors.
        The cloning process may pass some factor observation, that may result in
        """
        some_error = False
        issues = []

        parent_processor_type = self._content["parent_processor_type"]
        child_processor_type = self._content["child_processor_type"]
        scaled_factor = self._content["scaled_factor"]
        source = self._content["source"]
        # column_headers = self._content["column_headers"]
        # row_headers = self._content["row_headers"]
        scales = self._content["scales"]

        # Find processor sets, for parent and child
        glb_idx, p_sets, hh, datasets, mappings = get_case_study_registry_objects(state)
        if parent_processor_type not in p_sets:
            some_error = True
            issues.append((3, "The processor type '"+parent_processor_type +
                           "' (appointed for parent) has not been found in the commands executed so far"))

        if child_processor_type not in p_sets:
            some_error = True
            issues.append((3, "The processor type '"+child_processor_type +
                           "' (appointed for child) has not been found in the commands executed so far"))

        if some_error:
            return issues, None

        # CREATE the Observer of the Upscaling
        oer = glb_idx.get(Observer.partial_key(source))
        if not oer:
            oer = Observer(source)
            glb_idx.put(oer.key(), oer)
        else:
            oer = oer[0]

        # Processor Sets have associated attributes, and each of them has a code list
        parent = p_sets[parent_processor_type]  # type: ProcessorsSet
        child = p_sets[child_processor_type]  # type: ProcessorsSet

        # Form code lists from the command specification
        code_lists = None
        for sc_dict in scales:
            codes = sc_dict["codes"]
            if not code_lists:
                code_lists = [set() for _ in codes]

            for i, c in enumerate(codes):
                code_lists[i].add(c)

        # Match existing code lists (from Processor attributes) with the ones gathered in the specification of
        # the two (parent and child) processors sets.
        # Form lists of attributes of processors used in the code lists
        parent_attrs = []
        child_attrs = []
        matched = []
        for i, cl in enumerate(code_lists):
            found = False
            for attr, attr_values in parent.attributes.items():
                if set(attr_values).issuperset(cl):
                    parent_attrs.append((attr, i))  # (Attribute, code list index)
                    found = True
                    break
            for attr, attr_values in child.attributes.items():
                if set(attr_values).issuperset(cl):
                    child_attrs.append((attr, i))  # (Attribute, code list index)
                    found = True
                    break
            matched.append(found)
        for i, found in enumerate(matched):
            if not found:
                cl = code_lists[i]
                # TODO Try cl as a list of names of parent or child processors
                if not found:
                    issues.append((2, "The code list: " + ", ".join(cl) + " is not contained in the attributes of the parent processors set '" + parent_processor_type + "' nor in the attributes of the child processors set '" + child_processor_type + "'"))

        # Execute the upscale for each
        cached_processors = {}
        for sc_dict in scales:
            try:
                non_zero_weight = math.fabs(float(sc_dict["weight"])) > 1e-6
            except (KeyError, TypeError, ValueError):
                non_zero_weight = True
            if not non_zero_weight:
                continue

            codes = sc_dict["codes"]
            # Find parent processor
            parent_dict = {attr: codes[i] for attr, i in parent_attrs}
            d2s = str(parent_dict)
            if d2s in cached_processors:
                parent = cached_processors[d2s]
                if not parent:
                    issues.append((3, "Either the tuple (" + d2s + ") did not match any Processor or matched more than one."))
            else:
                parent_dict.update(Processor.partial_key())

                # Obtain Processor matching the attributes <<<<<<<<<<
                # Query the PartialRetrievalDictionary by attributes
                parents = glb_idx.get(parent_dict)

                if len(parents) > 1:
                    issues.append((3, "The tuple ("+str(parent_dict)+") matches "+str(len(parents))+" Processors: "+(", ".join([p.name for p in parents]))))
                    parent = None
                elif len(parents) == 0:
                    issues.append((3, "The tuple (" + str(parent_dict) + ") did not match any Processor"))
                    parent = None
                else:
                    parent = parents[0]

                cached_processors[d2s] = parent

            # Find child processor
            child_dict = {attr: codes[i] for attr, i in child_attrs}
            d2s = str(child_dict)
            if d2s in cached_processors:
                child = cached_processors[d2s]
                if not child:
                    issues.append((3, "Either the tuple (" + d2s + ") did not match any Processor or matched more than one."))
            else:
                child_dict.update(Processor.partial_key())

                # Obtain Processors matching the attributes
                # Query the PartialRetrievalDictionary by attributes
                children = glb_idx.get(child_dict)

                if len(children) > 1:
                    issues.append((3, "The tuple ("+str(child_dict)+") matches "+str(len(parents))+" Processors: "+(", ".join([p.name for p in children]))))
                    child = None
                elif len(children) == 0:
                    issues.append((3, "The tuple (" + str(child_dict) + ") did not match any Processor"))
                    child = None
                else:
                    child = children[0]  # type: Processor

                cached_processors[d2s] = child

            # Clone child processor (and its descendants) and add an upscale relation between "parent" and the clone
            if parent and child:
                if non_zero_weight:
                    # Clone the child processor
                    # TODO
                    cloned_child = child.clone(state=glb_idx)
                    glb_idx.put(cloned_child.key(), cloned_child)

                    # Create the new Relation Observations
                    # - Part-of Relation
                    o1 = ProcessorsRelationPartOfObservation.create_and_append(parent, cloned_child, oer)  # Part-of
                    glb_idx.put(o1.key(), o1)
                    # - Upscale Relation
                    quantity = str(sc_dict["weight"])
                    if True:  # Toggle: the else branch below would register an Upscale relation instead of a Scale relation
                        # Find Interface named "scaled_factor"
                        for f in parent.factors:
                            if strcmp(f.name, scaled_factor):
                                origin = f
                                break
                        else:
                            origin = None
                        for f in cloned_child.factors:
                            if strcmp(f.name, scaled_factor):
                                destination = f
                                break
                        else:
                            destination = None

                        if origin and destination:
                            o3 = FactorsRelationScaleObservation.create_and_append(origin, destination,
                                                                                   observer=None,
                                                                                   quantity=quantity)
                            glb_idx.put(o3.key(), o3)
                        else:
                            raise Exception("Could not find Interfaces to define a Scale relation. Processors: " +
                                            parent.name+", "+cloned_child.name+"; Interface name: "+scaled_factor)
                    else:
                        o3 = ProcessorsRelationUpscaleObservation.create_and_append(parent, cloned_child,
                                                                                    observer=None,
                                                                                    factor_name=scaled_factor,
                                                                                    quantity=quantity)
                        glb_idx.put(o3.key(), o3)
            else:
                # TODO
                parent_dict = str({attr: codes[i] for attr, i in parent_attrs})
                child_dict = str({attr: codes[i] for attr, i in child_attrs})
                if not parent and child:
                    issues.append((2, "Could not find parent Processor matching attributes: "+parent_dict))
                elif not child and parent:
                    issues.append((2, "Could not find child Processor matching attributes: "+child_dict))
                else:
                    issues.append((2, "Could not find parent Processor matching attributes: "+parent_dict+", nor child Processor matching attributes: " + child_dict))

        return issues, None
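The code-list matching step above reduces to set containment between each code list and the attribute value sets of a processors set. A minimal stand-alone sketch with hypothetical attribute data:

# Hypothetical attribute -> code values mapping, as in parent.attributes
parent_attributes = {"region": {"North", "South"}, "sector": {"Agro", "Energy"}}

code_lists = [{"North"}, {"Agro", "Mining"}]

matches = []
for i, cl in enumerate(code_lists):
    for attr, attr_values in parent_attributes.items():
        if set(attr_values).issuperset(cl):
            matches.append((attr, i))  # (attribute, code list index)
            break
    else:
        matches.append((None, i))      # unmatched: reported as an issue above

assert matches == [("region", 0), (None, 1)]  # "Mining" is not a known code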
Example #6
        def process_line(item):
            fields_value = {
                k: item.get(k, v.default_value)
                for k, v in fields.items()
            }

            # Check if mandatory fields with no value exist
            for field in [
                    k for k, v in fields.items()
                    if v.mandatory and not fields_value[k]
            ]:
                issues.append(
                    create_issue(
                        3, f"Mandatory field '{field}' is empty. Skipped."))
                return

            # Transform text of "attributes" into a dictionary
            field_val = fields_value.get("attributes", None)
            if field_val:
                try:
                    fields_value[
                        "attributes"] = dictionary_from_key_value_list(
                            field_val, glb_idx)
                except Exception as e:
                    issues.append(create_issue(3, str(e)))
                    return
            else:
                fields_value["attributes"] = {}

            # Process specific fields

            # Obtain the parent: it must exist. It could be created dynamically, but it is important to specify its attributes
            parent_processor = None
            field_val = fields_value.get("parent_processor", None)
            if field_val:
                parent_processor = find_processor_by_name(
                    state=glb_idx, processor_name=field_val)
                if not parent_processor:
                    issues.append(
                        create_issue(
                            3,
                            f"Specified parent processor, '{field_val}', does not exist"
                        ))
                    return

            # Find or create processor and REGISTER it in "glb_idx"
            # TODO Now, only Simple name allowed
            # TODO Improve allowing hierarchical names, and hierarchical names with wildcards
            # TODO Improve allowing CLONE(<processor name>)
            # TODO Pass the attributes:
            # TODO p_type, p_f_or_s, p_i_or_a, p_alias, p_description, p_copy_interfaces
            if fields_value.get("clone_processor", None):
                # TODO Find origin processor
                # TODO Clone it
                pass
            else:
                # Get internal and user-defined attributes in one dictionary
                attributes = {
                    k: fields_value[k]
                    for k, v in fields.items() if v.attribute_of == Processor
                }
                attributes.update(fields_value["attributes"])

                p = find_or_create_processor(
                    state=glb_idx,
                    name=fields_value[
                        "processor"],  # TODO: add parent hierarchical name
                    proc_attributes=attributes,
                    proc_location=Geolocation.create(
                        fields_value["geolocation_ref"],
                        fields_value["geolocation_code"]))

            # Add to ProcessorsGroup, if specified
            field_val = fields_value.get("processor_group", None)
            if field_val:
                p_set = p_sets.get(field_val, ProcessorsSet(field_val))
                p_sets[field_val] = p_set
                # Appends codes to the pset if the processor was not already a member of the pset
                if p_set.append(p, glb_idx):
                    p_set.append_attributes_codes(fields_value["attributes"])

            # Add Relationship "part-of" if parent was specified
            # The processor may already have other parent processors; those parent relationships are kept
            if parent_processor:
                # Create "part-of" relationship
                o1 = ProcessorsRelationPartOfObservation.create_and_append(
                    parent_processor, p, None)  # Part-of
                glb_idx.put(o1.key(), o1)
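dictionary_from_key_value_list is only invoked in this example; its grammar is project-specific, but for the plain "key=value, key=value" text it appears to handle, a hedged simplified stand-in could look like this (the real function also resolves references through glb_idx):

def key_value_list_to_dict(text: str) -> dict:
    # Hypothetical simplified parser, not the project's implementation
    result = {}
    for pair in text.split(","):
        if "=" not in pair:
            raise ValueError(f"'{pair.strip()}' is not a key=value pair")
        key, value = pair.split("=", 1)
        result[key.strip()] = value.strip()
    return result

assert key_value_list_to_dict("level=N, scale=1:1000") == {"level": "N", "scale": "1:1000"}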
Example #7
    def test_005_execute_file_five(self):
        """
        Just Structure. From Soslaires.

        :return:
        """
        file_path = os.path.dirname(
            os.path.abspath(__file__)) + "/z_input_files/Soslaires.xlsx"
        isess = execute_file(file_path, generator_type="spreadsheet")
        # Save state
        s = serialize_state(isess.state)
        with open("/home/rnebot/GoogleDrive/AA_MAGIC/Soslaires.serialized",
                  "wt") as f:
            f.write(s)
        local_state = deserialize_state(s)
        # Check State of things
        glb_idx, p_sets, hh, datasets, mappings = get_case_study_registry_objects(
            local_state)
        # Four processor sets
        self.assertEqual(len(p_sets), 4)
        # Obtain all Observers
        print("---- Observer ----")
        oers = glb_idx.get(Observer.partial_key())
        for i in oers:
            print(i.name)
        # Obtain all processors
        print("---- Processor ----")
        procs = glb_idx.get(Processor.partial_key())
        for i in procs:
            print(i.name)
        # Obtain all FactorTypes
        print("---- FactorType ----")
        fts = glb_idx.get(FactorType.partial_key())
        for i in fts:
            print(i.name)
        # Obtain all Factors
        print("---- Factor ----")
        fs = glb_idx.get(Factor.partial_key())
        for i in fs:
            print(i.processor.name + ":" + i.taxon.name)
        # Obtain all Quantitative Observations
        print("---- Quantities ----")
        qqs = glb_idx.get(FactorQuantitativeObservation.partial_key())
        for i in qqs:
            print(i.factor.processor.name + ":" + i.factor.taxon.name + "= " +
                  str(i.value.expression if i.value else ""))
        # Obtain all part-of Relation Observations
        print("---- Part-of relations (P-P) ----")
        po_rels = glb_idx.get(
            ProcessorsRelationPartOfObservation.partial_key())
        for i in po_rels:
            print(i.parent_processor.name + " \\/ " + i.child_processor.name)
        # Obtain all undirected flow Relation Observations
        print("---- Undirected flow relations (P-P) ----")
        uf_rels = glb_idx.get(
            ProcessorsRelationUndirectedFlowObservation.partial_key())
        for i in uf_rels:
            print(i.source_processor.name + " <> " + i.target_processor.name)
        # Obtain all upscale Relation Observations
        print("---- Upscale relations (P-P) ----")
        up_rels = glb_idx.get(
            ProcessorsRelationUpscaleObservation.partial_key())
        for i in up_rels:
            print(i.parent_processor.name + " \\/ " + i.child_processor.name +
                  " (" + i.factor_name + ": " + str(i.quantity) + ")")
        # Obtain all directed flow Relation Observations
        print("---- Directed flow relations (F-F) ----")
        df_rels = glb_idx.get(
            FactorsRelationDirectedFlowObservation.partial_key())
        for i in df_rels:
            print(i.source_factor.processor.name + ":" +
                  i.source_factor.taxon.name + " -> " +
                  i.target_factor.processor.name + ":" +
                  i.target_factor.taxon.name +
                  (" (" + str(i.weight) + ")" if i.weight else ""))
        # Obtain all hierarchies
        print("---- FactorType Hierarchies ----")
        hies = glb_idx.get(Hierarchy.partial_key())
        for i in hies:
            print(i.name)
        # Close interactive session
        isess.close_db_session()
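The heart of this test is the round trip: the registry must answer the same partial-key queries after being serialized and restored. A minimal sketch of that pattern, with a hypothetical pickle-based stand-in for serialize_state/deserialize_state:

import base64
import pickle

def serialize_state_sketch(state) -> str:
    # Hypothetical stand-in; the project ships its own serialize_state
    return base64.b64encode(pickle.dumps(state)).decode("ascii")

def deserialize_state_sketch(text: str):
    return pickle.loads(base64.b64decode(text.encode("ascii")))

state = {"processors": ["A1", "A2", "B", "C"]}
restored = deserialize_state_sketch(serialize_state_sketch(state))
assert restored == state  # queries against the restored state behave identically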