Example #1
    def test_032_new_processor_declaration_convention(self):
        """
        Test the new convention for declaring Processors, using the BareProcessors command.
        See the descriptions in the input file.

        :return:
        """
        file_path = os.path.dirname(
            os.path.abspath(__file__)
        ) + "/z_input_files/v2/19_naming_processors_new_convention.xlsx"

        isess = execute_file(file_path, generator_type="spreadsheet")

        # issues = prepare_and_solve_model(isess.state)
        # for idx, issue in enumerate(issues):
        #     print(f"Issue {idx + 1}/{len(issues)} = {issue}")

        # Check State of things
        glb_idx, p_sets, hh, datasets, mappings = get_case_study_registry_objects(
            isess.state)
        ps = glb_idx.get(Processor.partial_key())
        lst1 = []
        for p in ps:
            lst = p.full_hierarchy_names(glb_idx)
            lst1.extend(lst)
        p1 = glb_idx.get(Processor.partial_key("P2.C"))
        p2 = glb_idx.get(Processor.partial_key("P3.C"))
        self.assertEqual(p1[0], p2[0])
        p = glb_idx.get(Processor.partial_key("P1.C.P2"))
        self.assertEqual(len(p), 1)

        # Close interactive session
        isess.close_db_session()
Example #2
def get_processors(glb_idx: PartialRetrievalDictionary) -> pd.DataFrame:
    procs = set(glb_idx.get(Processor.partial_key()))  # Unique processors
    d = {}
    for p in procs:
        parent_relations = glb_idx.get(
            ProcessorsRelationPartOfObservation.partial_key(child=p))
        # Map each processor to the idents of its parents (input for the topological sort below)
        d[p.ident] = {rel.parent_processor.ident for rel in parent_relations}

    lst = [[
        "ProcessorGroup", "Processor", "ParentProcessor", "SubsystemType",
        "System", "FunctionalOrStructural", "Accounted", "Stock",
        "Description", "GeolocationRef", "GeolocationCode",
        "GeolocationLatLong", "Attributes"
    ]]
    # Elaborate a DAG, then iterate over it
    for ident in list(toposort.toposort_flatten(d)):
        p = glb_idx.get(Processor.partial_key(ident=ident))[0]
        gref = p.geolocation.reference if p.geolocation else ""
        gcode = p.geolocation.code if p.geolocation else ""
        pgroup = p.attributes.get("processor_group")
        t = [
            pgroup if pgroup else "", p.name, "",
            p.attributes.get("subsystem_type", ""),
            p.attributes.get("processor_system", ""),
            p.attributes.get("functional_or_structural", ""),
            p.attributes.get("instance_or_archetype", ""),
            p.attributes.get("stock"), "", gref, gcode, "", ""
        ]
        parent_relations = glb_idx.get(
            ProcessorsRelationPartOfObservation.partial_key(child=p))
        if len(parent_relations) == 0:
            lst.append(t)
        else:
            # After the first row, rewrite the Processor column with the full
            # hierarchical name matching that parent (used by the remaining rows)
            first = True
            for rel in parent_relations:
                t[2] = rel.parent_processor.full_hierarchy_names(glb_idx)[0]
                lst.append(t.copy())
                if first:
                    first = False
                    proper = None
                    for n in p.full_hierarchy_names(glb_idx):
                        if t[2] in n:
                            proper = n
                            break
                    t[1] = proper if proper else t[1]

    return list_to_dataframe(lst)
Example #3
def get_processor(attribute, value, prd: PartialRetrievalDictionary = None):
    """
    Obtain the name of a Processor given the value of an attribute
    (Obtain the registry of objects)

    :param attribute:
    :param value:
    :param prd: A PartialRetrievalDictionary, passed in State "_glb_idx" to the AST evaluator by
    :return:
    """

    if not prd:
        raise Exception(f"No Global-Index parameter passed to Processor function")
    else:
        # Obtain ALL Processors, then ONE having attribute "attribute" with value <value>
        procs = prd.get(Processor.partial_key())
        ret = None
        for proc in procs:
            v = vars(proc).get(attribute)
            if not v:
                v = proc.attributes.get(attribute)
            if v and (strcmp(v, str(value)) or (is_float(value) and float(v) == float(value))):
                ret = proc.name
                break
        if ret:
            return ret
        else:
            raise Exception(f"No Processor found having attribute '{attribute}' with value '{value}'")
Example #4
    def test_029_dataset_expansion2(self):
        """
        Test dataset expansion using an advanced expansion expression
        (function calls returning either InterfaceTypes or Processors)

        :return:
        """
        file_path = os.path.dirname(os.path.abspath(
            __file__)) + "/z_input_files/v2/18_dataset_expansion_2.xlsx"

        isess = execute_file(file_path, generator_type="spreadsheet")

        # issues = prepare_and_solve_model(isess.state)
        # for idx, issue in enumerate(issues):
        #     print(f"Issue {idx + 1}/{len(issues)} = {issue}")

        # Check State of things
        glb_idx, p_sets, hh, datasets, mappings = get_case_study_registry_objects(
            isess.state)
        p = glb_idx.get(Processor.partial_key())
        self.assertEqual(len(p), 2)
        p = p[0]

        # Close interactive session
        isess.close_db_session()
Example #5
def generate_geojson(s):
    """

    :param s:
    :return:
    """

    features = []
    # Obtain principal structures
    glb_idx, p_sets, hierarchies, datasets, mappings = get_case_study_registry_objects(
        s)
    ps = glb_idx.get(Processor.partial_key())
    for p in ps:
        # TODO If there is Geolocation information, obtain the region
        #      Collect also the attributes of the processor and of all the interfaces, from "flow graph solution"
        # url = "https://raw.githubusercontent.com/eurostat/Nuts2json/master/2016/4326/20M/nutsrg_1.json"
        if not p.geolocation:
            continue

        gr = glb_idx.get(
            GeographicReference.partial_key(name=p.geolocation.reference))
        url = gr[0].attributes["data_location"]
        geo_id = p.geolocation.code
        j, ids = read_geojson(url)
        # Obtain element of interest (GeoJSON)
        tmp = j["features"][ids[geo_id]]
        feature = tmp.copy()
        # Add processor properties
        feature["properties"]["processor"] = dict(
            name=p.name,
            h_name=p.full_hierarchy_names(glb_idx)[0],
            subsystem_type=p.subsystem_type,
            system=p.processor_system,
            level=p.level,
            functional_or_structural=p.functional_or_structural,
            instance_or_archetype=p.instance_or_archetype,
            stock=p.stock)
        # Add interface properties
        interfaces = []
        for f in p.factors:
            idict = dict()  # Placeholder: interface attributes not collected yet (see TODO above)
            interfaces.append(idict)
        feature["properties"]["interfaces"] = interfaces
        # TODO Add relationships?

        features.append(feature)

    return dict(type="FeatureCollection", features=features)
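A short usage sketch (hypothetical; assumes `state` holds an executed case study):

import json

feature_collection = generate_geojson(state)
with open("case_study.geojson", "w") as f:
    json.dump(feature_collection, f)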
Example #6
def get_processor_names_to_processors_dictionary(
        state: PartialRetrievalDictionary):
    """
    Obtain a dictionary with all processor names (a processor may have multiple names) and
    the corresponding Processor object

    :param state:
    :return:
    """
    ps = state.get(Processor.partial_key())
    ps = set(ps)  # Avoid repeating Processor objects
    d = create_dictionary()
    for p in ps:
        for n in p.full_hierarchy_names(state):
            d[n] = p
    return d
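A hedged usage sketch (the hierarchical name is illustrative, in the style of the tests above):

d = get_processor_names_to_processors_dictionary(glb_idx)
p = d["P1.C.P2"]  # Any of the full hierarchy names of a Processor maps to the same object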
Example #7
def generate_rdf_from_object_model(state: State):
    """
    Using the base ontology "musiasem.owl", generate a file

    :param state:
    :return:
    """
    onto = get_ontology(
        "file:////home/rnebot/Dropbox/nis-backend/nexinfosys/ie_exports/musiasem.owl"
    ).load()
    glb_idx, p_sets, hh, datasets, mappings = get_case_study_registry_objects(
        state)
    # onto = get_ontology("file:///musiasem.owl").load()

    # Individuals / instances
    # InterfaceTypes
    fts = glb_idx.get(FactorType.partial_key())
    # Processors
    procs = glb_idx.get(Processor.partial_key())
    # Interfaces and Quantities
    fs = glb_idx.get(Factor.partial_key())
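The excerpt stops after gathering the objects to export. A hypothetical continuation with owlready2 (the class name "Processor" inside musiasem.owl and the output file name are assumptions, not confirmed by the source):

with onto:
    for p in procs:
        onto.Processor(p.name)  # Create one individual per Processor (assumed class name)
onto.save(file="musiasem_instances.owl")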
Example #8
    def execute(self, state: "State"):
        """
        For each parent processor clone all the child processors.
        The cloning process may pass some factor observation, that may result in
        """
        some_error = False
        issues = []

        parent_processor_type = self._content["parent_processor_type"]
        child_processor_type = self._content["child_processor_type"]
        scaled_factor = self._content["scaled_factor"]
        source = self._content["source"]
        # column_headers = self._content["column_headers"]
        # row_headers = self._content["row_headers"]
        scales = self._content["scales"]

        # Find processor sets, for parent and child
        glb_idx, p_sets, hh, datasets, mappings = get_case_study_registry_objects(
            state)
        if parent_processor_type not in p_sets:
            some_error = True
            issues.append((
                3, "The processor type '" + parent_processor_type +
                "' (appointed as parent) has not been found in the commands executed so far"
            ))

        if child_processor_type not in p_sets:
            some_error = True
            issues.append((
                3, "The processor type '" + child_processor_type +
                "' (appointed as child) has not been found in the commands executed so far"
            ))

        if some_error:
            return issues, None

        # CREATE the Observer of the Upscaling
        oer = glb_idx.get(Observer.partial_key(source))
        if not oer:
            oer = Observer(source)
            glb_idx.put(oer.key(), oer)
        else:
            oer = oer[0]

        # Processor Sets have associated attributes, and each of them has a code list
        parent = p_sets[parent_processor_type]  # type: ProcessorsSet
        child = p_sets[child_processor_type]  # type: ProcessorsSet

        # Form code lists from the command specification
        code_lists = None
        for sc_dict in scales:
            codes = sc_dict["codes"]
            if not code_lists:
                code_lists = [set() for _ in codes]

            for i, c in enumerate(codes):
                code_lists[i].add(c)

        # Match existing code lists (from Processor attributes) with the ones gathered in the specification of
        # the two (parent and child) processors sets.
        # Form lists of attributes of processors used in the code lists
        parent_attrs = []
        child_attrs = []
        matched = []
        for i, cl in enumerate(code_lists):
            found = False
            for attr, attr_values in parent.attributes.items():
                if set(attr_values).issuperset(cl):
                    parent_attrs.append(
                        (attr, i))  # (Attribute, code list index)
                    found = True
                    break
            for attr, attr_values in child.attributes.items():
                if set(attr_values).issuperset(cl):
                    child_attrs.append(
                        (attr, i))  # (Attribute, code list index)
                    found = True
                    break
            matched.append(found)
        for i, found in enumerate(matched):
            if not found:
                cl = code_lists[i]
                # TODO Try cl as a list of names of parent or child processors
                if not found:
                    issues.append((
                        2, "The code list: " + ", ".join(cl) +
                        " is not contained in the attributes of the parent processors set '"
                        + parent_processor_type +
                        "' nor in the attributes of the child processors set '"
                        + child_processor_type + "'"))

        # Execute the upscale for each
        cached_processors = {}
        for sc_dict in scales:
            try:
                non_zero_weight = math.fabs(float(sc_dict["weight"])) > 1e-6
            except (KeyError, TypeError, ValueError):
                non_zero_weight = True
            if not non_zero_weight:
                continue

            codes = sc_dict["codes"]
            # Find parent processor
            parent_dict = {attr: codes[i] for attr, i in parent_attrs}
            d2s = str(parent_dict)
            if d2s in cached_processors:
                parent = cached_processors[d2s]
                if not parent:
                    issues.append((
                        3, "Either the tuple (" + d2s +
                        ") did not match any Processor or matched more than one."
                    ))
            else:
                parent_dict.update(Processor.partial_key())

                # Obtain Processor matching the attributes <<<<<<<<<<
                # Query the PartialRetrievalDictionary by attributes
                parents = glb_idx.get(parent_dict)

                if len(parents) > 1:
                    issues.append(
                        (3, "The tuple (" + str(parent_dict) + ") matches " +
                         str(len(parents)) + " Processors: " +
                         (", ".join([p.name for p in parents]))))
                    parent = None
                elif len(parents) == 0:
                    issues.append((3, "The tuple (" + str(parent_dict) +
                                   ") did not match any Processor"))
                    parent = None
                else:
                    parent = parents[0]

                cached_processors[d2s] = parent

            # Find child processor
            child_dict = {attr: codes[i] for attr, i in child_attrs}
            d2s = str(child_dict)
            if d2s in cached_processors:
                child = cached_processors[d2s]
                if not child:
                    issues.append((
                        3, "Either the tuple (" + d2s +
                        ") did not match any Processor or matched more than one."
                    ))
            else:
                child_dict.update(Processor.partial_key())

                # Obtain Processors matching the attributes
                # Query the PartialRetrievalDictionary by attributes
                children = glb_idx.get(child_dict)

                if len(children) > 1:
                    issues.append(
                        (3, "The tuple (" + str(child_dict) + ") matches " +
                         str(len(children)) + " Processors: " +
                         (", ".join([p.name for p in children]))))
                    child = None
                elif len(children) == 0:
                    issues.append((3, "The tuple (" + str(child_dict) +
                                   ") did not match any Processor"))
                    child = None
                else:
                    child = children[0]  # type: Processor

                cached_processors[d2s] = child

            # Clone child processor (and its descendants) and add an upscale relation between "parent" and the clone
            if parent and child:
                if non_zero_weight:
                    # Clone the child processor
                    # TODO
                    cloned_child, cloned_children = child.clone(state=glb_idx)
                    Processor.register([cloned_child] + list(cloned_children),
                                       glb_idx)

                    # Create the new Relation Observations
                    # - Part-of Relation
                    o1 = ProcessorsRelationPartOfObservation.create_and_append(
                        parent, cloned_child, oer)  # Part-of
                    glb_idx.put(o1.key(), o1)
                    # - Upscale Relation
                    quantity = str(sc_dict["weight"])
                    if True:  # Toggle: the Scale relation branch is active; the Upscale branch below is kept disabled
                        # Find Interface named "scaled_factor"
                        for f in parent.factors:
                            if strcmp(f.name, scaled_factor):
                                origin = f
                                break
                        else:
                            origin = None
                        for f in cloned_child.factors:
                            if strcmp(f.name, scaled_factor):
                                destination = f
                                break
                        else:
                            destination = None

                        if origin and destination:
                            o3 = FactorsRelationScaleObservation.create_and_append(
                                origin,
                                destination,
                                observer=None,
                                quantity=quantity)
                            glb_idx.put(o3.key(), o3)
                        else:
                            raise Exception(
                                "Could not find Interfaces to define a Scale relation. Processors: "
                                + parent.name + ", " + cloned_child.name +
                                "; Interface name: " + scaled_factor)
                    else:
                        o3 = ProcessorsRelationUpscaleObservation.create_and_append(
                            parent,
                            cloned_child,
                            observer=None,
                            factor_name=scaled_factor,
                            quantity=quantity)
                        glb_idx.put(o3.key(), o3)
            else:
                # TODO
                parent_dict = str({attr: codes[i] for attr, i in parent_attrs})
                child_dict = str({attr: codes[i] for attr, i in child_attrs})
                if not parent and child:
                    issues.append((
                        2,
                        "Could not find parent Processor matching attributes: "
                        + parent_dict))
                elif not child and parent:
                    issues.append(
                        (2,
                         "Could not find child Processor matching attributes: "
                         + child_dict))
                else:
                    issues.append((
                        2,
                        "Could not find parent Processor matching attributes: "
                        + parent_dict +
                        ", nor child Processor matching attributes: " +
                        child_dict))

        return issues, None
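For reference, a hedged sketch of the command content this `execute` consumes (all field values are illustrative):

content = {
    "parent_processor_type": "Society",   # name of a ProcessorsSet declared by earlier commands
    "child_processor_type": "Farm",       # idem, the set whose processors are cloned
    "scaled_factor": "LandUse",           # Interface expected in both parent and cloned child
    "source": "upscale_sheet",            # name of the Observer to create or reuse
    "scales": [                           # one entry per (parent, child) combination
        {"codes": ["ES", "Cereal"], "weight": "0.3"},
        {"codes": ["ES", "Olives"], "weight": "0.7"},
    ],
}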
Example #9
def get_interfaces(glb_idx: PartialRetrievalDictionary) -> pd.DataFrame:
    # Used to examine "value" as expression, and find variables that are interface names vs parameter names
    params = create_dictionary(
        data={p.name: None
              for p in glb_idx.get(Parameter.partial_key())})
    s = State()
    procs = glb_idx.get(Processor.partial_key())
    d = {}
    for p in procs:
        parent_relations = glb_idx.get(
            ProcessorsRelationPartOfObservation.partial_key(child=p))
        d[p.ident] = {rel.parent_processor.ident for rel in parent_relations}

    lst = [[
        "Processor", "InterfaceType", "Interface", "Sphere", "RoegenType",
        "Orientation", "OppositeSubsystemType", "GeolocationRef",
        "GeolocationCode", "InterfaceAttributes", "Value", "Unit",
        "RelativeTo", "Uncertainty", "Assessment", "PedigreeMatrix",
        "Pedigree", "Time", "Source", "NumberAttributes", "Comments"
    ]]
    # Elaborate a DAG, then iterate over it
    for ident in list(toposort.toposort_flatten(d)):
        p = glb_idx.get(Processor.partial_key(ident=ident))[0]
        ifaces = glb_idx.get(Factor.partial_key(processor=p))
        iface_names = create_dictionary(
            data={iface.name: iface
                  for iface in ifaces})
        # Elaborate DAG of Interfaces because of Observations
        # (a fresh dict; the outer loop already materialized its ordering with list())
        iface_deps = {}
        for iface in ifaces:
            if iface.ident not in iface_deps:
                iface_deps[iface.ident] = set()
            for obs in iface.quantitative_observations:
                if obs.relative_factor:
                    iface_deps[iface.ident].add(obs.relative_factor.ident)
                # Consider obs.value and non-linear dependencies
                if isinstance(obs.value, str):
                    ast = string_to_ast(expression_with_parameters, obs.value)
                    evaluation_issues = []
                    value, unresolved_vars = ast_evaluator(
                        exp=ast,
                        state=s,
                        obj=None,
                        issue_lst=evaluation_issues)
                    for unresolved in unresolved_vars:
                        if unresolved not in params:
                            iface_deps[iface.ident].add(iface_names[unresolved].ident)

        for ident2 in list(toposort.toposort_flatten(iface_deps)):
            iface = glb_idx.get(Factor.partial_key(ident=ident2))[0]
            lst1 = [
                iface.processor.name, iface.taxon.name, iface.name,
                iface.sphere, iface.roegen_type.name, iface.orientation,
                iface.opposite_processor_type, "", "", ""
            ]
            observations = iface.quantitative_observations
            if len(observations) > 0:
                for obs in observations:
                    lst2 = [
                        obs.value,
                        obs.attributes.get("unit", ""),
                        obs.relative_factor.name if obs.relative_factor else "",
                        obs.attributes.get("spread", ""),
                        obs.attributes.get("assessment", ""),
                        obs.attributes.get("pedigree_template", ""),
                        obs.attributes.get("pedigree", ""),
                        obs.attributes.get("time", ""),
                        obs.observer.name if obs.observer else "",
                        "",
                        obs.attributes.get("comments", "")
                    ]
                    lst.append(lst1 + lst2)
            else:
                lst.append(lst1 + ["", "", "", "", "", "", "", "", "", ""])

    return list_to_dataframe(lst)
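For reference, `toposort_flatten` turns a {node: set-of-dependencies} mapping, like the ones built above, into a linear order in which every node appears after its dependencies; a minimal standalone example:

import toposort

deps = {"child": {"parent"}, "grandchild": {"child"}}
print(toposort.toposort_flatten(deps))  # ['parent', 'child', 'grandchild']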
Example #10
def export_model_to_xml(
        registry: PartialRetrievalDictionary
) -> Tuple[str, Dict[str, Processor]]:
    """
    Elaborate an XML string containing the nested processors and their attributes.
    Also the interfaces inside processors
    <processors>
      <root_p1 fullname="" level="" system="" subsystem="" functional="true|false">
        <interfaces>
          <i1 type="" sphere="" roegen_type="" orientation="" opposite_processor_type="" />
          ...
        </interfaces>
        <child_p2>
          ...
        </child_p2>
      </root_p1>
      ...
    </processors>

    Example (abstract):

    '/processors//[level="n"]'

    :param registry:
    :return:
    """
    def xml_processor(p: Processor, registry: PartialRetrievalDictionary,
                      p_map: Dict[str, Processor]):
        """
        Return the XML of a processor
        Recursive into children

        :param p:
        :return:
        """
        def xml_interface(iface: Factor):
            """

            :param iface:
            :return:
            """
            s = f'<{iface.name} type="{iface.taxon.name}" sphere="{iface.sphere}" ' \
                f'roegen_type="{iface.roegen_type}" orientation="{iface.orientation}" ' \
                f'opposite_processor_type="{iface.opposite_processor_type}" />'
            if case_sensitive:
                return s
            else:
                return s.lower()

        children = p.children(registry)
        full_name = p.full_hierarchy_names(registry)[0]
        if case_sensitive:
            p_map[full_name] = p
        else:
            p_map[full_name.lower()] = p

        s = f"""
    <{p.name} fullname="{full_name}" level="{p.level}" system="{p.processor_system}" subsystem="{p.subsystem_type}" functional="{"true" if strcmp(p.functional_or_structural, "Functional") else "false"}" >
        <interfaces>
        {chr(10).join([xml_interface(f) for f in p.factors])}
        </interfaces>
        {chr(10).join([xml_processor(c, registry, p_map) for c in children])}
    </{p.name}>"""
        if case_sensitive:
            return s
        else:
            return s.lower()

    # Part of relationships
    por = registry.get(ProcessorsRelationPartOfObservation.partial_key())

    # Set of all instance processors NOT touched by part-of relationships
    unaffected_procs = set([
        p for p in registry.get(Processor.partial_key())
        if strcmp(p.instance_or_archetype, "Instance")
    ])
    for po in por:
        try:
            unaffected_procs.remove(po.parent_processor)
        except KeyError:
            pass
        try:
            unaffected_procs.remove(po.child_processor)
        except KeyError:
            pass

    # Keep those affecting Instance processors
    por = [
        po for po in por
        if strcmp(po.parent_processor.instance_or_archetype, "Instance")
    ]

    # Get root processors (set of processors not appearing as child_processor)
    parents = set([po.parent_processor for po in por])
    children = set([po.child_processor for po in por])
    roots = parents.difference(children).union(unaffected_procs)
    # leaves = children.difference(parents)
    result = '<processors>'  # <?xml version="1.0" encoding="utf-8"?>\n
    p_map = {}
    for p in roots:
        result += xml_processor(p, registry, p_map)
    result += "\n</processors>"

    return result, p_map
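A hedged usage sketch querying the exported XML (assumes lxml is available; the level value is illustrative, and the XPath is a valid rendering of the abstract pattern given in the docstring):

from lxml import etree

xml_string, p_map = export_model_to_xml(registry)
root = etree.fromstring(xml_string)
for node in root.xpath('//*[@level="n1"]'):
    p = p_map[node.get("fullname")]  # map the XML element back to its Processor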
Example #11
    def test_005_execute_file_five(self):
        """
        Just Structure. From Soslaires.

        :return:
        """
        file_path = os.path.dirname(
            os.path.abspath(__file__)) + "/z_input_files/Soslaires.xlsx"
        isess = execute_file(file_path, generator_type="spreadsheet")
        # Save state
        s = serialize_state(isess.state)
        # Changed "wt" to "wb": the output of "serialize_state" is a byte array (it is compressed now)
        with open("/home/rnebot/GoogleDrive/AA_MAGIC/Soslaires.serialized",
                  "wb") as f:
            f.write(s)
        local_state = deserialize_state(s)
        # Check State of things
        glb_idx, p_sets, hh, datasets, mappings = get_case_study_registry_objects(
            local_state)
        # Four processor sets
        self.assertEqual(len(p_sets), 4)
        # Obtain all Observers
        print("---- Observer ----")
        oers = glb_idx.get(Observer.partial_key())
        for i in oers:
            print(i.name)
        # Obtain all processors
        print("---- Processor ----")
        procs = glb_idx.get(Processor.partial_key())
        for i in procs:
            print(i.name)
        # Obtain all FactorTypes
        print("---- FactorType ----")
        fts = glb_idx.get(FactorType.partial_key())
        for i in fts:
            print(i.name)
        # Obtain all Factors
        print("---- Factor ----")
        fs = glb_idx.get(Factor.partial_key())
        for i in fs:
            print(i.processor.name + ":" + i.taxon.name)
        # Obtain all Quantitative Observations
        print("---- Quantities ----")
        qqs = glb_idx.get(FactorQuantitativeObservation.partial_key())
        for i in qqs:
            print(i.factor.processor.name + ":" + i.factor.taxon.name + "= " +
                  str(i.value.expression if i.value else ""))
        # Obtain all part-of Relation Observations
        print("---- Part-of relations (P-P) ----")
        po_rels = glb_idx.get(
            ProcessorsRelationPartOfObservation.partial_key())
        for i in po_rels:
            print(i.parent_processor.name + " \/ " + i.child_processor.name)
        # Obtain all undirected flow Relation Observations
        print("---- Undirected flow relations (P-P) ----")
        uf_rels = glb_idx.get(
            ProcessorsRelationUndirectedFlowObservation.partial_key())
        for i in uf_rels:
            print(i.source_processor.name + " <> " + i.target_processor.name)
        # Obtain all upscale Relation Observations
        print("---- Upscale relations (P-P) ----")
        up_rels = glb_idx.get(
            ProcessorsRelationUpscaleObservation.partial_key())
        for i in up_rels:
            print(i.parent_processor.name + " \/ " + i.child_processor.name +
                  "(" + i.factor_name + ": " + str(i.quantity) + ")")
        # Obtain all directed flow Relation Observations
        print("---- Directed flow relations (F-F) ----")
        df_rels = glb_idx.get(
            FactorsRelationDirectedFlowObservation.partial_key())
        for i in df_rels:
            print(i.source_factor.processor.name + ":" +
                  i.source_factor.taxon.name + " -> " +
                  i.target_factor.processor.name + ":" +
                  i.target_factor.taxon.name +
                  (" (" + str(i.weight) + ")" if i.weight else ""))
        # Obtain all hierarchies
        print("---- FactorType Hierarchies ----")
        hies = glb_idx.get(Hierarchy.partial_key())
        for i in hies:
            print(i.name)
        # Close interactive session
        isess.close_db_session()
Example #12
def prepare_model(state: State) -> None:
    """
    Modify the state so that:
    * Implicit references of Interfaces to subcontexts are materialized
      * Creating processors
      * Creating interfaces in these processors
      * Creating relationships in these processors

    :param state:
    """

    # TODO: currently when an interface is defined as a Scale from two or more interfaces, the computed values are
    #  added while the intuition tells us that only one scale should be defined. We have to give a warning message
    #  if this situation happens.

    # Registry and the other objects also
    glb_idx, _, _, _, _ = get_case_study_registry_objects(state)
    # Prepare a Query to obtain ALL interfaces
    query = BasicQuery(state)
    filt = {}
    objs = query.execute([Factor], filt)
    for iface in objs[Factor]:  # type: Factor
        # Skip Interfaces of Processors which are not accounted
        if strcmp(iface.processor.instance_or_archetype, 'Archetype') or \
                strcmp(iface.processor.instance_or_archetype, 'No'):
            continue

        # If the Interface is connected to a "Subcontext" different from the owning Processor's
        if iface.opposite_processor_type:
            if iface.opposite_processor_type.lower() != iface.processor.subsystem_type.lower():
                # Check if the interface has flow relationships
                # TODO An alternative is to search "observations" of type FactorsRelationDirectedFlowObservation
                #      in the same "iface"

                if iface.orientation.lower() == "input":
                    parameter = {"target": iface}
                else:
                    parameter = {"source": iface}

                relations = glb_idx.get(
                    FactorsRelationDirectedFlowObservation.partial_key(
                        **parameter))

                # If it does not have flow relationships:
                #  * define default Processor name and retrieve it (or if it does not exist, create it)
                #  * create an Interface into that Processor and a Flow Relationship
                if len(relations) == 0:
                    # Define the name of a Processor in the same context but in different subcontext
                    p_name = iface.processor.processor_system + "_" + iface.opposite_processor_type
                    p = glb_idx.get(Processor.partial_key(p_name))
                    if len(p) == 0:
                        attributes = {
                            'subsystem_type': iface.opposite_processor_type,
                            'processor_system':
                            iface.processor.processor_system,
                            'functional_or_structural': 'Functional',
                            'instance_or_archetype': 'Instance'
                            # 'stock': None
                        }

                        p = Processor(p_name, attributes=attributes)
                        glb_idx.put(p.key(), p)
                    else:
                        p = p[0]

                    attributes = {
                        'sphere': 'Technosphere' if iface.opposite_processor_type.lower() in ["local", "external"] else 'Biosphere',
                        'roegen_type': iface.roegen_type,
                        'orientation': "Input" if iface.orientation.lower() == "output" else "Output",
                        'opposite_processor_type': iface.processor.subsystem_type
                    }

                    # Find the Interface in Processor "p"; create it if it does not exist,
                    # so "f" is bound either way for the Flow Relationship below
                    # (assumes factors_find returns the matching Factor or None)
                    f = p.factors_find(iface.taxon.name)
                    if not f:
                        f = Factor.create_and_append(
                            name=iface.taxon.name,
                            processor=p,
                            in_processor_type=FactorInProcessorType(
                                external=False,
                                incoming=iface.orientation.lower() == "output"),
                            attributes=attributes,
                            taxon=iface.taxon)
                        glb_idx.put(f.key(), f)

                    # Create Flow Relationship
                    if iface.orientation.lower() == "output":
                        source = iface
                        target = f
                    else:
                        source = f
                        target = iface

                    fr = FactorsRelationDirectedFlowObservation.create_and_append(
                        source=source, target=target, observer=None)
                    glb_idx.put(fr.key(), fr)
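A minimal invocation sketch, in the style of the tests above (prepare_model mutates the State in place and returns nothing; file_path is assumed to point at a case study workbook):

isess = execute_file(file_path, generator_type="spreadsheet")
prepare_model(isess.state)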
Example #13
    def _process_row(self, field_values: Dict[str, Any], subrow=None) -> None:

        # Transform text of "attributes" into a dictionary
        if field_values.get("attributes"):
            try:
                field_values["attributes"] = dictionary_from_key_value_list(
                    field_values["attributes"], self._glb_idx)
            except Exception as e:
                self._add_issue(IType.ERROR,
                                str(e) + subrow_issue_message(subrow))
                return
        else:
            field_values["attributes"] = {}

        # Process specific fields

        # Obtain the parent: it must exist. It could be created dynamically but it's important to specify attributes
        if field_values.get("parent_processor"):
            try:
                parent_processor = self._get_processor_from_field(
                    "parent_processor")
                # parents = find_processors_matching_name(parent_processor)
                # if len(parents) > 1:
                #     self._add_issue(IType.WARNING,
                #                     f"Parent processor '{parent_processor}' not unique. Matches: {', '.join(p.hierarchical_names[0] for p in parents)}. Skipped." + subrow_issue_message(subrow))
                #     return
            except CommandExecutionError:
                self._add_issue(
                    IType.ERROR,
                    f"Specified parent processor, '{field_values.get('parent_processor')}', does not exist"
                    + subrow_issue_message(subrow))
                return
        else:
            parent_processor = None

        behave_as_processor: Optional[Processor] = None
        if field_values.get("behave_as_processor"):
            try:
                behave_as_processor = self._get_processor_from_field(
                    "behave_as_processor")
            except CommandExecutionError:
                self._add_issue(
                    IType.WARNING,
                    f"Specified 'behave as' processor, '{field_values.get('behave_as_processor')}', does not exist, value ignored"
                    + subrow_issue_message(subrow))

        # Find or create processor and REGISTER it in "glb_idx"
        # TODO Now, only Simple name allowed
        # TODO Improve allowing hierarchical names, and hierarchical names with wildcards
        pgroup = field_values.get("processor_group")

        # Get internal and user-defined attributes in one dictionary
        attributes = {
            c.name: field_values[c.name]
            for c in self._command_fields if c.attribute_of == Processor
        }
        attributes.update(field_values["attributes"])
        attributes["processor_group"] = pgroup

        # Needed to support the new name of the field, "Accounted" (previously it was "InstanceOrArchetype")
        # (internally the values have the same meaning, "Instance" for a processor which has to be accounted,
        # "Archetype" for a processor which hasn't)
        v = attributes.get("instance_or_archetype", None)
        if strcmp(v, "Yes"):
            v = "Instance"
        elif strcmp(v, "No"):
            v = "Archetype"
        if v:
            attributes["instance_or_archetype"] = v

        name = field_values["processor"]
        p_names, _ = obtain_name_parts(name)

        geolocation = Geolocation.create(field_values["geolocation_ref"],
                                         field_values["geolocation_code"])

        ps = find_processors_matching_name(name, self._glb_idx)
        more_than_one = len(ps) > 1
        simple = len(p_names) == 1
        exists = len(ps) == 1
        # SIMPLE? EXISTS? PARENT? ACTION:
        # Yes     Yes     Yes     NEW; HANG FROM PARENT
        # Yes     Yes     No      Warning: repeated
        # Yes     No      Yes     NEW; HANG FROM PARENT
        # Yes     No      No      NEW
        # No      Yes     Yes     Warning: cannot hang from parent
        # No      Yes     No      Warning: repeated AND not simple not allowed
        # No      No      Yes     Warning: cannot create more than one processor AND not simple not allowed
        # No      No      No      Warning: cannot create more than one processor AND not simple not allowed

        create_new = False
        if not simple:
            if not parent_processor:
                self._add_issue(
                    IType.WARNING,
                    "When a processor does not have a parent, the name must be simple. Skipped."
                    + subrow_issue_message(subrow))
                return
        else:
            if exists and not parent_processor:
                self._add_issue(
                    IType.WARNING,
                    f"Repeated declaration of {name}. Skipped." +
                    subrow_issue_message(subrow))
                return
            create_new = True

        if create_new:
            p = find_or_create_processor(state=self._glb_idx,
                                         name=name,
                                         proc_attributes=attributes,
                                         proc_location=geolocation)
        elif exists:
            p = ps[0]

        # Add to ProcessorsGroup, if specified
        if pgroup:
            p_set = self._p_sets.get(pgroup, ProcessorsSet(pgroup))
            self._p_sets[pgroup] = p_set
            if p_set.append(
                    p, self._glb_idx
            ):  # Appends codes to the pset if the processor was not member of the pset
                p_set.append_attributes_codes(field_values["attributes"])

        # If a geolocation was specified, check that the reference exists
        # and that the code exists inside it
        if p.geolocation and p.geolocation.reference:
            # Geographical reference
            gr = self._glb_idx.get(
                GeographicReference.partial_key(name=p.geolocation.reference))
            if len(gr) == 0:
                self._add_issue(
                    IType.ERROR,
                    f"Geographical reference {p.geolocation.reference} not found "
                    + subrow_issue_message(subrow))
                return
            if p.geolocation.reference and not p.geolocation.code:
                self._add_issue(
                    IType.ERROR,
                    f"Geographical reference was specified but not the code in it "
                    + subrow_issue_message(subrow))
                return
            geo_id = p.geolocation.code
            try:
                url = gr[0].attributes["data_location"]
            except KeyError:
                self._add_issue(
                    IType.ERROR,
                    f"URL not found in geographical reference {p.geolocation.reference} "
                    + subrow_issue_message(subrow))
                return
            try:
                # READ the file (or get it from cache). Could take some time...
                j, ids = read_geojson(url)
            except Exception:
                self._add_issue(
                    IType.ERROR,
                    f"URL {url} in reference {p.geolocation.reference} could not be read "
                    + subrow_issue_message(subrow))
                return
            if geo_id not in ids:
                self._add_issue(
                    IType.WARNING,
                    f"Could not find code {geo_id} in file {url}, geographical reference {p.geolocation.reference} "
                    + subrow_issue_message(subrow))

        # Add Relationship "part-of" if parent was specified
        # The processor may have previously other parent processors that will keep its parentship
        if parent_processor:
            # Create "part-of" relationship
            if len(self._glb_idx.get(
                    ProcessorsRelationPartOfObservation.partial_key(
                        parent_processor, p))) > 0:
                self._add_issue(
                    IType.WARNING,
                    f"{p.name} is already part-of {parent_processor.name}. Skipped."
                    + subrow_issue_message(subrow))
                return

            o1 = ProcessorsRelationPartOfObservation.create_and_append(
                parent_processor,
                p,
                None,
                behave_as=behave_as_processor,
                weight=field_values.get("parent_processor_weight"))  # Part-of
            self._glb_idx.put(o1.key(), o1)
            for hname in parent_processor.full_hierarchy_names(self._glb_idx):
                p_key = Processor.partial_key(f"{hname}.{p.name}", p.ident)
                if attributes:
                    p_key.update({
                        k: ("" if v is None else v)
                        for k, v in attributes.items()
                    })
                self._glb_idx.put(p_key, p)
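The closing loop registers the processor under every hierarchical name of its parent, which is what makes lookups by full name work (compare the assertions in Example #1); a hedged sketch with hypothetical names:

# After a row declaring processor "P2" under parent "P1.C" has been processed:
ps = glb_idx.get(Processor.partial_key("P1.C.P2"))  # -> [<Processor "P2">]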