Code example #1
    def __init__(self, session_factory):
        # Session factory with access to business logic database
        self._session_factory = session_factory

        # Interactive session ID
        self._guid = str(uuid.uuid4())

        # User identity, if given (can be an anonymous session)
        self._identity = None  # type: Identity
        self._state = State()  # To keep the state
        self._reproducible_session = None  # type: ReproducibleSession
Code example #2
def serialize_state(state: State):
    """
    Serialization prepared for a given organization of the state

    :return:
    """
    def serialize_dataframe(df):
        return df.to_json(orient="split")  # list(df.index.names), df.to_dict()

    print("  serialize_state IN")

    import copy
    # "_datasets"
    ns_ds = {}
    # Save and nullify before deep copy
    for ns in state.list_namespaces():
        _, _, _, datasets, _ = get_case_study_registry_objects(state, ns)
        ns_ds[ns] = datasets
        state.set("_datasets", create_dictionary(), ns)  # Nullify datasets

    # !!! WARNING: serialization destroys "state", so a DEEP COPY is made and serialized instead !!!
    # Deeply nested structures can exceed the default recursion limit, so raise it temporarily
    tmp = sys.getrecursionlimit()
    sys.setrecursionlimit(10000)
    state2 = copy.deepcopy(state)
    sys.setrecursionlimit(tmp)

    # Iterate all namespaces
    for ns in state2.list_namespaces():
        glb_idx, p_sets, hh, _, mappings = get_case_study_registry_objects(
            state2, ns)
        if glb_idx:
            tmp = glb_idx.to_pickable()
            state2.set("_glb_idx", tmp, ns)
        datasets = ns_ds[ns]
        # TODO Serialize other DataFrames.
        # Process Datasets
        for ds_name in datasets:
            ds = datasets[ds_name]
            if isinstance(ds.data, pd.DataFrame):
                tmp = serialize_dataframe(ds.data)
            else:
                tmp = None
                # ds.data = None
            # DB serialize the datasets
            lst2 = serialize(ds.get_objects_list())
            lst2.append(tmp)  # Append the serialized DataFrame
            datasets[ds_name] = lst2
        state2.set("_datasets", datasets, ns)
    tmp = serialize_from_object(state2)  # <<<<<<<< SLOWEST !!!! (when debugging)
    print("  serialize_state length: " + str(len(tmp)) + " OUT")

    return tmp
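
Usage sketch (hedged): the snippet below assumes a State populated elsewhere, and that deserialize_state (used in code example #10) is the inverse operation; the names and the exact deserialize signature are assumptions.

# Hypothetical round-trip sketch for serialize_state
state = State()
state.set("_identity", "test_user")      # put something into the State
blob = serialize_state(state)            # -> string, safe to persist
# ... store "blob" in the database, e.g. in CaseStudyVersion.state ...
restored = deserialize_state(blob)       # counterpart; may also take a state_version argument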
Code example #3
def create_scales_graph(
        scale_relations: List[FactorsRelationScaleObservation]) -> nx.DiGraph:
    """
    This graph is used to put interfaces into scale, possibly in cascade (an Interface should not receive more than one scale).
    The scaling can then be updated for each scenario (parameters) and values can depend also on the TIME-PERIOD.
    Interfaces not involved in the graph may be removed from it.

    ADDITIONAL METHOD (set_update_scales_graph):
    * Set values, parameters, and evaluate scale expression. Initial values in the middle of a scaling chain are rejected.
      Only allowed at the beginning of a scale chain. The "scale_origin" property is set for nodes starting one of these chains.
    * Solve scales

    :param scale_relations:
    :return: nx.DiGraph (with the "scales graph")
    """
    G = nx.DiGraph()  # Start a graph

    # Auxiliary sets:
    # - "origin" only (origin-destination) nodes are sources for scale.
    # - "destination" nodes can only appear once as destination
    origin_interfaces: Set[Factor] = set()
    destination_interfaces: Set[Factor] = set()
    # Empty State, just to be able to evaluate expressions and detect needed parameters
    state = State()

    for relation in scale_relations:
        value, ast, _, issues = evaluate_numeric_expression_with_parameters(
            relation.quantity, state)
        if value:
            ast = None

        assert (len(issues) == 0)

        # Add an edge per i2i scale relation
        G.add_edge(relation.origin,
                   relation.destination,
                   rel=relation,
                   ast=ast,
                   value=value)

        # Nodes appearing in scale are not to be removed
        # G[s.origin]["remove"] = False
        # G[s.destination]["remove"] = False

        # A node appearing more than once as a destination is a construction error
        if relation.destination in destination_interfaces:
            raise Exception(
                "An Interface should not appear in more than one Scale relation as destination"
            )

        origin_interfaces.add(relation.origin)
        destination_interfaces.add(relation.destination)

    # "Beginning" nodes are marked
    nx.set_node_attributes(G, False, "beginning")
    for i in origin_interfaces.difference(destination_interfaces):
        G.nodes[i]["beginning"] = True

    return G
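
The "beginning" marking reduces to a set difference between edge origins and destinations. A self-contained sketch of that step with plain networkx, using hypothetical string nodes in place of Factor objects:

import networkx as nx

G = nx.DiGraph()
G.add_edge("A", "B")  # A scales B
G.add_edge("B", "C")  # B scales C (a cascade)

origins = {u for u, _ in G.edges}
destinations = {v for _, v in G.edges}

nx.set_node_attributes(G, False, "beginning")
for n in origins.difference(destinations):
    G.nodes[n]["beginning"] = True

print([n for n in G.nodes if G.nodes[n]["beginning"]])  # ['A']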
Code example #4
File: json.py  Project: rdglpz/nis-backend
def export_model_to_json(state: State) -> str:

    # Get Metadata dictionary
    metadata = state.get("_metadata")
    metadata_string = None
    if metadata:
        # Change lists of 1 element to a simple value
        metadata = {k: v[0] if len(v) == 1 else v for k, v in metadata.items()}
        metadata_string = '"Metadata": ' + json.dumps(metadata,
                                                      cls=CustomEncoder)

    print(metadata_string)

    json_structure: JsonStructureType = OrderedDict({
        "Parameters": Parameter,
        "CodeHierarchies": Hierarchy,
        "Observers": Observer,
        "InterfaceTypes": FactorType,
        "InterfaceTypeConverts": FactorTypesRelationUnidirectionalLinearTransformObservation,
        "Processors": Processor,
        "Relationships": OrderedDict({
            "PartOf": ProcessorsRelationPartOfObservation,
            "Upscale": ProcessorsRelationUpscaleObservation,
            "IsA": ProcessorsRelationIsAObservation,
            "DirectedFlow": FactorsRelationDirectedFlowObservation
        })
    })

    # Get objects from state
    query = BasicQuery(state)
    objects = query.execute(values_of_nested_dictionary(json_structure),
                            filt="")

    json_string = create_json_string_from_objects(objects, json_structure)

    print(json_string)

    if metadata_string:
        json_string = f'{metadata_string}, {json_string}'

    return f'{{{json_string}}}'
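
The final JSON is assembled by string concatenation rather than by building a dict. A reduced, runnable sketch of that assembly (the "Parameters" fragment is a stand-in for the create_json_string_from_objects output):

import json

metadata_string = '"Metadata": ' + json.dumps({"CaseStudyName": "demo"})
json_string = '"Parameters": []'

combined = f'{{{metadata_string}, {json_string}}}'
print(json.loads(combined))  # {'Metadata': {'CaseStudyName': 'demo'}, 'Parameters': []}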
Code example #5
def get_observations_by_time(
    prd: PartialRetrievalDictionary
) -> Dict[str, List[Tuple[float, FactorQuantitativeObservation]]]:
    """
    Process All QQ observations (intensive or extensive):
    * Store in a compact way (then clear), by Time-period, by Interface, by Observer.
    * Convert to float or prepare AST
    * Store as value the result plus the QQ observation (in a tuple)

    :param prd:
    :param relative: True->a QQ observation relative to the value of another interface
    :return: another PartialRetrievalDictionary, the Observers and the Time Periods (indexed)
    """
    observations: Dict[str, List[Tuple[float, FactorQuantitativeObservation]]] = {}
    state = State()

    # Get all observations by time
    for observation in find_quantitative_observations(
            prd, processor_instances_only=True):

        # Try to evaluate the observation value
        value, ast, _, issues = evaluate_numeric_expression_with_parameters(
            observation.value, state)

        # Store: (Value, FactorQuantitativeObservation)
        time = observation.attributes["time"]
        if time not in observations:
            observations[time] = []

        observations[time].append((ifnull(value, ast), observation))

    # Check all time periods are consistent. All should be Year or Month, but not both.
    time_period_type = get_type_from_all_time_periods(list(observations.keys()))
    assert (time_period_type in ["Year", "Month"])

    # Remove generic period type and insert it into all specific periods. E.g. "Year" into "2010", "2011" and "2012"
    if time_period_type in observations:
        # Generic monthly ("Month") or annual ("Year") data
        periodic_observations = observations[time_period_type]
        del observations[time_period_type]

        for time in observations:
            observations[time] += periodic_observations

    return observations
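
The merge of a generic period ("Year"/"Month") into the specific periods can be illustrated with plain dictionaries (the strings are stand-ins for the (value, observation) tuples):

observations = {
    "2010": ["obs-a"],
    "2011": ["obs-b"],
    "Year": ["generic-annual-obs"],  # generic period type
}
periodic_observations = observations.pop("Year", [])
for time in observations:
    observations[time] += periodic_observations

print(observations)
# {'2010': ['obs-a', 'generic-annual-obs'], '2011': ['obs-b', 'generic-annual-obs']}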
Code example #6
def get_observations_OLD(prd: PartialRetrievalDictionary) \
        -> Tuple[PartialRetrievalDictionary, PartialRetrievalDictionary, Dict[str, int]]:
    """
    Process All QQ observations (intensive or extensive):
    * Store in a compact way (then clear), by Time-period, by Interface, by Observer.
    * Convert to float or prepare AST
    * Store as value the result plus the QQ observation (in a tuple)

    :param prd: PartialRetrievalDictionary with the objects of the case study
    :return: Two PartialRetrievalDictionary (absolute and relative observations) and the Time Periods (indexed)
    """
    observations_prd = PartialRetrievalDictionary()
    relative_observations_prd = PartialRetrievalDictionary()
    time_periods: Dict[str, int] = create_dictionary()  # Time periods and the associated IDX
    state = State()

    next_time_period_idx = 0
    for observation in find_quantitative_observations(
            prd, processor_instances_only=True):

        # Obtain time period index
        time = observation.attributes["time"]
        if time not in time_periods:
            time_periods[time] = next_time_period_idx
            next_time_period_idx += 1

        # Elaborate Key: Interface, Time, Observer
        key = dict(__i=observation.factor,
                   __t=time_periods[time],
                   __o=observation.observer)

        value, ast, _, issues = evaluate_numeric_expression_with_parameters(
            observation.value, state)
        if not value:
            value = ast

        # Store Key: (Value, FactorQuantitativeObservation)
        if observation.is_relative:
            relative_observations_prd.put(key, (value, observation))
        else:
            observations_prd.put(key, (value, observation))

    return observations_prd, relative_observations_prd, time_periods
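
The incremental time-period indexing used to build the keys is an insertion-ordered lookup table; a stand-alone sketch:

time_periods = {}
next_time_period_idx = 0
for time in ["2010", "2011", "2010", "2012"]:
    if time not in time_periods:
        time_periods[time] = next_time_period_idx
        next_time_period_idx += 1

print(time_periods)  # {'2010': 0, '2011': 1, '2012': 2}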
Code example #7
def convert_generator_to_native(generator_type, file_type: str, file):
    """
    Converts a generator
    Creates a generator parser, then it feeds the file type and the file
    The generator parser has to parse the file and to elaborate a native generator (JSON)

    :param generator_type:
    :param file_type:
    :param file:
    :return: Issues and output file
    """

    output = []
    if generator_type.lower() not in ["json", "native", "primitive"]:
        # Create commands generator from factory (from generator_type and file_type)
        state = State()
        commands_generator = commands_container_parser_factory(
            generator_type, file_type, file, state)
        # Loop over the IExecutableCommand instances
        for cmd, issues in commands_generator:
            # If there are syntax ERRORS, STOP!!!
            stop = False
            if issues and len(issues) > 0:
                for t in issues:
                    if t["type"] == 3:  # Error
                        stop = True
                        break

            output.append({
                "command": cmd._serialization_type,
                "label": cmd._serialization_label,
                "content": cmd.json_serialize(),
                "issues": issues
            })
            if stop:
                break

    return output
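
The stop-on-error scan over issues can be condensed with any(); a sketch assuming the same {"type": 3} convention for errors:

issues = [{"type": 2, "message": "warning"}, {"type": 3, "message": "syntax error"}]
stop = any(t["type"] == 3 for t in issues)  # 3 == Error
print(stop)  # True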
Code example #8
def persistable_to_executable_command(p_cmd: CommandsContainer, limit=1000):
    """
    A persistable command can be either a single command or a sequence of commands (like a spreadsheet). In the future
    it could even be a full script.

    Because the executable command is DIRECTLY executable, it is not possible to convert from persistable to a single
    executable command. But it is possible to obtain a list of executable commands, and this is the aim of this function

    The size of the list can be limited by the parameter "limit". "0" is for unlimited

    :param p_cmd: The CommandsContainer to convert
    :param limit: Maximum number of commands to return ("0" means unlimited)
    :return: A list of IExecutableCommand
    """
    # Create commands generator from factory (from generator_type and file_type)
    state = State()
    commands_generator = commands_container_parser_factory(
        p_cmd.generator_type, p_cmd.file_type, p_cmd.file, state)

    # Loop over the IExecutableCommand instances
    issues_aggreg = []
    outputs = []
    count = 0
    for cmd, issues in commands_generator:
        # If there are syntax ERRORS, STOP!!!
        stop = False
        if issues and len(issues) > 0:
            for t in issues:
                if t[0] == 3:  # Error
                    stop = True
        if stop:
            break

        issues_aggreg.extend(issues)

        outputs.append(cmd)

        count += 1
        if limit and count >= limit:  # A "limit" of 0 means unlimited
            break

    return outputs
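
The "0 means unlimited" limit convention (see the guard above) can be tested in isolation:

def take(generator, limit=0):
    out = []
    count = 0
    for item in generator:
        out.append(item)
        count += 1
        if limit and count >= limit:  # limit == 0 -> never break
            break
    return out

assert take(iter(range(10))) == list(range(10))     # unlimited
assert take(iter(range(10)), limit=3) == [0, 1, 2]  # capped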
Code example #9
def set_update_scales_graph(graph: nx.DiGraph,
                            params: Dict[str, Any],
                            beginning_values: Dict[Factor, Tuple[Any, FactorQuantitativeObservation]]):
    """
    For a scaling graph:
     - set both the parameters and the values of Interfaces beginning scale-chains
     - update the scale chains accordingly

    :param graph: Graph with all scale chains
    :param params: Parameters to apply to values in edges and nodes of the graph
    :param beginning_values: Expressions (plus "unit") for beginning nodes of the graph
    :return: Nothing (the graph is updated in-place)
    """

    # Set of all nodes.
    all_interfaces = set(graph.nodes)
    # Set of "scale beginning" nodes.
    beginning_interfaces = get_scale_beginning_interfaces(graph)
    # Set of "scale following" nodes
    following_interfaces = all_interfaces.difference(beginning_interfaces)

    # Set of nodes with value
    interfaces_with_value = set(beginning_values.keys())

    # Check that all beginning interfaces have been defined (have a value)
    mandatory_to_define_all_beginning_interfaces = False

    if mandatory_to_define_all_beginning_interfaces:
        interfaces_which_should_have_a_value = beginning_interfaces.difference(
            interfaces_with_value)
        if interfaces_which_should_have_a_value:
            s = ", ".join([
                i.processor.name + ":" + i.name
                for i in interfaces_which_should_have_a_value
            ])
            raise Exception(
                "Not all scale beginning Interfaces have been assigned a value: "
                + s)

    # "following" interfaces in scale-chains should not have a value
    interfaces_which_should_not_have_a_value = following_interfaces.intersection(
        interfaces_with_value)

    if interfaces_which_should_not_have_a_value:
        s = ", ".join([
            i.processor.name + ":" + i.name
            for i in interfaces_which_should_not_have_a_value
        ])
        raise Exception(
            "Interfaces in scale chains cannot have assigned values: " + s)

    # Now expressions. First, prepare "state"
    state = State()
    state.update(params)

    # Evaluate (AST) all expressions from the INTERSECTION
    defined_beginning_interfaces = beginning_interfaces.intersection(
        interfaces_with_value)
    for i in defined_beginning_interfaces:
        expression = beginning_values[i][0]
        unit = ureg(beginning_values[i][1].attributes["unit"])
        v, _, _, issues = evaluate_numeric_expression_with_parameters(
            expression, state)
        if not v:
            raise Exception(
                f"Could not evaluate expression '{expression}': {', '.join(issues)}"
            )
        else:
            graph.nodes[i]["value"] = v * unit

    # Evaluate all edges
    for u, v, data in graph.edges(data=True):
        ast = data["ast"]
        if ast:
            # Use a separate name ("val") so the loop variable "v" is not clobbered
            val, _, _, issues = evaluate_numeric_expression_with_parameters(ast, state)
            if not val:
                raise Exception(
                    f"Could not evaluate edge scale expression '{ast}' for edge "
                    f"({u.name}->{v.name}): {', '.join(issues)}")
            else:
                graph.edges[u, v]["value"] = val

    # Now, compute values in nodes
    def compute_scaled_nodes(nodes):
        for i in nodes:
            val = graph.nodes[i]["value"]
            tmp = []
            for suc in graph.successors(i):
                # TODO Consider unit conversions; or is the unit of the predecessor inherited?
                graph.nodes[suc]["value"] = val * graph.edges[i, suc]["value"]
                tmp.append(suc)
            compute_scaled_nodes(tmp)

    compute_scaled_nodes(defined_beginning_interfaces)
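
The cascading computation at the end is a recursive walk from the beginning nodes, multiplying each node value by the edge scale. A self-contained networkx sketch with hypothetical string nodes and plain float values (no units):

import networkx as nx

g = nx.DiGraph()
g.add_edge("A", "B", value=2.0)  # B = 2 * A
g.add_edge("B", "C", value=3.0)  # C = 3 * B
g.nodes["A"]["value"] = 10.0     # only the beginning node has a value

def compute_scaled_nodes(nodes):
    for n in nodes:
        following = []
        for suc in g.successors(n):
            g.nodes[suc]["value"] = g.nodes[n]["value"] * g.edges[n, suc]["value"]
            following.append(suc)
        compute_scaled_nodes(following)

compute_scaled_nodes(["A"])
print(g.nodes["B"]["value"], g.nodes["C"]["value"])  # 20.0 60.0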
Code example #10
    def open(self,
             session_factory,
             uuid_: str = None,
             recover_previous_state=True,
             cr_new: CreateNew = CreateNew.NO,
             allow_saving=True):
        """
        Open a work session

    +--------+--------+---------+-----------------------------------------------------------------------------------+
    | UUID   | cr_new | recover |        Behavior                                                                   |
    +--------+--------+---------+-----------------------------------------------------------------------------------+ 
    | !=None | True   | True    | Create new CS or version (branch) from "UUID", clone WS, recover State, append WS |
    | !=None | True   | False   | Create new CS or version (branch) from "UUID", Zero State, first WS               |
    | !=None | False  | True    | Recover State, append WS                                                          |
    | !=None | False  | False   | Zero State, append WS (overwrite type)                                            |
    | ==None | -      | -       | New CS and version, Zero State, first WS                                          |
    +--------+--------+---------+-----------------------------------------------------------------------------------+
    Use cases:
    * A new case study, from scratch. uuid_=None
    * A new case study, copied from another case study. uuid_=<something>, cr_new=CreateNew.CASE_STUDY, recover_previous_state=True
    * A new version of a case study
      - Copying previous version
      - Starting from scratch
    * Continue a case study version
      - But restart (from scratch)
    * Can be a Transient session

        :param uuid_: UUID of the case study or case study version. Can be None, for new case studies or for testing purposes.
        :param recover_previous_state: If an existing version is specified, it will recover its state after execution of all command_executors
        :param cr_new: If != CreateNew.NO, create either a case study or a new version. If == CreateNew.NO, append session to "uuid"
        :param allow_saving: If True, it will allow saving at the end (it will be optional). If False, trying to save will generate an Exception
        :return UUID of the case study version in use. If it is a new case study and it has not been saved, the value will be "None"
        """

        # TODO Just register for now. But in the future it should control that there is no other "allow_saving" ReproducibleSession opened
        # TODO for the same Case Study Version. So it implies modifying some state in CaseStudyVersion to have the UUID
        # TODO of the active ReproducibleSession, even if it is not in the database. Register also the date of "lock", so the
        # TODO lock can be removed in case of "hang" of the locker ReproducibleSession
        self._allow_saving = allow_saving
        self._sess_factory = session_factory
        session = self._sess_factory()
        if uuid_:
            uuid_ = str(uuid_)
            # Find UUID. Is it a Case Study or a Case Study version?
            # If it is the former, look for the active version.
            cs = session.query(CaseStudy).filter(
                CaseStudy.uuid == uuid_).first()
            if not cs:
                vs = session.query(CaseStudyVersion).filter(
                    CaseStudyVersion.uuid == uuid_).first()
                if not vs:
                    ss = session.query(CaseStudyVersionSession).filter(
                        CaseStudyVersionSession.uuid == uuid_).first()
                    if not ss:
                        raise Exception(
                            "Object '" + uuid_ +
                            "' not found, when opening a ReproducibleSession")
                    else:
                        vs = ss.version
                        cs = vs.case_study
                else:
                    cs = vs.case_study
            else:  # A case study, find the latest version (the version modified latest -by activity, newest ReproducibleSession-)
                max_date = None
                max_version = None
                for v in cs.versions:
                    for s in v.sessions:
                        if not max_date or s.open_instant > max_date:
                            max_date = s.open_instant
                            max_version = v
                vs = max_version
                cs = vs.case_study

            # List of active sessions
            # NOTE: instead of time ordering, the ID is used, assuming sessions with greater ID were created later
            lst = session.query(CaseStudyVersionSession). \
                filter(CaseStudyVersionSession.version_id == vs.id). \
                order_by(CaseStudyVersionSession.id). \
                all()
            idx = 0
            for i, ws in enumerate(lst):
                if ws.restarts:
                    idx = i
            lst = lst[idx:]  # Cut the list, keep only active sessions

            if cr_new != CreateNew.NO:  # Create either a case study or a case study version
                if cr_new == CreateNew.CASE_STUDY:
                    cs = copy.copy(cs)  # New Case Study: COPY CaseStudy
                else:
                    # New Case Study Version: LOAD CaseStudy (then version it)
                    force_load(cs)
                vs2 = copy.copy(vs)  # COPY CaseStudyVersion
                vs2.case_study = cs  # Assign case study to the new version
                if recover_previous_state:  # If the new version keeps previous state, copy it also
                    vs2.state = vs.state  # Copy state
                    vs2.state_version = vs.state_version
                    for ws in lst:  # COPY active ReproducibleSessions
                        ws2 = copy.copy(ws)
                        ws2.version = vs2
                        for c in ws.commands:  # COPY commands
                            c2 = copy.copy(c)
                            c2.session = ws2
                vs = vs2
            else:
                # Load into memory
                if len(lst) == 1:
                    ws = lst[0]
                    force_load(ws)
                force_load(vs)
                force_load(cs)

            if recover_previous_state:
                # Load state if it is persisted (if not, EXECUTE all commands: POTENTIALLY VERY SLOW)
                if vs.state:
                    # Deserialize
                    self._isess._state = deserialize_state(
                        vs.state, vs.state_version)
                else:
                    # Zero State, execute all commands in sequence
                    self._isess._state = State()
                    for ws in lst:
                        for c in ws.commands:
                            execute_command_container(self._isess._state, c)
                if cr_new == CreateNew.VERSION:  # TODO Check if this works in all possible circumstances (combine the parameters of the function)
                    recover_previous_state = False
            else:
                self._isess._state = State()

        else:  # New Case Study AND new Case Study Version
            cs = CaseStudy()
            vs = CaseStudyVersion()
            vs.creation_instant = datetime.datetime.utcnow()
            vs.case_study = cs

        # Detach Case Study and Case Study Version
        if cs in session:
            session.expunge(cs)
        if vs in session:
            session.expunge(vs)
        # Create the Case Study Version Session
        usr = session.query(User).filter(User.name == self._identity).first()
        if usr:
            force_load(usr)
        else:
            if allow_saving:
                raise Exception(
                    "A user is required to register which user is authoring a case study"
                )
        # TODO !!!!NEW CODE, ADDED TO SUPPORT NEEDED FUNCTIONALITY. NEEDS BETTER CODING!!!!
        restart = not recover_previous_state if uuid_ else True
        if not restart:
            self._session = ws
        else:
            self._session = CaseStudyVersionSession()
            self._session.version = vs
            self._session.who = usr
            self._session.restarts = True
        # If the Version existed, define "restarts" according to parameter "recover_previous_state"
        # ElseIf it is the first Session -> RESTARTS=True

        session.close()
        # session.expunge_all()
        self._sess_factory.remove()
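
The "keep only active sessions" step cuts the ordered session list at the last element whose "restarts" flag is set; a sketch with plain dicts standing in for CaseStudyVersionSession rows:

sessions = [
    {"id": 1, "restarts": True},
    {"id": 2, "restarts": False},
    {"id": 3, "restarts": True},   # last restart: everything before it is inactive
    {"id": 4, "restarts": False},
]
idx = 0
for i, ws in enumerate(sessions):
    if ws["restarts"]:
        idx = i
active = sessions[idx:]
print([s["id"] for s in active])  # [3, 4]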
Code example #11
    def reset_state(self):
        """ Restart state """
        self._state = State()
Code example #12
class InteractiveSession:
    """ 
    Main class for interaction with NIS
    The first thing would be to identify the user and create a GUID for the session which can be used by the web server
    to store and retrieve the interactive session state.
    
    It receives command_executors, modifying state accordingly
    If a reproducible session is opened, 
    """
    def __init__(self, session_factory):
        # Session factory with access to business logic database
        self._session_factory = session_factory

        # Interactive session ID
        self._guid = str(uuid.uuid4())

        # User identity, if given (can be an anonymous session)
        self._identity = None  # type: Identity
        self._state = State()  # To keep the state
        self._reproducible_session = None  # type: ReproducibleSession

    def reset_state(self):
        """ Restart state """
        self._state = State()
        # TODO self._recordable_session = None ??

    @property
    def state(self):
        return self._state

    def get_sf(self):
        return self._session_factory

    def set_sf(self, session_factory):
        self._session_factory = session_factory
        if self._reproducible_session:
            self._reproducible_session.set_sf(session_factory)

    def open_db_session(self):
        return self._session_factory()

    def close_db_session(self):
        self._session_factory.remove()

    def quit(self):
        """
        End interactive session
        :return: 
        """
        self.close_reproducible_session()
        self.close_db_session()

    # --------------------------------------------------------------------------------------------

    def identify(self, identity_information, testing=False):
        """
        Given credentials of some type -identity_information-, link an interactive session to an identity.
        The credentials can vary from an OAuth2 Token to user+password.
        Depending on the type of credentials, invoke one type of "identificator" or another
        An interactive session without identification is allowed to perform a subset of the available operations

        :param identity_information: Credentials (e.g., a dict with "user" or "token")
        :return: True if the identification was successful, False if not
        """
        # TODO Check the credentials
        if isinstance(identity_information, dict):
            if "user" in identity_information and testing:
                # Check if the user is in the User's table
                session = self._session_factory()
                src = session.query(User).filter(
                    User.name == identity_information["user"]).first()
                # Check if the dataset exists. "ETL" it if not
                # ds = session.query(Dataset).\
                #     filter(Dataset.code == dataset).\
                #     join(Dataset.database).join(Database.data_source).\
                #     filter(DataSource.name == src_name).first()
                force_load(src)
                session.close()
                self._session_factory.remove()
                if src:
                    self._identity = src.name
                    if self._state:
                        self._state.set("_identity", self._identity)

                return src is not None
            elif "token" in identity_information:
                # TODO Validate against some Authentication service
                pass

    def get_identity_id(self):
        return self._identity

    def unidentify(self):
        # TODO The un-identification cannot be done in the following circumstances: any?
        self._identity = None
        if self._state:
            self._state.set("_identity", self._identity)

    # --------------------------------------------------------------------------------------------
    # Reproducible sessions and commands INSIDE them
    # --------------------------------------------------------------------------------------------
    def open_reproducible_session(self,
                                  case_study_version_uuid: str,
                                  recover_previous_state=True,
                                  cr_new: CreateNew = CreateNew.NO,
                                  allow_saving=True):
        self._reproducible_session = ReproducibleSession(self)
        self._reproducible_session.open(self._session_factory,
                                        case_study_version_uuid,
                                        recover_previous_state, cr_new,
                                        allow_saving)

    def close_reproducible_session(self,
                                   issues=None,
                                   output=None,
                                   save=False,
                                   from_web_service=False,
                                   cs_uuid=None,
                                   cs_name=None):
        if self._reproducible_session:
            if save:
                # TODO Save issues AND (maybe) output
                self._reproducible_session.save(from_web_service,
                                                cs_uuid=cs_uuid,
                                                cs_name=cs_name)
            uuid_, v_uuid, cs_uuid = self._reproducible_session.close()
            self._reproducible_session = None
            return uuid_, v_uuid, cs_uuid
        else:
            return None, None, None

    def reproducible_session_opened(self):
        return self._reproducible_session is not None

    @property
    def reproducible_session(self):
        return self._reproducible_session

    # --------------------------------------------------------------

    def execute_executable_command(self, cmd: IExecutableCommand):
        return execute_command(self._state, cmd)

    def register_executable_command(self, cmd: IExecutableCommand):
        self._reproducible_session.register_executable_command(cmd)

    def register_andor_execute_command_generator1(self,
                                                  c: CommandsContainer,
                                                  register=True,
                                                  execute=False):
        """
        Creates a generator parser, then it feeds the file type and the file
        The generator parser has to parse the file and to generate command_executors as a Python generator

        :param generator_type:
        :param file_type:
        :param file:
        :param register: If True, register the command in the ReproducibleSession
        :param execute: If True, execute the command in the ReproducibleSession
        :return:
        """
        if not self._reproducible_session:
            raise Exception(
                "In order to execute a command generator, a work session is needed"
            )
        if not register and not execute:
            raise Exception(
                "At least one of the parameters 'register' and 'execute' must be True")

        if register:
            self._reproducible_session.register_persistable_command(c)

        if execute:
            c.execution_start = datetime.datetime.now()
            pass_case_study = self._reproducible_session._session.version.case_study is not None
            ret = self._reproducible_session.execute_command_generator(
                c, pass_case_study)
            c.execution_end = datetime.datetime.now()
            return ret
            # Or
            # return execute_command_container(self._state, c)
        else:
            return None

    def register_andor_execute_command_generator(self,
                                                 generator_type,
                                                 file_type: str,
                                                 file,
                                                 register=True,
                                                 execute=False):
        """
        Creates a generator parser, then it feeds the file type and the file
        The generator parser has to parse the file and to generate command_executors as a Python generator

        :param generator_type: 
        :param file_type: 
        :param file: 
        :param register: If True, register the command in the ReproducibleSession
        :param execute: If True, execute the command in the ReproducibleSession
        :return: 
        """

        return self.register_andor_execute_command_generator1(
            CommandsContainer.create(generator_type, file_type, file),
            register, execute)

    # --------------------------------------------------------------------------------------------

    def get_case_studies(self):
        """ Get a list of case studies READABLE by current identity (or public if anonymous) """
        pass

    def get_case_study_versions(self, case_study: str):
        # TODO Check that the current user has READ access to the case study
        pass

    def get_case_study_version(self, case_study_version: str):
        # TODO Check that the current user has READ access to the case study
        pass

    def get_case_study_version_variables(self, case_study_version: str):
        """ A tree of variables, by type: processors, flows """
        pass

    def get_case_study_version_variable(self, case_study_version: str,
                                        variable: str):
        pass

    def remove_case_study_version(self, case_study_version: str):
        pass

    def share_case_study(self, case_study: str, identities: List[str],
                         permission: str):
        pass

    def remove_case_study_share(self, case_study: str, identities: List[str],
                                permission: str):
        pass

    def get_case_study_permissions(self, case_study: str):
        pass

    def export_case_study_version(self, case_study_version: str):
        pass

    def import_case_study(self, file):
        pass
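
A hedged end-to-end usage sketch of the class above; the session_factory and the identity are deployment-specific, so treat the concrete arguments as assumptions:

# "session_factory" is assumed to be the SQLAlchemy session factory the backend was configured with
isess = InteractiveSession(session_factory)
isess.identify({"user": "test_user"}, testing=True)

# uuid None -> new Case Study and new Case Study Version (see code example #10)
isess.open_reproducible_session(case_study_version_uuid=None,
                                recover_previous_state=False,
                                cr_new=CreateNew.NO,
                                allow_saving=False)

# ... register and/or execute commands here ...

isess.close_reproducible_session(save=False)
isess.quit()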
Code example #13
def flow_graph_solver(global_parameters: List[Parameter],
                      problem_statement: ProblemStatement,
                      input_systems: Dict[str, Set[Processor]], state: State):
    """
    * First scales have to be solved
    * Second direct flows
    * Third conversions of flows

    Once flows have been found, Indicators have to be gathered.

    :param global_parameters: Parameters including the default value (if defined)
    :param problem_statement: ProblemStatement object, with scenarios (parameters changing the default)
                              and parameters for the solver
    :param state: State with everything
    :param input_systems: A dictionary of the different systems to be solved
    :return: Issue[]
    """

    glb_idx, _, _, _, _ = get_case_study_registry_objects(state)

    # Initialize dictionaries
    system_flows: Dict[str, Set[FactorsRelationDirectedFlowObservation]] = dict()
    system_scales: Dict[str, Set[FactorsRelationScaleObservation]] = dict()
    system_processor_hierarchies: Dict[str, nx.DiGraph] = dict()
    for s in input_systems:
        system_flows[s] = set()
        system_scales[s] = set()
        system_processor_hierarchies[s] = nx.DiGraph()

    # Handle Interface Types -Scale- relations
    relations_scale_it2it = glb_idx.get(
        FactorTypesRelationUnidirectionalLinearTransformObservation.partial_key())

    # Handle Interfaces -Flow- relations
    relations_flow = glb_idx.get(
        FactorsRelationDirectedFlowObservation.partial_key())

    for relation in relations_flow:  # type: FactorsRelationDirectedFlowObservation
        system_flows[relation.source_factor.processor.processor_system].add(
            relation)
        system_flows[relation.target_factor.processor.processor_system].add(
            relation)

    relations_scale = glb_idx.get(
        FactorsRelationScaleObservation.partial_key())

    for relation in relations_scale:  # type: FactorsRelationScaleObservation
        system_scales[relation.origin.processor.processor_system].add(relation)
        system_scales[relation.destination.processor.processor_system].add(
            relation)

    # Handle Processors -PartOf- relations
    relations_part_of = glb_idx.get(
        ProcessorsRelationPartOfObservation.partial_key())

    for relation in relations_part_of:  # type: ProcessorsRelationPartOfObservation
        if relation.parent_processor.instance_or_archetype.lower() == "instance":
            graph = system_processor_hierarchies[relation.parent_processor.processor_system]

            if not graph:
                graph = nx.DiGraph()
                system_processor_hierarchies[relation.parent_processor.processor_system] = graph

            graph.add_edge(
                get_processor_name(relation.child_processor, glb_idx),
                get_processor_name(relation.parent_processor, glb_idx))

    # Get all interface observations. Also resolve expressions without parameters. Cannot resolve expressions
    # depending only on global parameters because some of them can be overridden by scenario parameters.
    observations_by_time = get_observations_by_time(glb_idx)

    if len(observations_by_time) == 0:
        raise Exception("No observations have been found. The solver has nothing to solve.")

    # Split observations into relative and not relative
    observations_by_time_norelative, observations_by_time_relative = \
        split_observations_by_relativeness(observations_by_time)

    # Combine scenario parameters with the global parameters
    scenario_parameters: Dict[str, Dict[str, str]] = \
        {scenario_name: evaluate_parameters_for_scenario(global_parameters, scenario_params)
         for scenario_name, scenario_params in problem_statement.scenarios.items()}

    # SCALES --------------------------

    # Obtain the scale VALUES
    # scales_prd = get_scaled(scenarios=problem_statement.scenarios,
    #                         scenario_params=scenario_parameters,
    #                         relations_scale=glb_idx.get(FactorsRelationScaleObservation.partial_key()),
    #                         observations_by_time=observations_by_time_norelative)

    # FLOWS --------------------------
    for system in input_systems:
        # From Factors IN the context (LOCAL, ENVIRONMENT or OUTSIDE)
        # obtain a basic graph. Signal each Factor as LOCAL or EXTERNAL, and SOCIETY or ENVIRONMENT
        # basic_graph = prepare_interfaces_graph(systems[s][Factor])

        print(f"********************* SYSTEM: {system}")

        # Obtain a flow graph
        flow_graph = FlowGraph()
        part_of_graph = ComputationGraph()

        for relation in system_flows[system]:  # type: FactorsRelationDirectedFlowObservation
            flow_graph.add_edge(get_interface_name(relation.source_factor,
                                                   glb_idx),
                                get_interface_name(relation.target_factor,
                                                   glb_idx),
                                weight=relation.weight,
                                reverse_weight=None)

            assert (relation.source_factor.name == relation.target_factor.name)

            # We create another graph only with interfaces in processors with parents
            proc_hierarchy = system_processor_hierarchies[system]

            for interface in [relation.source_factor, relation.target_factor]:

                processor_name = get_processor_name(interface.processor,
                                                    glb_idx)
                interface_full_name = processor_name + ":" + interface.name

                # If "processor" is in the "PartOf" hierarchy AND the "processor:interface" is not being handled yet
                if processor_name in proc_hierarchy and interface_full_name not in part_of_graph.nodes:
                    # Insert into the Computation Graph a copy of the "PartOf" hierarchy of processors
                    # for the specific interface
                    new_edges = [(u + ":" + interface.name,
                                  v + ":" + interface.name)
                                 for u, v in weakly_connected_subgraph(
                                     proc_hierarchy, processor_name).edges]
                    part_of_graph.add_edges(new_edges, 1.0, None)

        comp_graph, issues = flow_graph.get_computation_graph()

        for relation in system_scales[
                system]:  # type: FactorsRelationScaleObservation
            comp_graph.add_edge(get_interface_name(relation.origin, glb_idx),
                                get_interface_name(relation.destination,
                                                   glb_idx),
                                weight=relation.quantity,
                                reverse_weight=None)

        for issue in issues:
            print(issue)

        print(f"****** NODES: {comp_graph.nodes}")

        # for component in nx.weakly_connected_components(part_of_graph.graph):
        #     nx.draw_kamada_kawai(part_of_graph.graph.subgraph(component), with_labels=True)
        #     plt.show()

        # TODO Expand flow graph with it2it transforms

        # Split flow graphs
        for scenario_idx, (scenario_name, scenario) in enumerate(
                problem_statement.scenarios.items()):

            print(f"********************* SCENARIO: {scenario_name}")

            scenario_state = State()
            scenario_state.update(scenario_parameters[scenario_name])

            for time_period, observations in observations_by_time_norelative.items():

                print(f"********************* TIME PERIOD: {time_period}")

                # {fact: val for fact, val in scales_prd.get(dict(__t=time_period, __s=scenario_idx))}
                scales = {}

                # Final values are taken from "scales" or from "observations" that need to be computed
                graph_params = {}
                for expression, obs in observations:
                    interface_name = get_interface_name(obs.factor, glb_idx)
                    if interface_name not in comp_graph.nodes:
                        print(
                            f"WARNING: observation at interface '{interface_name}' is not taken into account."
                        )
                    else:
                        if scales.get(obs.factor):
                            graph_params[interface_name] = scales[obs.factor]
                        else:
                            value, ast, _, issues = evaluate_numeric_expression_with_parameters(
                                expression, scenario_state)
                            if not value:
                                raise Exception(
                                    f"Cannot evaluate expression '{expression}' for observation at interface '{interface_name}'"
                                )

                            graph_params[interface_name] = value

                # ----------------------------------------------------

                compute_nodes = [
                    n for n in comp_graph.nodes if not graph_params.get(n)
                ]

                # Compute the missing information with the computation graph
                if len(compute_nodes) > 0:

                    print(f"****** UNKNOWN NODES: {compute_nodes}")
                    print(f"****** PARAMS: {graph_params}")

                    conflicts = comp_graph.compute_param_conflicts(
                        set(graph_params.keys()))

                    for s, (param, values) in enumerate(conflicts.items()):
                        print(f"Conflict {s + 1}: {param} -> {values}")

                    combinations = ComputationGraph.compute_param_combinations(
                        conflicts)

                    for s, combination in enumerate(combinations):
                        print(f"Combination {s}: {combination}")

                        filtered_params = {
                            k: v
                            for k, v in graph_params.items()
                            if k in combination
                        }
                        results, _ = comp_graph.compute_values(
                            compute_nodes, filtered_params)

                        results_with_values = {
                            k: v
                            for k, v in results.items() if v
                        }
                        print(f'  results_with_values={results_with_values}')

                        # TODO: work with "part_of_graph"
                        #  - Params: graph_params + results
                        #  - Compute conflicts, combinations
                        #  - For each combination "compute_values"
                else:
                    print("There are no nodes with unknown values. Nothing to solve.")

                # TODO Overwrite "obs" with "scales" results
                # TODO Put observations into the flow-graph

                # TODO Put processors into scale (intensive to extensive conversion)
                # scale_unit_processors(flow_graph, params, relative_observations_prd)

                # for sub_fg in nx.weakly_connected_component_subgraphs(flow_graph):
                # TODO Elaborate information flow graph
                #      Cycles allowed?
                # ifg = get_information_flow_graph(sub_fg)
                # TODO Solve information flow graph. From all possible combinations:
                #  bottom-up if top-down USE
                #  bottom-up if top-down DO NOT USE
                #  top-down  if bottom-up USE
                #  top-down  if bottom-up DO NOT USE
                # solve_flow_graph(sub_fg, ifg)  # Each value: Interface, Scenario, Time, Given/Computed -> VALUE (or UNDEFINED)
                # TODO Put results back

        # TODO INDICATORS --- (INSIDE FLOWS)

    return []
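
The weakly_connected_subgraph helper used when copying the "PartOf" hierarchy per interface is not shown in this listing; its effect can be approximated with plain networkx:

import networkx as nx

proc_hierarchy = nx.DiGraph([("Child1", "Parent"), ("Child2", "Parent"), ("X", "Y")])

# Weakly connected component containing "Parent", taken as a subgraph
component = nx.node_connected_component(proc_hierarchy.to_undirected(), "Parent")
sub = proc_hierarchy.subgraph(component)
print(sorted(sub.edges))  # [('Child1', 'Parent'), ('Child2', 'Parent')]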
Code example #14
        "(Param * 3 >= 0.3) AND (Param2 * 2 <= 0.345)", "cos(Param*3.1415)",
        "'Hola' + Param1"
    ]
    for e in examples:
        try:
            ast = string_to_ast(arith_boolean_expression, e)
            print(ast)
        except Exception:
            print("Incorrect")

    s = "c1 + c30 - c2 - 10"
    res = string_to_ast(hierarchy_expression, s)
    s = "ds.col"
    res = string_to_ast(h_name, s)

    s = State()
    examples = [
        "5-2017", "2017-5", "2017/5", "2017-05 - 2018-01", "2017",
        "5-2017 - 2018-1", "2017-2018", "Year", "Month"
    ]
    for example in examples:
        print(example)
        res = string_to_ast(time_expression, example)
        print(res)
        print(f'Is year = {is_year(example)}')
        print(f'Is month = {is_month(example)}')
        print("-------------------")

    s.set("HH", DottedDict({"Power": {"p": 34.5, "Price": 2.3}}))
    s.set("EN", DottedDict({"Power": {"Price": 1.5}}))
    s.set("HH", DottedDict({"Power": 25}), "ns2")
Code example #15
def flow_graph_solver(global_parameters: List[Parameter],
                      problem_statement: ProblemStatement,
                      input_systems: Dict[str, Set[Processor]], state: State):
    """
    * First scales have to be solved
    * Second direct flows
    * Third conversions of flows

    Once flows have been found, Indicators have to be gathered.

    :param global_parameters: Parameters including the default value (if defined)
    :param problem_statement: ProblemStatement object, with scenarios (parameters changing the default)
                              and parameters for the solver
    :param state: State with everything
    :param input_systems: A dictionary of the different systems to be solved
    :return: Issue[]
    """
    class Edge(NamedTuple):
        src: Factor
        dst: Factor
        weight: Optional[str]

    def add_edges(edges: List[Edge]):
        for src, dst, weight in edges:
            src_name = get_interface_name(src, glb_idx)
            dst_name = get_interface_name(dst, glb_idx)
            if "Archetype" in [
                    src.processor.instance_or_archetype,
                    dst.processor.instance_or_archetype
            ]:
                print(
                    f"WARNING: excluding relation from '{src_name}' to '{dst_name}' because of Archetype processor"
                )
            else:
                relations.add_edge(src_name, dst_name, weight=weight)

    glb_idx, _, _, _, _ = get_case_study_registry_objects(state)

    # Get all interface observations. Also resolve expressions without parameters. Cannot resolve expressions
    # depending only on global parameters because some of them can be overridden by scenario parameters.
    time_observations_absolute, time_observations_relative = get_observations_by_time(
        glb_idx)

    if len(time_observations_absolute) == 0:
        raise Exception("No absolute observations have been found. The solver has nothing to solve.")

    relations = nx.DiGraph()

    # Add Interfaces -Flow- relations (time independent)
    add_edges([
        Edge(r.source_factor, r.target_factor, r.weight) for r in glb_idx.get(
            FactorsRelationDirectedFlowObservation.partial_key())
    ])

    # Add Processors -Scale- relations (time independent)
    add_edges([
        Edge(r.origin, r.destination, r.quantity)
        for r in glb_idx.get(FactorsRelationScaleObservation.partial_key())
    ])

    # TODO Expand flow graph with it2it transforms
    # relations_scale_it2it = glb_idx.get(FactorTypesRelationUnidirectionalLinearTransformObservation.partial_key())

    # First pass to resolve weight expressions: only expressions without parameters can be solved
    for _, _, data in relations.edges(data=True):
        expression = data["weight"]
        if expression:
            value, ast, _, _ = evaluate_numeric_expression_with_parameters(
                expression, state)
            data["weight"] = ifnull(value, ast)

    for scenario_idx, (scenario_name, scenario_params) in enumerate(
            problem_statement.scenarios.items()):

        print(f"********************* SCENARIO: {scenario_name}")

        scenario_state = State()
        scenario_combined_params = evaluate_parameters_for_scenario(
            global_parameters, scenario_params)
        scenario_state.update(scenario_combined_params)

        for time_period, observations in time_observations_absolute.items():

            print(f"********************* TIME PERIOD: {time_period}")

            # Final values are taken from "observations" that need to computed
            graph_params = {}

            # Second and last pass to resolve observation expressions with parameters
            for expression, obs in observations:
                interface_name = get_interface_name(obs.factor, glb_idx)
                if interface_name not in relations.nodes:
                    print(
                        f"WARNING: observation at interface '{interface_name}' is not taken into account."
                    )
                else:
                    value, ast, _, issues = evaluate_numeric_expression_with_parameters(
                        expression, scenario_state)
                    if not value:
                        raise Exception(
                            f"Cannot evaluate expression '{expression}' for observation at "
                            f"interface '{interface_name}'. Issues: {', '.join(issues)}"
                        )
                    graph_params[interface_name] = value

            assert (graph_params is not None)

            # Add Processors internal -RelativeTo- relations (time dependent)
            # Transform relative observations into graph edges
            for expression, obs in time_observations_relative[time_period]:
                relations.add_edge(get_interface_name(obs.relative_factor,
                                                      glb_idx),
                                   get_interface_name(obs.factor, glb_idx),
                                   weight=expression)

            # Second and last pass to resolve weight expressions: expressions with parameters can be solved
            for u, v, data in relations.edges(data=True):
                expression = data["weight"]
                if expression:
                    value, ast, _, issues = evaluate_numeric_expression_with_parameters(
                        expression, scenario_state)
                    if not value:
                        raise Exception(
                            f"Cannot evaluate expression '{expression}' for weight "
                            f"from interface '{u}' to interface '{v}'. Issues: {', '.join(issues)}"
                        )
                    data["weight"] = value

            # ----------------------------------------------------

            # Debug aid: draw the relations graph for one specific time period
            if time_period == '2008':
                for component in nx.weakly_connected_components(relations):
                    nx.draw_kamada_kawai(relations.subgraph(component),
                                         with_labels=True)
                    plt.show()

            flow_graph = FlowGraph(relations)
            comp_graph, issues = flow_graph.get_computation_graph()

            for issue in issues:
                print(issue)

            print(f"****** NODES: {comp_graph.nodes}")

            # ----------------------------------------------------

            # Obtain nodes without a value
            compute_nodes = [
                n for n in comp_graph.nodes if not graph_params.get(n)
            ]

            # Compute the missing information with the computation graph
            if len(compute_nodes) == 0:
                print("All nodes have a value. Nothing to solve.")
                return []

            print(f"****** UNKNOWN NODES: {compute_nodes}")
            print(f"****** PARAMS: {graph_params}")

            conflicts = comp_graph.compute_param_conflicts(
                set(graph_params.keys()))

            for s, (param, values) in enumerate(conflicts.items()):
                print(f"Conflict {s + 1}: {param} -> {values}")

            combinations = ComputationGraph.compute_param_combinations(
                conflicts)

            for s, combination in enumerate(combinations):
                print(f"Combination {s}: {combination}")

                filtered_params = {
                    k: v
                    for k, v in graph_params.items() if k in combination
                }
                results, _ = comp_graph.compute_values(compute_nodes,
                                                       filtered_params)

                results_with_values = {k: v for k, v in results.items() if v}
                print(f'  results_with_values={results_with_values}')

                # TODO: work with "part_of_graph"
                #  - Params: graph_params + results
                #  - Compute conflicts, combinations
                #  - For each combination "compute_values"

        # TODO INDICATORS

    # ----------------------------------------------------
    # ACCOUNTING PER SYSTEM

    for system in input_systems:

        # Handle Processors -PartOf- relations
        proc_hierarchy = nx.DiGraph()
        for relation in glb_idx.get(
                ProcessorsRelationPartOfObservation.partial_key(
                )):  # type: ProcessorsRelationPartOfObservation
            if relation.parent_processor.instance_or_archetype == "Instance":
                proc_hierarchy.add_edge(
                    get_processor_name(relation.child_processor, glb_idx),
                    get_processor_name(relation.parent_processor, glb_idx))

        part_of_graph = ComputationGraph()

        # for relation in system_flows[system]:  # type: FactorsRelationDirectedFlowObservation
        #
        #     # We create another graph only with interfaces in processors with parents
        #     for interface in [relation.source_factor, relation.target_factor]:
        #
        #         processor_name = get_processor_name(interface.processor, glb_idx)
        #         interface_full_name = processor_name+":"+interface.name
        #
        #         # If "processor" is in the "PartOf" hierarchy AND the "processor:interface" is not being handled yet
        #         if processor_name in proc_hierarchy and interface_full_name not in part_of_graph.nodes:
        #             # Insert into the Computation Graph a copy of the "PartOf" hierarchy of processors
        #             # for the specific interface
        #             new_edges = [(u+":"+interface.name, v+":"+interface.name)
        #                          for u, v in weakly_connected_subgraph(proc_hierarchy, processor_name).edges]
        #             part_of_graph.add_edges(new_edges, 1.0, None)

        # for component in nx.weakly_connected_components(part_of_graph.graph):
        #     nx.draw_kamada_kawai(part_of_graph.graph.subgraph(component), with_labels=True)
        #     plt.show()

    return []
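
The accounting step above builds the processor hierarchy as a directed graph and (in the commented-out block) inspects its weakly connected components. The following is a minimal, self-contained sketch of that pattern, using plain networkx and hypothetical processor names not taken from the source:

import networkx as nx

# Hypothetical "child -> parent" PartOf relations (illustrative names only)
part_of_relations = [
    ("Society.Households", "Society"),
    ("Society.Industry", "Society"),
    ("Environment.Water", "Environment"),
]

proc_hierarchy = nx.DiGraph()
for child, parent in part_of_relations:
    proc_hierarchy.add_edge(child, parent)  # Edge direction: child -> parent

# Each weakly connected component is an independent hierarchy
for component in nx.weakly_connected_components(proc_hierarchy):
    print(sorted(component))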
Code example #16
def evaluate_parameters_for_scenario(base_params: List[Parameter],
                                     scenario_params: Dict[str, str]):
    """
    Obtain a dictionary (parameter -> value), where parameter is a string and value is a literal: number, boolean,
    category or string.

    Start from the base parameters then overwrite with the values in the current scenario.

    Parameters may depend on other parameters, so this has to be considered before evaluation.
    No cycles are allowed in the dependencies, i.e., if P2 depends on P1, P1 cannot depend on P2.
    To analyze this, first expressions are evaluated, extracting which parameters appear in each of them. Then a graph
    is elaborated based on this information. Finally, an algorithm to find cycles is executed.

    :param base_params:
    :param scenario_params:
    :return:
    """
    # Create dictionary without evaluation
    result_params = create_dictionary()
    result_params.update(
        {p.name: p.default_value
         for p in base_params if p.default_value is not None})

    # Overwrite with scenario expressions or constants
    result_params.update(scenario_params)

    state = State()
    known_params = create_dictionary()
    unknown_params = create_dictionary()

    # Now, evaluate ALL expressions
    for param, expression in result_params.items():
        value, ast, params, issues = evaluate_numeric_expression_with_parameters(
            expression, state)
        if value is None:  # Not a constant: store the parameters on which it depends
            if case_sensitive:
                unknown_params[param] = (ast, set(params))
            else:  # Case-insensitive names: normalize to lowercase
                unknown_params[param] = (ast, set([p.lower() for p in params]))
        else:  # It is a constant, store it
            result_params[param] = value  # Overwrite
            known_params[param] = value

    cycles = get_circular_dependencies(unknown_params)
    if len(cycles) > 0:
        raise Exception(
            f"Parameters cannot have circular dependencies. {len(cycles)} cycles were detected: "
            f"{':: '.join(cycles)}")

    # Initialize state with known parameters
    state.update(known_params)

    # Loop until no new parameters can be evaluated
    previous_len_unknown_params = len(unknown_params) + 1
    while len(unknown_params) < previous_len_unknown_params:
        previous_len_unknown_params = len(unknown_params)

        for param in list(
                unknown_params
        ):  # A list(...) is used because the dictionary can be modified inside
            ast, params = unknown_params[param]
            if params.issubset(known_params):
                value, _, _, issues = evaluate_numeric_expression_with_parameters(
                    ast, state)
                if value is None:
                    raise Exception(
                        f"It should be possible to evaluate the parameter '{param}'. "
                        f"Issues: {', '.join(issues)}")
                else:
                    del unknown_params[param]
                    result_params[param] = value
                    state.set(param, value)

    if len(unknown_params) > 0:
        raise Exception(
            f"Could not evaluate the following parameters: {', '.join(unknown_params)}"
        )

    return result_params
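
The cycle check mentioned in the docstring (delegated to "get_circular_dependencies" above) can be pictured with a small dependency graph. The following is a sketch of the idea only, using networkx and made-up parameter names; it is not the helper's actual implementation:

import networkx as nx

# param -> set of parameters its expression mentions (illustrative values)
dependencies = {
    "P2": {"P1"},        # e.g. P2 = P1 * 2
    "P3": {"P1", "P2"},  # e.g. P3 = P1 + P2
    # Adding "P1": {"P3"} would close a cycle and must be rejected
}

g = nx.DiGraph()
for param, deps in dependencies.items():
    for dep in deps:
        g.add_edge(dep, param)  # Edge "dep -> param": param depends on dep

cycles = list(nx.simple_cycles(g))
if cycles:
    raise Exception(f"Parameters cannot have circular dependencies: {cycles}")

# A valid evaluation order follows a topological sort of the dependency graph
print(list(nx.topological_sort(g)))  # e.g. ['P1', 'P2', 'P3']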
Code example #17
def ast_evaluator(exp: Dict,
                  state: State,
                  obj,
                  issue_lst,
                  evaluation_type="numeric"):
    """
    Numerically evaluate the result of the parse of "expression" rule (not valid for the other "expression" rules)

    :param exp: Dictionary representing the AST (output of "string_to_ast" function)
    :param state: "State" used to obtain variables/objects
    :param obj: An object used when evaluating hierarchical variables. simple names, functions and datasets are considered members of this object
    :param issue_lst: List in which issues have to be annotated
    :param evaluation_type: "numeric" for full evaluation, "static" to return True if the expression can be evaluated
            (explicitly mentioned variables are defined previously)
    :return: value (scalar EXCEPT for named parameters, which return a tuple "parameter name - parameter value"), list of unresolved variables
    """
    val = None
    unresolved_vars = set()
    if "type" in exp:
        t = exp["type"]
        if t in ("int", "float", "str", "boolean"):
            if evaluation_type == "numeric":
                return exp["value"], unresolved_vars
            elif evaluation_type == "static":
                return unresolved_vars
        elif t == "named_parameter":
            # This one returns a tuple (parameter name, parameter value, unresolved variables)
            v, tmp = ast_evaluator(exp["value"], state, obj, issue_lst,
                                   evaluation_type)
            unresolved_vars.update(tmp)
            return exp["param"], v, unresolved_vars
        elif t == "key_value_list":
            d = create_dictionary()
            for k, v in exp["parts"].items():
                d[k], tmp = ast_evaluator(v, state, obj, issue_lst,
                                          evaluation_type)
                unresolved_vars.update(tmp)
            return d, unresolved_vars
        elif t == "dataset":
            # Function parameters and Slice parameters
            func_params = [
                ast_evaluator(p, state, obj, issue_lst, evaluation_type)
                for p in exp["func_params"]
            ]
            slice_params = [
                ast_evaluator(p, state, obj, issue_lst, evaluation_type)
                for p in exp["slice_params"]
            ]

            if evaluation_type == "numeric":
                # Find dataset named "exp["name"]"
                if obj is None:
                    # Global dataset
                    ds = state.get(exp["name"], exp["ns"])
                    if not ds:
                        issue_lst.append(
                            (3,
                             "Global dataset '" + exp["name"] + "' not found"))
                else:
                    # Dataset inside "obj"
                    try:
                        ds = getattr(obj, exp["name"])
                    except AttributeError:
                        ds = None
                    if not ds:
                        issue_lst.append(
                            (3, "Dataset '" + exp["name"] + "' local to " +
                             str(obj) + " not found"))

                if ds and isinstance(ds, ExternalDataset):
                    return ds.get_data(None, slice_params, None, None,
                                       func_params), unresolved_vars
                else:
                    return None, unresolved_vars
            elif evaluation_type == "static":
                # Find dataset named "exp["name"]"
                if obj is None:
                    # Global dataset
                    ds = state.get(exp["name"], exp["ns"])
                    if not ds:
                        issue_lst.append(
                            (3,
                             "Global dataset '" + exp["name"] + "' not found"))
                    else:
                        ds = True
                else:
                    ds = True  # We cannot be sure it will be found, but do not break the evaluation
                # True if the dataset is found and all the parameters are True
                return bool(ds) and all(func_params) and all(
                    slice_params), unresolved_vars
        elif t == "function":  # Call function
            # First, obtain the Parameters
            args = []
            kwargs = {}
            can_resolve = True
            for p in [
                    ast_evaluator(p, state, obj, issue_lst, evaluation_type)
                    for p in exp["params"]
            ]:
                if len(p) == 3:
                    kwargs[p[0]] = p[1]
                    tmp = p[2]
                else:
                    args.append(p[0])
                    tmp = p[1]
                unresolved_vars.update(tmp)
                if len(tmp) > 0:
                    can_resolve = False

            if evaluation_type == "numeric":
                if obj is None:
                    # Check if it can be resolved (all variables specified)
                    # Check if global function exists, then call it. There are no function namespaces (at least for now)
                    if can_resolve and exp["name"] in global_functions:
                        _f = global_functions[exp["name"]]
                        mod_name, func_name = _f["full_name"].rsplit('.', 1)
                        mod = importlib.import_module(mod_name)
                        func = getattr(mod, func_name)
                        if _f["kwargs"]:
                            kwargs.update(_f["kwargs"])
                        obj = func(*args, **kwargs)
                else:
                    # Call local function (a "method")
                    try:
                        obj = getattr(obj, exp["name"])
                        obj = obj(*args, **kwargs)
                    except (AttributeError, TypeError):
                        obj = None
                return obj, unresolved_vars
            elif evaluation_type == "static":
                if obj is None:
                    # Check if global function exists, then call it. There are no function namespaces (at least for now)
                    if exp["name"] in global_functions:
                        _f = global_functions[exp["name"]]
                        mod_name, func_name = _f["full_name"].rsplit('.', 1)
                        mod = importlib.import_module(mod_name)
                        func = getattr(mod, func_name)
                        # True if everything is True: function defined and all parameters are True
                        obj = func and all(args) and all(kwargs.values())
                else:
                    # Call local function (a "method")
                    obj = True
                return obj, unresolved_vars
        elif t == "h_var":
            # Evaluate in sequence
            obj = None
            _namespace = exp.get("ns", None)
            for o in exp["parts"]:
                if isinstance(o, str):
                    # Simple name
                    if obj is None:
                        obj = state.get(o, _namespace)
                        if obj is None:  # "not obj" would misreport a value of 0 or False
                            issue_lst.append(
                                (3, "'" + o +
                                 "' is not globally declared in namespace '" +
                                 (_namespace if _namespace else "default") +
                                 "'"))
                            if _namespace:
                                unresolved_vars.add(_namespace + "::" + o)
                            else:
                                unresolved_vars.add(o)
                    else:
                        if isinstance(obj, ExternalDataset):
                            # Check if "o" is column (measure) or dimension
                            if o in obj.get_columns(
                            ) or o in obj.get_dimensions():
                                obj = obj.get_data(o, None)
                            else:
                                issue_lst.append((
                                    3, "'" + o +
                                    "' is not a measure or dimension of the dataset."
                                ))
                        else:
                            try:
                                obj = getattr(obj, o)
                            except AttributeError:
                                issue_lst.append(
                                    (3, "'" + o +
                                     "' is not an attribute of the object."))
                else:
                    # Dictionary: function call or dataset access
                    if obj is None:
                        o["ns"] = _namespace
                    obj, tmp = ast_evaluator(o, state, obj, issue_lst,
                                             evaluation_type)
                    unresolved_vars.update(tmp)
            if obj is None or isinstance(obj, (str, int, float, bool)):
                return obj, unresolved_vars
            # TODO elif isinstance(obj, ...) depending on core object types, invoke a default method, or
            #  issue ERROR if it is not possible to cast to something simple
            else:
                return obj, unresolved_vars
        elif t == "condition":  # Evaluate IF part to a Boolean. If True, return the evaluation of the THEN part; if False, return None
            if_result, tmp = ast_evaluator(exp["if"], state, obj, issue_lst,
                                           evaluation_type)
            unresolved_vars.update(tmp)
            if len(tmp) == 0:
                if if_result:
                    then_result, tmp = ast_evaluator(exp["then"], state, obj,
                                                     issue_lst,
                                                     evaluation_type)
                    unresolved_vars.update(tmp)
                    if len(tmp) > 0:
                        then_result = None
                    return then_result, unresolved_vars
                else:  # IF part is False: the condition yields no value
                    return None, unresolved_vars
            else:
                return None, unresolved_vars
        elif t == "conditions":
            for c in exp["parts"]:
                cond_result, tmp = ast_evaluator(c, state, obj, issue_lst,
                                                 evaluation_type)
                unresolved_vars.update(tmp)
                if len(tmp) == 0:
                    if cond_result:
                        return cond_result, unresolved_vars
            return None, unresolved_vars
        elif t == "reference":
            return "[" + exp[
                "ref_id"] + "]", unresolved_vars  # TODO Return a special type
        elif t in ("u+", "u-", "multipliers", "adders", "comparison", "not",
                   "and", "or"):  # Arithmetic and Boolean
            # Evaluate recursively the left and right operands
            if t in ("u+", "u-"):
                if evaluation_type == "numeric":
                    current = 0
                else:
                    current = True
            else:
                current, tmp1 = ast_evaluator(exp["terms"][0], state, obj,
                                              issue_lst, evaluation_type)
                unresolved_vars.update(tmp1)

            for i, e in enumerate(exp["terms"][1:]):
                following, tmp2 = ast_evaluator(e, state, obj, issue_lst,
                                                evaluation_type)
                unresolved_vars.update(tmp2)

                if len(tmp1) == 0 and len(tmp2) == 0:
                    if evaluation_type == "numeric":
                        # Type casting for primitive types
                        # TODO For Object types, apply default conversion. If both sides are Object, assume number
                        if (isinstance(current, (int, float)) and isinstance(following, (int, float))) or \
                                (isinstance(current, bool) and isinstance(following, bool)) or \
                                (isinstance(current, str) and isinstance(following, str)):
                            pass  # Do nothing
                        else:  # In other cases, CAST to the type of the left operand. This may raise an Exception
                            following = type(current)(following)

                        op = exp["ops"][i].lower()
                        if op in ("+", "-", "u+", "u-"):
                            if current is None:
                                current = 0
                            if following is None:
                                following = 0
                            if op in ("-", "u-"):
                                following = -following

                            current += following
                        elif op in ("*", "/", "//", "%"):
                            if following is None:
                                following = 1
                            if current is None:
                                current = 1
                            if op == "*":
                                current *= following
                            elif op == "/":
                                current /= following
                            elif op == "//":
                                current //= following
                            elif op == "%":
                                current %= following
                        elif op == "not":
                            current = not bool(following)
                        elif op == "and":
                            current = current and following
                        elif op == "or":
                            current = current or following
                        else:  # Comparators
                            fn = opMap[op]
                            current = fn(current, following)
                    elif evaluation_type == "static":
                        current = current and following
                else:
                    current = None  # Could not evaluate because there are missing variables

            return current, unresolved_vars
        else:
            issue_lst.append((3, "'type' = " + t + " not supported."))
    else:
        issue_lst.append((3, "'type' not present in " + str(exp)))

    return val, unresolved_vars
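
To make the expected input concrete, here is a hypothetical AST for the expression "p1 + 2", following the node shapes the evaluator inspects above ("adders" with "terms" and "ops", "h_var" with "parts", literals with "value"); the exact output of "string_to_ast" may differ:

expr_ast = {
    "type": "adders",
    "terms": [
        {"type": "h_var", "parts": ["p1"]},  # Variable, resolved through State
        {"type": "int", "value": 2},         # Literal
    ],
    "ops": ["+"],
}

issues = []
# With "p1" set in the State, the call would return (value, set()):
#   value, unresolved = ast_evaluator(expr_ast, state, None, issues)
# With "p1" undefined, value is None and unresolved == {"p1"}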