Code example #1
File: generator.py Project: e7ChatsApp/inmanta-core
    def execute(self, requires: Dict[object, object], instance: Resolver,
                queue: QueueScheduler) -> object:
        """
        Evaluate this statement
        """
        LOGGER.log(LOG_LEVEL_TRACE,
                   "executing subconstructor for %s implement %s", self.type,
                   self.implements.location)
        condition = self.implements.constraint.execute(requires, instance,
                                                       queue)
        try:
            inmanta_type.Bool().validate(condition)
        except RuntimeException as e:
            e.set_statement(self.implements)
            e.msg = (
                "Invalid value `%s`: the condition for a conditional implementation can only be a boolean expression"
                % condition)
            raise e
        if not condition:
            return None

        myqueue = queue.for_tracker(ImplementsTracker(self, instance))

        implementations = self.implements.implementations

        for impl in implementations:
            if instance.add_implementation(impl):
                # generate a subscope/namespace for each loop
                xc = ExecutionContext(
                    impl.statements,
                    instance.for_namespace(impl.statements.namespace))
                xc.emit(myqueue)

        return None
Code example #2
File: test_slots.py Project: e7ChatsApp/inmanta-core
def test_slots_rt():
    ns = Namespace("root", None)
    rs = Resolver(ns)
    e = Entity("xx", ns)
    qs = QueueScheduler(None, [], [], None, set())
    r = RelationAttribute(e, None, "xx", Location("", 1))
    i = Instance(e, rs, qs)

    assert_slotted(ResultVariable())
    assert_slotted(AttributeVariable(None, None))
    assert_slotted(Promise(None, None))
    assert_slotted(ListVariable(r, i, qs))
    assert_slotted(OptionVariable(r, i, qs))

    assert_slotted(qs)
    assert_slotted(DelegateQueueScheduler(qs, None))

    assert_slotted(Waiter(qs))

    assert_slotted(
        ExecutionUnit(qs, r, ResultVariable(), {}, Literal(""), None))
    assert_slotted(HangUnit(qs, r, {}, None, Resumer()))
    assert_slotted(RawUnit(qs, r, {}, Resumer()))

    assert_slotted(FunctionUnit(qs, rs, ResultVariable(), {}, None))

    assert_slotted(i)
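
The assert_slotted helper used throughout this test is not part of the listing. A minimal sketch of what such a check could do, assuming it only verifies that instances carry no per-instance __dict__ (an assumption, not the project's actual helper), is:

def assert_slotted(obj: object) -> None:
    # A class whose whole hierarchy defines __slots__ gives its instances no
    # __dict__, so finding one means the type is not fully slotted.
    assert not hasattr(obj, "__dict__"), "%s is not fully slotted" % type(obj).__name__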
Code example #3
 def requires_emit(self, resolver: Resolver,
                   queue: QueueScheduler) -> Dict[object, ResultVariable]:
     # FIXME: could this be done more efficiently?
     out = {
         self.name: resolver.lookup(self.full_name)
     }  # type: Dict[object, ResultVariable]
     return out
Code example #4
 def requires_emit_gradual(
     self, resolver: Resolver, queue: QueueScheduler, resultcollector: ResultCollector
 ) -> Dict[object, ResultVariable]:
     var = resolver.lookup(self.full_name)
     var.listener(resultcollector, self.location)
     out = {self.name: var}  # type: Dict[object, ResultVariable]
     return out
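
Compared with code example #3, the gradual variant also registers a result collector as a listener on the variable, so callers can react to each value as it arrives instead of waiting for the variable to freeze. A toy collector illustrating that listener role (hypothetical names, not necessarily inmanta's ResultCollector interface) could look like:

class RecordingCollector:
    """Toy listener that just records every value it is notified about."""

    def __init__(self) -> None:
        self.seen: list = []

    def receive_result(self, value: object, location: object) -> None:
        # Invoked once per incoming value; a real collector would forward it
        # to whatever statement consumes the partial results.
        self.seen.append((value, location))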
Code example #5
 def requires_emit(self, resolver: Resolver, queue: QueueScheduler) -> Dict[object, ResultVariable]:
     try:
         resv = resolver.for_namespace(self.implements.constraint.namespace)
         return self.implements.constraint.requires_emit(resv, queue)
     except NotFoundException as e:
         e.set_statement(self.implements)
         raise e
Code example #6
def graph() -> Iterator[DataflowGraph]:
    namespace: Namespace = Namespace("dummy_namespace")
    resolver: Resolver = Resolver(namespace, enable_dataflow_graph=True)
    block: BasicBlock = BasicBlock(namespace, [])
    xc: ExecutionContext = ExecutionContext(block, resolver)
    block.namespace.scope = xc

    yield DataflowGraph(resolver)
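
The generator yields exactly one DataflowGraph wired to a dummy namespace, which suggests it is consumed as a pytest fixture. A hedged usage sketch (the decorator and the test function below are assumptions, not shown in the listing) would be:

import pytest

@pytest.fixture
def graph() -> Iterator[DataflowGraph]:
    ...  # body as in code example #6

def test_uses_graph(graph: DataflowGraph) -> None:
    # pytest injects the yielded DataflowGraph and runs any code after the
    # yield as teardown once the test finishes.
    assert graph is not None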
Code example #7
    def execute(self, requires: Dict[object, ResultVariable], instance: Resolver, queue: QueueScheduler) -> object:
        """
            Evaluate this statement
        """
        expr = self.implements.constraint
        if not expr.execute(requires, instance, queue):
            return None

        myqueue = queue.for_tracker(ImplementsTracker(self, instance))

        implementations = self.implements.implementations

        for impl in implementations:
            if instance.add_implementation(impl):
                # generate a subscope/namespace for each loop
                xc = ExecutionContext(impl.statements, instance.for_namespace(impl.statements.namespace))
                xc.emit(myqueue)

        return None
Code example #8
    def requires_emit(self, resolver: Resolver, queue: QueueScheduler) -> Dict[object, ResultVariable]:
        # direct
        preout = [x for x in self._direct_attributes.items()]
        preout.extend([x for x in self.type.get_entity().get_default_values().items()])

        out2 = {rk: rv for (k, v) in self.type.get_defaults().items()
                for (rk, rv) in v.requires_emit(resolver.for_namespace(v.get_namespace()), queue).items()}

        out = {rk: rv for (k, v) in preout for (rk, rv) in v.requires_emit(resolver, queue).items()}
        out.update(out2)

        return out
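
Both comprehensions above flatten per-attribute requirement dicts into a single mapping, and the final out.update(out2) lets the second set win on key collisions. The merge step in isolation (a generic stand-in, not tied to inmanta's types) amounts to:

def merge_requirements(per_attribute: dict) -> dict:
    # per_attribute maps an attribute name to the requirements dict produced
    # by that attribute's requires_emit(); the result is one flat mapping.
    out: dict = {}
    for requirements in per_attribute.values():
        out.update(requirements)
    return out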
Code example #9
File: generator.py Project: inmanta/inmanta-core
 def execute(self, requires: Dict[object, object], resolver: Resolver,
             queue: QueueScheduler) -> object:
     """
     Evaluate this statement.
     """
     cond: object = self.condition.execute(requires, resolver, queue)
     if isinstance(cond, Unknown):
         return None
     try:
         inmanta_type.Bool().validate(cond)
     except RuntimeException as e:
         e.set_statement(self)
         e.msg = "Invalid value `%s`: the condition for an if statement can only be a boolean expression" % cond
         raise e
     branch: BasicBlock = self.if_branch if cond else self.else_branch
     xc = ExecutionContext(branch, resolver.for_namespace(branch.namespace))
     xc.emit(queue)
     return None
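
The early return on Unknown gives the if statement three-valued behaviour: an unknown condition selects neither branch, while any non-boolean value is rejected. A stand-alone toy version of that dispatch (Unknown and the branch labels here are placeholders, not inmanta's classes) is:

class Unknown:
    """Placeholder for a value that is not (yet) known."""

def pick_branch(cond: object) -> object:
    if isinstance(cond, Unknown):
        return None  # neither branch is emitted
    if not isinstance(cond, bool):
        raise ValueError("condition must be a boolean expression, got %r" % (cond,))
    return "if_branch" if cond else "else_branch"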
Code example #10
    def fetch_variable(
        self, requires: Dict[object, ResultVariable], resolver: Resolver, queue_scheduler: QueueScheduler
    ) -> ResultVariable:
        """
        Fetches the referred variable
        """
        if self.instance:
            # get the Instance
            obj = self.instance.execute({k: v.get_value() for k, v in requires.items()}, resolver, queue_scheduler)

            if isinstance(obj, list):
                raise RuntimeException(self, "can not get a attribute %s, %s is a list" % (self.attribute, obj))
            if not isinstance(obj, Instance):
                raise RuntimeException(self, "can not get a attribute %s, %s not an entity" % (self.attribute, obj))

            # get the attribute result variable
            return obj.get_attribute(self.attribute)
        else:
            return resolver.lookup(self.attribute)
Code example #11
File: test_slots.py Project: e7ChatsApp/inmanta-core
def test_slots_dataflow():
    namespace: Namespace = Namespace("root", None)
    resolver: Resolver = Resolver(namespace)

    graph: DataflowGraph = DataflowGraph(resolver)
    assignable_node: AssignableNode = AssignableNode("node")
    value_node: ValueNode = ValueNode(42)
    instance_node: InstanceNode = InstanceNode([])

    assert_slotted(graph)
    assert_slotted(assignable_node)
    assert_slotted(assignable_node.equivalence)
    assert_slotted(value_node)
    assert_slotted(instance_node)

    assert_slotted(AttributeNodeReference(assignable_node.reference(), "attr"))
    assert_slotted(VariableNodeReference(assignable_node))
    assert_slotted(ValueNodeReference(value_node))
    assert_slotted(InstanceNodeReference(instance_node))
    assert_slotted(
        Assignment(assignable_node.reference(), value_node, Statement(),
                   graph))
    assert_slotted(NodeStub("stub"))
    assert_slotted(AttributeNode(instance_node, "attr"))
Code example #12
File: scheduler.py Project: inmanta/inmanta-core
    def run(self, compiler: "Compiler", statements: Sequence["Statement"], blocks: Sequence["BasicBlock"]) -> bool:
        """
        Evaluate the current graph
        """
        prev = time.time()
        start = prev

        # first evaluate all definitions, this should be done in one iteration
        self.define_types(compiler, statements, blocks)
        attributes_with_precedence_rule: List[RelationAttribute] = self._set_precedence_rules_on_relationship_attributes()

        # give all loose blocks an empty XC
        # register the XC's as scopes
        # All named scopes are now present

        for block in blocks:
            res = Resolver(block.namespace, self.track_dataflow)
            xc = ExecutionContext(block, res)
            block.context = xc
            block.namespace.scope = xc
            block.warn_shadowed_variables()

        # setup queues
        # queue for runnable items
        basequeue: Deque[Waiter] = deque()
        # queue for RV's that are delayed
        waitqueue = PrioritisedDelayedResultVariableQueue(attributes_with_precedence_rule)
        # queue for RV's that are delayed and had no effective waiters when they were first in the waitqueue
        zerowaiters: Deque[DelayedResultVariable[Any]] = deque()
        # queue containing everything, to find hanging statements
        all_statements: Set[Waiter] = set()

        # Wrap in object to pass around
        queue = QueueScheduler(compiler, basequeue, waitqueue, self.types, all_statements)

        # emit all top level statements
        for block in blocks:
            block.context.emit(queue.for_tracker(ModuleTracker(block)))

        # start an evaluation loop
        i = 0
        count = 0
        max_iterations = int(os.getenv("INMANTA_MAX_ITERATIONS", MAX_ITERATIONS))
        while i < max_iterations:
            now = time.time()

            # check if we can stop the execution
            if len(basequeue) == 0 and len(waitqueue) == 0 and len(zerowaiters) == 0:
                break
            else:
                i += 1

            LOGGER.debug(
                "Iteration %d (e: %d, w: %d, p: %d, done: %d, time: %f)",
                i,
                len(basequeue),
                len(waitqueue),
                len(zerowaiters),
                count,
                now - prev,
            )
            prev = now

            # evaluate all that is ready
            while len(basequeue) > 0:
                next = basequeue.popleft()
                try:
                    next.execute()
                    all_statements.discard(next)
                    count = count + 1
                except UnsetException as e:
                    # some statements don't know all their dependencies up front,...
                    next.requeue_with_additional_requires(object(), e.get_result_variable())

            # all safe stmts are done
            progress = False
            assert not basequeue

            # find a RV that has waiters, so freezing creates progress
            while len(waitqueue) > 0 and not progress:
                next_rv = waitqueue.popleft()
                if next_rv.hasValue:
                    # already froze itself
                    continue
                if next_rv.get_progress_potential() <= 0:
                    zerowaiters.append(next_rv)
                elif next_rv.get_waiting_providers() > 0:
                    # definitely not done
                    # drop from queue
                    # will requeue when value is added
                    next_rv.unqueue()
                else:
                    # freeze it and go to next iteration, new statements will be on the basequeue
                    LOGGER.log(LOG_LEVEL_TRACE, "Freezing %s", next_rv)
                    next_rv.freeze()
                    progress = True

            # no waiters in waitqueue,...
            # see if any zerowaiters have gotten waiters
            if not progress:
                zerowaiters_tmp = [w for w in zerowaiters if not w.hasValue]
                waitqueue.replace(w for w in zerowaiters_tmp if w.get_progress_potential() > 0)
                zerowaiters = deque(w for w in zerowaiters_tmp if w.get_progress_potential() <= 0)
                while len(waitqueue) > 0 and not progress:
                    LOGGER.debug("Moved zerowaiters to waiters")
                    next_rv = waitqueue.popleft()
                    if next_rv.get_waiting_providers() > 0:
                        next_rv.unqueue()
                    else:
                        LOGGER.log(LOG_LEVEL_TRACE, "Freezing %s", next_rv)
                        next_rv.freeze()
                        progress = True

            if not progress:
                # nothing works anymore, attempt to unfreeze wait cycle
                progress = self.find_wait_cycle(attributes_with_precedence_rule, queue.allwaiters)

            if not progress:
                # no one waiting anymore, all done, freeze and finish
                LOGGER.debug("Finishing statements with no waiters")

                while len(zerowaiters) > 0:
                    next_rv = zerowaiters.pop()
                    next_rv.freeze()

        now = time.time()
        LOGGER.info(
            "Iteration %d (e: %d, w: %d, p: %d, done: %d, time: %f)",
            i,
            len(basequeue),
            len(waitqueue),
            len(zerowaiters),
            count,
            now - prev,
        )

        if i == max_iterations:
            raise CompilerException(f"Could not complete model, max_iterations {max_iterations} reached.")

        excns: List[CompilerException] = []
        self.freeze_all(excns)

        now = time.time()
        LOGGER.info(
            "Total compilation time %f",
            now - start,
        )

        if len(excns) == 0:
            pass
        elif len(excns) == 1:
            raise excns[0]
        else:
            raise MultiException(excns)

        if all_statements:
            stmt = None
            for st in all_statements:
                if isinstance(st, ExecutionUnit):
                    stmt = st
                    break

            assert stmt is not None

            raise RuntimeException(stmt.expression, "not all statements executed %s" % all_statements)

        return True
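
Stripped of logging, precedence rules and error handling, the loop above is a worklist algorithm: drain everything that is runnable, then force progress by freezing one delayed result variable that still has waiters, and repeat until all queues are empty. A simplified model of that control flow (illustrative only; plain callables stand in for Waiter and DelayedResultVariable) is:

from collections import deque
from typing import Callable, Deque

def run_until_stable(runnable: Deque[Callable[[], None]],
                     delayed: Deque[Callable[[], None]]) -> None:
    while runnable or delayed:
        while runnable:
            runnable.popleft()()  # execute everything that is ready
        if delayed:
            # freezing one delayed variable may push new work onto runnable
            delayed.popleft()()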
Code example #13
File: assign.py Project: wdesmedt/inmanta
 def emit(self, resolver: Resolver, queue: QueueScheduler) -> None:
     target = resolver.lookup(self.name)
     reqs = self.value.requires_emit(resolver, queue)
     ExecutionUnit(queue, resolver, target, reqs, self.value)
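
The ExecutionUnit constructed on the last line is deliberately not bound to a name; presumably its constructor registers the unit with the queue and with the variables it waits on, which is what keeps it alive. A minimal sketch of that construct-and-forget pattern (a hypothetical class, not the project's ExecutionUnit) is:

class SelfRegisteringUnit:
    def __init__(self, queue: list) -> None:
        # Registering in __init__ lets callers construct and forget: the queue
        # now holds the reference that keeps this unit alive.
        queue.append(self)

pending: list = []
SelfRegisteringUnit(pending)
assert len(pending) == 1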
Code example #14
File: scheduler.py Project: wdesmedt/inmanta
    def run(self, compiler, statements, blocks):
        """
            Evaluate the current graph
        """
        prev = time.time()

        # first evaluate all definitions, this should be done in one iteration
        self.define_types(compiler, statements, blocks)

        # give all loose blocks an empty XC
        # register the XC's as scopes
        # All named scopes are now present
        for block in blocks:
            res = Resolver(block.namespace)
            xc = ExecutionContext(block, res)
            block.context = xc
            block.namespace.scope = xc

        # setup queues
        # queue for runnable items
        basequeue = []
        # queue for RV's that are delayed
        waitqueue = []
        # queue for RV's that are delayed and had no waiters when they were first in the waitqueue
        zerowaiters = []
        # queue containing everything, to find hanging statements
        all_statements = []

        # Wrap in object to pass around
        queue = QueueScheduler(compiler, basequeue, waitqueue, self.types,
                               all_statements)

        # emit all top level statements
        for block in blocks:
            block.context.emit(queue.for_tracker(ModuleTracker(block)))

        # start an evaluation loop
        i = 0
        count = 0
        while i < MAX_ITERATIONS:
            now = time.time()

            # check if we can stop the execution
            if len(basequeue) == 0 and len(waitqueue) == 0 and len(
                    zerowaiters) == 0:
                break
            else:
                i += 1

            LOGGER.debug(
                "Iteration %d (e: %d, w: %d, p: %d, done: %d, time: %f)", i,
                len(basequeue), len(waitqueue), len(zerowaiters), count,
                now - prev)
            prev = now

            # evaluate all that is ready
            while len(basequeue) > 0:
                next = basequeue.pop(0)
                try:
                    next.execute()
                    count = count + 1
                except UnsetException as e:
                    # some statements don't know all their dependencies up front,...
                    next.await(e.get_result_variable())

            # all safe stmts are done
            progress = False

            # find a RV that has waiters, so freezing creates progress
            while len(waitqueue) > 0 and not progress:
                next = waitqueue.pop(0)
                if len(next.waiters) == 0:
                    zerowaiters.append(next)
                elif next.get_waiting_providers() > 0:
                    # definitely not done
                    # drop from queue
                    # will requeue when value is added
                    next.unqueue()
                else:
                    # freeze it and go to next iteration, new statements will be on the basequeue
                    next.freeze()
                    progress = True

            # no waiters in waitqueue,...
            # see if any zerowaiters have gotten waiters
            if not progress:
                waitqueue = [w for w in zerowaiters if len(w.waiters) != 0]
                queue.waitqueue = waitqueue
                zerowaiters = [w for w in zerowaiters if len(w.waiters) == 0]
                while len(waitqueue) > 0 and not progress:
                    LOGGER.debug("Moved zerowaiters to waiters")
                    next = waitqueue.pop(0)
                    if next.get_waiting_providers() > 0:
                        next.unqueue()
                    else:
                        next.freeze()
                        progress = True

            # no one waiting anymore, all done, freeze and finish
            if not progress:
                LOGGER.debug("Finishing statements with no waiters")
                while len(zerowaiters) > 0:
                    next = zerowaiters.pop()
                    next.freeze()

        now = time.time()
        LOGGER.debug("Iteration %d (e: %d, w: %d, p: %d, done: %d, time: %f)",
                     i, len(basequeue), len(waitqueue), len(zerowaiters),
                     count, now - prev)

        if i == MAX_ITERATIONS:
            print("could not complete model")
            return False
        # now = time.time()
        # print(now - prev)
        # end evaluation loop
        # self.dump_not_done()
        # print(basequeue, waitqueue)
        # dumpHangs()
        # self.dump()
        # rint(len(self.types["std::Entity"].get_all_instances()))

        excns = []
        self.freeze_all(excns)

        if len(excns) == 0:
            pass
        elif len(excns) == 1:
            raise excns[0]
        else:
            raise MultiException(excns)

        all_statements = [x for x in all_statements if not x.done]

        if all_statements:
            stmt = None
            for st in all_statements:
                if isinstance(st, ExecutionUnit):
                    stmt = st
                    break

            raise RuntimeException(
                stmt.expression,
                "not all statements executed %s" % all_statements)
        # self.dump("std::File")

        return True
Code example #15
File: assign.py Project: inmanta/inmanta-core
 def emit(self, resolver: Resolver, queue: QueueScheduler) -> None:
     self._add_to_dataflow_graph(resolver.dataflow_graph)
     target = resolver.lookup(str(self.name))
     assert isinstance(target, ResultVariable)
     reqs = self.value.requires_emit(resolver, queue)
     ExecutionUnit(queue, resolver, target, reqs, self.value, owner=self)