Example #1
def test_append_with_category():
    trace = TraceStore(10)
    trace.append("EVENT", "CATEGORY")
    assert len(trace.store) == 1
    assert trace.store[0][1] == "EVENT"
    assert type(trace.store[0][0]) == datetime.datetime
    assert trace.store[0][2] == "CATEGORY"
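
The tests in this section exercise a TraceStore class whose implementation is not shown. As a reading aid, here is a minimal sketch that is consistent with this test (and with the size-cap behaviour checked in the max-size example further down); the class name, constructor argument and store layout come from the tests, everything else is an assumption rather than the original code.

import datetime


class TraceStore:
    """Minimal sketch of the interface the tests exercise (assumed, not the original)."""

    def __init__(self, size):
        self.size = size  # maximum number of entries to keep
        self.store = []   # newest first: (timestamp, event, category) tuples

    def append(self, event, category=None):
        # Prepend so that store[0] is always the most recent entry,
        # then trim the store to the configured maximum size.
        self.store.insert(0, (datetime.datetime.now(), event, category))
        del self.store[self.size:]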
Example #2
def test_filter():
    trace = TraceStore(10)
    for i in range(5):
        trace.append(i)

    result = [x[1] for x in trace.filter()]
    assert result == [0, 1, 2, 3, 4]
Example #3
def test_append_to_top():
    trace = TraceStore(10)
    for i in range(5):
        trace.append(i)
    trace.append("EVENT")
    assert len(trace.store) == 6
    assert trace.store[0][1] == "EVENT"
Example #4
def test_filter_limit():
    trace = TraceStore(10)
    for i in range(5):
        trace.append(i)

    result = [x[1] for x in trace.filter(limit=3)]
    assert result == [2, 3, 4]
Example #5
def test_append():
    trace = TraceStore(10)
    trace.append("EVENT")
    assert len(trace.store) == 1
    assert trace.store[0][1] == "EVENT"
    assert type(trace.store[0][0]) == datetime.datetime
    assert trace.store[0][2] is None
Example #6
def test_all_limit():
    trace = TraceStore(10)
    for i in range(5):
        trace.append(i)
    entries = trace.all(limit=2)
    events = [e[1] for e in entries]

    assert events == [3, 4]
Example #7
def test_append_max_size():
    trace = TraceStore(2)
    for i in range(5):
        trace.append(i)

    assert len(trace.store) == 2
    assert trace.store[0][1] == 4
    assert trace.store[1][1] == 3
Example #8
def test_reset():
    trace = TraceStore(10)
    for i in range(5):
        trace.append(i)
    assert len(trace.store) == 5

    trace.reset()
    assert len(trace.store) == 0
Example #9
def test_filter_category():
    cycle = itertools.cycle(range(3))
    trace = TraceStore(10)
    for i in range(5):
        trace.append(i, str(next(cycle)))

    result = [x[2] for x in trace.filter(category="0")]
    assert result == ["0", "0"]
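
Taken together, the filter tests pin down the behaviour of filter(): results come back in chronological order (oldest first), category narrows by exact match, and limit keeps only the most recent entries. A hedged continuation of the TraceStore sketch above (again an assumption, not the original implementation):

    def filter(self, limit=None, category=None):
        # store is newest-first, so reverse it to return results oldest-first
        entries = list(reversed(self.store))
        if category is not None:
            entries = [e for e in entries if e[2] == category]
        if limit is not None:
            entries = entries[-limit:]  # keep only the `limit` most recent entries
        return entries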
Example #10
def test_all():
    trace = TraceStore(10)
    for i in range(5):
        trace.append(i)
    entries = trace.all()
    events = [e[1] for e in entries]

    assert events == [0, 1, 2, 3, 4]
Example #11
def test_decomposition():
    trace = TraceStore(10)
    for i in range(5):
        event = dict(name=f"name{i}", id=i)
        trace.append(event)
    entries = trace.all()
    events = [event['name'] for (date, event, category) in entries]
    # print(events)
    assert events == ['name0', 'name1', 'name2', 'name3', 'name4']
Example #12
def test_filter_category_limit():
    cycle = itertools.cycle(range(3))
    trace = TraceStore(10)
    for i in range(5):
        trace.append(i, str(next(cycle)))

    result = [
        category
        for (tw, event, category) in trace.filter(category="0", limit=1)
    ]
    assert result == ["0"]
Example #13
def test_filter_from_limit(trace_store_message_factory):
    cycle = itertools.cycle(range(3))
    trace = TraceStore(10)
    for i in range(5):
        msg = trace_store_message_factory(body=str(i),
                                          app_id="{}@sender".format(
                                              next(cycle)))
        trace.append(msg)

    result = [x[1].body for x in trace.filter(app_id="0@sender", limit=1)]
    assert result == ["3"]
Example #14
def test_filter_from_and_category(trace_store_message_factory):
    cycle = itertools.cycle(range(3))
    trace = TraceStore(10)
    for i in range(5):
        c = str(next(cycle))
        msg = trace_store_message_factory(body=str(c),
                                          app_id="{}@sender".format(c))
        trace.append(msg, c)

    result = [(event.body, category)
              for (tw, event,
                   category) in trace.filter(app_id="1@sender", category="1")]
    assert result == [("1", "1"), ("1", "1")]
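
The two examples above additionally filter on app_id, which is an attribute of the stored message rather than of the trace entry itself. One plausible way to support that (an assumption) is to treat extra keyword arguments as attribute filters on the event object, generalising the filter() sketch shown earlier:

    def filter(self, limit=None, category=None, **attrs):
        entries = list(reversed(self.store))
        if category is not None:
            entries = [e for e in entries if e[2] == category]
        for name, value in attrs.items():  # e.g. app_id="0@sender"
            entries = [e for e in entries if getattr(e[1], name, None) == value]
        if limit is not None:
            entries = entries[-limit:]
        return entries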
Example #15
def test_received():
    Event = namedtuple("Event", ["value", "sent"])
    trace = TraceStore(10)
    for i in range(5):
        trace.append(Event(i, True))

    empty_received = trace.received()
    assert len(empty_received) == 0

    for i in range(5, 10):
        trace.append(Event(i, False))

    received = [r[1].value for r in trace.received()]
    assert len(received) == 5
    assert received == [5, 6, 7, 8, 9]

    limit_received = [r[1].value for r in trace.received(limit=3)]
    assert len(limit_received) == 3
    assert limit_received == [7, 8, 9]
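
This test implies that received() selects entries whose event carries sent=False, again oldest-first and with limit keeping only the most recent entries. Under the same assumptions as the sketch above:

    def received(self, limit=None):
        # entries whose event was received rather than sent (event.sent is False)
        entries = [e for e in reversed(self.store) if not e[1].sent]
        return entries[-limit:] if limit is not None else entries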
Example #16
def test_len():
    trace = TraceStore(10)
    for i in range(5):
        trace.append(i)
    assert trace.len() == 5
Example #17
class Core(MyService):
    """RabbitMQ-backed core agent service.

    Every queue is automatically bound to the default exchange with a routing key equal to the queue name.
    All async tasks must be started in the on_start method, because the event loop is only configured there.

    """

    def __init__(
        self,
        *,
        identity=None,
        config=None,
        clock=None,
        channel_number: int = None,
        beacon: NodeT = None,
        loop: asyncio.AbstractEventLoop = None,
    ) -> None:
        identity = identity or str(uuid.uuid4())
        super().__init__(identity=identity, beacon=beacon, loop=loop)

        self.config = {}
        if config is not None:
            if not isinstance(config, dict):
                self.log.error(
                    f"Configuration must be a valid dictionary, got {config}. Resetting to {{}}."
                )
            else:
                self.log.info(f"Configuration: {config}.")
                self.config = config

        self.connection = None
        self.channel = None
        self.channel_number = channel_number
        self.direct_queue = None
        self.topic_exchange = None
        self.fanout_exchange = None
        self.behaviours = self._children
        self.traces = TraceStore(size=1000)
        self.peers = TraceStore(size=100)

        self.futures = dict()  # store for RPC futures

        self.handlers: Registry = Registry()

        self.clock = clock

        self.web = None  # set by class AsgiAgent
        self.ws = None

    async def __aenter__(self) -> Core:
        await super(Core, self).__aenter__()
        return self

    async def __aexit__(
        self,
        exc_type: Type[BaseException] = None,
        exc_val: BaseException = None,
        exc_tb: TracebackType = None,
    ) -> Optional[bool]:
        await super(Core, self).__aexit__()
        return None

    async def on_first_start(self):
        ...

    async def on_start(self):
        self.log.info("Starting agent.")
        try:
            self.connection = await connect_robust(url=RMQ_URL)
        except ConnectionError as e:
            self.log.exception(f"Check RabbitMQ: {e}")  # TODO: implement RMQ precheck

        # needs to be managed when several cores are running in one process (otherwise always 1)
        if self.channel_number:
            self.channel = await self.connection.channel(
                channel_number=self.channel_number
            )
        else:
            self.channel = await self.connection.channel()

        await self.channel.set_qos(prefetch_count=1)

        await self.configure_exchanges()

        try:
            await self._configure_agent_queues()
        except ChannelLockedResource as e:
            self.log.error(f"Potential identity conflict: {self.identity}.")
            raise

        self.log.info(f"Start consuming: {self.direct_queue}, {self.fanout_queue}")
        await self.direct_queue.consume(
            consumer_tag=self.identity, callback=self.on_message
        )
        await self.fanout_queue.consume(callback=self.on_message)

        await self._update_peers()
        # TODO: refactor for better understanding and configuration
        if self.config.get("UPDATE_PEER_INTERVAL") is not None:
            interval = self.config.get("UPDATE_PEER_INTERVAL")
            self.log.debug(f"Starting peer update with interval: {interval}")
            # noinspection PyAsyncCall
            self.add_future(
                self.periodic_update_peers(interval)
            )  # service awaits future

        await self.setup()

    async def setup(self):
        """ To be overridden by the user. """
        pass

    async def configure_exchanges(self):
        """ Configures the exchanges: TOPIC, FANOUT """
        self.topic_exchange = await self.channel.declare_exchange(
            name=BINDING_KEY_TOPIC, type=ExchangeType.TOPIC
        )
        self.fanout_exchange = await self.channel.declare_exchange(
            name=BINDING_KEY_FANOUT, type=ExchangeType.FANOUT
        )
        self.log.info(
            f"Exchanges created: {self.topic_exchange}, {self.fanout_exchange}"
        )

    async def _configure_agent_queues(self):
        queue_name = self.identity
        self.direct_queue = await self.channel.declare_queue(
            name=queue_name, auto_delete=False, durable=False, exclusive=True
        )

        self.fanout_queue = await self.channel.declare_queue(
            name="", auto_delete=False, durable=False, exclusive=True
        )
        self.log.info(f"Queues declared: {self.direct_queue}, {self.fanout_queue}")

        await self.fanout_queue.bind(
            self.fanout_exchange, routing_key=BINDING_KEY_FANOUT
        )
        self.log.info(
            f"Binding: {self.fanout_queue} to {self.fanout_exchange}: BindingKey: {BINDING_KEY_FANOUT}"
        )

    async def on_started(self):
        ...

    async def dummy(self):
        return True

    async def stop(self) -> None:
        """Stop the service."""
        if not self._stopped.is_set():
            # self._log_mundane('Stopping...')
            self.log.info(f"Stopping agent and behaviours: {self.list_behaviour()}...")
            self._stopped.set()
            await self._stop_children()  # tw: order reversed with regards to service.stop()
            await self.on_stop()
            self.log.debug("Shutting down...")
            if self.wait_for_shutdown:
                self.log.debug("Waiting for shutdown")
                await asyncio.wait_for(self._shutdown.wait(), self.shutdown_timeout)
                self.log.debug("Shutting down now")
            await self._stop_futures()
            await self._stop_exit_stacks()
            await self.on_shutdown()
            self.log.debug("-Stopped!")

    async def on_stop(self):
        """ Stops an agent and kills all its behaviours. """
        await self.teardown()
        await self.connection.close()
        await self.channel.close()
        self.log.info(f"Agent stopped: {self.state}")

    async def teardown(self):
        """ To be overridden by the user. """
        pass

    async def on_shutdown(self):
        self.set_shutdown()
        self.log.info(f"Agent shutdown: {self.state}")

    # async def _async_connect(self):  # pragma: no cover
    #     try:
    #         self.connection = await connect_robust(url=RMQ_URL)
    #         aenter = type(self.connection).__aenter__(self.connection)
    #         self.channel = await aenter
    #         self.log.info(f"Agent {self.identity} connected and authenticated.")
    #     except Exception:
    #         raise

    # async def _async_disconnect(self):
    #     if self.is_alive:
    #         aexit = self.connection.__aexit__(*sys.exc_info())
    #         await aexit
    #         self.log.info("Client disconnected.")

    def has_behaviour(self, behaviour):
        """ Tests for behaviour """
        return behaviour in self.behaviours

    def list_behaviour(self):
        """ Lists all behaviours """
        return [str(behav) for behav in self.behaviours]

    def get_behaviour(self, name: str) -> Optional[ServiceT]:
        """ Returns the behaviour """
        behav = [behav for behav in self.behaviours if str(behav).endswith(name)]
        if len(behav) > 1:
            self.log.warning(
                f"{len(behav)} behaviours found for {name}. Name not unique!"
            )
        elif len(behav) == 0:
            return None
        return behav[0]

    async def call(self, msg: str, target: str = None) -> str:
        """ Sends an RPC call. """
        if target is None:
            target = self.identity  # loopback send

        result = None
        correlation_id = str(uuid.uuid4())
        future = self.loop.create_future()

        # store the future so it can be resolved in the background while we await its result here
        self.futures[correlation_id] = future

        await self.direct_send(msg, RmqMessageTypes.RPC.name, target, correlation_id)

        try:
            async with timeout(delay=TIMEOUT):
                result = await future
        except asyncio.TimeoutError as e:
            rpc_message = RpcMessage.from_json(msg)
            err_msg = f"{self}: TimeoutError after {TIMEOUT}s while waiting for RPC request: {rpc_message.c_type}: {correlation_id}"
            future = self.futures.pop(correlation_id)
            # future.set_exception(e)
            future.cancel()
            self.log.error(err_msg)

            result = RpcError(error=err_msg)

        return result

    async def direct_send(
        self,
        msg: str,
        msg_type: RmqMessageTypes.name,
        target: str = None,
        correlation_id: str = None,
        headers: dict = None,
    ) -> None:
        """ Sends message to default exchange """
        if target is None:
            target = self.identity  # loopback send to itself

        await self.channel.default_exchange.publish(
            message=self._create_message(msg, msg_type, correlation_id, headers),
            routing_key=target,
            timeout=None,
        )
        self._add_trace_outgoing(correlation_id, headers, msg, msg_type, target, target)
        self.log.debug(
            f"Sent message: {msg}, routing_key: {target}, type: {msg_type}"
        )

    async def fanout_send(
        self,
        msg: str,
        msg_type: RmqMessageTypes.name,
        correlation_id: str = None,
        headers: dict = None,
    ) -> None:
        """ Sends message to fanout exchange """

        await self.fanout_exchange.publish(
            message=self._create_message(msg, msg_type, correlation_id, headers),
            routing_key=BINDING_KEY_FANOUT,
            timeout=None,
        )
        self._add_trace_outgoing(
            correlation_id, headers, msg, msg_type, "fanout", BINDING_KEY_FANOUT
        )
        self.log.debug(f"Sent fanout message: {msg}, routing_key: {BINDING_KEY_FANOUT}")

    async def publish(self, msg: str, routing_key: str, headers: dict = None) -> None:
        """ Publishes message to topic """
        await self.topic_exchange.publish(
            message=self._create_message(
                msg,
                msg_type=RmqMessageTypes.PUBSUB.name,
                correlation_id=None,
                headers=headers,
            ),
            routing_key=routing_key,
            timeout=None,
        )
        self._add_trace_outgoing(
            None, headers, msg, RmqMessageTypes.PUBSUB.name, "publish", routing_key
        )
        self.log.debug(f"Sent: {msg}, routing_key: {routing_key}")

    def _create_message(
        self,
        msg: str,
        msg_type: RmqMessageTypes.name,
        correlation_id: str = None,
        headers: dict = None,
    ) -> Message:
        return Message(
            content_type="application/json",
            body=msg.encode(),
            timestamp=datetime.now(),
            type=msg_type,
            app_id=self.identity,
            user_id="guest",
            headers=headers,
            correlation_id=correlation_id,
        )

    def _add_trace_outgoing(
        self, correlation_id, headers, msg, msg_type, target, routing_key
    ):
        self.traces.append(
            TraceStoreMessage(
                body=msg,
                headers=headers,
                correlation_id=correlation_id,
                type=msg_type,
                target=target,
                routing_key=routing_key,
            ),
            category="outgoing",
        )

    async def on_message(self, message: IncomingMessage):
        """ Handle incoming messages.

            Well-known types (RmqMessageTypes) are dispatched to system handlers;
            all others are enqueued to the behaviour mailboxes for user handling.
        """
        # If the context manager catches an exception, the message is returned to the queue.
        async with message.process():
            self.log.debug("Received (info/body):")
            self.log.debug(f"   {message.info()}")
            self.log.debug(f"   {message.body.decode()}")
            self.traces.append(TraceStoreMessage.from_msg(message), category="incoming")

            if message.type in (RmqMessageTypes.CONTROL.name, RmqMessageTypes.RPC.name):
                handler = self.handlers.get(handler=message.type)
                if issubclass(handler, SystemHandler):
                    handler_instance = handler(core=self)
                    return await handler_instance.handle(message)
                else:
                    return await handler(self, message)

            for behaviour in self.behaviours:
                await behaviour.enqueue(message)
                self.log.debug(f"Message enqueued to: {behaviour} --> {message.body}")
                self.traces.append(
                    TraceStoreMessage.from_msg(message), category=str(behaviour)
                )

    async def _update_peers(self) -> None:
        msg = PingControl().serialize()
        correlation_id = str(uuid.uuid4())
        await self.fanout_send(
            msg=msg,
            msg_type=RmqMessageTypes.CONTROL.name,
            correlation_id=correlation_id,
        )

    async def periodic_update_peers(self, interval):
        """ Sends a periodic keepalive message to all peers (if UPDATE_PEER_INTERVAL is set)
            and publishes the latest peer responses as a peer list to the websocket.
        """
        _interval = want_seconds(interval)
        async for _ in self.itertimer(_interval):
            await self._update_peers()
            peers = await self.list_peers()
            msg = {"from": self.identity, "peers": peers}
            await self._publish_ws(msg)

    async def list_peers(self) -> list:  # TODO: make property out of method
        """ Lists all peers that responded to the latest PING. """
        latest = self.peers.latest()
        corr_id = latest[2]
        peers = sorted(
            [status for (ts, status, cor_id) in self.peers.filter(category=corr_id)],
            key=lambda status: status.name,
        )
        peers = CoreStatus.schema().dump(peers, many=True)
        return peers

    async def _publish_ws(self, msg: JSONType):
        if self.web and self.web.ws:
            self.log.debug(f"Publishing ws message: {msg}")
            try:
                await self.web.ws.send_json(msg)
            except RuntimeError as e:
                self.log.exception(e)

    def __repr__(self):
        return "{}".format(self.__class__.__name__)

    @property
    def status(self):
        behav_stati = list()
        for behav in self.behaviours:
            behav_status = ServiceStatus(name=str(behav), state=behav.state)
            behav_stati.append(behav_status)
        return CoreStatus(name=self.identity, state=self.state, behaviours=behav_stati)
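
Note on the RPC flow: call() above parks an asyncio future in self.futures under a fresh correlation_id and then awaits it with a timeout, but the code that resolves those futures is not part of this example. Purely as a hypothetical sketch, the resolving side could look like the following; the method name and its wiring into the handler registry are assumptions, not the original code.

    async def _on_rpc_response(self, message: IncomingMessage):
        # Hypothetical: match the reply to the pending call() via its correlation_id
        future = self.futures.pop(message.correlation_id, None)
        if future is not None and not future.done():
            future.set_result(message.body.decode())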
Example #18
def test_latest():
    trace = TraceStore(10)
    for i in range(5):
        trace.append(i)
    latest = trace.latest()
    assert latest[1] == 4
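
For completeness, the remaining TraceStore methods exercised in this section (all, latest, reset, len) admit an equally small sketch under the same assumptions as before:

    def all(self, limit=None):
        # same as filter() with no predicates: oldest-first, last `limit` entries
        return self.filter(limit=limit)

    def latest(self):
        # store is newest-first, so the latest entry sits at index 0
        return self.store[0] if self.store else None

    def reset(self):
        self.store = []

    def len(self):
        return len(self.store)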