Code Example #1
    def _read_sensor(  # pylint: disable=too-many-arguments
        self, source_uuid: UUID, sid: int, unit: str, topic: str,
        callback_config: AdvancedCallbackConfiguration
    ) -> AsyncGenerator[DataEvent, None]:
        monitor_stream = (
            stream.repeat(self.device, interval=1)
            | pipe.map(
                async_(lambda sensor: sensor.get_callback_configuration(sid)))
            | pipe.map(lambda current_config: None
                       if current_config == callback_config else self.device)
            | pipe.filter(lambda sensor: sensor is not None)
            | pipe.action(lambda sensor: logging.getLogger(__name__).info(
                "Resetting callback config for %s", sensor))
            | pipe.action(
                async_(lambda sensor: sensor.set_callback_configuration(
                    sid, *callback_config)))
            | pipe.filter(lambda x: False))

        return stream.merge(
            stream.just(monitor_stream),
            stream.iterate(self.device.read_events(sids=(sid, )))
            | pipe.map(lambda item: DataEvent(sender=source_uuid,
                                              topic=topic,
                                              value=item.payload,
                                              sid=item.sid,
                                              unit=str(unit))),
        )
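
A pattern worth noting in the example above: the monitor stream ends in pipe.action(...) followed by pipe.filter(lambda x: False), so it performs its side effects (checking and resetting the callback configuration) without ever emitting anything, and stream.merge runs it concurrently with the stream that produces the actual data events. Below is a minimal, self-contained sketch of that idea, using placeholder values and intervals instead of the sensor API.

import asyncio

from aiostream import pipe, stream


async def main():
    checks = []

    # Side-effect-only branch: runs periodically, but never emits an item
    # because the final filter drops everything.
    monitor = (
        stream.repeat("check config", interval=0.05)
        | pipe.action(checks.append)
        | pipe.filter(lambda _: False)
    )

    # Data branch: placeholder values instead of sensor read events.
    data = stream.range(3, interval=0.1)

    # merge() runs both branches concurrently; only data items come out.
    # take(3) closes the merged stream (and cancels the monitor) once the
    # three data items have been seen.
    merged = stream.merge(monitor, data) | pipe.take(3)
    print(await stream.list(merged))  # [0, 1, 2]
    print("monitor ran:", len(checks) > 0)


asyncio.run(main())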
Code Example #2
File: test_misc.py Project: timworx/aiostream
async def test_action(assert_run, event_loop):
    with event_loop.assert_cleanup():
        lst = []
        xs = stream.range(3) | add_resource.pipe(1) | pipe.action(lst.append)
        await assert_run(xs, [0, 1, 2])
        assert lst == [0, 1, 2]

    with event_loop.assert_cleanup():
        queue = asyncio.Queue()
        xs = stream.range(3) | add_resource.pipe(1) | pipe.action(queue.put)
        await assert_run(xs, [0, 1, 2])
        assert queue.get_nowait() == 0
        assert queue.get_nowait() == 1
        assert queue.get_nowait() == 2
Code Example #3
File: test_misc.py Project: vxgmichel/aiorx
async def test_action(assert_run, event_loop):
    with event_loop.assert_cleanup():
        lst = []
        xs = stream.range(3) | add_resource.pipe(1) | pipe.action(lst.append)
        await assert_run(xs, [0, 1, 2])
        assert lst == [0, 1, 2]

    with event_loop.assert_cleanup():
        queue = asyncio.Queue()
        xs = stream.range(3) | add_resource.pipe(1) | pipe.action(queue.put)
        await assert_run(xs, [0, 1, 2])
        assert queue.get_nowait() == 0
        assert queue.get_nowait() == 1
        assert queue.get_nowait() == 2
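
Code examples #2 and #3 (the same test in two forks) rely on pipe.action accepting either a plain callable (lst.append) or a coroutine function (queue.put); in both cases the items pass through unchanged. Below is a minimal sketch of the same behaviour outside the test harness, without the assert_run and add_resource fixtures.

import asyncio

from aiostream import pipe, stream


async def main():
    lst = []
    queue = asyncio.Queue()

    xs = (
        stream.range(3)
        | pipe.action(lst.append)  # synchronous side effect
        | pipe.action(queue.put)   # asynchronous side effect (coroutine function)
    )

    assert await stream.list(xs) == [0, 1, 2]  # items are forwarded unchanged
    assert lst == [0, 1, 2]
    assert [queue.get_nowait() for _ in range(3)] == [0, 1, 2]


asyncio.run(main())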
Code Example #4
async def run(
    subscription_kinds: List[SubscriptionKind],
    topic: str,
    begin_timestamp=None,
    interval_sec=None,
    num_msgs=10,
    on_message: Optional[Callable[[Any], Any]] = None,
):
    def log_message(msg):
        if not isinstance(msg, list):
            #LOGGER.info("Status: %s", json.dumps(msg))
            pass
        else:
            LOGGER.debug("Received message: %s", json.dumps(msg))

    def maybe_callback(m):
        if on_message:
            on_message(m)

    # the continuous version takes messages until interval_sec has elapsed
    if interval_sec:
        async with KrakenClient() as client:
            for kind in subscription_kinds:
                await subscribe(client, kind, PAIRS, SUBSCRIPTION_DEPTH)
            await (client.stream(reconnect_count=WEBSOCKET_RECONNECT_COUNT)
                   | pipe.action(log_message)
                   | pipe.filter(lambda msg: isinstance(msg, list))
                   | pipe.map(json.dumps)
                   #| pipe.print()
                   | pipe.takewhile(
                       lambda x: time.time() - begin_timestamp < interval_sec)
                   | pipe.action(maybe_callback))
            client.disconnect()

    # the discrete version takes num_msgs distinct messages (10 by default)
    else:
        async with KrakenClient() as client:
            for kind in subscription_kinds:
                await subscribe(client, kind, PAIRS, SUBSCRIPTION_DEPTH)
            await (client.stream(reconnect_count=WEBSOCKET_RECONNECT_COUNT)
                   | pipe.action(log_message)
                   | pipe.filter(lambda msg: isinstance(msg, list))
                   | pipe.map(json.dumps)
                   #| pipe.print()
                   | pipe.take(num_msgs)
                   | pipe.action(maybe_callback))
            print("disconnecting")
            client.disconnect()
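
The two branches above differ only in how the pipeline is bounded: the continuous variant uses pipe.takewhile with a wall-clock condition, the discrete variant uses pipe.take(num_msgs). Below is a reduced sketch of the same two shapes with a plain counter stream instead of the Kraken websocket client; the names and intervals are made up for illustration.

import asyncio
import time

from aiostream import pipe, stream


async def main():
    seen = []

    # Time-bounded variant: keep items while less than 0.5 s has elapsed.
    begin = time.time()
    timed = (
        stream.range(1_000_000, interval=0.05)
        | pipe.takewhile(lambda _: time.time() - begin < 0.5)
        | pipe.action(seen.append)
    )
    await timed
    print("time-bounded items:", len(seen))

    # Count-bounded variant: stop after exactly 5 items.
    counted = stream.range(1_000_000, interval=0.01) | pipe.take(5)
    print("count-bounded items:", await stream.list(counted))


asyncio.run(main())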
Code Example #5
    def stream_data(self) -> AsyncGenerator[DataEvent, None]:
        """
        Generate the initial configuration of the sensor, configure it, and finally stream the data from the sensor.
        If there is a configuration update, reconfigure the sensor and start streaming again.
        Returns
        -------
        AsyncGenerator of DataEvent
            The data from the device
        """
        # Generates the first configuration
        # Query the database and if it does not have a config for the sensor, wait until there is one

        data_stream = (
            stream.chain(
                stream.call(
                    call_safely, "db_labnode_sensors/get_config", "db_labnode_sensors/status_update", self.__uuid
                )
                | pipe.takewhile(lambda config: config is not None),
                stream.iterate(event_bus.subscribe(f"nodes/by_uuid/{self.__uuid}/update")),
            )
            | pipe.action(
                lambda config: logging.getLogger(__name__).info(
                    "Got new configuration for: %s",
                    self._device,
                )
            )
            | pipe.map(self._create_config)
            | pipe.switchmap(
                lambda config: stream.empty()
                if config is None or not config["enabled"]
                else (self._configure_and_stream(config))
            )
        )

        return data_stream
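
The core of this example is pipe.switchmap: every time a new configuration arrives, the previously started inner stream is dropped and a new one is started (or stream.empty() if the sensor is disabled). Below is a stripped-down sketch of that restart-on-new-config behaviour, with integers standing in for configurations and a trivial inner stream instead of _configure_and_stream.

import asyncio

from aiostream import pipe, stream


def configure_and_stream(config):
    # Stand-in for the real inner stream that would configure the
    # sensor and yield DataEvents.
    return stream.repeat(f"data for config {config}", interval=0.05)


async def main():
    configs = stream.range(3, interval=0.2)  # a new "configuration" every 200 ms

    data = (
        configs
        | pipe.switchmap(
            lambda config: stream.empty() if config is None else configure_and_stream(config)
        )
        | pipe.take(8)
    )
    # Items produced for an older config stop as soon as the next config arrives.
    print(await stream.list(data))


asyncio.run(main())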
Code Example #6
    def stream_data(self,
                    config: dict[str, Any]) -> AsyncGenerator[DataEvent, None]:
        """
        Stream the data from the sensor.
        Parameters
        ----------
        config: dict
            A dictionary containing the sensor configuration.

        Returns
        -------
        AsyncGenerator
            The asynchronous stream.
        """
        data_stream = (
            stream.chain(
                stream.just(config),
                stream.iterate(event_bus.subscribe(f"nodes/by_uuid/{self.__uuid}/update")),
            )
            | pipe.action(
                lambda _: logging.getLogger(__name__).info("Got new configuration for: %s", self)
                if config is not None
                else logging.getLogger(__name__).info("Removed configuration for: %s", self)
            )
            | pipe.map(self._parse_config)
            | pipe.switchmap(
                lambda conf: stream.empty()
                if conf is None or not conf["enabled"]
                else self._configure_and_stream(conf)
            )
        )

        return data_stream
Code Example #7
File: norm_server.py Project: vxgmichel/aiorx
async def euclidean_norm_handler(reader, writer):

    # Define lambdas
    strip =        lambda x: x.decode().strip()
    nonempty =     lambda x: x != ''
    square =       lambda x: x ** 2
    write_cursor = lambda x: writer.write(b'> ')
    square_root =  lambda x: x ** 0.5

    # Create awaitable
    handle_request = (
        stream.iterate(reader)
        | pipe.print('string: {}')
        | pipe.map(strip)
        | pipe.takewhile(nonempty)
        | pipe.map(float)
        | pipe.map(square)
        | pipe.print('square: {:.2f}')
        | pipe.action(write_cursor)
        | pipe.accumulate(initializer=0)
        | pipe.map(square_root)
        | pipe.print('norm -> {:.2f}')
    )

    # Loop over norm computations
    while not reader.at_eof():
        writer.write(INSTRUCTIONS.encode())
        try:
            result = await handle_request
        except ValueError:
            writer.write(ERROR.encode())
        else:
            writer.write(RESULT.format(result).encode())
Code Example #8
File: norm_server.py Project: timworx/aiostream
async def euclidean_norm_handler(reader, writer):

    # Define lambdas
    strip = lambda x: x.decode().strip()
    nonempty = lambda x: x != ''
    square = lambda x: x**2
    write_cursor = lambda x: writer.write(b'> ')
    square_root = lambda x: x**0.5

    # Create awaitable
    handle_request = (stream.iterate(reader)
                      | pipe.print('string: {}')
                      | pipe.map(strip)
                      | pipe.takewhile(nonempty)
                      | pipe.map(float)
                      | pipe.map(square)
                      | pipe.print('square: {:.2f}')
                      | pipe.action(write_cursor)
                      | pipe.accumulate(initializer=0)
                      | pipe.map(square_root)
                      | pipe.print('norm -> {:.2f}'))

    # Loop over norm computations
    while not reader.at_eof():
        writer.write(INSTRUCTIONS.encode())
        try:
            result = await handle_request
        except ValueError:
            writer.write(ERROR.encode())
        else:
            writer.write(RESULT.format(result).encode())
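
Both copies of the norm server (code examples #7 and #8) lean on two aiostream properties: awaiting a stream runs it and returns its last emitted item (here the final accumulated norm), and the same stream object can be awaited again on the next loop iteration, re-running the whole pipeline. A compact illustration of both points:

import asyncio

from aiostream import pipe, stream


async def main():
    # accumulate() emits the running sum (starting with the initializer),
    # so awaiting the stream returns the last item: the norm of the inputs.
    norm = (
        stream.iterate([3.0, 4.0])
        | pipe.map(lambda x: x ** 2)
        | pipe.accumulate(initializer=0)
        | pipe.map(lambda x: x ** 0.5)
    )
    print(await norm)  # 5.0
    print(await norm)  # 5.0 again: the pipeline is re-run on every await


asyncio.run(main())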
Code Example #9
    async def run(self) -> None:
        """
        The main task, that reads data from the sensors and pushes it onto the event_bus.
        """
        # Generate the UUIDs of new sensors
        sensor_stream = stream.chain(
            stream.iterate(
                iterate_safely(f"{self.__topic}/get",
                               f"{self.__topic}/status_update")),
            stream.iterate(event_bus.subscribe(f"{self.__topic}/add_host")),
        ) | pipe.flatmap(
            lambda item: stream.chain(
                stream.call(event_bus.call, f"{self.__topic}/get_config", item)
                | catch.pipe(TopicNotRegisteredError),
                stream.iterate(event_bus.subscribe(f"nodes/by_uuid/{item}/update")),
            )
            | pipe.until(lambda config: config is None)
            | pipe.map(
                lambda config: config
                if self._is_config_valid(self.__node_id, config)
                else None
            )
            | pipe.map(self._create_transport)
            | pipe.switchmap(
                lambda transport: stream.empty()
                if transport is None
                else stream.iterate(transport.stream_data())
            )
            | pipe.action(lambda data: event_bus.publish("wamp/publish", data))
        )

        await sensor_stream
Code Example #10
File: labnode.py Project: PatrickBaus/sensorDaemon
    def _stream_data(self, transport):
        config_stream = (
            with_context(
                transport,
                on_exit=lambda: logging.getLogger(__name__).info(
                    "Disconnected from APQ Labnode at %s (%s).", transport.uri, transport.label
                ),
            )
            | pipe.action(
                lambda _: logging.getLogger(__name__).info(
                    "Connected to APQ Labnode at %s (%s).", transport.uri, transport.label
                )
            )
            | pipe.map(LabnodeSensor)
            | pipe.action(async_(lambda sensor: sensor.enumerate()))
            | pipe.switchmap(lambda sensor: sensor.stream_data())
        )

        return config_stream
Code Example #11
async def test_stream_from_queue():
    with assertImmediate():
        queue = asyncio.Queue()
        await queue.put(0)

        async def put_next(item):
            await queue.put(item + 1)

        await assert_stream(
            [0, 1, 2],
            (stream_from_queue(queue) | pipe.action(put_next))[:3])
Code Example #12
async def test_stream_from_queue_eof():
    with assertImmediate():
        EOF = object()
        queue = asyncio.Queue()
        await queue.put(3)

        async def put_next(item):
            await queue.put(item - 1 if item > 0 else EOF)

        await assert_stream(
            [3, 2, 1, 0],
            stream_from_queue(queue, EOF, use_is=True) | pipe.action(put_next))
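
Both queue tests bound an otherwise endless feedback loop with the stream slicing syntax: (...)[:3] takes the first three items and then closes the stream, which also stops put_next from refilling the queue forever. Below is a standalone sketch of the same feedback pattern; stream_from_queue, assertImmediate and assert_stream are project-specific helpers, so a plain async generator is used instead.

import asyncio

from aiostream import pipe, stream


async def main():
    queue = asyncio.Queue()
    await queue.put(0)

    async def from_queue(q):
        # Endless source: keeps reading items from the queue.
        while True:
            yield await q.get()

    async def put_next(item):
        # Feedback side effect: each consumed item schedules the next one.
        await queue.put(item + 1)

    xs = (stream.iterate(from_queue(queue)) | pipe.action(put_next))[:3]
    print(await stream.list(xs))  # [0, 1, 2]


asyncio.run(main())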
Code Example #13
async def handle_message_async(body):
    msg = json.loads(body.data.decode('utf-8'))

    print(f'Received msg: {msg}')
    msg = await create_workdir(msg)
    filenames = np.empty(3, dtype=object)
    msg['filenames'] = filenames

    await asyncio.gather(
        process_band(msg, 0, 'i'),
        process_band(msg, 1, 'r'),
        process_band(msg, 2, 'g')
    )

    await (stream.just(msg)
        | pipe.flatmap(get_object_msgs)
        | pipe.action(create_data_cube)
        | pipe.action(cutout_fits)
        | pipe.action(create_jpeg)
        | pipe.action(move_files_to_gcs))

    await cleanup(msg)
Code Example #14
    def stream_data(self):
        """
        Discover all Tinkerforge devices connected via this transport.
        Yields
        -------

        """
        data_stream = (
            stream.just(self)
            | pipe.action(
                lambda transport: logging.getLogger(__name__).info(
                    "Connecting to Tinkerforge host at %s (%s).", transport.uri, transport.label
                )
            )
            | pipe.switchmap(self._stream_transport)
            | retry.pipe((ConnectionError, asyncio.TimeoutError), self.reconnect_interval)
        )
        return data_stream
Code Example #15
    def _stream_transport(transport: TinkerforgeTransport):
        sensor_stream = stream.chain(
            stream.call(transport.enumerate) | pipe.filter(lambda x: False),
            stream.iterate(transport.read_enumeration())
            | pipe.action(lambda enumeration: event_bus.publish(f"nodes/tinkerforge/{enumeration[1].uid}/remove", None))
            | pipe.filter(lambda enumeration: enumeration[0] is not EnumerationType.DISCONNECTED)
            | pipe.starmap(lambda enumeration_type, sensor: TinkerforgeSensor(sensor))
            | pipe.map(lambda sensor: sensor.stream_data())
            | pipe.flatten(),
        ) | context.pipe(
            transport,
            on_enter=lambda: logging.getLogger(__name__).info(
                "Connected to Tinkerforge host at %s (%s).", transport.uri, transport.label
            ),
            on_exit=lambda: logging.getLogger(__name__).info(
                "Disconnected from Tinkerforge host at %s (%s).", transport.uri, transport.label
            ),
        )
        return sensor_stream
Code Example #16
    def stream_data(self):
        """
        Discover all Tinkerforge devices connected via this transport.
        Yields
        -------

        """
        data_stream = (
            stream.just(self)
            | pipe.action(lambda transport: logging.getLogger(__name__).info(
                "Connecting to %s at %s (%s).", transport.name, transport.uri,
                transport.label))
            | pipe.switchmap(self._stream_data)
            | retry.pipe(
                (GpibError, asyncio.TimeoutError), self.reconnect_interval))
        # We need to catch the TimeoutError here, because most protocols like SCPI have no means of synchronizing
        # messages. This means that we will lose sync after a timeout. In these cases, we reconnect the transport.
        # In case of a GpibError, we error out and stop the device.

        return data_stream
Code Example #17
    def stream_data(self,
                    config: dict[str, Any]) -> AsyncGenerator[DataEvent, None]:
        """
        Enumerate the device, then read data from it.

        Parameters
        ----------
        config: dict
            A dict containing the configuration for the device

        Yields
        -------
        DataEvent
            The data from the device
        """
        return stream.chain(
            stream.just(self)
            | pipe.action(async_(lambda sensor: sensor.enumerate()))
            | pipe.filter(lambda x: False),
            super().stream_data(config),
        )
Code Example #18
    def stream_data(self) -> AsyncGenerator[DataEvent, None]:
        """
        Generate the initial configuration of the sensor, configure it, and finally stream the data from the sensor.
        If there is a configuration update, reconfigure the sensor and start streaming again.
        Returns
        -------
        AsyncGenerator of DataEvent
            The data from the device
        """
        # Generates the first configuration
        # Query the database and if it does not have a config for the sensor, wait until there is one

        data_stream = stream.chain(
            stream.just(self),
            stream.iterate(
                event_bus.subscribe(
                    f"nodes/tinkerforge/{self.device.uid}/remove"))[:1]
            | pipe.map(lambda x: None),
        ) | pipe.switchmap(
            lambda sensor: stream.empty() if sensor is None else
            (self._stream_config_updates(sensor)
             | pipe.switchmap(lambda config: stream.chain(
                 stream.just(config),
                 stream.iterate(
                     event_bus.subscribe(
                         f"nodes/by_uuid/{config['uuid']}/remove"))[:1]
                 | pipe.map(lambda x: None),
             ))
             | pipe.action(lambda config: logging.getLogger(__name__).info(
                 "Got new configuration for: %s",
                 sensor.device,
             ))
             | pipe.map(self._create_config)
             | pipe.switchmap(lambda config: stream.empty()
                              if config is None or not config["enabled"] else
                              (self._configure_and_stream(config)))))

        return data_stream
Code Example #19
    def stream_data(self):
        """
        Discover all Tinkerforge devices connected via this transport.
        Yields
        -------

        """
        data_stream = (
            stream.just(self)
            | pipe.action(
                lambda transport: logging.getLogger(__name__).info(
                    "Connecting to %s at %s (%s).", transport.name, transport.uri, transport.label
                )
            )
            | pipe.switchmap(self._stream_data)
            | retry.pipe((OSError, asyncio.TimeoutError), self.reconnect_interval)
        )
        # We need to catch OSError, which is the parent of ConnectionError, because a connection to localhost
        # might resolve to 2 IPs and then return multiple exceptions at once if all IPs fail, which is an
        # OSError.
        # We also need to catch the TimeoutError here, because most protocols like SCPI have no means of synchronizing
        # messages. This means that we will lose sync after a timeout. We therefore need to reconnect in these cases.

        return data_stream
Code Example #20
File: worker.py Project: yottamoe/sdss_gz_ml
async def handle_message_async(msg):
    await (stream.just(msg)
           | pipe.action(retrieve_file)
           | pipe.action(move_file_to_gcs))
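
Code examples #13 and #20 use stream.just(msg) with a chain of pipe.action steps as a compact way to run several (possibly asynchronous) processing stages over a single message; every stage receives the same message object because action never transforms it. Below is a minimal sketch of that shape; the two stage functions are invented stand-ins for retrieve_file and move_file_to_gcs, not the project's implementations.

import asyncio

from aiostream import pipe, stream


async def fetch_input(msg):
    # Placeholder for an asynchronous stage such as retrieve_file.
    msg["retrieved"] = True


def upload_result(msg):
    # Placeholder for a synchronous stage such as move_file_to_gcs.
    msg["uploaded"] = True


async def handle_message(msg):
    await (stream.just(msg)
           | pipe.action(fetch_input)
           | pipe.action(upload_result))
    return msg


print(asyncio.run(handle_message({"id": 42})))
# {'id': 42, 'retrieved': True, 'uploaded': True}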