async def test_switchmap(assert_run, event_loop):
    with event_loop.assert_cleanup():
        xs = stream.range(0, 5, interval=1)
        ys = xs | pipe.switchmap(lambda x: stream.range(x, x + 2, interval=2))
        await assert_run(ys, [0, 1, 2, 3, 4, 5])
        assert event_loop.steps == [1, 1, 1, 1, 1, 1]

    with event_loop.assert_cleanup():
        xs = stream.range(0, 5, interval=1)
        ys = xs | pipe.switchmap(lambda x: stream.range(x, x + 2, interval=2))
        await assert_run(ys[:3], [0, 1, 2])
        assert event_loop.steps == [1, 1]
async def test_switchmap(assert_run, event_loop):
    with event_loop.assert_cleanup():
        xs = stream.range(0, 30, 10, interval=3)
        ys = xs | pipe.switchmap(lambda x: stream.range(x, x + 5, interval=1))
        await assert_run(ys, [0, 1, 2, 10, 11, 12, 20, 21, 22, 23, 24])
        assert event_loop.steps == [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]

    # Test cleanup procedure
    with event_loop.assert_cleanup():
        xs = stream.range(0, 5, interval=1)
        ys = xs | pipe.switchmap(lambda x: stream.range(x, x + 2, interval=2))
        await assert_run(ys[:3], [0, 1, 2])
        assert event_loop.steps == [1, 1]
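# The tests above run on a virtual event loop, but the semantics are easy to
# reproduce in real time. A minimal, self-contained sketch (intervals scaled
# down so it finishes in about a second): switchmap cancels the running
# sub-stream as soon as the source emits its next item, which is why each
# sub-stream only gets its first three items out before being switched.
import asyncio

from aiostream import stream, pipe


async def main():
    xs = stream.range(0, 30, 10, interval=0.25)
    ys = xs | pipe.switchmap(lambda x: stream.range(x, x + 5, interval=0.1))
    print(await stream.list(ys))  # [0, 1, 2, 10, 11, 12, 20, 21, 22, 23, 24]


asyncio.run(main())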
async def run(self) -> None:
    """
    The main task that reads data from the sensors and pushes it onto the event_bus.
    """
    # Generate the UUIDs of new sensors
    sensor_stream = stream.chain(
        stream.iterate(iterate_safely(f"{self.__topic}/get", f"{self.__topic}/status_update")),
        stream.iterate(event_bus.subscribe(f"{self.__topic}/add_host")),
    ) | pipe.flatmap(
        lambda item: stream.chain(
            (
                stream.call(event_bus.call, f"{self.__topic}/get_config", item)
                | catch.pipe(TopicNotRegisteredError)
            ),
            stream.iterate(event_bus.subscribe(f"nodes/by_uuid/{item}/update")),
        )
        | pipe.until(lambda config: config is None)
        | pipe.map(lambda config: config if self._is_config_valid(self.__node_id, config) else None)
        | pipe.map(self._create_transport)
        | pipe.switchmap(
            lambda transport: stream.empty()
            if transport is None
            else stream.iterate(transport.stream_data())
        )
        | pipe.action(lambda data: event_bus.publish("wamp/publish", data))
    )

    await sensor_stream
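# The per-host sub-pipeline above shuts itself down with pipe.until once a
# None config arrives. A minimal sketch of that idiom with made-up config
# values; unlike takewhile, until forwards the final tested item (the None
# sentinel) before closing the stream, so downstream stages must handle it.
import asyncio

from aiostream import stream, pipe


async def main():
    configs = stream.iterate([{"enabled": True}, {"enabled": False}, None, {"enabled": True}])
    ys = configs | pipe.until(lambda config: config is None)
    print(await stream.list(ys))  # [{'enabled': True}, {'enabled': False}, None]


asyncio.run(main())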
def stream_data(self) -> AsyncGenerator[DataEvent, None]:
    """
    Generate the initial configuration of the sensor, configure it, and finally stream the data
    from the sensor. If there is a configuration update, reconfigure the sensor and start
    streaming again.

    Returns
    -------
    AsyncGenerator of DataEvent
        The data from the device
    """
    # Generate the first configuration:
    # query the database and, if it does not have a config for the sensor, wait until there is one
    data_stream = (
        stream.chain(
            stream.call(
                call_safely, "db_labnode_sensors/get_config", "db_labnode_sensors/status_update", self.__uuid
            )
            | pipe.takewhile(lambda config: config is not None),
            stream.iterate(event_bus.subscribe(f"nodes/by_uuid/{self.__uuid}/update")),
        )
        | pipe.action(
            lambda config: logging.getLogger(__name__).info("Got new configuration for: %s", self._device)
        )
        | pipe.map(self._create_config)
        | pipe.switchmap(
            lambda config: stream.empty()
            if config is None or not config["enabled"]
            else self._configure_and_stream(config)
        )
    )

    return data_stream
def stream_data(self, config: dict[str, Any]) -> AsyncGenerator[DataEvent, None]:
    """
    Stream the data from the sensor.

    Parameters
    ----------
    config: dict
        A dictionary containing the sensor configuration.

    Returns
    -------
    AsyncGenerator
        The asynchronous stream.
    """
    data_stream = (
        stream.chain(
            stream.just(config),
            stream.iterate(event_bus.subscribe(f"nodes/by_uuid/{self.__uuid}/update")),
        )
        | pipe.action(
            lambda _: logging.getLogger(__name__).info("Got new configuration for: %s", self)
            if config is not None
            else logging.getLogger(__name__).info("Removed configuration for: %s", self)
        )
        | pipe.map(self._parse_config)
        | pipe.switchmap(
            lambda conf: stream.empty()
            if conf is None or not conf["enabled"]
            else self._configure_and_stream(conf)
        )
    )

    return data_stream
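# The two snippets above share one pattern: chain the current config with a
# subscription to config updates, then switchmap into the measurement stream,
# so every update cancels the running measurement and restarts it with the
# new settings. A minimal sketch with hypothetical stand-ins: fake_updates()
# replaces event_bus.subscribe(...) and measure() replaces
# _configure_and_stream().
import asyncio

from aiostream import stream, pipe


async def fake_updates():
    for interval in (0.5, 1.0):  # two config updates, 0.25 s apart
        await asyncio.sleep(0.25)
        yield {"enabled": True, "interval": interval}


def measure(config):
    # Poll at the configured rate; cancelled whenever a new config arrives.
    return stream.repeat(config["interval"], times=3, interval=config["interval"])


async def main():
    configs = stream.chain(
        stream.just({"enabled": True, "interval": 0.1}),
        stream.iterate(fake_updates()),
    )
    data = configs | pipe.switchmap(
        lambda config: stream.empty() if not config["enabled"] else measure(config)
    )
    print(await stream.list(data))


asyncio.run(main())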
def stream_data(self) -> AsyncGenerator[DataEvent, None]:
    """
    Generate the initial configuration of the sensor, configure it, and finally stream the data
    from the sensor. If there is a configuration update, reconfigure the sensor and start
    streaming again.

    Returns
    -------
    AsyncGenerator of DataEvent
        The data from the device
    """
    # Generate the first configuration:
    # query the database and, if it does not have a config for the sensor, wait until there is one
    data_stream = stream.chain(
        stream.just(self),
        stream.iterate(event_bus.subscribe(f"nodes/tinkerforge/{self.device.uid}/remove"))[:1]
        | pipe.map(lambda x: None),
    ) | pipe.switchmap(
        lambda sensor: stream.empty()
        if sensor is None
        else (
            self._stream_config_updates(sensor)
            | pipe.switchmap(
                lambda config: stream.chain(
                    stream.just(config),
                    stream.iterate(event_bus.subscribe(f"nodes/by_uuid/{config['uuid']}/remove"))[:1]
                    | pipe.map(lambda x: None),
                )
            )
            | pipe.action(
                lambda config: logging.getLogger(__name__).info("Got new configuration for: %s", sensor.device)
            )
            | pipe.map(self._create_config)
            | pipe.switchmap(
                lambda config: stream.empty()
                if config is None or not config["enabled"]
                else self._configure_and_stream(config)
            )
        )
    )

    return data_stream
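# This snippet adds a shutdown idiom on top of the reconfiguration pattern:
# chain the live object with a one-item "remove" stream mapped to None, so
# switchmap replaces the running pipeline with stream.empty() and tears it
# down. A minimal sketch of just that idiom; the delayed removals stream is
# a made-up stand-in for event_bus.subscribe(".../remove").
import asyncio

from aiostream import stream, pipe


async def main():
    removals = stream.just("remove event") | pipe.delay(0.35)
    source = stream.chain(
        stream.just("sensor"),
        removals[:1] | pipe.map(lambda _: None),
    )
    data = source | pipe.switchmap(
        lambda obj: stream.empty() if obj is None else stream.repeat(obj, interval=0.1)
    )
    print(await stream.list(data))  # a few 'sensor' items, then a clean shutdown


asyncio.run(main())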
def stream_data(self):
    """
    Discover all Tinkerforge devices connected via this transport and stream their data.

    Yields
    ------
    DataEvent
        The data from the devices connected to this host.
    """
    data_stream = (
        stream.just(self)
        | pipe.action(
            lambda transport: logging.getLogger(__name__).info(
                "Connecting to Tinkerforge host at %s (%s).", transport.uri, transport.label
            )
        )
        | pipe.switchmap(self._stream_transport)
        | retry.pipe((ConnectionError, asyncio.TimeoutError), self.reconnect_interval)
    )

    return data_stream
def _stream_data(self, transport):
    config_stream = (
        with_context(
            transport,
            on_exit=lambda: logging.getLogger(__name__).info(
                "Disconnected from APQ Labnode at %s (%s).", transport.uri, transport.label
            ),
        )
        | pipe.action(
            lambda _: logging.getLogger(__name__).info(
                "Connected to APQ Labnode at %s (%s).", transport.uri, transport.label
            )
        )
        | pipe.map(LabnodeSensor)
        | pipe.action(async_(lambda sensor: sensor.enumerate()))
        | pipe.switchmap(lambda sensor: sensor.stream_data())
    )

    return config_stream
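# with_context is a project-specific helper, not part of aiostream. A hedged
# sketch of one plausible implementation, assuming it must emit the managed
# object once, hold the connection open until the consuming pipeline is
# cancelled, and run the on_exit callback during teardown:
import asyncio

from aiostream import operator


@operator
async def with_context(context_manager, on_exit=None):
    try:
        async with context_manager as ctx:
            yield ctx
            # Keep the context alive; cancellation propagates from downstream.
            await asyncio.Event().wait()
    finally:
        if on_exit is not None:
            on_exit()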
def stream_data(self):
    """
    Connect to the device via this transport and stream its data.

    Yields
    ------
    DataEvent
        The data from the device.
    """
    data_stream = (
        stream.just(self)
        | pipe.action(
            lambda transport: logging.getLogger(__name__).info(
                "Connecting to %s at %s (%s).", transport.name, transport.uri, transport.label
            )
        )
        | pipe.switchmap(self._stream_data)
        | retry.pipe((GpibError, asyncio.TimeoutError), self.reconnect_interval)
    )
    # We need to catch the TimeoutError here, because most protocols like SCPI have no means of
    # synchronizing messages. This means that we will lose sync after a timeout, so we reconnect
    # the transport in these cases. In case of a GpibError, we error out and stop the device.
    return data_stream
def stream_data(self):
    """
    Connect to the device via this transport and stream its data.

    Yields
    ------
    DataEvent
        The data from the device.
    """
    data_stream = (
        stream.just(self)
        | pipe.action(
            lambda transport: logging.getLogger(__name__).info(
                "Connecting to %s at %s (%s).", transport.name, transport.uri, transport.label
            )
        )
        | pipe.switchmap(self._stream_data)
        | retry.pipe((OSError, asyncio.TimeoutError), self.reconnect_interval)
    )
    # We need to catch OSError, the parent of ConnectionError, because a connection to localhost
    # might resolve to two IPs and then raise multiple exceptions at once if all IPs fail, which
    # surfaces as an OSError.
    # We also need to catch the TimeoutError here, because most protocols like SCPI have no means
    # of synchronizing messages. This means that we will lose sync after a timeout, so we need to
    # reconnect in these cases.
    return data_stream
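# retry is also a project-specific helper. A hedged sketch of a plausible
# implementation as a pipable aiostream operator: re-subscribe to the source
# after one of the given exceptions, sleeping `interval` seconds between
# attempts (the project's actual helper, e.g. its backoff behaviour, may
# differ).
import asyncio

from aiostream import operator, streamcontext


@operator(pipable=True)
async def retry(source, exc_classes, interval):
    while True:
        try:
            async with streamcontext(source) as streamer:
                async for item in streamer:
                    yield item
            return  # the source completed normally, so we are done
        except exc_classes:
            await asyncio.sleep(interval)


# Usage matches the snippets above: `xs | retry.pipe((OSError,), 5)`.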