def __init__(self):
    """Wire the result-saving subscriptions and prepare the output directory."""
    super().__init__()
    self._stop = subject.Subject()
    self.config = config.SettingAccessor(self.config_prefix)
    self.subjects: Subjects = services.service_provider.SubjectProvider().get_or_create_instance(None)
    self.saving_scheduler = scheduler.NewThreadScheduler()

    # The three data streams share the same subscription shape: hop onto the
    # saving thread, honour the per-stream enable flag, stop on teardown.
    routes = (
        (self.subjects.image_producer, "save_images", self.save_image),
        (self.subjects.detection_result, "save_labels", self.save_labels),
        (self.subjects.add_to_timeline, "save_events", self.save_timeline_events),
    )
    for stream, flag, handler in routes:
        stream.pipe(
            operators.observe_on(self.saving_scheduler),
            operators.filter(self.config_enabled_filter(flag)),
            operators.take_until(self._stop),
        ).subscribe(ErrorToConsoleObserver(handler))

    # Re-initialise the saving directory whenever the relevant setting changes.
    config.setting_updated_channel.pipe(
        operators.filter(self._directory_filter),
        operators.take_until(self._stop),
    ).subscribe(ErrorToConsoleObserver(lambda x: self.initialize_saving_directory()))
    self.initialize_saving_directory()
def __init__(self, ip: str, port: int):
    """
    Remote-endpoint wrapper: holds connection state, a msgpack unpacker and
    the observable event streams published by the remote side.

    :param ip: server address (connection is established later, not here)
    :param port: server port
    """
    super().__init__()
    self.socket = None
    self.ip = ip
    self.port = port
    self.is_connected = False
    self.unpacker = msgpack.Unpacker()
    self.cq = CompletionQueue()
    self.error_channel = subject.Subject()
    # background thread for incoming messages
    # NOTE(review): attribute name is misspelled ("schduler") but kept —
    # renaming would break external readers of this attribute.
    self.incoming_schduler = scheduler.NewThreadScheduler()
    # configure subscriptions
    # NOTE(review): self.socket is still None when handed to the *Event
    # helpers below — confirm they re-read the socket after connect().
    self.input_port_updated = req.InputPortUpdateEvent(self.socket, self.cq) \
        .register() \
        .pipe(operators.map(lambda vals: [SimexPort(x) for x in vals[0]]))
    self.error_occured = req.ErrorEvent(self.socket, self.cq) \
        .register() \
        .pipe(operators.map(lambda vals: vals[0]))
    # Parameter events trigger a full re-fetch of the parameter set.
    self.params_updated = req.ParameterEvent(self.socket, self.cq) \
        .register() \
        .pipe(operators.flat_map(lambda x: self.get_parameters()))
    self.simulation_state_updated = req.SimulationStateEvent(self.socket, self.cq) \
        .register() \
        .pipe(operators.map(lambda vals: SimexSimulationState(vals[0])))
def flow_control(should_stop: subject.BehaviorSubject) -> rx.Observable:
    """
    Flow control operator. Buffer the previous items and emit them gracefully
    (respecting the given should_stop/back pressure).

    :param should_stop: subject emitting True while downstream asks us to pause
    :return: observable that drains the buffer while should_stop is False
    """
    buffer = []
    _stop = subject.Subject()
    _upstream_completed = False

    def on_error(ex):
        _stop.on_next(0)
        raise ex

    def on_upstream_completed():
        # BUGFIX: without `nonlocal` this assignment created a fresh local,
        # the outer flag stayed False, and emit_next() never called
        # observer.on_completed() — downstream observers never completed.
        nonlocal _upstream_completed
        _upstream_completed = True

    def upstream(source: rx.Observable):
        # NOTE(review): `upstream` is never invoked inside flow_control —
        # confirm the caller routes the source through it, otherwise the
        # buffer is never filled.
        source.pipe(operators.take_until(_stop)).subscribe(
            lambda x: buffer.append(x), on_error, on_upstream_completed)

    def downstream_subscribe(observer: rx.core.Observer, sch: rx.typing.Scheduler = None):
        def emit_next():
            # Pop one buffered item, or complete once upstream has finished.
            if len(buffer):
                observer.on_next(buffer.pop(0))
            else:
                if _upstream_completed:
                    observer.on_completed()

        def schedule_emit_next_until(until: subject.Subject):
            stop_emitting = False

            def _action(sch: rx.typing.Scheduler, state=None):
                emit_next()

            def until_on_next(v):
                nonlocal stop_emitting
                stop_emitting = True

            until.pipe(operators.take_until(_stop)).subscribe(until_on_next, scheduler=sch)
            if not stop_emitting:
                sch.schedule(_action)

        def should_stop_updated(val: bool):
            if val:
                # should stop, do nothing until next message
                pass
            else:
                # normal operation
                # Cannot guarantee that the should_stop will emit every time the value is received.
                schedule_emit_next_until(should_stop)

        should_stop.pipe(
            operators.take_until(_stop)).subscribe(should_stop_updated)

    return rx.create(downstream_subscribe)
def _buffer_until_complete(source: rx.Observable):
    """Collect every item of *source* into a single buffer, emitted on completion."""
    boundary = subject.Subject()

    def close_boundary():
        # Fire the buffer boundary exactly once, then retire it.
        boundary.on_next(0)
        boundary.on_completed()

    source.subscribe(on_completed=close_boundary)
    return operators.buffer(boundary)(source)
def __init__(self):
    """Read playback settings and prepare the (not yet running) image feed."""
    super().__init__()
    self._stop = subject.Subject()
    self.running = False
    self.feed_scheduler = ThreadPoolScheduler()
    self.config = config.SettingAccessor(self.config_prefix)
    self.fps = self.config["fps"]
    self.images = self._source_images()
def __init__(self, serial_port, baud_rate, command_interval=0.25, terminator='\r\n'):
    """Open the serial device and start the rate-limited command pump."""
    self.serial_port = serial_port
    self.baud_rate = baud_rate
    self.command_interval = command_interval
    self.terminator = terminator
    self.resolve_queue = subject.Subject()
    # Outgoing commands are queued here and trickled out no faster than
    # one per command_interval seconds.
    self.command_queue = subject.Subject()
    self.command_queue.pipe(rate_limit(command_interval)).subscribe(
        self._send_command, error_handler)
    # software flow control
    self.serial = serial.Serial(serial_port, baud_rate, xonxoff=True, timeout=0.5)
def __init__(self):
    """Subscribe to result streams and re-wire them when group_size changes."""
    super().__init__()
    self.config = config.SettingAccessor(self.config_prefix)
    self._stop = subject.Subject()
    self.subjects: Subjects = services.service_provider.SubjectProvider(
    ).get_or_create_instance(None)
    # FIX: bound the settings listener with take_until(self._stop) so it is
    # released on teardown, matching every other component in this file; the
    # original subscription lived for the whole process lifetime (leak).
    # (self._stop is created above, before the subscription, to allow this.)
    config.setting_updated_channel.pipe(
        operators.filter(lambda x: x[0] == f"{ResultProcessor.config_prefix}/group_size"),
        operators.take_until(self._stop),
    ).subscribe(
        ErrorToConsoleObserver(lambda x: self.configure_subscriptions()))
    self.configure_subscriptions()
def __init__(self):
    """Set up the Simex remote-connection plumbing (no connection yet)."""
    self.logger = logging.getLogger("console")
    self.config = config.SettingAccessor(self.config_prefix)
    ip = self.config['ip']
    port = self.config['port']
    self.logger.debug(f"Simex address = {ip}:{port}")
    self.instance = simex.SimexRemote(ip, port)
    self.connected = subject.BehaviorSubject(False)
    self._stop = subject.Subject()
    self.execution_thread = scheduler.NewThreadScheduler()
    self.subjects: Subjects = services.service_provider.SubjectProvider().get_or_create_instance(None)
def __init__(self, *args, **kwargs):
    """Prepare the inference pipeline: comm channel, schedulers and message bus."""
    super().__init__()
    self.config = config.SettingAccessor(self.config_prefix)
    self.batch_size = self.config["batch_size"]
    self._stop = subject.Subject()
    self.inference_comm = InferenceComm()
    # Two separate pools: one feeds images out, one processes results coming back.
    self.feed_scheduler = scheduler.ThreadPoolScheduler()
    self.process_scheduler = scheduler.ThreadPoolScheduler()
    self.subject_provider = services.service_provider.SubjectProvider()
    self.subjects: Subjects = self.subject_provider.get_or_create_instance(None)
class Subjects(object):
    """
    Internal message channel multiplexier.

    NOTE(review): all subjects are class attributes, so every instance shares
    the same underlying channels.
    """
    # Raw acquired frames from the image source.
    image_producer: typing.Subject[AcquiredImage, AcquiredImage] = subject.Subject()
    # Stateful flag: replays the latest connection state to late subscribers.
    image_source_connected: subject.BehaviorSubject = subject.BehaviorSubject(
        False)
    sample_image_data: typing.Subject[SampleImageData, SampleImageData] = subject.Subject()
    # Per-image detection results coming back from the analyzer.
    detection_result: typing.Subject[DetectionsInImage, DetectionsInImage] = subject.Subject()
    analyzer_back_pressure_detected: subject.BehaviorSubject = subject.BehaviorSubject(
        False)
    # Rendered sample frames (ndarray) ready for display.
    rendered_sample_image_producer: typing.Subject[
        np.ndarray, np.ndarray] = subject.Subject()
    analyzer_connected: subject.BehaviorSubject = subject.BehaviorSubject(
        False)
    processed_distributions: typing.Subject[
        ProcessedDistributions, ProcessedDistributions] = subject.Subject()
    # Data points to be appended to the timeline view/recorder.
    add_to_timeline: typing.Subject[TimelineDataPoint, TimelineDataPoint] = subject.Subject()

    def __init__(self):
        super(Subjects, self).__init__()
def __init__(self, client: mqtt_wrapper.MQTTClientWrapper, topic: Union[str, None], qos=0,
             options=None, properties=None, config=None, name=""):
    """
    Node that doubles as an rx Observable over MQTT messages for *topic*.

    :param client: wrapped MQTT client used for broker subscriptions
    :param topic: topic to subscribe to (None presumably means "unbound" — TODO confirm)
    :param qos: MQTT quality-of-service level
    :param options: passed through on subscribe — presumably broker subscribe options; verify
    :param properties: passed through on subscribe — TODO confirm semantics
    :param config: forwarded to Node.__init__
    :param name: forwarded to Node.__init__
    """
    self.options = options
    self.properties = properties
    self.qos = qos
    self.topic = topic
    self.client = client
    self.message_subject = subject.Subject()
    # Tracks whether the broker-side subscription has been made yet.
    self.broker_subscribed = False
    self.subscription: rx.disposable.Disposable = None
    self.can_subscribe = BehaviorSubject(False)
    # Both bases are initialised explicitly (not via cooperative super()),
    # each with its own arguments; Node first, then the Observable base with
    # our subscribe function.
    Node.__init__(self, name=name, config=config)
    rx.Observable.__init__(self, subscribe=self.subscribe_func)
def test_ordered_resolve(self):
    """Smoke test: two independent subscribers both observe the resolved sequence."""
    source = subject.Subject()
    resolved = source.pipe(ordered_resolution())
    resolved.subscribe(lambda v: print(f"1: {v}"),
                       lambda err: print(f"1: {err}"),
                       lambda: print("1 complete."))
    resolved.subscribe(lambda v: print(f"2: {v}"),
                       lambda err: print(f"2: {err}"),
                       lambda: print("2 complete."))
    source.on_next(1)
    source.on_next(2)
def __init__(self):
    # Local import so the GenICam driver is only loaded when this service is
    # actually constructed.
    from harvesters.core import Harvester
    super().__init__()
    self.config = config.SettingAccessor(self.config_prefix)
    self.logger = logging.getLogger("console")
    self._stop = subject.Subject()
    self.driver = Harvester()
    # Load the GenTL producer (.cti) file named in the settings.
    self.driver.add_cti_file(self.config["cti_path"])
    self.acquirer = None
    self.simex_instance = None
    self.running = False
    self.scheduler = scheduler.NewThreadScheduler()
    # Device discovery may block, so kick it off on the background scheduler.
    self.scheduler.schedule(
        lambda sc, state: self.driver.update_device_info_list())
def __init__(self, uuid: str):
    """Store the identifier and create the subject carrying state changes."""
    self.light_state = RxSubject.Subject()
    self.uuid = uuid
def __init__(self):
    """Create the teardown signal and the worker thread pool."""
    super().__init__()
    self.scheduler = scheduler.ThreadPoolScheduler()
    self._stop = subject.Subject()
def __init__(self, *args, **kwargs):
    # Single internal event stream; *args/**kwargs are accepted but ignored.
    # NOTE(review): super().__init__() is not called — confirm the base class
    # (not visible here) needs no initialisation.
    self._observable = subject.Subject()
import os
import typing
from pydoc import locate

from PyQt5 import QtWidgets, QtGui, QtCore
from rx import subject

# Broadcasts a (key, value) tuple every time a setting is written.
setting_updated_channel = subject.Subject()


class SettingRegistry(object):
    """Declarative description of one setting: key, default value, type name, title."""

    def __init__(self, key, value, type="str", title=None):
        self.key = key
        self.value = value
        self.type = type
        self.title = title


class SettingAccessor(object):
    """Dict-style read/write access to settings under an optional section prefix."""

    def __init__(self, section=None):
        self.section = f"{section}/" if section else ""

    def __getitem__(self, item):
        # Each setting stores its own type name alongside it; resolve that
        # name back to the real type so the stored value deserialises correctly.
        type_name = global_settings.value(f"{self.section}{item}/type", defaultValue="str")
        return global_settings.value(f"{self.section}{item}", type=locate(type_name))

    def __setitem__(self, key, value):
        full_key = f"{self.section}{key}"
        global_settings.setValue(full_key, value)
        # Notify listeners (e.g. components re-reading their config) of the change.
        setting_updated_channel.on_next((full_key, value))
def setUp(self) -> None:
    # create source stream (fast)
    self.source = rx.interval(0.1)
    # and fast consumer
    # NOTE(review): rx.core.Observer is an abstract base in RxPY 3 — confirm
    # it is directly constructible from a single on_next callable here.
    self.observer = rx.core.Observer(lambda x: print(f"Received{x}"))
    self._stop = subject.Subject()
class SimexIO(object):
    """Bridge between the internal subjects and a remote Simex block server."""
    config_prefix = "SimexIO"

    # NOTE(review): class attributes — shared by every SimexIO instance.
    image_feed_chan_in = subject.Subject()
    camera_temperature_chan_in = subject.Subject()

    def __init__(self):
        self.logger = logging.getLogger("console")
        self.config = config.SettingAccessor(self.config_prefix)
        self.logger.debug(
            f"Simex address = {self.config['ip']}:{self.config['port']}")
        self.instance = simex.SimexRemote(self.config['ip'], self.config['port'])
        # Becomes True only after the configuration handshake succeeds.
        self.connected = subject.BehaviorSubject(False)
        self.execution_thread = scheduler.NewThreadScheduler()
        self.subjects: Subjects = services.service_provider.SubjectProvider(
        ).get_or_create_instance(None)
        self._stop = subject.Subject()

    @staticmethod
    @config.DefaultSettingRegistration(config_prefix)
    def default_settings(configPrefix):
        # Register this component's default settings with the config system.
        config.default_settings(configPrefix, [
            config.SettingRegistry(
                "ip", "127.0.0.1", type="str", title="Block server IP"),
            config.SettingRegistry(
                "port", 12305, type="int", title="Block server port"),
            config.SettingRegistry(
                "buffer_count", 5, type="int",
                title="Image statistics averaging window size"),
            config.SettingRegistry(
                "temperature_sample_time", 1.0, type="float",
                title="Temperature report sample time(sec)"),
        ])

    def connect(self):
        """
        Return a cold observable that, when subscribed, connects to the Simex
        server and runs the handshake chain (version check, port verification,
        subscription setup). Emits an error immediately if already connected.
        """
        if self.instance.is_connected:
            error_message = "SimexIO is already connected."
            self.logger.warning(error_message)
            return rx.throw(RuntimeError(error_message))

        def subscribe(observer: rx.typing.Observer, scheduler=None):
            try:
                self.logger.debug("connecting to SimexIO")
                self.instance.connect()
            except Exception as ex:
                self.logger.error(
                    f"Failed to connect to SimexIO. Exception: {ex}")
                # NOTE(review): on failure the observer is never notified —
                # subscribers wait forever; consider observer.on_error(ex).
                return
            self.instance.verify_compatibility().pipe(
                operators.flat_map(self._simex_verify_version_cb),
                operators.map(self._simex_enforce_port_configuration_cb),
                operators.map(self._simex_configure_subscription),
            ).subscribe(observer)
            return Disposable(lambda: None)

        return rx.create(subscribe)

    def disconnect(self):
        """Tear down the connection and stop all SimexIO-bound streams."""
        if not self.instance.is_connected:
            self.logger.warning("SimexIO is not connected. Cannot disconnect.")
            return
        try:
            self.logger.debug("disconnecting SimexIO")
            self.instance.disconnect()
            # Completes every pipeline bounded by take_until(self._stop).
            self._stop.on_next(None)
            self.connected.on_next(False)
        except Exception as ex:
            self.logger.error(f"Failed to disconnect SimexIO. Exception: {ex}")

    def _simex_verify_version_cb(self, x=None):
        # x is the (truthy) result of verify_compatibility().
        if not x:
            raise RuntimeError(
                "Simex server compatibility verification failed")
        else:
            # verify port configuration
            return self.instance.request_info()

    def _simex_enforce_port_configuration_cb(self, x: simex.SimexInfo = None):
        # Exactly one output port is expected on the remote side.
        if len(x.output_ports) == 1:
            self.logger.debug("SimexIO initialized, start data transimittion")
            self.connected.on_next(True)
        else:
            self.logger.error(
                "Failed to verify SimexIO configuration. The connection will be closed."
            )
            self.disconnect()
            raise RuntimeError("Failed to verify SimexIO configuration")

    def _simex_connect_error_cb(self, err=None):
        self.logger.error(
            f"Exception occured in SimexIO validation chain: {err}")

    def _simex_configure_subscription(self, x=None):
        # Stream the mean of per-image medians (window: buffer_count) to the
        # Simex BRIGHTNESS output port until _stop fires.
        self.subjects.image_producer.pipe(
            operators.observe_on(self.execution_thread),
            operators.map(lambda acquired_image: acquired_image.image
                          ),  # pluck the image array
            operators.map(lambda im: np.median(im)),
            operators.buffer_with_count(self.config["buffer_count"]),
            operators.map(lambda medians: np.mean(medians)),
            operators.take_until(self._stop)
        ).subscribe(
            ErrorToConsoleObserver(lambda t: self.instance.request_port_update(
                OutputPorts.BRIGHTNESS.value, np.asarray(t, dtype=np.float64)).
                subscribe(ErrorToConsoleObserver())))
class InferenceComm(object):
    """gRPC client for the inference server, exposing results/errors/stats as Rx channels."""

    # NOTE(review): class attributes — every InferenceComm instance shares
    # these channels.
    connection_chan = subject.BehaviorSubject(False)
    result_chan = subject.Subject()
    error_chan = subject.Subject()
    stats_chan = subject.Subject()
    back_pressure_chan = subject.BehaviorSubject(False)

    def __init__(self):
        super().__init__()
        self.channel = None
        self.stub: grpc_service.InferenceStub
        # Submission timestamps of in-flight requests (FIFO): used both for
        # latency stats and as the back-pressure heuristic.
        self.start_time_queue = []
        self.stub = None

    def on_connect_state_change(self, state: grpc.ChannelConnectivity):
        # Mirror gRPC channel connectivity into connection_chan.
        if state == grpc.ChannelConnectivity.READY:
            self.connection_chan.on_next(True)
        if state == grpc.ChannelConnectivity.TRANSIENT_FAILURE or state == grpc.ChannelConnectivity.SHUTDOWN:
            if self.connection_chan.value:
                self.connection_chan.on_next(False)

    def connect_to_grpc_server(self, ip, port):
        # No-op when already connected; replaces any stale channel first.
        if self.connection_chan.value:
            return
        if self.channel is not None:
            self.channel.close()
        self.channel = grpc.insecure_channel(f"{ip}:{port}")
        # try_to_connect=True: start connecting immediately.
        self.channel.subscribe(self.on_connect_state_change, True)
        self.stub = grpc_service.InferenceStub(self.channel)

    def back_pressure_detection(self):
        # More than two in-flight requests is treated as back pressure.
        if len(self.start_time_queue) > 2:
            self.back_pressure_chan.on_next(True)
            return True
        else:
            self.back_pressure_chan.on_next(False)
            return False

    def inference_done(self, future: grpc.Future):
        # Completion callback for a single inference request.
        # NOTE(review): the pop(0) happens outside the try block — if the
        # queue were ever empty this would raise in the gRPC callback thread.
        elapsed_time = time.time() - self.start_time_queue.pop(0)
        try:
            self.back_pressure_detection()
            inference_result: grpc_def.InferenceResult = future.result(None)
            num_processed_images = len(inference_result.result)
            stats = InferenceStats(num_processed_images, elapsed_time)
            self.stats_chan.on_next(stats)
            # notify new detections
            self.result_chan.on_next(inference_result)
        except Exception as ex:
            self.error_chan.on_next(f"Inference error: {ex}")

    def clean(self):
        if self.channel:
            self.channel.close()

    def stop(self):
        if self.connection_chan.value:
            self.clean()
            self.connection_chan.on_next(False)

    def feed_images(self, image_and_name):
        """Send one batch of (image_data, name) pairs to the inference server."""
        if not self.connection_chan.value:
            self.error_chan.on_next("Server is not connected. Cannot feed image.")
            return
        req = grpc_def.ImageBatchRequest()
        req.opt.num_image_returned = 1
        for image, name in image_and_name:
            req_img = grpc_def.Image()
            req_img.name = name
            req_img.images_data = image
            req.images.append(req_img)
        # Record submission time, fire the async request and re-evaluate
        # back pressure with the new in-flight count.
        self.start_time_queue.append(time.time())
        resp: grpc.Future = self.stub.Inference.future(req)
        resp.add_done_callback(self.inference_done)
        self.back_pressure_detection()

    @staticmethod
    def to_detected_object(detection: grpc_def.Detection):
        # Convert a gRPC Detection message into the internal DetectedObject.
        detected_object = DetectedObject()
        rle = {
            "counts": detection.rle.counts,
            "size": list(detection.rle.size),
        }
        bbox = detection.bbox
        detected_object.label = detection.category
        detected_object.maskRLE = rle
        # decode the run-length encoding into a binary mask
        detected_object.mask = mask_util.decode(rle)
        detected_object.bbox = (bbox.xlt, bbox.ylt, bbox.xrb, bbox.yrb)
        detected_object.score = detection.confidence
        return detected_object
def __init__(self):
    """Initialise the stop signal, console logger and back-pressure lock."""
    super().__init__()
    self.back_pressure_lock = Condition()
    self.logger = logging.getLogger("console")
    self._stop = subject.Subject()