# Nested inside a class method: `self` is captured from the enclosing scope.
def watch(id_: str, obs: Observable):
    ingestor_mode_health = obs.pipe(
        ops.map(lambda x: self._ingestor_mode_to_health(id_, x)))
    obs.pipe(
        heartbeat(self.LIVELINESS),
        ops.map(lambda x: to_ingestor_health(id_, x)),
        self._combine_most_critical(ingestor_mode_health),
    ).subscribe(self.rmf.ingestor_health.on_next, scheduler=self.scheduler)
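# A minimal sketch of what the project-local `heartbeat` operator used by the
# `watch` helpers might look like (assumption: the real implementation
# differs; `liveliness` is a quiet period in seconds). Each emission maps to
# True ("alive"); after a silence longer than `liveliness`, False is emitted.
import rx
from rx import operators as ops

def heartbeat(liveliness: float):
    def _operator(source: rx.Observable) -> rx.Observable:
        alive = source.pipe(ops.map(lambda _: True))
        dead = source.pipe(
            ops.debounce(liveliness),   # fires only after a quiet period
            ops.map(lambda _: False),
        )
        return rx.merge(alive, dead)
    return _operator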
async def observable_to_async_event_generator(observable: Observable):
    queue = asyncio.Queue()

    def on_next(i):
        queue.put_nowait(i)

    # materialize() wraps on_next/on_error/on_completed into Notification
    # items, so errors and completion also flow through the queue.
    observable.pipe(materialize()).subscribe(on_next=on_next)
    while True:
        value = await queue.get()
        yield value
        queue.task_done()
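# A minimal usage sketch for the bridge above (names here are illustrative;
# assumes RxPY 3, where Notification.kind is 'N', 'E', or 'C'). Because of
# materialize(), the generator yields Notification objects, so completion and
# errors can be detected and the otherwise-infinite loop exited.
import asyncio
import rx

async def main():
    source = rx.of(1, 2, 3)   # emits synchronously at subscribe time
    async for notification in observable_to_async_event_generator(source):
        print(notification)
        if notification.kind in ('C', 'E'):
            break

asyncio.run(main())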
def watch(id_: str, obs: Observable):
    """
    :param obs: Observable[RobotState]
    """
    robot_mode_health = obs.pipe(
        ops.map(lambda x: self._robot_mode_to_health(id_, x)))
    obs.pipe(
        heartbeat(self.LIVELINESS),
        ops.map(lambda x: to_robot_health(id_, x)),
        self._combine_most_critical(robot_mode_health),
    ).subscribe(self.rmf.robot_health.on_next, scheduler=self.scheduler)
def accumulate_and_clear(values: Observable, clear: Observable) -> Observable:
    return merge_either(values, clear.pipe(ops.map(lambda _: True))).pipe(
        ops.scan(
            lambda collected, either: [] if either[1] else collected + [either[0]],
            [],
        ))
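# A minimal usage sketch for accumulate_and_clear (assumption: the
# project-local merge_either tags items from the two sources as
# (value, None) / (None, True) pairs, consistent with the left/right
# helpers further down).
from rx.subject import Subject

values, clear = Subject(), Subject()
accumulate_and_clear(values, clear).subscribe(print)
values.on_next('a')   # -> ['a']
values.on_next('b')   # -> ['a', 'b']
clear.on_next(None)   # -> []
values.on_next('c')   # -> ['c']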
def generate_move_path(file_stream: rx.Observable,
                       storage_dir: str) -> rx.Observable:
    """Identify the appropriate move path for files in the file_stream."""
    return file_stream.pipe(
        operators.map(
            lambda target: fc.identify_photo_move_path(storage_dir, target)),
        operators.map(lambda target: target.clear_contents_data()),
    )
def enqueue(self,
            observable: Observable,
            group: str = 'default-group',
            retries: int = 0,
            description: str = None) -> Observable:
    def log_status(status):
        logging.debug(
            str({
                'WorkQueue': str(self),
                'group': group,
                status: description
            }))

    log_status('ENQUEUED')
    output = Subject()
    errors = Subject()
    output_finalized = Subject()

    def handle_error(e, _):
        log_status('FAILED')
        errors.on_next(e)
        return empty()

    def set_output_finalized():
        output_finalized.on_next(True)

    # The work item: runs the given observable with retries, forwards each
    # value to `output`, and stops early once the output stream is finalized.
    work = of(True).pipe(
        do_action(lambda _: log_status('STARTED')),
        take_until(output_finalized),
        flat_map(lambda _: observable.pipe(
            map(lambda value: of({
                'value': value,
                'output': output
            })),
            retry_with_backoff(
                retries=retries,
                description='{}.enqueue(group={}, description={})'.format(
                    self, group, description)),
            catch(handler=handle_error),
            take_until(output_finalized),
            take_until_disposed())),
        concat(of(of({
            'completed': True,
            'output': output
        }))),
        finally_action(lambda: log_status('COMPLETED')))
    self._queue.on_next({'work': work, 'group': group})
    return output.pipe(
        observe_on(self.request_scheduler),
        throw_when(errors),
        take_while(lambda r: not r.get('completed')),
        map(lambda r: r.get('value')),
        finally_action(set_output_finalized))
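# A hypothetical usage sketch for the work-queue variant above (assumption:
# WorkQueue subscribes to self._queue and runs each enqueued `work`
# observable, serialized per concurrency group).
queue = WorkQueue()
queue.enqueue(
    of(1, 2, 3),
    group='thumbnails',
    retries=2,
    description='resize images',
).subscribe(
    on_next=lambda value: print('result:', value),
    on_error=lambda e: print('failed:', e),
)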
def __init__(self, inObs: Observable, outSubject: Subject):
    """
    Parameters
    ----------
    inObs : Observable<bytes>
        Observable that the instance subscribes to in order to receive
        data packets. It should emit objects of type bytes.
    outSubject : Subject<bytes>
        Subscribe to outSubject to publish messages (i.e. send them to
        the receiver).
    """
    super().__init__()
    self._in: Observable = inObs.pipe(map(self._mapIncoming), publish())
    self._in.connect()
    self._out = Subject()
    self._out.pipe(map(self._mapOutgoing)).subscribe(outSubject)
def from_rx(
    source: rx.Observable,
    batch_size: int = None,
    overflow_strategy: OverflowStrategy = None,
    is_batched: bool = None,
) -> Flowable:
    """
    Wrap an rx.Observable and expose it as a Flowable, relaying signals in a
    backpressure-aware manner.

    :param source: an rx.Observable
    :param overflow_strategy: defines which batches are dropped once the
        buffer is full
    :param batch_size: the number of elements that are sent in a batch
    :param is_batched: if set to True, the elements emitted by the source
        rx.Observable are already batched, i.e. of type List or Iterator
    """
    if is_batched is True:
        batched_source = source
    else:
        if batch_size is None:
            batch_size = 1
        batched_source = source.pipe(operators.buffer_with_count(batch_size))

    if isinstance(overflow_strategy, (DropOld, ClearBuffer)):
        return init_flowable(
            FromRxEvictingFlowable(
                batched_source=batched_source,
                overflow_strategy=overflow_strategy,
            ))
    else:
        if overflow_strategy is None:
            buffer_size = math.inf
        elif isinstance(overflow_strategy, BackPressure):
            buffer_size = overflow_strategy.buffer_size
        else:
            raise AssertionError(
                'only BackPressure is currently supported as overflow strategy')
        return init_flowable(
            FromRxBufferingFlowable(
                batched_source=batched_source,
                overflow_strategy=overflow_strategy,
                buffer_size=buffer_size,
            ))
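# A minimal usage sketch (assumptions: an rxbp-style API where the returned
# Flowable is subscribable, and a BackPressure strategy constructed with a
# buffer_size; both names come from the snippet above, not a verified API).
import rx

flowable = from_rx(
    rx.range(100),
    batch_size=10,
    overflow_strategy=BackPressure(buffer_size=10),
)
flowable.subscribe(print)   # prints lists of up to 10 elements each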
def enqueue(self,
            observable: Observable,
            group: str = None,
            retries: int = 0,
            description: str = None):
    # Provide a function returning a callable?
    description = description or str(observable)
    key = '{}({})'.format(description, random.random())

    def log_status(status):
        logging.debug(
            str({
                'WorkQueue': str(self),
                'group': group,
                'key': key,
                status: description
            }))

    log_status('ENQUEUED')
    error: Optional[Exception] = None

    def handle_error(e):
        log_status('FAILED')
        nonlocal error
        error = e
        return of({'key': key, 'error': e})

    def throw_if_error(request):
        if error:
            return throw(error)
        else:
            return of(request)

    def extract_value(value):
        # Emitted values may themselves be observables; flatten them,
        # otherwise wrap the plain value.
        if isinstance(value, Observable):
            return value
        else:
            return of(value)

    request = of(True).pipe(
        do_action(lambda _: log_status('STARTED')),
        flat_map(lambda _: observable.pipe(
            flat_map(extract_value),
            map(lambda value: {
                'key': key,
                'value': value
            }),
            retry_with_backoff(
                retries=retries,
                description='{}.enqueue(group={}, description={})'.format(
                    self, group, description)),
            catch(handler=lambda e, o: handle_error(e)),
        )),
        concat(
            of({
                'key': key,
                'complete': True
            }).pipe(do_action(lambda _: log_status('COMPLETED')))))
    result_stream = self._output.pipe(
        filter(lambda request: request['key'] == key),
        flat_map(lambda request: throw_if_error(request)),
        take_while(lambda request: not request.get('complete')),
        flat_map(lambda request: of(request.get('value'))))
    self._requests.on_next({
        'request': request,
        'concurrency_group': group
    })
    return result_stream
def right(a: Observable) -> Observable:
    return a.pipe(ops.map(lambda v: (None, v)))
def left(a: Observable) -> Observable:
    return a.pipe(ops.map(lambda v: (v, None)))
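# A usage sketch for the tagging helpers above: downstream code can tell the
# two merged sources apart by which tuple slot is non-None, exactly as
# accumulate_and_clear does with either[0] / either[1]. The output order
# shown assumes synchronous sources.
import rx

letters = rx.of('a', 'b')
numbers = rx.of(1, 2)
rx.merge(left(letters), right(numbers)).subscribe(print)
# ('a', None), ('b', None), (None, 1), (None, 2)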
def generate_file_metadata(file_stream: rx.Observable) -> rx.Observable:
    """Add various file metadata to the FileTargets being streamed."""
    return file_stream.pipe(
        operators.map(fl.sha256_file),
        operators.map(fl.encode_shasum),
    )
def generate_image_metadata(file_stream: rx.Observable) -> rx.Observable:
    """Add image metadata to file_stream."""
    return file_stream.pipe(
        operators.map(im.parse_image_meta_for_file_target),
        operators.map(im.identify_image_datestamp),
    )
def upstream(source: rx.Observable):
    source.pipe(operators.take_until(_stop)).subscribe(
        lambda x: buffer.append(x), on_error, on_upstream_completed)
def load_file_content(file_stream: rx.Observable) -> rx.Observable:
    """Load file contents from disk."""
    return file_stream.pipe(operators.map(fl.load_file_contents))
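# A sketch composing the file-pipeline stages above into a single stream.
# Each stage is already a callable Observable -> Observable, so it can be
# passed to pipe() directly (`process_photos` itself is a hypothetical name).
def process_photos(file_stream: rx.Observable,
                   storage_dir: str) -> rx.Observable:
    return file_stream.pipe(
        load_file_content,
        generate_file_metadata,
        generate_image_metadata,
        lambda s: generate_move_path(s, storage_dir),
    )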
def consume(self, xs: rx.Observable):
    obs = xs.pipe(ops.do_action(lambda x: results.append(x)))
    obs.subscribe()
def forward(self, xs: rx.Observable) -> rx.Observable:
    return xs.pipe(ops.map(str))
def enqueue(self,
            observable: Observable,
            group: str = 'default-group',
            retries: int = 0,
            description: str = None):
    description = description or str(observable)
    key = '{}({})'.format(description, random.random())

    def log_status(status):
        logging.debug(
            str({
                'WorkQueue': str(self),
                'group': group,
                'key': key,
                status: description
            }))

    log_status('ENQUEUED')
    error: Optional[Exception] = None

    def handle_error(e):
        log_status('FAILED')
        nonlocal error
        error = e
        return of({'key': key, 'error': e})

    def throw_if_error(r):
        if error:
            return throw(error)
        else:
            return of(r)

    request_disposed = Subject()

    def dispose_request():
        request_disposed.on_next(True)

    request = of(True).pipe(
        do_action(lambda _: log_status('STARTED')),
        flat_map(lambda _: observable.pipe(
            map(lambda value: {
                'key': key,
                'value': value
            }),
            retry_with_backoff(
                retries=retries,
                description='{}.enqueue(group={}, description={})'.format(
                    self, group, description)),
            catch(handler=lambda e, o: handle_error(e)),
            take_until(request_disposed),
            take_until_disposed(),
        )),
        concat(
            of({
                'key': key,
                'complete': True
            }).pipe(do_action(lambda _: log_status('COMPLETED')))),
    )
    result_stream = self._output.pipe(
        observe_on(self.request_scheduler),
        filter(lambda r: r['key'] == key),
        flat_map(lambda r: throw_if_error(r)),
        take_while(lambda r: not r.get('complete')),
        flat_map(lambda r: of(r.get('value'))),
        finally_action(dispose_request))
    self._requests.on_next({
        'request': request,
        'concurrency_group': group
    })
    return result_stream
def general_node_grouper(observable: rx.Observable):
    return observable.pipe(
        op.map(raw_node_dict_to_formatted_node_dict),
        op.reduce(reduce_node_dict),
        op.map(node_dict_to_node_dict_with_list),
    )
def general_edge_grouper(observable: rx.Observable):
    return observable.pipe(
        op.map(raw_edge_dict_to_formatted_edge_dict),
        op.reduce(reduce_edge_dict),
        op.map(edge_dict_to_edge_dict_with_list),
    )
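# A usage sketch for the groupers above (the raw dicts are illustrative; the
# real shape is whatever raw_node_dict_to_formatted_node_dict expects from
# the project-local parser). reduce() emits once, after the source completes.
import rx

raw_nodes = rx.of({'id': 1, 'weight': 0.5}, {'id': 2, 'weight': 1.5})
general_node_grouper(raw_nodes).subscribe(print)   # one reduced node dict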