def create():
    def on_first_error(error, src):
        # Record that the first catch handler fired.
        first_handler_called[0] = True
        return o2

    def on_second_error(error, src):
        # Record that the second catch handler fired.
        second_handler_called[0] = True
        return o3

    # Chain two catches: errors from o1 fall back to o2, errors from that
    # recovered stream fall back to o3.
    return o1.pipe(ops.catch(on_first_error), ops.catch(on_second_error))
def _retryable(self, data: str, delay: datetime.timedelta):
    """Emit ``data`` after ``delay``, perform the HTTP call, and delegate any
    failure to the configured retry handler."""
    def on_error(exception, source):
        return self._retry_handler(exception, source, data)

    wait = ops.delay(duetime=delay, scheduler=self._scheduler)
    call = ops.map(lambda payload: self._http(payload))
    return rx.of(data).pipe(wait, call, ops.catch(handler=on_error))
def _get_status(self) -> Observable:
    """Run the status interactor, logging each status and converting any
    failure into an empty stream."""
    status_stream = self._get_status_interactor.execute().pipe(
        operators.catch(self._log_exception_return_empty_observable),
        operators.map(self.log_status),
    )
    assert isinstance(status_stream, Observable)
    return status_stream
def do_retry(source, tries, exception):
    # Retry helper: re-subscribes to `source` after a backoff delay until the
    # retry budget is exhausted, then re-raises the last error.
    # NOTE(review): `retries`, `backoff`, and `description` are closed-over
    # names from an enclosing scope not visible here — confirm at the caller.
    if tries <= retries:
        logging.warning(
            'retry_with_backoff(tries={}, retries={}, exception={}, description={})'
            .format(tries, retries, exception, description))
        # Emit a placeholder after the backoff delay, then re-subscribe to the
        # failed source; a further failure recurses with tries + 1.
        return of(None).pipe(
            delay(backoff(tries), TimeoutScheduler()),
            flat_map(source),
            catch(handler=lambda e, src: do_retry(src, tries + 1, e)))
    else:
        # Budget spent: propagate the most recent exception downstream.
        return throw(exception)
def _retryable(self, data: str, delay: timedelta):
    """Return an observable that waits ``delay``, issues the HTTP write for
    ``data``, and hands any failure to the retry handler."""
    def on_error(exception, source):
        return self._retry_handler(exception, source, data)

    return rx.of(data).pipe(
        # honour the requested delay before doing any work
        ops.delay(duetime=delay, scheduler=self._write_options.write_scheduler),
        # perform the actual HTTP call
        ops.map(lambda payload: self._http(payload)),
        # on error, delegate to the retry logic
        ops.catch(handler=on_error),
    )
def enqueue(self,
            observable: Observable,
            group: str = 'default-group',
            retries: int = 0,
            description: str = None) -> Observable:
    """Schedule ``observable`` on the work queue and return a stream of its
    values.

    NOTE(review): relies on Subject/of/empty, the rx operators, and the
    retry_with_backoff / take_until_disposed / throw_when helpers from the
    enclosing module — confirm availability at the call site.
    """

    def log_status(status):
        # Debug trace of this work item's lifecycle transitions.
        logging.debug(
            str({
                'WorkQueue': str(self),
                'group': group,
                status: description
            }))

    log_status('ENQUEUED')
    # Values produced by the wrapped observable, surfaced to the caller.
    output = Subject()
    # Errors, re-raised into the caller-facing stream via throw_when below.
    errors = Subject()
    # Fires when the caller's subscription ends, cancelling pending work.
    output_finalized = Subject()

    def handle_error(e, _):
        # Record the failure and end the work stream quietly; the error
        # itself reaches the caller through the `errors` subject.
        log_status('FAILED')
        errors.on_next(e)
        return empty()

    def set_output_finalized():
        output_finalized.on_next(True)

    work = of(True).pipe(
        do_action(lambda _: log_status('STARTED')),
        take_until(output_finalized),
        flat_map(lambda _: observable.pipe(
            map(lambda value: of({
                'value': value,
                'output': output
            })),
            retry_with_backoff(
                retries=retries,
                description='{}.enqueue(group={}, description={})'.format(
                    self, group, description)),
            catch(handler=handle_error),
            take_until(output_finalized),
            take_until_disposed())),
        # Always terminate with a completion marker for the consumer side.
        concat(of(of({
            'completed': True,
            'output': output
        }))),
        finally_action(lambda: log_status('COMPLETED')))
    self._queue.on_next({'work': work, 'group': group})
    # Caller-facing stream: values until the completion marker arrives, with
    # queued errors re-raised and cleanup triggered on unsubscribe.
    return output.pipe(observe_on(self.request_scheduler),
                       throw_when(errors),
                       take_while(lambda r: not r.get('completed')),
                       map(lambda r: r.get('value')),
                       finally_action(set_output_finalized))
def route_error(obs, convert):
    """
    Handles error raised by obs observable

    Catches any error raised by obs, maps it to another object with the
    convert function, and emits it on the error observer.
    """
    def on_error(error, source):
        # Deliver the converted error on the sink scheduler, then complete
        # the stream instead of propagating the failure.
        sink_scheduler.schedule(
            lambda _1, _2: sink_observer.on_next(convert(error)))
        return rx.empty()

    return obs.pipe(ops.catch(on_error))
def configure_timed_read(self):
    # Set up a periodic poll of the FP50 device when an interval is
    # configured; a non-positive interval disables polling entirely.
    interval = self.config.getfloat("fp50", "interval")
    if interval > 0:
        logger.info("Configuring timed read")
        # enabled
        # Poll loop on a dedicated thread: read power, upload it, wait the
        # configured query delay, then read and upload the temperature.
        # NOTE(review): the Disposable returned by subscribe() is discarded,
        # so this poll can never be cancelled — confirm that is intended.
        # NOTE(review): `error_handler` is not defined in this block; it is
        # presumably a module-level handler — verify.
        rx.interval(interval, scheduler=NewThreadScheduler()).pipe(
            operators.flat_map(lambda x: self.control.get_power()),
            operators.map(lambda x: self.upload_power(x)),
            operators.delay(self.config.getfloat("fp50", "query_delay")),
            operators.flat_map(
                lambda x: self.control.get_internal_temperature()),
            operators.map(lambda x: self.upload_internal_temperature(x)),
            operators.catch(error_handler)).subscribe()
def catchOperator(self):
    """Demonstrate ops.catch with a fallback observable: the source raises
    on 4, after which the fallback sequence takes over."""
    def fail_on_four(value):
        if value == 4:
            raise Exception('four')
        return value

    source = of(1, 2, 3, 4, 5, 6, 7)
    fallback = of(11, 12, 13, 14)
    result = source.pipe(
        op.map(fail_on_four),
        op.catch(fallback),
    )
    result.subscribe(
        lambda item: print('"Subscribe Catch" output: {}'.format(item)))
def retry_with_backoff(
        retries: int = 0,
        description: str = None,
        backoff: Optional[Mapper] = default_backoff
) -> Callable[[Observable], Observable]:
    """Operator factory: retry the source up to ``retries`` times, waiting
    ``backoff(tries)`` between attempts, then re-raise the last error.

    :param retries: maximum number of retry attempts (0 means fail fast).
    :param description: human-readable label included in retry log lines.
    :param backoff: maps the attempt number to a delay duration.
    :return: a pipeable operator wrapping a source Observable.
    """
    def do_retry(source, tries, exception):
        # Fixed: removed a leftover debug print() that duplicated the log
        # line below and fired even after the retry budget was exhausted.
        if tries <= retries:
            logging.warning(
                'retry_with_backoff(tries={}, retries={}, exception={}, description={})'
                .format(tries, retries, exception, description))
            # Wait out the backoff, then re-subscribe to the failed source;
            # a further failure recurses with tries + 1.
            return of(None).pipe(
                delay(backoff(tries), TimeoutScheduler.singleton()),
                flat_map(source),
                catch(handler=lambda e, src: do_retry(src, tries + 1, e)))
        else:
            # Budget spent: propagate the most recent exception downstream.
            return throw(exception)

    # The first failure enters the retry loop at attempt 1.
    return rx.pipe(catch(handler=lambda e, o: do_retry(o, 1, e)))
def create():
    def on_error(error, source):
        # Record that the catch handler was invoked.
        handler_called[0] = True
        return o2

    # A stream that fails immediately, recovered by the handler.
    return rx.throw('ex').pipe(ops.catch(on_error))
def create():
    def on_error(error, source):
        # Record that the catch handler was invoked.
        handler_called[0] = True
        return o2

    # Errors from o1 are replaced by the handler's fallback observable.
    return o1.pipe(ops.catch(on_error))
def create():
    # On any error from o1, continue with the fallback observable o2.
    return o1.pipe(ops.catch(o2))
def create():
    def on_error(error, source):
        # Note the invocation, then fail again with a different exception.
        handler_called[0] = True
        raise Exception(ex2)

    return o1.pipe(ops.catch(on_error))
def route_error(item, convert):
    """Convert any error raised by ``item`` and emit it on the error sink,
    completing the stream instead of propagating the failure."""
    def on_error(error, source):
        sink_observer.on_next(convert(error))
        return rx.empty()

    return item.pipe(ops.catch(on_error))
def enqueue(self,
            observable: Observable,
            group: str = None,
            retries: int = 0,
            description: str = None):
    """Queue ``observable`` for execution and return a stream of its values.

    :param observable: work to run; emitted values (or nested observables,
        which are flattened) are forwarded to the returned stream.
    :param group: concurrency group the work is scheduled under.
    :param retries: retry budget passed to retry_with_backoff.
    :param description: label used in log lines; defaults to str(Observable).
    :return: an Observable of the values produced by ``observable``.
    """
    # Provide a function returning a callable?
    description = description or str(Observable)
    # Unique key ties messages on self._output back to this enqueue call.
    key = '{}({})'.format(description, random.random())

    def log_status(status):
        # Debug trace of this work item's lifecycle transitions.
        logging.debug(
            str({
                'WorkQueue': str(self),
                'group': group,
                'key': key,
                status: description
            }))

    log_status('ENQUEUED')
    error: Optional[Exception] = None

    def handle_error(e):
        # Remember the failure so the result stream re-raises it, and emit a
        # terminal message so downstream consumers wake up.
        log_status('FAILED')
        nonlocal error
        error = e
        return of({'key': key, 'error': e})

    def throw_if_error(request):
        if error:
            return throw(error)
        else:
            return of(request)

    def extract_value(value):
        # Fixed: was `type(value) == Observable`, which misses subclasses —
        # concrete observables are subclass instances, so nested observables
        # were never recognized and flattened. isinstance() handles both.
        if isinstance(value, Observable):
            return value
        else:
            return of(value)

    request = of(True).pipe(
        do_action(lambda _: log_status('STARTED')),
        flat_map(lambda _: observable.pipe(
            flat_map(extract_value),
            map(lambda value: {
                'key': key,
                'value': value
            }),
            retry_with_backoff(
                retries=retries,
                description='{}.enqueue(group={}, description={})'.format(
                    self, group, description)),
            catch(handler=lambda e, o: handle_error(e)),
        )),
        # Always finish with a completion marker for the result stream.
        concat(
            of({
                'key': key,
                'complete': True
            }).pipe(do_action(lambda _: log_status('COMPLETED')))))
    result_stream = self._output.pipe(
        filter(lambda request: request['key'] == key),
        flat_map(lambda request: throw_if_error(request)),
        take_while(lambda request: not request.get('complete')),
        flat_map(lambda request: of(request.get('value'))))
    self._requests.on_next({
        'request': request,
        'concurrency_group': group
    })
    return result_stream
"""Filtering""" op.debounce() op.distinct() op.filter() op.element_at() op.first() op.ignore_elements() op.last() op.skip() op.skip_last() op.take() op.take_last() # ... """Error Handling""" op.catch() op.retry() """Utility""" op.delay() op.materialize() op.time_interval() op.timeout() op.timestamp() """Conditional and Boolean""" op.all() op.contains() op.default_if_empty() op.sequence_equal() op.skip_until()
def unlock_request(img_faces_pair):
    """Fire the door-unlock POST, retrying up to three attempts; failures
    are logged and swallowed so the stream completes either way."""
    def log_error(e):
        logger.exception(e)

    def announce_unlock(_):
        logger.info('Door unlocked\n')

    return rx_request('post', ARGS.door_api_url, timeout=0.3).pipe(
        ops.do_action(on_error=log_error),
        ops.retry(3),
        # after the retries are exhausted, give up silently
        ops.catch(rx.empty()),
        ops.do_action(on_next=announce_unlock),
        ops.do_action(on_next=lambda _: log_unlock(img_faces_pair)))
import rx
import rx.operators as ops

# A source that fails immediately; catch replaces it with a finite sequence,
# so the subscriber sees items 1-3 and a normal completion, never the error.
failing = rx.throw("error!")
recovered = failing.pipe(ops.catch(rx.from_([1, 2, 3])))
recovered.subscribe(
    on_next=lambda i: print("item: {}".format(i)),
    on_error=lambda e: print("error: {}".format(e)),
    on_completed=lambda: print("completed"))
def enqueue(self,
            observable: Observable,
            group: str = 'default-group',
            retries: int = 0,
            description: str = None):
    """Queue ``observable`` for execution and return a stream of its values.

    NOTE(review): relies on Subject/of/throw, the rx operators, and the
    retry_with_backoff / take_until_disposed helpers from the enclosing
    module — confirm availability at the call site.
    """
    description = description or str(Observable)
    # Unique key ties messages on self._output back to this enqueue call.
    key = '{}({})'.format(description, random.random())

    def log_status(status):
        # Debug trace of this work item's lifecycle transitions.
        logging.debug(
            str({
                'WorkQueue': str(self),
                'group': group,
                'key': key,
                status: description
            }))

    log_status('ENQUEUED')
    error: Optional[Exception] = None

    def handle_error(e):
        # Remember the failure so the result stream re-raises it, and emit a
        # terminal message so downstream consumers wake up.
        log_status('FAILED')
        nonlocal error
        error = e
        return of({'key': key, 'error': e})

    def throw_if_error(r):
        if error:
            return throw(error)
        else:
            return of(r)

    # Fires when the caller unsubscribes, cancelling any in-flight work.
    request_disposed = Subject()

    def dispose_request():
        request_disposed.on_next(True)

    request = of(True).pipe(
        do_action(lambda _: log_status('STARTED')),
        flat_map(
            lambda _: observable.pipe(
                map(lambda value: {
                    'key': key,
                    'value': value
                }),
                retry_with_backoff(retries=retries, description=
                                   '{}.enqueue(group={}, description={})'.
                                   format(self, group, description)),
                catch(handler=lambda e, o: handle_error(e)),
                take_until(request_disposed),
                take_until_disposed(),
            ),
        ),
        # Always finish with a completion marker for the result stream.
        concat(
            of({
                'key': key,
                'complete': True
            }).pipe(do_action(lambda _: log_status('COMPLETED')))),
    )
    # Caller-facing stream: this call's values until the completion marker,
    # with any recorded error re-raised and cleanup on unsubscribe.
    result_stream = self._output.pipe(
        observe_on(self.request_scheduler),
        filter(lambda r: r['key'] == key),
        flat_map(lambda r: throw_if_error(r)),
        take_while(lambda r: not r.get('complete')),
        flat_map(lambda r: of(r.get('value'))),
        finally_action(dispose_request))
    self._requests.on_next({
        'request': request,
        'concurrency_group': group
    })
    return result_stream