def test_return_most_recent(self):
    """When both models share the same criticality, the most recent wins."""
    first = BasicHealthModel(
        name="test",
        health_status=HealthStatus.DEAD,
        health_message="first",
    )
    second = BasicHealthModel(
        name="test",
        health_status=HealthStatus.DEAD,
        health_message="second",
    )

    # Timestamp each emission with a distinct virtual time so "most recent"
    # is deterministic: `second` is stamped later than `first`.
    stamped_a = rx.of(first).pipe(ops.timestamp(scheduler=HistoricalScheduler(1)))
    stamped_b = rx.of(second).pipe(ops.timestamp(scheduler=HistoricalScheduler(2)))

    outcome: BasicHealthModel = None

    def capture(model):
        nonlocal outcome
        outcome = model

    stamped_a.pipe(ops.combine_latest(stamped_b), most_critical()).subscribe(capture)

    self.assertEqual(outcome.health_status, HealthStatus.DEAD)
    self.assertEqual(outcome.health_message, "second")
def _combine_most_critical(*obs: rx.Observable):
    """
    Combine several observable sequences of BasicHealthModel into one
    observable sequence emitting the BasicHealthModel with the most
    critical health status.

    Every source (the piped-in one plus each observable in ``obs``) is
    timestamped so that, when multiple models share the same criticality,
    the most recently emitted one is chosen by ``most_critical``.

    :param obs: additional rx.Observable sources, each emitting
        BasicHealthModel items
    :return: an rx pipe operator producing BasicHealthModel items
    """
    # NOTE: with a ``*obs`` parameter each element is a single observable,
    # so the per-element annotation is ``rx.Observable`` (the previous
    # ``Sequence[rx.Observable]`` described the tuple, not its elements).
    return rx.pipe(
        ops.timestamp(),
        ops.combine_latest(*(source.pipe(ops.timestamp()) for source in obs)),
        most_critical(),
    )
def timestampOperator(self):
    """Merge a timestamped interval stream with a timestamped string stream
    and print every emitted (value, timestamp) pair."""
    ticks = interval(1).pipe(
        op.map(lambda second: self.intense_calculation(second)),
        op.timestamp(),
    )
    words = of('Alpha', 'Beta', 'Gamma', 'Delta', 'Epsilon').pipe(
        op.map(lambda second: self.intense_calculation(second)),
        op.timestamp(),
    )

    merged = ticks.pipe(op.merge(words))
    merged.subscribe(
        lambda item: print('"Subscribe Timestamp" output: {}'.format(item))
    )
def create():
    """Timestamp each element of xs and repack the record into Timestamp."""
    def repack(stamped):
        # stamped carries .value and .timestamp from ops.timestamp()
        return Timestamp(stamped.value, stamped.timestamp)

    return xs.pipe(ops.timestamp(), ops.map(repack))
def test_returns_dead_over_unhealthy(self):
    """DEAD must outrank UNHEALTHY even when UNHEALTHY is emitted later."""
    dead = BasicHealthModel(name="test", health_status=HealthStatus.DEAD)
    unhealthy = BasicHealthModel(name="test", health_status=HealthStatus.UNHEALTHY)

    # Virtual-time stamps: the UNHEALTHY model is the more recent emission.
    stamped_dead = rx.of(dead).pipe(ops.timestamp(scheduler=HistoricalScheduler(1)))
    stamped_unhealthy = rx.of(unhealthy).pipe(ops.timestamp(scheduler=HistoricalScheduler(2)))

    outcome: BasicHealthModel = None

    def capture(model):
        nonlocal outcome
        outcome = model

    stamped_dead.pipe(
        ops.combine_latest(stamped_unhealthy), most_critical()
    ).subscribe(capture)

    self.assertEqual(outcome.health_status, HealthStatus.DEAD)
def subscribe(observer, scheduler_=None):
    """Subscribe to the delayed source: buffer timestamped notifications and
    replay them to `observer` after `duetime` has elapsed.

    Closes over `duetime`, `source`, `scheduler` and `timeout_scheduler`
    from the enclosing operator — presumably an RxPY `delay` implementation;
    confirm against the surrounding file.
    """
    nonlocal duetime
    # Scheduler priority: operator-level override, then the one supplied at
    # subscribe time, then the default timeout scheduler.
    _scheduler = scheduler or scheduler_ or timeout_scheduler
    if isinstance(duetime, datetime):
        # Absolute due time -> convert to a relative delay from "now".
        duetime = _scheduler.to_datetime(duetime) - _scheduler.now
    else:
        duetime = _scheduler.to_timedelta(duetime)
    cancelable = SerialDisposable()
    # Single-element lists emulate mutable cells shared with the closures below.
    exception = [None]   # first error seen, if any
    active = [False]     # a drain `action` is scheduled/pending
    running = [False]    # a drain `action` is currently executing
    queue = []           # pending Timestamp(value, due-time) notifications

    def on_next(notification):
        # Receives materialized + timestamped notifications from the source.
        should_run = False
        with source.lock:
            if notification.value.kind == 'E':
                # Errors are not delayed: drop everything queued and
                # propagate the error as soon as possible.
                del queue[:]
                queue.append(notification)
                exception[0] = notification.value.exception
                should_run = not running[0]
            else:
                # Re-stamp with the time at which the item becomes due.
                queue.append(Timestamp(value=notification.value, timestamp=notification.timestamp + duetime))
                should_run = not active[0]
                active[0] = True
        if should_run:
            if exception[0]:
                observer.on_error(exception[0])
            else:
                mad = MultipleAssignmentDisposable()
                cancelable.disposable = mad

                def action(scheduler, state):
                    # Drain every queued notification whose due time has passed.
                    if exception[0]:
                        # Error already delivered on the on_next path.
                        return
                    with source.lock:
                        running[0] = True
                        while True:
                            result = None
                            if queue and queue[0].timestamp <= scheduler.now:
                                result = queue.pop(0).value
                            if result:
                                # Notification.accept dispatches to the
                                # matching observer callback.
                                result.accept(observer)
                            if not result:
                                break
                        should_continue = False
                        recurse_duetime = 0
                        if queue:
                            # Items remain: reschedule for the head's due time.
                            should_continue = True
                            diff = queue[0].timestamp - scheduler.now
                            # Clamp to zero in whichever arithmetic the
                            # scheduler uses (timedelta or plain number).
                            zero = timedelta(0) if isinstance(diff, timedelta) else 0
                            recurse_duetime = max(zero, diff)
                        else:
                            active[0] = False
                        ex = exception[0]
                        running[0] = False
                    # Deliver outside the lock to avoid re-entrancy deadlocks.
                    if ex:
                        observer.on_error(ex)
                    elif should_continue:
                        mad.disposable = scheduler.schedule_relative(recurse_duetime, action)

                mad.disposable = _scheduler.schedule_relative(duetime, action)

    # Materialize so errors/completions flow through on_next as notifications,
    # then timestamp each one so the drain loop can compare due times.
    subscription = source.pipe(
        ops.materialize(),
        ops.timestamp()
    ).subscribe_(on_next, scheduler=scheduler_)
    return CompositeDisposable(subscription, cancelable)
# Cheat-sheet listing of RxPY operator factories, grouped by category.
# NOTE(review): these calls are illustrative only — most of these operators
# require arguments, so executing this listing as-is would raise.
op.skip()
op.skip_last()
op.take()
op.take_last()
# ...
"""Error Handling"""
op.catch()
op.retry()
"""Utility"""
op.delay()
op.materialize()
op.time_interval()
op.timeout()
op.timestamp()
"""Conditional and Boolean"""
op.all()
op.contains()
op.default_if_empty()
op.sequence_equal()
op.skip_until()
op.skip_while()
op.take_until()
op.take_while()
"""Connectable"""
op.publish()
op.ref_count()
op.replay()
import rx
import rx.operators as ops
from rx.subject import Subject
import time

# Demo: stamp every emission of a Subject with its arrival time and log it.
numbers = Subject()

numbers.pipe(ops.timestamp()).subscribe(
    on_next=lambda item: print("on_next {}: {}".format(item.value, item.timestamp)),
    on_error=lambda err: print("on_error: {}".format(err)),
    on_completed=lambda: print("on_completed")
)

# Push values with a short pause so the timestamps visibly differ.
for value in (1, 2, 3):
    numbers.on_next(value)
    time.sleep(0.1)
numbers.on_next(4)
#!/usr/bin/python3
from datetime import timedelta
import rx.operators as op
from velib_rx.dbus import DBus
import pickle

# Record every propertyChanged event fired by the matching service(s).
# Properties that never fire propertyChanged are not captured yet
# (see TODO in observe_ve_property).

service = 'com.victronenergy.meteo.*'
t_seconds = 30

recorded = []
dbus = DBus.new_tcp_connection('192.168.178.137')

# Print each event as it arrives and append a timestamped copy to `recorded`.
change_stream = dbus.observe_ve_property(service_name=service)
change_stream.pipe(op.do_action(print), op.timestamp()).subscribe(recorded.append)

print(f'recording {service} for {t_seconds} seconds')
dbus.run_for_timespan(timedelta(seconds=t_seconds))

file_name = 'recorded.dat'
with open(file_name, 'ab') as f:
    pickle.dump(recorded, f, protocol=0)

print(f'done, wrote {file_name}')
def create():
    """Attach timestamps to a source that never emits and never terminates."""
    silent = rx.never()
    return silent.pipe(ops.timestamp())
def create():
    """Attach timestamps to a source that immediately errors with `ex`."""
    failing = rx.throw(ex)
    return failing.pipe(ops.timestamp())
def create():
    """Attach timestamps to a source that completes immediately."""
    finished = rx.empty()
    return finished.pipe(ops.timestamp())