def test_amb_never2(self):
    """amb of two never-emitting sequences never emits."""
    scheduler = TestScheduler()
    left, right = rx.never(), rx.never()

    results = scheduler.start(lambda: left.pipe(ops.amb(right)))

    assert results.messages == []
def test_skip_until_never_never(self):
    """skip_until with a never trigger on a never source emits nothing."""
    scheduler = TestScheduler()
    source, trigger = rx.never(), rx.never()

    results = scheduler.start(lambda: source.pipe(ops.skip_until(trigger)))

    assert results.messages == []
def test_concat_never_never(self):
    """Concatenating two never sequences produces no messages."""
    scheduler = TestScheduler()
    first, second = rx.never(), rx.never()

    results = scheduler.start(lambda: first.pipe(ops.concat(second)))

    assert results.messages == []
def test_merge_never2(self):
    """Merging two never sequences produces no messages."""
    scheduler = TestScheduler()
    first, second = rx.never(), rx.never()

    results = scheduler.start(lambda: rx.merge(first, second))

    assert results.messages == []
def test_zip_never_never(self):
    """zip of two never sequences produces no messages."""
    scheduler = TestScheduler()
    left, right = rx.never(), rx.never()

    results = scheduler.start(lambda: left.pipe(ops.zip(right)))

    assert results.messages == []
def test_amb_never3(self):
    """amb over three never sequences never emits."""
    scheduler = TestScheduler()
    candidates = (rx.never(), rx.never(), rx.never())

    results = scheduler.start(lambda: rx.amb(*candidates))

    assert results.messages == []
def test_with_latest_from_never_never(self):
    """with_latest_from on two never sequences emits nothing."""
    scheduler = TestScheduler()
    primary, secondary = rx.never(), rx.never()

    def create():
        combined = primary.pipe(ops.with_latest_from(secondary))
        return combined.pipe(ops.map(sum))

    results = scheduler.start(create)

    assert results.messages == []
def test_replay_time_multiple_connections(self):
    """Connecting a time-windowed replay twice yields the same disposable;
    reconnecting after disposal yields a fresh one."""
    source = rx.never()
    published = source.pipe(ops.replay(window=100))

    first = published.connect()
    second = published.connect()
    assert first == second

    first.dispose()
    second.dispose()

    third = published.connect()
    assert first != third
def test_publish_multipleconnections(self):
    """Connecting a published observable twice yields the same disposable;
    reconnecting after disposal yields a fresh one."""
    source = rx.never()
    published = source.pipe(ops.publish())

    first = published.connect()
    second = published.connect()
    assert first == second

    first.dispose()
    second.dispose()

    third = published.connect()
    assert first != third
    third.dispose()
def test_concat_never_empty(self):
    """never concatenated before an empty sequence never reaches it."""
    scheduler = TestScheduler()
    empty = scheduler.create_hot_observable(
        [on_next(150, 1), on_completed(230)])
    never_seq = rx.never()

    results = scheduler.start(lambda: never_seq.pipe(ops.concat(empty)))

    assert results.messages == []
def test_merge_never_return(self):
    """Merging never with a single-value sequence passes the value through
    but never completes."""
    scheduler = TestScheduler()
    returning = scheduler.create_hot_observable(
        [on_next(150, 1), on_next(210, 2), on_completed(245)])
    never_seq = rx.never()

    results = scheduler.start(lambda: rx.merge(never_seq, returning))

    assert results.messages == [on_next(210, 2)]
def test_zip_non_empty_never(self):
    """Zipping a value-producing sequence with never yields no pairs."""
    scheduler = TestScheduler()
    producing = scheduler.create_hot_observable(
        [on_next(150, 1), on_next(215, 2), on_completed(220)])
    never_seq = rx.never()

    results = scheduler.start(
        lambda: producing.pipe(ops.zip(never_seq), ops.map(sum)))

    assert results.messages == []
def test_zip_never_empty(self):
    """Zipping never with an empty sequence completes when the empty side does.

    NOTE(review): another test with this exact name but the opposite
    expectation (no messages) appears elsewhere in this file — if both end up
    in one class the later definition shadows this one; confirm which
    behavior the targeted rx version implements.
    """
    scheduler = TestScheduler()
    never_seq = rx.never()
    empty = scheduler.create_hot_observable(
        [on_next(150, 1), on_completed(210)])

    results = scheduler.start(lambda: never_seq.pipe(ops.zip(empty)))

    assert results.messages == [on_completed(210)]
def test_zip_never_empty(self):
    """Zipping never with an empty sequence emits nothing.

    NOTE(review): another test with this exact name but the opposite
    expectation (on_completed(210)) appears elsewhere in this file — if both
    end up in one class the later definition shadows the earlier; confirm
    which behavior the targeted rx version implements.
    """
    scheduler = TestScheduler()
    never_seq = rx.never()
    empty = scheduler.create_hot_observable(
        [on_next(150, 1), on_completed(210)])

    results = scheduler.start(lambda: never_seq.pipe(ops.zip(empty)))

    assert results.messages == []
def test_skip_until_never_empty(self):
    """skip_until on never with an empty trigger emits nothing."""
    scheduler = TestScheduler()
    source = rx.never()
    trigger = scheduler.create_hot_observable(
        [on_next(150, 1), on_completed(225)])

    results = scheduler.start(lambda: source.pipe(ops.skip_until(trigger)))

    assert results.messages == []
def test_on_error_resume_next_end_with_never(self):
    """on_error_resume_next ending with never forwards values but never
    completes."""
    scheduler = TestScheduler()
    completing = scheduler.create_hot_observable(
        [on_next(150, 1), on_next(210, 2), on_completed(220)])
    never_seq = rx.never()

    results = scheduler.start(
        lambda: rx.on_error_resume_next(completing, never_seq))

    assert results.messages == [on_next(210, 2)]
def test_catch_never(self):
    """catch on a never source stays on the first sequence forever."""
    scheduler = TestScheduler()
    never_seq = rx.never()
    fallback = scheduler.create_hot_observable(
        [on_next(240, 5), on_completed(250)])

    results = scheduler.start(lambda: never_seq.pipe(ops.catch(fallback)))

    assert results.messages == []
def test_amb_empty_never(self):
    """amb picks the completing sequence over the never one."""
    scheduler = TestScheduler()
    never_seq = rx.never()
    empty = scheduler.create_hot_observable(
        [on_next(150, 1), on_completed(225)])

    results = scheduler.start(lambda: empty.pipe(ops.amb(never_seq)))

    assert results.messages == [on_completed(225)]
def test_catch_error_never(self):
    """catch with a never fallback swallows the error and never terminates."""
    ex = 'ex'
    scheduler = TestScheduler()
    erroring = scheduler.create_hot_observable(
        [on_next(150, 1), on_next(210, 2), on_next(220, 3), on_error(230, ex)])
    fallback = rx.never()

    results = scheduler.start(lambda: erroring.pipe(ops.catch(fallback)))

    assert results.messages == [on_next(210, 2), on_next(220, 3)]
def test_replay_count_multiple_connections(self):
    """Connecting a count-buffered replay twice yields the same disposable;
    reconnecting after disposal yields a fresh one."""
    source = rx.never()
    published = source.pipe(ops.replay(None, 3))

    first = published.connect()
    second = published.connect()
    assert first == second

    first.dispose()
    second.dispose()

    third = published.connect()
    assert first != third
def test_merge_never_error(self):
    """Merging never with an erroring sequence forwards value then error."""
    ex = 'ex'
    scheduler = TestScheduler()
    erroring = scheduler.create_hot_observable(
        [on_next(150, 1), on_next(210, 2), on_error(245, ex)])
    never_seq = rx.never()

    results = scheduler.start(lambda: rx.merge(never_seq, erroring))

    assert results.messages == [on_next(210, 2), on_error(245, ex)]
def test_take_until_preempt_never_error(self):
    """take_until forwards the trigger's error even when the source is never."""
    ex = 'ex'
    scheduler = TestScheduler()
    source = rx.never()
    trigger = scheduler.create_hot_observable(
        [on_next(150, 1), on_error(225, ex)])

    results = scheduler.start(lambda: source.pipe(ops.take_until(trigger)))

    assert results.messages == [on_error(225, ex)]
def test_on_error_resume_next_start_with_never(self):
    """on_error_resume_next starting with never stays stuck on it."""
    scheduler = TestScheduler()
    never_seq = rx.never()
    completing = scheduler.create_hot_observable(
        [on_next(150, 1), on_next(210, 2), on_completed(220)])

    results = scheduler.start(
        lambda: rx.on_error_resume_next(never_seq, completing))

    assert results.messages == []
def test_skip_until_never_error(self):
    """skip_until forwards the trigger's error even when the source is never."""
    ex = 'ex'
    scheduler = TestScheduler()
    source = rx.never()
    trigger = scheduler.create_hot_observable(
        [on_next(150, 1), on_error(225, ex)])

    results = scheduler.start(lambda: source.pipe(ops.skip_until(trigger)))

    assert results.messages == [on_error(225, ex)]
def test_skip_until_somedata_never(self):
    """skip_until with a never trigger skips everything."""
    scheduler = TestScheduler()
    source = scheduler.create_hot_observable(
        [on_next(150, 1), on_next(210, 2), on_next(220, 3),
         on_next(230, 4), on_next(240, 5), on_completed(250)])
    trigger = rx.never()

    results = scheduler.start(lambda: source.pipe(ops.skip_until(trigger)))

    assert results.messages == []
def test_zip_error_never(self):
    """Zipping an erroring sequence with never forwards the error."""
    ex = 'ex'
    scheduler = TestScheduler()
    never_seq = rx.never()
    erroring = scheduler.create_hot_observable(
        [on_next(150, 1), on_error(220, ex)])

    results = scheduler.start(
        lambda: erroring.pipe(ops.zip(never_seq), ops.map(sum)))

    assert results.messages == [on_error(220, ex)]
def create():
    # Fixture factory: builds a never-emitting stream instrumented with
    # do_action so the enclosing test can observe side effects.
    # NOTE(review): relies on `i` (a one-element counter list) and
    # `completed` from the enclosing test scope — confirm against the caller.
    nonlocal completed

    def on_next(x):
        # Count every emission (none are expected from rx.never()).
        i[0] += 1

    def on_completed():
        # Record completion (not expected from rx.never()).
        nonlocal completed
        completed = True

    return rx.never().pipe(
        _.do_action(on_next=on_next, on_completed=on_completed),
    )
def test_fork_join_never_error(self):
    """fork_join forwards the error from the terminating source even while
    the other source never emits."""
    ex = RxException()
    scheduler = TestScheduler()
    never_seq = rx.never()
    erroring = scheduler.create_hot_observable(
        [on_next(150, 1), on_next(230, 2), on_error(300, ex)])

    results = scheduler.start(lambda: rx.fork_join(never_seq, erroring))

    assert results.messages == [on_error(300, ex)]
def test_zip_never_non_empty(self):
    """Zipping never with a value-producing sequence yields no pairs."""
    scheduler = TestScheduler()
    producing = scheduler.create_hot_observable(
        [on_next(150, 1), on_next(215, 2), on_completed(220)])
    never_seq = rx.never()

    results = scheduler.start(
        lambda: never_seq.pipe(ops.zip(producing), ops.map(sum)))

    assert results.messages == []
def test_take_until_nopreempt_somedata_never(self):
    """take_until with a never trigger passes the whole source through."""
    scheduler = TestScheduler()
    source = scheduler.create_hot_observable(
        [on_next(150, 1), on_next(210, 2), on_next(220, 3),
         on_next(230, 4), on_next(240, 5), on_completed(250)])
    trigger = rx.never()

    results = scheduler.start(lambda: source.pipe(ops.take_until(trigger)))

    assert results.messages == [
        on_next(210, 2), on_next(220, 3), on_next(230, 4),
        on_next(240, 5), on_completed(250)]
def test_with_latest_from_return_never(self):
    """with_latest_from on a never primary emits nothing.

    NOTE(review): the hot observable completes at 210 yet lists a value at
    215 — messages recorded after completion are never delivered; confirm
    the timestamps are intentional.
    """
    scheduler = TestScheduler()
    secondary = scheduler.create_hot_observable(
        [on_next(150, 1), on_next(215, 2), on_completed(210)])
    primary = rx.never()

    def create():
        combined = primary.pipe(ops.with_latest_from(secondary))
        return combined.pipe(ops.map(sum))

    results = scheduler.start(create)

    assert results.messages == []
def test_combine_latest_never_empty(self):
    """combine_latest of never (primary) with empty emits nothing."""
    scheduler = TestScheduler()
    never_seq = rx.never()
    empty = scheduler.create_hot_observable(
        [on_next(150, 1), on_completed(210)])

    def create():
        combined = never_seq.pipe(ops.combine_latest(empty))
        return combined.pipe(ops.map(sum))

    results = scheduler.start(create)

    assert results.messages == []
def test_combine_latest_empty_never(self):
    """combine_latest of empty (primary) with never emits nothing."""
    scheduler = TestScheduler()
    never_seq = rx.never()
    empty = scheduler.create_hot_observable(
        [on_next(150, 1), on_completed(210)])

    def create():
        combined = empty.pipe(ops.combine_latest(never_seq))
        return combined.pipe(ops.map(sum))

    results = scheduler.start(create)

    assert results.messages == []
def test_with_latest_from_empty_never(self):
    """with_latest_from on an empty primary completes with the primary."""
    scheduler = TestScheduler()
    never_seq = rx.never()
    empty = scheduler.create_hot_observable(
        [on_next(150, 1), on_completed(210)])

    def create():
        combined = empty.pipe(ops.with_latest_from(never_seq))
        return combined.pipe(ops.map(sum))

    results = scheduler.start(create)

    assert results.messages == [on_completed(210)]
def test_zip_error_never(self):
    """Zipping an erroring sequence with never forwards the error."""
    ex = 'ex'
    scheduler = TestScheduler()
    never_seq = rx.never()
    erroring = scheduler.create_hot_observable(
        [on_next(150, 1), on_error(220, ex)])

    def create():
        paired = erroring.pipe(ops.zip(never_seq))
        return paired.pipe(ops.map(sum))

    results = scheduler.start(create)

    assert results.messages == [on_error(220, ex)]
def on_mouse_over(self) -> Observable:
    """Build a stream of MouseOverEvent values derived from mouse movement.

    Each mouse-move position starts an inner sequence that emits the
    position once and then stays open (rx.never()) until the pointer
    leaves this widget's bounds; ops.exclusive() ignores new inner
    sequences while one is still active.
    """
    # Pointer position from global mouse input, translated into
    # widget-local coordinates by subtracting this widget's offset.
    position = MouseInput.input(self).observe("position")
    local_pos = position.pipe(ops.map(lambda p: p - self.offset))
    return self.on_mouse_move.pipe(
        ops.map(lambda e: e.position),
        # Inner sequence: emit the position, then hold open until the
        # local position falls outside the widget bounds.
        ops.map(lambda p: rx.concat(
            rx.of(p),
            rx.never().pipe(
                ops.take_until(
                    local_pos.pipe(
                        ops.filter(lambda l: not self.bounds.contains(l))))))),
        # Only one "over" window at a time; later moves are dropped
        # until the current window ends.
        ops.exclusive(),
        ops.map(lambda p: MouseOverEvent(self, p)))
def test_with_latest_from_never_on_error(self):
    """with_latest_from forwards the secondary's error while the primary
    never emits."""
    ex = 'ex'
    scheduler = TestScheduler()
    primary = rx.never()
    erroring = scheduler.create_hot_observable(
        [on_next(150, 1), on_error(220, ex)])

    def create():
        combined = primary.pipe(ops.with_latest_from(erroring))
        return combined.pipe(ops.map(sum))

    results = scheduler.start(create)

    assert results.messages == [on_error(220, ex)]
def test_with_latest_from_throw_never(self):
    """with_latest_from forwards the primary's error while the secondary
    never emits."""
    ex = 'ex'
    scheduler = TestScheduler()
    never_seq = rx.never()
    erroring = scheduler.create_hot_observable(
        [on_next(150, 1), on_error(220, ex)])

    def create():
        combined = erroring.pipe(ops.with_latest_from(never_seq))
        return combined.pipe(ops.map(sum))

    results = scheduler.start(create)

    assert results.messages == [on_error(220, ex)]
def test_combine_latest_never_on_error(self):
    """combine_latest forwards the other side's error while the primary
    never emits."""
    ex = 'ex'
    scheduler = TestScheduler()
    never_seq = rx.never()
    erroring = scheduler.create_hot_observable(
        [on_next(150, 1), on_error(220, ex)])

    def create():
        combined = never_seq.pipe(ops.combine_latest(erroring))
        return combined.pipe(ops.map(sum))

    results = scheduler.start(create)

    assert results.messages == [on_error(220, ex)]
def test_combine_latest_throw_never(self):
    """combine_latest forwards the primary's error while the other side
    never emits."""
    ex = 'ex'
    scheduler = TestScheduler()
    never_seq = rx.never()
    erroring = scheduler.create_hot_observable(
        [on_next(150, 1), on_error(220, ex)])

    def create():
        combined = erroring.pipe(ops.combine_latest(never_seq))
        return combined.pipe(ops.map(sum))

    results = scheduler.start(create)

    assert results.messages == [on_error(220, ex)]
def test_catch_error_never(self):
    """catch with a never fallback swallows the error and never terminates."""
    ex = 'ex'
    scheduler = TestScheduler()
    erroring = scheduler.create_hot_observable([
        on_next(150, 1),
        on_next(210, 2),
        on_next(220, 3),
        on_error(230, ex),
    ])
    fallback = rx.never()

    results = scheduler.start(lambda: erroring.pipe(ops.catch(fallback)))

    assert results.messages == [on_next(210, 2), on_next(220, 3)]
def makinage(aio_scheduler, sources):
    """Wire the application's reactive pipelines from the driver sources.

    Reads configuration from argv/file/http, builds the kafka
    consumer/producer request stream from the first configuration item,
    and returns the sink requests for each driver.
    """
    def on_error(e):
        # Fail fast: any kafka-source error is re-raised instead of
        # being silently dropped by the bare subscription below.
        raise e

    config, read_request, http_request = read_config_from_args(
        sources.argv.argv,
        sources.file.response,
        sources.http.response,
        scheduler=aio_scheduler
    )
    # Take only the first configuration, then keep the stream open forever
    # so downstream flat_map'ed pipelines are never completed from here.
    first_config = rx.concat(config.pipe(ops.take(1),), rx.never())
    # Share a single replayed kafka response stream across all operators.
    kafka_source = sources.kafka.response.pipe(
        trace_observable("kafka source1"),
        ops.replay(),
        ops.ref_count(),
        trace_observable("kafka source2"),
    )
    # Eager subscription keeps the ref-counted replay alive and surfaces
    # errors via on_error above.
    kafka_source.subscribe(on_error=on_error)
    kafka_request = first_config.pipe(
        ops.flat_map(lambda i: create_operators(
            i, config,
            kafka_source,
            sources.kafka.feedback.pipe(ops.share()),
        )),
        ops.subscribe_on(aio_scheduler),
        trace_observable("makinage"),
    )
    # Disabled debug tap kept for reference (no-op string literal).
    '''
    config.pipe(ops.subscribe_on(aio_scheduler)).subscribe(
        on_next=print,
        on_error=print,
    )
    '''
    return MakiNageSink(
        file=file.Sink(request=read_request),
        http=http.Sink(request=http_request),
        kafka=kafka.Sink(request=kafka_request),
    )
def initialize_regulators(config, kafka_feedback):
    """Create one backpressure-control observable per regulator entry.

    Args:
        config: Iterable of regulator dicts, each with a 'feedback' key
            (feedback topic to listen on) and a 'control' key (name the
            resulting control observable is registered under).
        kafka_feedback: Observable of (topic, value) feedback tuples.

    Returns:
        Dict mapping each regulator's 'control' name to its control
        observable (a PID-regulated rate clamped to [0.0, 0.01]).
    """
    regulators = {}
    for regulator in config:
        # Bug fix: the filter lambda ran lazily, after the loop finished,
        # so it captured the *last* loop value of ``regulator`` for every
        # pipeline (late-binding closure). Binding the feedback key as a
        # default argument freezes the per-iteration value.
        control = kafka_feedback.pipe(
            trace_observable("regulator feedback"),
            ops.filter(lambda i, key=regulator['feedback']: i[0] == key),
            ops.map(lambda i: i[1] / 1000),
            pid(rx.concat(rx.just(1.0), rx.never()), -0.001, -0.001, 0.0),
            #ops.map(lambda i: 1/i if i != 0 else 1.0),
            ops.map(lambda i: max(min(i, 0.01), 0.0)),
            trace_observable("regulator"),
        )
        regulators[regulator['control']] = control
    return regulators
def _amb(*sources: Observable) -> Observable:
    """Propagates the observable sequence that reacts first.

    Example:
        >>> winner = amb(xs, ys, zs)

    Returns:
        An observable sequence that surfaces any of the given sequences,
        whichever reacted first.
    """
    # Fold the sources pairwise onto a never-reacting seed: never() loses
    # to any source that produces a notification first.
    winner = never()
    for candidate in sources:
        winner = _.amb(winner)(candidate)
    return winner
def _amb(*args: Observable) -> Observable:
    """Propagates the observable sequence that reacts first.

    Example:
        >>> winner = amb(xs, ys, zs)

    Returns:
        An observable sequence that surfaces any of the given sequences,
        whichever reacted first.
    """
    # Accept either a single list of observables or varargs.
    items = args[0] if isinstance(args[0], list) else list(args)

    # Fold pairwise onto a never-reacting seed: never() loses to any
    # source that produces a notification first.
    winner = never()
    for candidate in items:
        winner = _.amb(winner)(candidate)
    return winner
def create():
    # Running sum over a never-emitting source: scan is wired but never fires.
    return never().pipe(_.scan(lambda total, item: total + item))
def create_operators(config, config_source, kafka_source, kafka_feedback):
    ''' creates the operators declared in config

    Args:
        config: a dict containing the configuration file. todo: observable.
        kafka_source: The kafka response observable
        kafka_feedback: The kafka backpressure process feedback

    Returns:
        An observable containing tuples of (topic, observable).
    '''
    try:
        # Batch vs stream mode, taken from the application section
        # (defaults to streaming).
        source_type = kafka.DataSourceType.STREAM
        if "source_type" in config['application']:
            source_type = kafka.DataSourceType.BATCH if config['application']['source_type'] == "batch" else kafka.DataSourceType.STREAM
        topics, pull_mode = initialize_topics(config['topics'])
        datafeed_mode = kafka.DataFeedMode.PULL if pull_mode is True else kafka.DataFeedMode.PUSH

        # Optional backpressure regulators keyed by control name.
        if 'regulators' in config:
            regulators = initialize_regulators(
                config['regulators'], kafka_feedback)
        else:
            regulators = {}

        producers = []
        consumers = []
        # Instantiate each declared operator: resolve its factory, build
        # its source observables, then register its sinks as producers.
        for k, operator in config['operators'].items():
            factory = import_function(operator['factory'])
            sources = []
            if 'sources' in operator:
                for source in operator['sources']:
                    print('create source {}'.format(source))
                    consumers.append(kafka.ConsumerTopic(
                        topic=source,
                        decode=topics[source].decode,
                        # Attach a regulator only if one targets this topic.
                        control=regulators[source] if source in regulators else None,
                        start_from=topics[source].start_from,
                    ))
                    sources.append(create_source_observable(
                        kafka_source,
                        topics[source],
                    ))
            print(sources)
            # The factory maps (config stream, *source streams) to sink
            # streams, positionally matching the declared 'sinks' list.
            sinks = factory(config_source, *sources)
            print("sinks: {}".format(sinks))
            if 'sinks' in operator:
                for index, sink in enumerate(operator['sinks']):
                    print('create sink {} at {}'.format(sink, index))
                    producers.append(kafka.ProducerTopic(
                        topic=sink,
                        records=sinks[index],
                        map_key=lambda i: None,
                        encode=topics[sink].encode,
                        map_partition=topics[sink].map_partition
                    ))

        # Assemble the kafka driver request: one Consumer entry when any
        # topic is consumed, one Producer entry when any topic is produced.
        kafka_sink = []
        if len(consumers) > 0:
            kafka_sink.append(kafka.Consumer(
                server=config['kafka']['endpoint'],
                group=config['application']['name'],
                # set() de-duplicates topics consumed by several operators.
                topics=rx.from_(set(consumers)),
                source_type=source_type,
                feed_mode=datafeed_mode,
            ))
        if len(producers) > 0:
            kafka_sink.append(kafka.Producer(
                server=config['kafka']['endpoint'],
                topics=rx.from_(producers)
            ))
        kafka_sink = rx.from_(kafka_sink) if len(kafka_sink) > 0 else rx.never()
        return kafka_sink
    except Exception as e:
        # Best-effort diagnostics; NOTE(review): the error is printed and
        # swallowed, so the caller receives None instead of an observable.
        print("Error while creating operators: {}, {}".format(
            e, traceback.format_exc()))
def create():
    # Debounce over a never-emitting source: the timer is never started.
    debounced = _.debounce(10)
    return never().pipe(debounced)
def create_observable(d):
    # Record that the factory ran, then hand back a never-emitting stream.
    create_invoked[0] += 1
    return rx.never()
def create():
    # Sampling a never-emitting source: no sample ever has a value.
    sampled = ops.sample(1)
    return rx.never().pipe(sampled)
def create():
    # Seeded scan over a never-emitting source; the accumulator never runs.
    return never().pipe(_.scan(seed=seed, accumulator=lambda acc, x: acc + x))
def duration_mapper(_):
    # Every element opens a duration window that never closes.
    return rx.never()
def _timeout_with_mapper(
        first_timeout: Optional[Observable] = None,
        timeout_duration_mapper: Optional[Callable[[Any], Observable]] = None,
        other: Optional[Observable] = None
) -> Callable[[Observable], Observable]:
    """Returns the source observable sequence, switching to the other
    observable sequence if a timeout is signaled.

    res = timeout_with_mapper(rx.timer(500))
    res = timeout_with_mapper(rx.timer(500), lambda x: rx.timer(200))
    res = timeout_with_mapper(rx.timer(500), lambda x: rx.timer(200)), rx.return_value(42))

    Args:
        first_timeout -- [Optional] Observable sequence that represents
            the timeout for the first element. If not provided, this
            defaults to rx.never().
        timeout_duration_mapper -- [Optional] Selector to retrieve an
            observable sequence that represents the timeout between the
            current element and the next element.
        other -- [Optional] Sequence to return in case of a timeout. If
            not provided, this is set to rx.throw().

    Returns:
        The source sequence switching to the other sequence in case of
        a timeout.
    """
    first_timeout = first_timeout or rx.never()
    other = other or rx.throw(Exception('Timeout'))

    def timeout_with_mapper(source: Observable) -> Observable:
        def subscribe(observer, scheduler=None):
            # subscription holds whichever upstream is currently feeding
            # the observer (source, then possibly `other` after timeout).
            subscription = SerialDisposable()
            # timer holds the subscription to the current timeout window.
            timer = SerialDisposable()
            original = SingleAssignmentDisposable()
            subscription.disposable = original

            # NOTE(review): `switched` is initialized but never set True
            # anywhere in this body — observer_wins() therefore always
            # returns True; confirm against the upstream implementation.
            switched = False
            # Monotonic generation counter (boxed so closures can mutate
            # it): each new element invalidates the previous timeout.
            _id = [0]

            def set_timer(timeout: Observable) -> None:
                # Capture the generation this timer belongs to; the timer
                # only "wins" if no newer element arrived meanwhile.
                my_id = _id[0]

                def timer_wins():
                    return _id[0] == my_id

                d = SingleAssignmentDisposable()
                timer.disposable = d

                def on_next(x):
                    # Timeout fired: switch the observer over to `other`.
                    if timer_wins():
                        subscription.disposable = other.subscribe(
                            observer, scheduler=scheduler)
                    d.dispose()

                def on_error(e):
                    if timer_wins():
                        observer.on_error(e)

                def on_completed():
                    # A completing timeout also counts as a timeout.
                    if timer_wins():
                        subscription.disposable = other.subscribe(observer)

                d.disposable = timeout.subscribe_(
                    on_next, on_error, on_completed, scheduler)

            # Arm the timeout for the first element.
            set_timer(first_timeout)

            def observer_wins():
                # The source beat the timer: bump the generation so the
                # pending timer is invalidated.
                res = not switched
                if res:
                    _id[0] += 1
                return res

            def on_next(x):
                if observer_wins():
                    observer.on_next(x)
                    # Derive the next per-element timeout; mapper errors
                    # are forwarded to the observer.
                    timeout = None
                    try:
                        timeout = timeout_duration_mapper(x)
                    except Exception as e:
                        observer.on_error(e)
                        return

                    set_timer(timeout)

            def on_error(error):
                if observer_wins():
                    observer.on_error(error)

            def on_completed():
                if observer_wins():
                    observer.on_completed()

            original.disposable = source.subscribe_(
                on_next, on_error, on_completed, scheduler)
            return CompositeDisposable(subscription, timer)
        return Observable(subscribe)
    return timeout_with_mapper
def create():
    # time_interval over a never-emitting source: no intervals are produced.
    timed = ops.time_interval()
    return rx.never().pipe(timed)