def __init__(self):
     self.initRequest = Subject()
     self.addSegmentConfigRequest = Subject()
     self.removeSegmentConfigRequest = Subject()
     self.updateSegmentConfigRequest = Subject()
     self.generateManipulatorRequest = Subject()
     self.updateTensionsRequest = Subject()
     self.computeStateRequest = Subject()
     
     segment_config_repo_op = ops.merge(
         self.initRequest.pipe(
             ops.map(Repo().publish_init_segments_config)
         ),
         self.addSegmentConfigRequest.pipe(
             ops.map(Repo().add_segment)
         ),
         self.removeSegmentConfigRequest.pipe(
             ops.map(Repo().remove_segment)
         ), 
         self.updateSegmentConfigRequest.pipe(
             ops.map(Repo().update_segment_config)
         )
     )
     self._segment_configs_stream = Observable().pipe(
         segment_config_repo_op,
     )
     
     # TODO
     self._segment_configs_err_stream = Observable().pipe(
         segment_config_repo_op,
          ops.flat_map(lambda x: rx.just(Repo().get_error()))  # rx.just (RxPY 3) replaces the removed Observable.just
     )
     
     self._tension_inputs_stream = Observable().pipe(
         ops.merge(
             self.generateManipulatorRequest.pipe(
                 ops.map(Repo().generate_manipulator)
             ),
             self.updateTensionsRequest.pipe(
                 ops.map(Repo().updateTensions)
             ),
         )
     )
     
     compute_state_result = self.computeStateRequest.pipe(
         ops.map(Repo().computeTensions)
     )
     
     self._text_result_stream = compute_state_result.pipe(
         ops.map(format_manipulator)
     )
     
     self._graph_stream = compute_state_result.pipe(
     )
Example #2
    def __init__(self,
                 concurrency_per_group,
                 delay_seconds=0,
                 description=None):
        self._queue = Subject()
        self._description = description
        self.request_scheduler = ThreadPoolScheduler(concurrency_per_group)
        producer_scheduler = ThreadPoolScheduler(concurrency_per_group)

        def on_next(result):
            output = result['output']
            output.on_next({
                'value': result.get('value'),
                'completed': result.get('completed')
            })

        self._subscription = self._queue.pipe(
            observe_on(producer_scheduler), group_by(lambda r: r['group']),
            flat_map(lambda concurrency_group: concurrency_group.pipe(
                map(lambda r: r['work']),
                delay(delay_seconds),
                merge(max_concurrent=concurrency_per_group),
                merge_all(),
            )), take_until_disposed()).subscribe(
                on_next=on_next,
                on_error=lambda error: logging.exception(
                    'Error in {} request stream'.format(self)),
                scheduler=producer_scheduler)
Example #3
    def mergeOperator(self):
        source = of(1, 2, 3, 4, 5)
        source2 = of(11, 12, 13, 14, 15, 16)

        result = source.pipe(op.merge(source2))

        result.subscribe(
            lambda item: print('"Subscribe Merge" output: {}'.format(item)))
Example #4
def merge_(
    stream1: Union[rx.typing.Observable[T1], rx.typing.Subject[T1, T1]],
    stream2: Union[rx.typing.Observable[T2], rx.typing.Subject[T2, T2]],
) -> rx.typing.Observable[Union[T1, T2]]:
    return cast(rx.Observable, stream1)\
        .pipe(
            ops.merge(cast(rx.Observable, stream2))
        )
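A hypothetical usage of the merge_ helper above, assuming RxPY 3.x; the element values are invented, and the point is only that the merged stream carries Union[T1, T2] items.

import rx

ints = rx.of(1, 2, 3)          # rx.Observable[int]
words = rx.of("a", "b")        # rx.Observable[str]

# Static type of the result is rx.typing.Observable[Union[int, str]].
merge_(ints, words).subscribe(print)   # prints 1, 2, 3, then "a", "b" on the default scheduler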
Example #5
def test_spot_instance_checker_through_404(requests_mock):
    requests_mock.get(INSTANCE_ACTION_URL, text="test", status_code=404)

    o = spot_instance_check_observable(0.1).pipe(ops.merge(rx.timer(0.5)),
                                                 ops.first())

    def on_next(x):
        assert x == 0

    o.subscribe(on_next=on_next, scheduler=CurrentThreadScheduler())
Example #6
def test_spot_instance_checker_terminate(requests_mock):
    body = {"action": "terminate", "time": "2017-09-18T08:22:00Z"}
    requests_mock.get(INSTANCE_ACTION_URL, json=body)

    o = spot_instance_check_observable(0.1).pipe(ops.merge(rx.timer(0.5)),
                                                 ops.first())

    def on_next(x):
        assert isinstance(x, CheckerMessage)
        assert x.checker_type == "spot_instance"
        assert x.body == f'"spot/instance-action": {body}'

    o.subscribe(on_next=on_next, scheduler=CurrentThreadScheduler())
Example #7
def test_spot_instance_checker_observer(requests_mock):
    with mock.patch("igata.checkers.aws.observers.logger.info") as mock_method:
        requests_mock.get(INSTANCE_ACTION_URL,
                          json={
                              "action": "terminate",
                              "time": "2017-09-18T08:22:00Z"
                          })

        o = spot_instance_check_observable(0.1).pipe(
            ops.merge(rx.timer(0.3)), ops.take(2),
            ops.filter(lambda x: isinstance(x, CheckerMessage)))

        o.subscribe(CheckersObserver(), scheduler=CurrentThreadScheduler())
        mock_method.assert_called_once()
Example #8
    def complexMerge(self):
        # This is infinite
        source = interval(1).pipe(
            op.map(lambda item: item * 10),
            op.map(lambda second: self.intense_calculation(second)))

        source2 = of('Alpha', 'Beta', 'Gamma', 'Delta', 'Epsilon').pipe(
            op.map(lambda second: self.intense_calculation(second)))

        result = source.pipe(op.merge(source2))

        result.subscribe(on_next=lambda item: print(
            '"Subscribe Complex Merge" output: {}'.format(item)),
                         on_error=lambda err: print('Error: {}'.format(err)))

        input('Press any key to exit\n')
Example #9
    def timestampOperator(self):
        source = interval(1).pipe(
            op.map(lambda second: self.intense_calculation(second)),
            op.timestamp()
        )
        
        source2 = of('Alpha', 'Beta', 'Gamma', 'Delta', 'Epsilon').pipe(
            op.map(lambda second: self.intense_calculation(second)),
            op.timestamp()
        )

        result = source.pipe(
            op.merge(source2)
        )

        result.subscribe(lambda item: print('"Subscribe Timestamp" output: {}'.format(item)))
Example #10
def getCategoryAndDependencies(selectedJobId):
    return rx.of(selectedJobId).pipe(
        ops.flat_map(
            lambda selectedJobId: getCategory(selectedJobId)
        ),
        ops.flat_map(
            lambda category: getTestruns(category)
        ),
        ops.flat_map(
            lambda testruns: rx.from_(testruns)
        ),
        ops.map(
            lambda testrun: getTestrunDependencies(testrun)
        ),
        ops.merge(max_concurrent=1),
    )
Example #11
 def __init__(self, concurrency_per_group, description=None):
     self.scheduler = ThreadPoolScheduler(concurrency_per_group)
     self._requests = Subject()
     self._output = ReplaySubject()
     self._description = description
     self._subscription = self._requests.pipe(
         group_by(lambda r: r['concurrency_group']),
         flat_map(lambda concurrency_group: concurrency_group.pipe(
             map(lambda r: r['request']),
             merge(max_concurrent=concurrency_per_group)))).subscribe(
                 on_next=lambda request: self._output.on_next(request),
                 on_error=lambda error: logging.exception(
                     'Error in {} request stream'.format(self)),
                 on_completed=lambda: logging.error(
                     '{} request stream unexpectedly completed'.format(self
                                                                       )),
                 scheduler=self.scheduler)
Example #12
    def play_step(self, step, cancel):
        interval = rx.interval(0.1)
        interval_steps = rx.just(step).pipe(
            ops.flat_map(lambda step: interval.pipe(ops.map(lambda _: step))))

        step_done = interval_steps.pipe(
            ops.filter(lambda step: self.player.position() >= step.step_end),
            ops.do_action(
                lambda step: self.player.set_position(step.loop_start)),
            ops.take(1))

        loop_done = interval_steps.pipe(
            ops.filter(lambda step: self.player.position() >= step.loop_end),
            ops.do_action(
                lambda step: self.player.set_position(step.loop_start)),
            ops.take_until(cancel.pipe(ops.skip(1))))

        return step_done.pipe(ops.merge(loop_done))
Example #13
    def __init__(self):
        self.__rawPluginOutputStream = Subject()
        self.__connectionStatusStream = Subject()

        self.__speechStream = with_latest_from(
            self.__rawPluginOutputStream.pipe(
                map(lambda dehydratedMsgDict: rehydrateMessage(
                    dehydratedMsgDict))),
            self.__connectionStatusStream,
        ).pipe(
            map(lambda combinedTuple: {
                **combinedTuple[0],
                **combinedTuple[1]
            }),
            merge(self.__connectionStatusStream),
        )

        # TODO - need to trigger NVDA to startup, if it isn't already
        #      - first need to check if NVDA is installed + if the plugin is

        asyncio.create_task(self.__startListeningForOutput())
Example #14
 def __init__(self,
              concurrency_per_group,
              delay_seconds=0,
              description=None):
     self.scheduler = ThreadPoolScheduler(concurrency_per_group)
     self.request_scheduler = ThreadPoolScheduler(10)
     self._requests = Subject()
     self._output_subject = Subject()
     self._output = self._output_subject.pipe(share())
     self._description = description
     self._subscription = self._requests.pipe(
         observe_on(self.scheduler),
         group_by(lambda r: r['concurrency_group']),
         flat_map(lambda concurrency_group: concurrency_group.pipe(
             map(lambda r: r['request']),
             merge(max_concurrent=concurrency_per_group),
             delay(delay_seconds))),
         take_until_disposed()).subscribe(
             on_next=lambda request: self._output_subject.on_next(request),
             on_error=lambda error: logging.exception(
                 'Error in {} request stream'.format(self)),
             on_completed=lambda: self.dispose(),
             scheduler=self.scheduler)
Example #15
import rx
from rx import operators as op
from rx.subject import Subject
import datetime

print('Sum of 1 to 100')
rx.range(1, 101).pipe(
    op.reduce(lambda acc, i: acc + i, 0)
).subscribe(lambda i: print(i))

# Operate on the data stream
print('Find all even numbers')
some_data = rx.of(1, 2, 3, 4, 5, 6, 7, 8)
some_data2 = rx.from_iterable(range(10, 20))
some_data.pipe(
    op.merge(some_data2),
    op.filter(lambda i: i % 2 == 0),
    # op.map(lambda i: i * 2)
).subscribe(lambda i: print(i))

# debounce operator: only items that fall outside the time interval are emitted
print('Prevent duplicate emissions')
ob = Subject()
ob.pipe(
    op.throttle_first(3)
    # op.debounce(3)
).subscribe(
    on_next=lambda i: print(i),
    on_completed=lambda: print('Completed')
)
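A small sketch, assuming RxPY 3.x, contrasting the two operators mentioned in the comment above: throttle_first(3) forwards the first item of each 3-second window, while debounce(3) waits for 3 seconds of silence before emitting the latest item.

from rx import operators as op
from rx.subject import Subject

clicks = Subject()
clicks.pipe(op.throttle_first(3.0)).subscribe(lambda i: print('throttle_first:', i))
clicks.pipe(op.debounce(3.0)).subscribe(lambda i: print('debounce:', i))

# A rapid burst of on_next calls followed by a pause would show the difference:
# throttle_first emits the first item of the burst immediately,
# debounce emits only the last item, 3 seconds after the burst ends.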
Example #16
    ctxmgr_settings = collect_ctxmgr_settings(args.input_ctx_manager,
                                              args.output_ctx_manager)
    for setting in ctxmgr_settings:
        logger.info(f"{setting}: {ctxmgr_settings[setting]}")

    scheduler = ThreadPoolScheduler(max_workers=1)
    checkers_observable = rx.empty()

    if settings.INSTANCE_ON_AWS:
        logger.info(f"instance_type: {get_instance_type()}"
                    )  # Assumes being run on AWS EC2 instance
        if args.is_spot_instance or settings.AWS_ENABLE_SPOTINSTANCE_STATE_LOGGING:
            logger.info(f"Start spot_instance_observable monitoring...")
            spot_instance_observable = spot_instance_check_observable()
            checkers_observable = checkers_observable.pipe(
                ops.merge(spot_instance_observable))
    elif args.is_spot_instance or settings.AWS_ENABLE_SPOTINSTANCE_STATE_LOGGING:
        logger.warning(
            f'"--spot-instance" flag or AWS_ENABLE_SPOTINSTANCE_STATE_LOGGING envar given, '
            f"but INSTANCE_ON_AWS == False, logging NOT performed!")

    checkers_observable.pipe(ops.publish()).connect(scheduler=scheduler)
    checkers_observable.subscribe(CheckersObserver())

    input_values = None
    if args.inputs:
        input_values = [v.strip() for v in args.inputs.split(",")]

    summary = execute_prediction(
        predictor=external_predictor,
        input_ctx_manager=args.input_ctx_manager,
Example #17
 def create():
     return xs.pipe(ops.merge(ys))
Example #18
 def create():
     return xs.pipe(ops.merge(max_concurrent=2))
Example #19

class MyObserver(Observer):
    def on_next(self, x):
        print("Got: %s" % x)

    def on_error(self, e):
        print("Got error: %s" % e)

    def on_completed(self):
        print("Sequence completed")


print("------ observer ------")
range(5).subscribe(MyObserver())

print("------ of ------")
of(7, 8).pipe(op.map(lambda x: x * 3)).subscribe(print)
of([7, 8]).pipe(op.map(lambda x: x * 3)).subscribe(print)

print("------ map ------")
range(10).pipe(op.map(lambda x: x * 2)).subscribe(print)

print("------ map ------")
range(10, 20, 2).pipe(op.map(lambda x: "%d" % (x * 2))).subscribe(print)

print("------ merge ------")
s1 = range(1, 5)
s2 = of("abcde", "def")
s1.pipe(op.merge(s2)).subscribe(print)
Example #20
import rx
from rx import operators as ops

items = ["134/34/235/132/77", "64/22/98/112/86/11", "66/08/34/778/22/12"]

# Starting with RxPY v3.0, concat_all() was removed; use the merge operator with the max_concurrent argument set to 1 instead.

rx.from_(items).pipe(
    ops.map(lambda s: rx.from_(s.split("/"))),
    ops.merge(max_concurrent=1),
    ops.map(lambda s: int(s))
).subscribe(
    lambda i: print(i)
)
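A minimal check of that claim, assuming RxPY 3.x: with max_concurrent=1 the inner observables are subscribed one at a time, which reproduces the old concat_all() ordering.

import rx
from rx import operators as ops

rx.from_(["1/2", "3/4"]).pipe(
    ops.map(lambda s: rx.from_(s.split("/"))),
    ops.merge(max_concurrent=1),   # inner streams consumed sequentially
).subscribe(print)                 # prints 1, 2, 3, 4 in source order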




"""Error Handling"""
op.catch()
op.retry()

"""Utility"""
op.delay()
op.materialize()
op.time_interval()
op.timeout()
op.timestamp()

"""Conditional and Boolean"""
op.all()
op.contains()
op.default_if_empty()
op.sequence_equal()
op.skip_until()
op.skip_while()
op.take_until()
op.take_while()

"""Connectable"""
op.publish()
op.ref_count()
op.replay()

"""Combining"""
op.combine_latest()
op.merge()
op.start_with()
op.zip()
Example #22
def throw_when(errors: Observable) -> Callable[[Observable], Observable]:
    return rx.pipe(merge(errors.pipe(flat_map(lambda e: throw(e)))))
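A hypothetical usage sketch of throw_when (the data/errors names are invented, not from the original): items pass through untouched until the external error stream emits, at which point the pipeline terminates with that error.

from rx.subject import Subject

data = Subject()
errors = Subject()

data.pipe(throw_when(errors)).subscribe(
    on_next=print,
    on_error=lambda e: print('failed:', e))

data.on_next(1)                       # prints 1
errors.on_next(ValueError('boom'))    # pipeline errors: failed: boom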