示例#1
0
 def test_without_bp(self):
     """Drive the source through a slow consumer with no backpressure handling."""
     flow = self.source.pipe(
         operators.do_action(lambda x: print(f"Producing {x}")),
         operators.map(self.slow_op),
         operators.do_action(self.stop),
         operators.take_until(self._stop),
     )
     flow.run()
示例#2
0
 def test_with_buffer(self):
     """Drive the source through a slow consumer with BUFFER backpressure."""
     flow = self.source.pipe(
         operators.do_action(lambda x: print(f"Producing {x}")),
         bp_operator(BackPressure.BUFFER),
         operators.map(self.slow_op),
         operators.do_action(self.stop),
         operators.take_until(self._stop),
     )
     flow.run()
示例#3
0
 def test_with_observe_on(self):
     """Drive the source through a slow consumer on a separate thread."""
     flow = self.source.pipe(
         operators.do_action(lambda x: print(f"Producing {x}")),
         operators.observe_on(NewThreadScheduler()),
         operators.map(self.slow_op),
         operators.do_action(self.stop),
         operators.take_until(self._stop),
     )
     flow.run()
示例#4
0
 def test_attach_size(self):
     """Drive the source through a DROP backpressure operator of size 3."""
     flow = self.source.pipe(
         operators.do_action(lambda x: print(f"Producing {x}")),
         bp_drop_operator_attach_size(3),
         operators.do_action(print),
         operators.map(self.slow_op),
         operators.do_action(self.stop),
         operators.take_until(self._stop),
     )
     flow.run()
示例#5
0
 def run_metaepoch(self) -> Observable:
     """Run one metaepoch if this node is alive, otherwise emit nothing.

     Returns an Observable of per-step messages annotated with node info;
     cost bookkeeping and the end-of-metaepoch hook run as side effects.
     """
     if not self.alive:
         # Dead node: complete immediately with no items.
         return rx.empty()

     job = StepsRun(self.metaepoch_len)
     return job.create_job(self.driver).pipe(
         ops.map(self.fill_node_info),
         ops.do_action(self.update_current_cost),
         ops.do_action(on_completed=self._after_metaepoch),
     )
示例#6
0
 def download(destination_file):
     """Download a file in chunks, emitting progress dictionaries.

     :param destination_file: target path; created with ``os.makedirs`` first.
     :return: Observable of ``{'progress', 'downloaded_bytes', 'file'}`` dicts,
         ending with the item whose progress reaches 1.
     """
     # BUG FIX: the original wrapped this call in ``do_action(...)`` and then
     # discarded the resulting operator, so makedirs was never executed.
     # Perform the side effect directly instead.
     os.makedirs(destination_file, exist_ok=True)
     return of(create_downloader(destination_file)).pipe(
         flat_map(lambda downloader: forever().pipe(
             map(lambda _: next_chunk(downloader)),
             # inclusive=True keeps the final item where progress >= 1.
             take_while(lambda progress: progress < 1, inclusive=True),
             map(
                 lambda progress: {
                     'progress': progress,
                     # NOTE(review): ``file`` is a free variable from an
                     # enclosing scope -- confirm it holds the file metadata.
                     'downloaded_bytes': int(int(file['size']) * progress),
                     'file': file
                 }))))
示例#7
0
def model_publisher(scheduler, sources):
    """Wire the model-publisher dataflow and return its sink requests.

    Reads configuration via the file/http drivers, then derives a kafka
    topic-creation request from the first emitted config.
    """
    # Share the file responses so multiple consumers cause a single subscription.
    file_source = sources.file.response.pipe(ops.share())

    # kafka driver bootstrap. fixme
    # replay + ref_count caches kafka responses for late subscribers; the bare
    # subscribe() below activates the shared connection immediately.
    kafka_source = sources.kafka.response.pipe(
        ops.do_action(print),
        ops.replay(),
        ops.ref_count(),
    )
    kafka_source.subscribe()

    config, config_read_request, http_request = read_config_from_args(
        sources.argv.argv,
        file_source,
        sources.http.response,
        scheduler=scheduler)

    # Only the first emitted configuration is used.
    config = config.pipe(ops.first())

    kafka_request = config.pipe(ops.map(lambda c: create_model_topics(c)), )

    return ModelPublisherSink(
        file=file.Sink(request=rx.merge(config_read_request)),
        http=http.Sink(request=http_request),
        kafka=kafka.Sink(request=kafka_request),
    )
示例#8
0
def run_parallel(args):
    """Run all simulation cases through a worker pool and log a summary.

    ``args`` maps CLI flags to values; ``args["-j"]`` sets the process count.
    """
    worker_factory, simulation_cases = factory.resolve_configuration(args)

    logger.debug("Shuffling the job queue")
    random.shuffle(simulation_cases)

    logger.debug("Creating the pool")

    processes_no = int(args["-j"])
    rxtools.configure_default_executor(processes_no)

    wall_time = []
    start_time = datetime.now()
    results = []
    logger.debug("Simulation cases: %s", simulation_cases)
    logger.debug("Work will be divided into %d processes", processes_no)

    sys = ActorSystem("multiprocTCPBase", logDefs=log_helper.EVOGIL_LOG_CONFIG)

    with log_time(system_time, logger, "Pool evaluated in {time_res}s", out=wall_time):

        def process_result(subres):
            # Collect each sub-result and log running statistics.
            results.append(subres)
            log_simulation_stats(start_time, subres[-1], len(simulation_cases))

        # NOTE(review): workers run sequentially here (``w.run()`` inside map);
        # the commented-out lines show an intended parallel variant -- confirm
        # which behavior is desired before re-enabling.
        rx.from_iterable(range(len(simulation_cases))).pipe(
            ops.map(lambda i: worker_factory(simulation_cases[i], i)),
            # ops.map(lambda w: rxtools.from_process(w.run)),
            ops.map(lambda w : w.run()),
            # ops.merge(max_concurrent=1)
            ops.do_action(on_next=process_result)
        ).run()
    log_summary(args, results, simulation_cases, wall_time)
    rxtools.shutdown_default_executor()
    sys.shutdown()
        def create():
            # Builds the observable under test: taps each item with ``action``,
            # which counts calls in i[0] and keeps a running negative total in
            # sum[0] (closed-over one-element lists). do_action ignores the
            # return value.
            def action(x):
                i[0] += 1
                sum[0] -= x
                return sum[0]

            return xs.pipe(_.do_action(action))
示例#10
0
def main():
    """Capture video frames, detect faces, and save frames whose face
    confidence falls inside the configured [min, max] window."""
    loop = asyncio.get_event_loop()
    io_scheduler = AsyncIOThreadSafeScheduler(loop=loop)
    scheduler = ThreadPoolScheduler(multiprocessing.cpu_count())

    # rx.using ties the capture device's lifetime to the subscription.
    video_stream_observable = rx.using(
        lambda: VideoStreamDisposable(),
        lambda d: rx.from_iterable(video_stream_iterable(d.cap)))

    disposable = video_stream_observable.pipe(
        ops.subscribe_on(scheduler),
        ops.sample(1 / ARGS.fps),  # sample frames based on fps
        ops.filter(has_face),  # filter frames without faces
        ops.map(lambda frame: Image.fromarray(
            cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))),  # map frame to PIL image
        # Hop to the asyncio scheduler for the (presumably I/O-bound) analysis.
        ops.observe_on(io_scheduler),
        ops.map(lambda img: ImageFacesPair(img, analyse_frame(img))
                ),  # analyse faces on frame
        ops.filter(
            lambda img_faces_pair: any([
                face.top_prediction.confidence >= ARGS.min_confidence and face.
                top_prediction.confidence <= ARGS.max_confidence
                for face in img_faces_pair.faces
            ])
        ),  # proceed only if min_confidence <= person_confidence <= max_confidence
        ops.do_action(on_next=save_frame)).subscribe(
            on_error=lambda e: logger.exception(e))

    try:
        loop.run_forever()
    except Exception as e:
        logger.exception(e)
        logger.info("Data collector shutdown")
        disposable.dispose()
示例#11
0
    def run_driver(self, driver: Driver, problem_mod: ModuleType,
                   logger: logging.Logger):
        """Run *driver* once per configured budget and collect results.

        :param driver: optimization driver to execute.
        :param problem_mod: problem module providing ``fitnesses``.
        :param logger: logger for progress messages.
        :return: list of ``(cost, final_population)`` tuples, one per budget.
        """
        serializer = Serializer(self.simulation)
        results = []

        def process_results(budget: int):
            # Evaluate every fitness on every individual of the finalized
            # population and persist the result keyed by the budget.
            finalpop = driver.finalized_population()
            finalpop_fit = [[fit(x) for fit in problem_mod.fitnesses]
                            for x in finalpop]
            serializer.store(Result(finalpop, finalpop_fit, cost=driver.cost),
                             str(budget))
            results.append((driver.cost, finalpop))

        driver.max_budget = self.budgets[-1]
        for budget in self.budgets:
            budget_run = BudgetRun(budget)
            # BUG FIX: bind the current ``budget`` as a default argument.
            # The original lambdas closed over the loop variable, so any
            # callback firing after the loop advanced would report / store
            # under the wrong (latest) budget value.
            budget_run.create_job(driver).pipe(
                ops.do_action(
                    on_completed=lambda budget=budget: process_results(budget))
            ).subscribe(lambda proxy, budget=budget: logger.debug(
                "{}{} : Driver progress: budget={}, current cost={}, driver step={}"
                .format(
                    self.simulation.algorithm_name,
                    self.simulation.problem_name,
                    budget,
                    proxy.cost,
                    proxy.step_no,
                )))
        return results
示例#12
0
    def run_driver(self, driver: Driver, problem_mod: ModuleType,
                   logger: logging.Logger):
        """Run *driver* under a wall-clock budget, sampling results at a
        fixed interval until the timeout elapses.

        :param driver: optimization driver to execute.
        :param problem_mod: problem module providing ``fitnesses``.
        :param logger: logger (currently unused in this body).
        :return: list of ``(cost, final_population)`` tuples, one per sample.
        """
        results = []
        timeout = self.simulation.params[factory.TIMEOUT_PARAM]
        sampling_interval = self.simulation.params[
            factory.SAMPLING_INTERVAL_PARAM]

        serializer = Serializer(self.simulation)

        # Tracks which elapsed-time slots have already produced a result.
        slots_filled = set()

        time_run = TimeRun(sampling_interval, timeout)

        def process_results(msg: TimeProgressMessage):
            # Snapshot the population at this time slot, score it, and persist
            # it keyed by the elapsed time.
            finalpop = driver.finalized_population()
            print(f"final pop result: {finalpop}")
            finalpop_fit = [[fit(x) for fit in problem_mod.fitnesses]
                            for x in finalpop]

            time_slot = msg.elapsed_time

            serializer.store(Result(finalpop, finalpop_fit, cost=driver.cost),
                             str(time_slot))
            results.append((driver.cost, finalpop))
            slots_filled.add(time_slot)

        time_run.create_job(driver).pipe(
            ops.do_action(on_next=process_results)).run()

        return results
示例#13
0
def test_multiplex():
    """multiplex() must pass items through unchanged while the inner pipe
    observes the full mux event sequence (create, nexts, completed)."""
    source = [1, 2, 3, 4]
    actual_error = []
    actual_completed = []
    actual_result = []
    mux_actual_result = []

    def on_completed():
        actual_completed.append(True)

    rx.from_(source).pipe(
        rs.ops.multiplex(rx.pipe(ops.do_action(
            mux_actual_result.append), ), ), ).subscribe(
                on_next=actual_result.append,
                on_completed=on_completed,
                on_error=actual_error.append,
            )

    assert actual_error == []
    assert actual_completed == [True]
    assert actual_result == source
    # The inner pipeline sees the raw mux protocol events for channel (0,).
    assert mux_actual_result == [
        rs.OnCreateMux((0, )),
        rs.OnNextMux((0, ), 1),
        rs.OnNextMux((0, ), 2),
        rs.OnNextMux((0, ), 3),
        rs.OnNextMux((0, ), 4),
        rs.OnCompletedMux((0, )),
    ]
示例#14
0
def get_products(shop: Dict[str, str], search_term: str,
                 options) -> Observable:
    """Scrape product names and prices for *search_term* from *shop*.

    :param shop: mapping with 'url', per-field 'xpath' selectors and
        'priceRegexp'.
    :param search_term: term appended to the shop URL.
    :param options: browser launch options.
    :return: Observable of ``{'name', 'price'}`` dicts; the browser is closed
        when the stream terminates.
    """
    # BUG FIX: raw string for the regex -- "\." in a plain string literal is
    # an invalid escape sequence (SyntaxWarning today, an error in future
    # Python versions). Also fixed the "Lauching" typo in the log message.
    domain = re.findall(r"\.(.+)\.com", shop['url'])[0]
    print(f"Launching {domain}")

    browser = launch_browser(f"{shop['url']}{search_term}", options, shop)

    base_obs = rx.of(browser).pipe(
        ops.do_action(
            lambda el: print(f"Getting products prices from {domain}")),
        # One emission per product node matched by the shop-specific XPath.
        ops.flat_map(lambda browser: rx.from_(
            browser.find_elements_by_xpath(shop["xpath"]["parent"]))),
        ops.filter(lambda el: el.is_displayed()),
        ops.map(lambda el: (
            get_property(el, shop["xpath"]["product_name"]),
            get_property(el, shop["xpath"]["price"]),
        )),
        # Drop entries with a missing name or price.
        ops.filter(lambda el: el[0] and el[1]),
        ops.map(lambda el: {
            "name": el[0],
            "price": el[1]
        }),
        ops.map(lambda product: transform_price(product, shop["priceRegexp"])),
        # Always release the browser, on completion or error.
        ops.finally_action(lambda: browser.close()),
    )

    return base_obs
示例#15
0
    def timeout_check(process: subprocess.Popen, timeout: float,
                      ct: CancellationToken) -> rx.Observable:
        """Kills the given process after timeout has passed.

        Args:
            process: process to be killed
            timeout: termination time in seconds
            ct: CancellationToken

        Return:
            rx.Observable that fires only a single dictionary after the timeout
            has passed.
        """
        return rx.timer(timeout).pipe(
            ops.do_action(
                # Request cancellation so all simulation processes that
                # share the same cancellation_token are also stopped.
                # TODO not working as intended if simulation is short enough
                #   to stop before max_time has elapsed. Maybe let caller
                #   implement its own timeout check when multiple processes
                #   are being run.
                on_next=lambda _: ct.request_cancellation()),
            ops.map(
                lambda _: {
                    # sutils.kill_process returns None, which can be casted to bool
                    MCERD.IS_RUNNING:
                    bool(sutils.kill_process(process)),
                    MCERD.MSG:
                    MCERD.SIM_TIMEOUT
                }),
            # first() guarantees exactly one emission before completing.
            ops.first())
示例#16
0
    def add(self, file_name: str, content: Union[bytes, Path], override: bool = False,
            config: DataRefRequestConfig = None) -> Observable['DataRef']:
        r"""Uploads a file with a given name and content to the storage.

        :param file_name: Name of the file to be uploaded.
        :param content: Content of the file to be uploaded in bytes or a Path pointing to a file location.
        :param override: (optional) Flag to specify if existing files should be overwritten. If set to false, the method
        will throw an exception when trying to overwrite an existing file.
        :param config: (optional) Interface which currently only contains onUploadProgress callback
        :return: :class:`Observable[DataRef]` object: Reference to the newly uploaded file
        :exception HttpError: Error thrown if the upload fails due to override being set to false.
        """
        file_uri = DataRef.concat_uri(self.uri, file_name)

        # Wrap raw bytes and filesystem paths behind a common upload interface.
        if isinstance(content, bytes):
            upload_content = DataUploadContentBytes(content)
        else:
            upload_content = DataUploadContentPath(content)

        number_of_chunks = ceil(upload_content.total_size_in_bytes / CHUNK_SIZE)
        upload = DataUpload(file_uri, upload_content, number_of_chunks, override, config)

        def execute_upload() -> DataRef:
            self.logger.debug(f"Started upload to uri {file_uri}")
            new_file_data_ref = upload.execute(self.client, self.scheduler)
            self.logger.debug(f"Finished upload to uri {file_uri}")
            return new_file_data_ref

        # BUG FIX: do_action's on_error callback receives the raised exception
        # as an argument; the original zero-argument lambda would itself raise
        # a TypeError and leak ``upload_content`` on a failed upload.
        return rx.from_callable(execute_upload).pipe(
            ops.do_action(on_error=lambda e: upload_content.close(),
                          on_completed=lambda: upload_content.close()))
示例#17
0
        def create():
            # Builds the observable under test: counts items in i[0], keeps a
            # running negative total in sum[0], and records completion.
            def on_next(x):
                i[0] += 1
                sum[0] -= x

            def on_completed():
                completed[0] = True
            return xs.pipe(_.do_action(on_next=on_next, on_completed=on_completed))
示例#18
0
def test_split():
    """data.split must start a new mux channel each time the key function's
    value changes, while forwarding all items unchanged downstream."""
    source = ["1a", "2a", "3b", "4b", "5c", "6c", "7c", "8d", "9d"]
    # Pre-encoded mux event stream equivalent to the list above.
    source = [
        rs.OnCreateMux((1, None)),
        rs.OnNextMux((1, None), '1a'),
        rs.OnNextMux((1, None), '2a'),
        rs.OnNextMux((1, None), '3b'),
        rs.OnNextMux((1, None), '4b'),
        rs.OnNextMux((1, None), '5c'),
        rs.OnNextMux((1, None), '6c'),
        rs.OnNextMux((1, None), '7c'),
        rs.OnNextMux((1, None), '8d'),
        rs.OnNextMux((1, None), '9d'),
        rs.OnCompletedMux((1, None)),
    ]
    actual_result = []
    mux_actual_result = []
    # NOTE: expected grouping by trailing letter; only used as documentation
    # here -- the assertions below compare against the mux event sequence.
    expected_result = [
        ["1a", "2a"],
        ["3b", "4b"],
        ["5c", "6c", "7c"],
        ["8d", "9d"],
    ]

    def on_next(i):
        actual_result.append(i)

    store = rs.state.StoreManager(store_factory=rs.state.MemoryStore)
    rx.from_(source).pipe(
        rs.cast_as_mux_observable(),
        rs.state.with_store(
            store,
            # Split on the last character of each item; the inner pipe records
            # the mux protocol events it observes.
            rs.data.split(lambda i: i[-1],
                          rx.pipe(ops.do_action(mux_actual_result.append), )),
        ),
    ).subscribe(on_next)

    # First event is the topology probe; the rest is one create/.../completed
    # burst per group of equal keys.
    assert type(mux_actual_result[0]) is rs.state.ProbeStateTopology
    assert mux_actual_result[1:] == [
        rs.OnCreateMux((1, (1, None)), store),
        rs.OnNextMux((1, (1, None)), '1a', store),
        rs.OnNextMux((1, (1, None)), '2a', store),
        rs.OnCompletedMux((1, (1, None)), store),
        rs.OnCreateMux((1, (1, None)), store),
        rs.OnNextMux((1, (1, None)), '3b', store),
        rs.OnNextMux((1, (1, None)), '4b', store),
        rs.OnCompletedMux((1, (1, None)), store),
        rs.OnCreateMux((1, (1, None)), store),
        rs.OnNextMux((1, (1, None)), '5c', store),
        rs.OnNextMux((1, (1, None)), '6c', store),
        rs.OnNextMux((1, (1, None)), '7c', store),
        rs.OnCompletedMux((1, (1, None)), store),
        rs.OnCreateMux((1, (1, None)), store),
        rs.OnNextMux((1, (1, None)), '8d', store),
        rs.OnNextMux((1, (1, None)), '9d', store),
        rs.OnCompletedMux((1, (1, None)), store),
    ]
    assert actual_result == source
示例#19
0
    def _conditional_printer(
            cond: bool, msg: str) -> Callable[[rx.Observable], rx.Observable]:
        """Return a printing operator when *cond* is true, else a no-op.

        :param cond: whether observed items should be printed.
        :param msg: completion message forwarded to the printer.
        :return: an rx operator; the fallback accepts any arguments and
            does nothing with them.
        """
        if not cond:
            # Silent stand-in for the printer.
            return ops.do_action(lambda *_: None)
        return observing.get_printer(msg)
示例#20
0
    def run_metaepoch(self):
        """Execute one metaepoch on the node's driver if it is still alive.

        Returns an Observable of step messages enriched with node info, or an
        empty Observable for a dead node. Cost tracking and the metaepoch-end
        hook run as side effects on the stream.
        """
        if not self.node.alive:
            self.log("DEAD SO EMPTY MSG")
            return rx.empty()

        self.log("ALIVE SO RUN EPOCH")
        self.metaepoch_cost = self.node.current_cost
        job = StepsRun(self.node.metaepoch_len)

        self.log(
            f"starting metaepoch, len={self.node.metaepoch_len}, pop_len={len(self.node.driver.population)} "
        )

        return job.create_job(self.node.driver).pipe(
            ops.map(self.fill_node_info),
            ops.do_action(self.update_current_cost),
            ops.do_action(on_completed=self._after_metaepoch),
        )
示例#21
0
    def play_step(self, step, cancel):
        """Loop playback within *step* until the step ends or *cancel* fires.

        Polls the player position every 0.1s; rewinds to ``step.loop_start``
        whenever the position passes ``step.loop_end``, and finishes once the
        position passes ``step.step_end``.
        """
        interval = rx.interval(0.1)
        # Emit the step object on every poll tick.
        interval_steps = rx.just(step).pipe(
            ops.flat_map(lambda step: interval.pipe(ops.map(lambda _: step))))

        # Fires once when playback passes the end of the step.
        # NOTE(review): it also rewinds to loop_start -- confirm that is the
        # intended behavior on step completion.
        step_done = interval_steps.pipe(
            ops.filter(lambda step: self.player.position() >= step.step_end),
            ops.do_action(
                lambda step: self.player.set_position(step.loop_start)),
            ops.take(1))

        # Keeps rewinding at the loop boundary until cancel emits a 2nd item.
        loop_done = interval_steps.pipe(
            ops.filter(lambda step: self.player.position() >= step.loop_end),
            ops.do_action(
                lambda step: self.player.set_position(step.loop_start)),
            ops.take_until(cancel.pipe(ops.skip(1))))

        return step_done.pipe(ops.merge(loop_done))
        def create():
            # Builds the observable under test: counts items in i[0], keeps a
            # running negative total in sum[0], and flags completion.
            def on_next(x):
                i[0] += 1
                sum[0] -= x

            def on_completed():
                completed[0] = True

            return xs.pipe(
                _.do_action(on_next=on_next, on_completed=on_completed))
示例#23
0
def get_printer(completed_msg=""):
    """Build an rx operator that echoes stream events to the console.

    Items go to stdout, errors to stderr, and completion prints the given
    message.

    Args:
        completed_msg: message shown when the observable completes.
    """
    def _print_error(err):
        print("Error:", err, file=sys.stderr)

    def _print_completed():
        print("Completed:", completed_msg)

    return ops.do_action(on_next=print,
                         on_error=_print_error,
                         on_completed=_print_completed)
示例#24
0
def test_closing_mapper_exclude():
    """time_split must close a window when the closing_mapper matches and,
    with include_closing_item=False, start the next window at that item."""
    source = [
        rs.OnCreateMux((1 ,None)),
        rs.OnNextMux((1, None), datetime(2020, 1, 2, second=1)),
        rs.OnNextMux((1, None), datetime(2020, 1, 2, second=2)),
        rs.OnNextMux((1, None), datetime(2020, 1, 2, second=3)),
        rs.OnNextMux((1, None), datetime(2020, 1, 2, second=4)),
        rs.OnNextMux((1, None), datetime(2020, 1, 2, second=5)),
        rs.OnNextMux((1, None), datetime(2020, 1, 2, second=6)),
        rs.OnNextMux((1, None), datetime(2020, 1, 2, second=10)),
        rs.OnNextMux((1, None), datetime(2020, 1, 2, second=12)),
        rs.OnCompletedMux((1, None)),
    ]
    actual_result = []
    mux_actual_result = []

    def on_next(i):
        actual_result.append(i)

    store = rs.state.StoreManager(store_factory=rs.state.MemoryStore)
    rx.from_(source).pipe(
        rs.cast_as_mux_observable(),
        rs.state.with_store(
            store,
            # Items are their own timestamps; windows also close after 5s of
            # activity or 3s of inactivity.
            rs.data.time_split(
                time_mapper=lambda i: i,
                active_timeout=timedelta(seconds=5),
                inactive_timeout=timedelta(seconds=3),
                closing_mapper=lambda i: i == datetime(2020, 1, 2, second=4),
                include_closing_item=False,
                pipeline=rx.pipe(
                    ops.do_action(mux_actual_result.append),
            )),
        ),
    ).subscribe(on_next)

    # First event is the topology probe; then one create/.../completed burst
    # per time window (seconds 1-3, 4-6, 10-12).
    assert type(mux_actual_result[0]) is rs.state.ProbeStateTopology
    assert mux_actual_result[1:] == [
        rs.OnCreateMux((1, (1, None)), store),
        rs.OnNextMux((1, (1, None)), datetime(2020, 1, 2, second=1), store),
        rs.OnNextMux((1, (1, None)), datetime(2020, 1, 2, second=2), store),
        rs.OnNextMux((1, (1, None)), datetime(2020, 1, 2, second=3), store),
        rs.OnCompletedMux((1, (1, None)), store),
        rs.OnCreateMux((1, (1, None)), store),
        rs.OnNextMux((1, (1, None)), datetime(2020, 1, 2, second=4), store),
        rs.OnNextMux((1, (1, None)), datetime(2020, 1, 2, second=5), store),
        rs.OnNextMux((1, (1, None)), datetime(2020, 1, 2, second=6), store),
        rs.OnCompletedMux((1, (1, None)), store),
        rs.OnCreateMux((1, (1, None)), store),
        rs.OnNextMux((1, (1, None)), datetime(2020, 1, 2, second=10), store),
        rs.OnNextMux((1, (1, None)), datetime(2020, 1, 2, second=12), store),
        rs.OnCompletedMux((1, (1, None)), store),
    ]
    assert actual_result == source
        def create():
            nonlocal completed

            # Never-terminating source: on_next counts items (none should
            # arrive) and on_completed flags completion (should never fire).
            def on_next(x):
                i[0] += 1

            def on_completed():
                nonlocal completed
                completed = True

            return rx.never().pipe(
                _.do_action(on_next=on_next, on_completed=on_completed), )
示例#26
0
        def create():
            nonlocal completed

            # Never-terminating source: on_next counts items (none should
            # arrive) and on_completed flags completion (should never fire).
            def on_next(x):
                i[0] += 1

            def on_completed():
                nonlocal completed
                completed = True
            return rx.never().pipe(
                _.do_action(on_next=on_next, on_completed=on_completed),
                )
示例#27
0
 def requestQuotes(self, tickers):
     """Start streaming quotes for *tickers*, grouped by symbol.

     The subscription handle is kept in ``self.quoteSubscription`` so it can
     be disposed later. Quote handling runs on the shared pool scheduler.
     """
     logger.debug(f'QuoteObserver.requestQuotes({tickers})')
     self.quoteSubscription = create(
         lambda o, s: beginStreamingQuotes(tickers, o, s)).pipe(
             op.subscribe_on(config.pool_scheduler),
             op.observe_on(config.pool_scheduler),
             op.do_action(lambda q: logger.debug(f'QO: {q}')),
             # One inner observable per quoted symbol.
             op.group_by(lambda q: q['symbol']),
         ).subscribe(on_next=self.handleQuote,
                     on_error=lambda e: logger.debug(e),
                     on_completed=lambda: logger.debug(
                         'QuoteObserver subscription completed'))
示例#28
0
    async def init(self):
        """Initialize metrics, RabbitMQ channels, request observers and HTTP
        routes; returns the configured aiohttp application."""
        self.app['REQUEST_COUNT'] = Counter('request_total', 'Total Incoming Request', ('path', 'method'), unit='requests')
        self.app['REQUEST_LATENCY'] = Summary('request_latency', 'Request Process Time', ('path', 'method'), unit='seconds')
        self.app['REQUEST_PROGRESS'] = Gauge('request_progress', 'Request in Progress', ('path', 'method'), unit='requests')

        # Establish connection to RabbitMQ
        self.rmqConn = await connect_robust(login='******', password='******')

        # Establish channel for order request and declare order queue
        self.channel['/order'] = await self.rmqConn.channel()
        await self.channel['/order'].declare_queue('order', durable=True)

        # Establish channel for account request and declare account queue
        self.channel['/account'] = await self.rmqConn.channel()
        await self.channel['/account'].declare_queue('account', durable=True)

        # Create disposable request observer for handling order request. Only request to /order will be passed to OrderHandler
        dispose = self.request.pipe(
            ops.filter(lambda i : i.path == '/order'),
            ops.do_action(self.logRequest),
            ops.filter(self.orderHandler.orderVerificator)
        ).subscribe(self.orderHandler, scheduler=AsyncIOScheduler)
        self.subscriptions.append(dispose)

        # Create disposable request observer for handling account request. Only request to /account will be passed to OrderHandler
        dispose = self.request.pipe(
            ops.filter(lambda i : i.path == '/account'),
            ops.do_action(self.logRequest),
            ops.filter(self.accountHandler.accountVerificator)
        ).subscribe(self.accountHandler, scheduler=AsyncIOScheduler)
        self.subscriptions.append(dispose)

        self.app.router.add_post('/order', self.dispatcher, name='order')
        self.app.router.add_get('/orders/{account}', self.orderQuery)
        self.app.router.add_post('/account', self.dispatcher, name='account')
        self.app.router.add_get('/accounts/{account}', self.accountQuery)
        self.app.router.add_get('/metrics', self.metrics)
        self.app.on_shutdown.append(self.on_shutdown)

        return self.app
示例#29
0
    def enqueue(self,
                observable: Observable,
                group: str = 'default-group',
                retries: int = 0,
                description: str = None) -> Observable:
        """Queue *observable* for execution in *group* and return an
        Observable of its values.

        :param observable: the work to run when the queue schedules it.
        :param group: logical group the work belongs to.
        :param retries: number of retry attempts with backoff on failure.
        :param description: human-readable label used in status logs.
        :return: Observable emitting the work's values on the request
            scheduler; errors are re-thrown to the subscriber.
        """
        def log_status(status):
            logging.debug(
                str({
                    'WorkQueue': str(self),
                    'group': group,
                    status: description
                }))

        log_status('ENQUEUED')
        # output carries values to the caller, errors carries failures, and
        # output_finalized signals that the caller stopped listening.
        output = Subject()
        errors = Subject()
        output_finalized = Subject()

        def handle_error(e, _):
            # Forward the failure to the caller-facing stream and swallow it
            # here so the queue itself keeps running.
            log_status('FAILED')
            errors.on_next(e)
            return empty()

        def set_output_finalized():
            output_finalized.on_next(True)

        # The queued unit of work: runs the observable with retry/backoff,
        # wraps each value for the output subject, and always terminates with
        # a 'completed' marker item.
        work = of(True).pipe(
            do_action(lambda _: log_status('STARTED')),
            take_until(output_finalized),
            flat_map(lambda _: observable.pipe(
                map(lambda value: of({
                    'value': value,
                    'output': output
                })),
                retry_with_backoff(
                    retries=retries,
                    description='{}.enqueue(group={}, description={})'.format(
                        self, group, description)),
                catch(handler=handle_error), take_until(output_finalized),
                take_until_disposed())),
            concat(of(of({
                'completed': True,
                'output': output
            }))), finally_action(lambda: log_status('COMPLETED')))

        self._queue.on_next({'work': work, 'group': group})

        # Unwrap values until the 'completed' marker; tearing down the
        # subscription finalizes the output so pending work is cancelled.
        return output.pipe(observe_on(self.request_scheduler),
                           throw_when(errors),
                           take_while(lambda r: not r.get('completed')),
                           map(lambda r: r.get('value')),
                           finally_action(set_output_finalized))
示例#30
0
 def __init__(self, obs_stream, symbol):
     """Subscribe this stock to *obs_stream* quote updates for *symbol*.

     The subscription handle is kept in ``self.stockSubscription`` so it can
     be disposed later; quote handling runs on the shared pool scheduler.
     """
     logger.debug(f'Stock.__init__({symbol})')
     self.obs_stream = obs_stream
     self.symbol = symbol
     # Last known price; empty until the first quote arrives.
     self.price = ()
     self.stockSubscription = self.obs_stream.pipe(
         op.subscribe_on(config.pool_scheduler),
         op.observe_on(config.pool_scheduler),
         op.do_action(lambda s: logger.debug(f'STK: {s}')),
     ).subscribe(
         on_next=self.handleQuote,
         on_error=lambda e: logger.debug(e),
         on_completed=lambda: logger.debug('Stock subscription completed'))
示例#31
0
 def stream_metaepoch_results(self, msg: NodeMessage, sender: Actor):
     """Run a metaepoch and stream its results, notifying *sender* with a
     METAEPOCH_END message once the stream completes."""
     self.run_metaepoch().pipe(
         ops.do_action(on_completed=lambda: self.node.send(
             sender,
             NodeMessage(NodeOperation.METAEPOCH_END, msg.id, self.
                         last_result_data),
         ))
     ).subscribe(
         # Each result only updates the local cost; per-result notifications
         # to the sender are currently disabled (see commented code).
         lambda result: self.update_cost(result)
         # lambda result: self.node.send(
         #     sender, NodeMessage(NodeOperation.NEW_RESULT, msg.id, result)
         # )
     )
示例#32
0
def of_init_feature(identifier: str) -> Mapper[Observable, Observable]:
    """Build an operator that waits for a feature's initialization action.

    Args:
        identifier: the identifier of the feature

    Returns:
        Operator function that emits ``identifier`` exactly once, as soon as
        the feature's init action is observed, logging it at debug level.
    """
    return pipe(
        op.filter(is_type(INIT_ACTION)),
        op.filter(has_payload(identifier)),
        op.take(1),
        op.map(lambda _: identifier),
        op.do_action(logger.debug),
    )
示例#33
0
    def test_amb_regular_should_dispose_loser(self):
        """amb must dispose the losing observable: o1 emits first (t=210), so
        o2's do_action side effect must never fire."""
        scheduler = TestScheduler()
        msgs1 = [on_next(150, 1), on_next(210, 2), on_completed(240)]
        msgs2 = [on_next(150, 1), on_next(220, 3), on_completed(250)]
        source_not_disposed = [False]
        o1 = scheduler.create_hot_observable(msgs1)

        def action():
            # Would record that the loser was still alive.
            source_not_disposed[0] = True

        o2 = scheduler.create_hot_observable(msgs2).pipe(
                ops.do_action(on_next=action),
                )

        def create():
            return o1.pipe(ops.amb(o2))
        results = scheduler.start(create)

        assert results.messages == [on_next(210, 2), on_completed(240)]
        assert(not source_not_disposed[0])
示例#34
0
    def test_merge_error_causes_disposal(self):
        """merge must dispose all sources when one errors: o1 errors at t=210,
        before o2's t=220 emission, so o2's do_action must never fire."""
        ex = 'ex'
        scheduler = TestScheduler()
        msgs1 = [on_next(150, 1), on_error(210, ex)]
        msgs2 = [on_next(150, 1), on_next(220, 1), on_completed(250)]
        source_not_disposed = [False]
        o1 = scheduler.create_hot_observable(msgs1)

        def action():
            # Would record that the sibling source was still alive.
            source_not_disposed[0] = True

        o2 = scheduler.create_hot_observable(msgs2).pipe(ops.do_action(on_next=action))

        def create():
            return rx.merge(o1, o2)

        results = scheduler.start(create)

        assert results.messages == [on_error(210, ex)]
        assert(not source_not_disposed[0])
示例#35
0
    def test_amb_loser_throws(self):
        """amb must dispose the loser even if it would later error: o2 wins at
        t=210, so o1's later error must not surface and its do_action must
        never fire."""
        ex = 'ex'
        scheduler = TestScheduler()
        msgs1 = [on_next(150, 1), on_next(220, 2), on_error(230, ex)]
        msgs2 = [on_next(150, 1), on_next(210, 3), on_completed(250)]
        source_not_disposed = [False]

        def action():
            # Would record that the loser was still alive.
            source_not_disposed[0] = True
        o1 = scheduler.create_hot_observable(msgs1).pipe(
                ops.do_action(on_next=action),
                )

        o2 = scheduler.create_hot_observable(msgs2)

        def create():
            return o1.pipe(ops.amb(o2))

        results = scheduler.start(create)
        assert results.messages == [on_next(210, 3), on_completed(250)]
        assert(not source_not_disposed[0])
示例#36
0
 def create():
     # Builds the observable under test: only records completion in the
     # closed-over ``completed`` one-element list.
     def on_completed():
         completed[0] = True
     return xs.pipe(_.do_action(on_completed=on_completed))
示例#37
0
import datetime

import rx
import rx.operators as ops

"""
Delay the emission of elements to the specified datetime.
"""

now = datetime.datetime.utcnow()
dt = datetime.timedelta(seconds=3.0)
duetime = now + dt

print('{} ->  now\n'
      '{} ->  start of emission in {}s'.format(now, duetime, dt.total_seconds()))

# Hot marble source: starts emitting at ``duetime``; each dash is 0.2s.
hot = rx.hot('10--11--12--13--(14,|)', timespan=0.2, duetime=duetime)

# Print each emitted item, blocking until the stream completes.
source = hot.pipe(ops.do_action(print))
source.run()
import rx
from rx import operators as ops

"""
Specify the error to be raised in place of the # symbol.
"""

err = ValueError("I don't like 5!")

# Two marble sources merged; src1 raises ``err`` where '#' appears.
src0 = rx.from_marbles('12-----4-----67--|', timespan=0.2)
src1 = rx.from_marbles('----3----5-#      ', timespan=0.2, error=err)

# Print each merged item, blocking until completion or the error.
source = rx.merge(src0, src1).pipe(ops.do_action(print))
source.run()
import rx
from rx import operators as ops

# Cold inner observables the outer stream maps onto.
a = rx.cold(' ---a0---a1----------------a2-|    ')
b = rx.cold('    ---b1---b2---|                 ')
c = rx.cold('             ---c1---c2---|        ')
d = rx.cold('                   -----d1---d2---|')
# Outer stream: each letter selects one of the observables above.
e1 = rx.cold('a--b--------c-----d-------|       ')

observableLookup = {"a": a, "b": b, "c": c, "d": d}

# flat_map subscribes to each looked-up inner observable as its key arrives,
# interleaving their emissions; do_action prints every item.
source = e1.pipe(
    ops.flat_map(lambda value: observableLookup[value]),
    ops.do_action(lambda v: print(v)),
    )

source.run()
示例#40
0
 def create():
     # Builds the observable under test: counts calls in i[0] and keeps a
     # running negative total in sum[0]; the return value is ignored.
     def action(x):
         i[0] += 1
         sum[0] -= x
         return sum[0]
     return xs.pipe(_.do_action(action))
示例#41
0
 def create():
     # Builds the observable under test: counts calls in the closed-over
     # one-element list i; do_action ignores the returned count.
     def action(x):
         i[0] += 1
         return i[0]
     return xs.pipe(_.do_action(action))