Example #1
def get_authorization_stream(context: Context) -> Observable:
    return return_value(context).pipe(
        op.flat_map(call_async(validate_redirect_uri)),
        op.flat_map(call_async(validate_response_type)),
        op.flat_map(call_async(validate_scope)),
        op.flat_map(select_flow),
    )
Example #2
def rmux_client(sources):
    response = sources.tcp_client.response.pipe(ops.share())
    tcp_connect = rx.just(tcp_client.Connect(
        host='127.0.0.1', port='8080'
    ))

    create_observable = response.pipe(
        ops.flat_map(lambda connection: 
            rx.just({'what': 'subscribe', 'id':42, 'name': '1234'}).pipe(
                ops.map(lambda i: json.dumps(i)),
                frame,
                ops.map(lambda j: tcp_client.Write(id=connection.id, data=j.encode()))
        ))
    )

    console = response.pipe(
        ops.flat_map(lambda connection: connection.observable.pipe(
            ops.map(lambda i: i.data.decode('utf-8')),
            unframe,
            ops.map(lambda i: json.loads(i)),
            ops.group_by(lambda i: i['id']),
            ops.flat_map(lambda subscription: subscription.pipe(
                ops.map(notification),
                ops.dematerialize(),
            ))
        )),
        ops.map(lambda i: "item: {}\n".format(i))
    )

    tcp_sink = rx.merge(tcp_connect, create_observable)

    return Sink(
        tcp_client=tcp_client.Sink(request=tcp_sink),
        stdout=stdout.Sink(data=console),
    )
Example #3
    def test_walk_file_and_dir(self):

        expected_files = [
            os.path.join('dfoo', 'foo'),
            os.path.join('dbar', 'bar'),
            os.path.join('dbiz', 'biz'),
        ]
        expected_dirs = ['dfoo', 'dbar', 'dbiz']
        actual_files = []
        create_file_tree(self.wordkir, expected_dirs, expected_files)
        source = walk.make_driver().call(
            walk.Sink(request=rx.just(
                walk.Walk(top=self.wordkir, id='test', recursive=True))))

        class TestObserver(Observer):
            def on_next(self, i):
                actual_files.append(i)

            def on_completed(self):
                return

            def on_error(self, e):
                raise Exception(e)

        source.response.pipe(
            ops.filter(lambda i: type(i) is walk.WalkResponse),
            ops.flat_map(lambda i: i.content.directories),
            ops.flat_map(lambda i: i.files),
        ).subscribe(TestObserver())

        for f in expected_files:
            self.assertIn(os.path.join(self.wordkir, f), actual_files)
Example #4
def download_directory(file: dict,
                       destination: str,
                       matching: str = None,
                       delete_after_download: bool = False) -> Observable:
    destination = os.path.abspath(destination)

    def get_destination(f):
        relative_path = f['path'][len(file['path']):]
        next_destination = '{}{}{}'.format(
            destination, '' if destination[-1] == '/' else '/', relative_path)
        return next_destination

    def initial_stats(files):
        return {
            'progress': 0,
            'total_files': len(files),
            'total_bytes': sum([int(f.get('size', 0)) for f in files]),
            'downloaded_files': 0,
            'downloaded_bytes': 0
        }

    def update_stats(stats, download):
        downloaded_files = stats['downloaded_files'] + (
            0 if download['progress'] < 1 else 1)
        downloaded_bytes = stats['downloaded_bytes'] + download[
            'downloaded_bytes']
        progress = downloaded_bytes / stats['total_bytes']
        return {
            'progress': progress,
            'total_files': stats['total_files'],
            'total_bytes': stats['total_bytes'],
            'downloaded_files': downloaded_files,
            'downloaded_bytes': downloaded_bytes
        }

    def is_file_matching(f):
        return not matching or fnmatch.fnmatch(f['path'], matching)

    def delete_downloaded(downloaded):
        if delete_after_download:
            return delete_file(downloaded['file']).pipe(
                map(lambda _: downloaded))
        else:
            return of(downloaded)

    def filter_files(files):
        return [f for f in files if not is_folder(f) and is_file_matching(f)]

    if is_folder(file):
        return list_folder_recursively(file).pipe(
            map(lambda files: filter_files(files)),
            flat_map(lambda files: of(True).pipe(
                flat_map(lambda _: of(*files).pipe(
                    flat_map(lambda f: download_file(f, get_destination(f))),
                    flat_map(delete_downloaded))),
                scan(update_stats, initial_stats(files)))))
    else:
        return download_file(file, destination)
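The progress handling above hinges on scan: every per-file download event is folded into a running stats dict. A reduced, self-contained sketch of that accumulation (hypothetical numbers, RxPY 3 assumed; download_file and the other helpers above are not needed here):

import rx
from rx import operators as ops

downloads = [                                  # hypothetical per-file progress events
    {'progress': 1, 'downloaded_bytes': 100},
    {'progress': 1, 'downloaded_bytes': 300},
]
initial = {'total_bytes': 400, 'downloaded_files': 0, 'downloaded_bytes': 0, 'progress': 0}

def update_stats(stats, download):
    downloaded_bytes = stats['downloaded_bytes'] + download['downloaded_bytes']
    return {
        'total_bytes': stats['total_bytes'],
        'downloaded_files': stats['downloaded_files'] + (1 if download['progress'] >= 1 else 0),
        'downloaded_bytes': downloaded_bytes,
        'progress': downloaded_bytes / stats['total_bytes'],
    }

rx.from_(downloads).pipe(
    ops.scan(update_stats, initial),           # emit the running aggregate after every event
).subscribe(print)
# {'total_bytes': 400, 'downloaded_files': 1, 'downloaded_bytes': 100, 'progress': 0.25}
# {'total_bytes': 400, 'downloaded_files': 2, 'downloaded_bytes': 400, 'progress': 1.0}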
Example #5
def audio_encoder(sources):
    # Parse configuration
    parser = create_arg_parser()

    read_request, read_response = sources.argv.argv.pipe(
        ops.skip(1),
        argparse.parse(parser),
        ops.filter(lambda i: i.key == 'config'),
        ops.map(lambda i: file.Read(id='config', path=i.value)),
        file.read(sources.file.response),
    )
    config = read_response.pipe(
        ops.filter(lambda i: i.id == "config"),
        ops.flat_map(lambda i: i.data),
        parse_config,
    )

    # Transcode request handling
    encode_init = config.pipe(
        ops.map(
            lambda i: encoder.Initialize(storage_path=i.encode.storage_path)))

    encode_request = sources.httpd.route.pipe(
        ops.filter(lambda i: i.id == 'flac_transcode'),
        ops.flat_map(lambda i: i.request),
        ops.map(lambda i: encoder.EncodeMp3(
            id=i.context, data=i.data, key=i.match_info['key'])),
    )
    encoder_request = rx.merge(encode_init, encode_request)

    # http server
    http_init = config.pipe(
        ops.flat_map(lambda i: rx.from_([
            httpd.Initialize(request_max_size=0),
            httpd.AddRoute(
                methods=['POST'],
                path=r'/api/transcode/v1/flac/{key:[a-zA-Z0-9-\._]*}',
                id='flac_transcode',
            ),
            httpd.StartServer(host=i.server.http.host, port=i.server.http.port
                              ),
        ])))

    http_response = sources.encoder.response.pipe(
        ops.map(lambda i: httpd.Response(
            data='ok'.encode('utf-8'),
            context=i.id,
        )))
    http = rx.merge(http_init, http_response)

    # merge sink requests
    file_requests = read_request

    return Sink(
        encoder=encoder.Sink(request=encoder_request),
        file=file.Sink(request=file_requests),
        httpd=httpd.Sink(control=http),
    )
Example #6
def select_flow(context: Context) -> Observable:
    response_type = next(iter(context.oauth2_request.response_type))
    if response_type == 'code':
        return return_value(context).pipe(
            op.flat_map(call_async(authorization_code_grant)))
    if response_type == 'token':
        return return_value(context).pipe(
            op.flat_map(call_async(implicit_grant)))
    raise InvalidRequest('Invalid response_type parameter')
Example #7
 def __init__(self, protocol):
     # See comment in players.py.
     self._aio_scheduler = QtScheduler(QtCore)
     messages = protocol.register("game_info")
     self.new = messages.pipe(
         ops.flat_map(self._split_game_info),
         ops.observe_on(self._aio_scheduler),
         ops.flat_map(self._process_game),
         ops.observe_on(ImmediateScheduler()),
         ops.filter(lambda x: x is not None),
     )
Example #8
    def _export_geometries():
        def aggregate_progress(progresses, count):
            p = _sum_dicts(progresses.values(), excluded_keys=['geometry'])
            exported = round(100 * p['exported'] / count)
            downloaded = round(100 * p['downloaded'] / count)
            downloaded_bytes = format_bytes(p['downloaded_bytes'])
            processed = round(100 * p['processed'] / count)
            return progress(
                default_message='Exported {}%, Downloaded {}% ({}), Processed {}%'.format(
                    exported, downloaded, downloaded_bytes, processed
                ),
                message_key='tasks.retrieve.time_series_to_sepal.progress',
                exported=exported,
                downloaded=downloaded,
                downloaded_bytes=downloaded_bytes,
                processed=processed
            )

        features_collection = _to_features_collection(region)

        def export_geometry(geometry, i, geometry_count):
            geometry_description = str(i + 1).zfill(len(str(geometry_count)))
            return defer(
                lambda _: _export_geometry(
                    geometry,
                    geometry_description=geometry_description
                )
            )

        return concat(
            progress(
                default_message='Tiling AOI...',
                message_key='tasks.retrieve.time_series_to_sepal.tiling'
            ),
            _extract_feature_indexes(features_collection).pipe(
                flat_map(
                    lambda feature_indexes: _to_geometries(features_collection, feature_indexes).pipe(
                        flat_map(
                            lambda geometries: concat(
                                *[
                                    export_geometry(geometry, i, len(feature_indexes))
                                    for i, geometry in enumerate(geometries)
                                ]
                            )
                        ),
                        scan(lambda acc, p: {**acc, p['geometry']: p}, {}),
                        flat_map(lambda progresses: aggregate_progress(
                            progresses,
                            count=len(feature_indexes) * len(year_ranges)
                        ))
                    )
                )
            )
        )
Example #9
def getTestrunDependencies(testrun):
    return rx.of(testrun).pipe(
        ops.flat_map(
            lambda testrun: getTestrunProperties(testrun)
        ),
        ops.flat_map(
            lambda testrun: getTestcaseOwners(testrun)
        ),
        ops.flat_map(
            lambda testrun: getTestrunMatrix(testrun)
        ),
    )
Example #10
    def monitor():
        def is_running(state):
            return state in [Task.State.UNSUBMITTED, Task.State.READY, Task.State.RUNNING]

        return interval(_MONITORING_FREQUENCY).pipe(
            flat_map(lambda _: execute(
                action=load_status,
                description='monitor task ' + str(task))
            ),
            flat_map(extract_state),
            distinct_until_changed(),
            take_while(is_running, inclusive=True)
        )
Example #11
def get_authorization_stream(request: OIDCRequest) -> Observable:
    response_params: Dict[str, Any] = {}

    # yapf: disable
    return just(request).pipe(
        op.flat_map(call_async(validate_redirect_uri)),
        op.flat_map(call_async(validate_response_type)),
        op.flat_map(call_async(validate_scope)),
        op.flat_map(select_flows),
        op.merge_all(),
        op.do_action(lambda x: response_params.update(asdict(x))),
        op.last(),
        op.map(lambda x: AuthorizationResponse(**response_params)))
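The do_action/last/map tail above is what turns many partial flow results into a single AuthorizationResponse. A minimal sketch of just that accumulation pattern (hypothetical stand-in dicts, RxPY 3 assumed):

import rx
from rx import operators as ops

response_params = {}

rx.of({'code': 'abc'}, {'state': 'xyz'}).pipe(                 # stand-ins for the merged flow results
    ops.do_action(lambda part: response_params.update(part)),  # side effect: collect fields
    ops.last(),                                                # wait for the final partial result
    ops.map(lambda _: dict(response_params)),                  # emit the merged parameters once
).subscribe(print)
# {'code': 'abc', 'state': 'xyz'}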
Example #12
def main():
    origin = rx.subjects.Subject()

    origin.pipe(
        ops.flat_map(rx.from_),
        ops.map(lambda x: bytes([x])),
        ops.filter(RemoveComments().next_character),
        ops.flat_map(Tokenize().next_character),
        ops.filter(bool),
        ops.flat_map(Parse().next_token),
    ).subscribe_(on_next=pprint.pprint, on_error=print)

    read_into("parse.lisp", origin)
Example #13
def main():
    loop = asyncio.get_event_loop()
    io_scheduler = AsyncIOThreadSafeScheduler(loop=loop)
    scheduler = ThreadPoolScheduler(multiprocessing.cpu_count())

    semaphore = Subject()

    semaphore_stream = semaphore.pipe(
        ops.flat_map(lambda _: rx.of(True).pipe(
            ops.delay(ARGS.block_time, scheduler=scheduler),
            ops.start_with(False))), ops.start_with(True))

    video_stream_observable = rx.using(
        lambda: VideoStreamDisposable(),
        lambda d: rx.from_iterable(video_stream_iterable(d.cap)))

    gated_video_stream = video_stream_observable.pipe(
        ops.subscribe_on(scheduler),
        ops.sample(1 / ARGS.fps),  # sample frames based on fps
        ops.combine_latest(semaphore_stream),
        ops.filter(lambda tup: tup[1]),  # proceed only if semaphore allows
        ops.map(lambda tup: tup[0])  # take only frame
    )

    disposable = gated_video_stream.pipe(
        ops.filter(has_face),  # filter frames without faces
        ops.map(lambda frame: Image.fromarray(
            cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))),  # map frame to PIL image
        ops.map(lambda img: img.resize(
            (640, 360))),  # resize image (inference will be faster)
        ops.observe_on(io_scheduler),
        ops.map(lambda img: ImageFacesPair(img, analyse_frame(img))
                ),  # analyse frame for faces
        ops.filter(lambda img_faces_pair: any([
            face.top_prediction.confidence > ARGS.threshold
            for face in img_faces_pair.faces
        ])),  # proceed only if there is a known face in the frame
        ops.throttle_first(1),
        ops.flat_map(unlock_request),  # unlock the door
        ops.do_action(
            on_next=lambda _: semaphore.on_next(True)
        )  # trigger semaphore which will block stream for "block-seconds" seconds (doors are unlocked for that long after unlock request)
    ).subscribe(on_error=lambda e: logger.exception(e))

    try:
        loop.run_forever()
    except Exception as e:
        logger.exception(e)
        logger.info("Smart lock face recognition engine shutdown")
        disposable.dispose()
Example #14
        def create():
            def mapper(x_yy):
                x, yy = x_yy
                return yy.pipe(ops.map(lambda y: '{}{}'.format(x.value, y.value)))

            return xs.pipe(
                ops.group_join(
                    ys,
                    lambda x: rx.timer(x.interval),
                    lambda y: rx.timer(y.interval).pipe(
                        ops.flat_map(rx.throw(ex) if y.value == "tin" else rx.empty())),
                    ),
                ops.flat_map(mapper),
                )
Example #15
 def _set_band_names():
     band_names_stream = of(band_names) if band_names else get_band_names(
         credentials, image)
     return concat(
         progress(
             default_message='Setting band names...',
             message_key='tasks.retrieve.image_to_sepal.setting_band_names'
         ),
         band_names_stream.pipe(
             flat_map(lambda names: set_band_names(
                 band_names=names,
                 files=[destination_path + '/*.tif', destination_path + '/*.vrt'])),
             flat_map(lambda _: empty())
         )
     )
Example #16
    def configure_timed_read(self):
        interval = self.config.getfloat("fp50", "interval")

        if interval > 0:
            logger.info("Configuring timed read")
            # enabled
            rx.interval(interval, scheduler=NewThreadScheduler()).pipe(
                operators.flat_map(lambda x: self.control.get_power()),
                operators.map(lambda x: self.upload_power(x)),
                operators.delay(self.config.getfloat("fp50", "query_delay")),
                operators.flat_map(
                    lambda x: self.control.get_internal_temperature()),
                operators.map(lambda x: self.upload_internal_temperature(x)),
                operators.catch(error_handler)).subscribe()
Example #17
def list_folder_recursively(folder: dict) -> Observable:
    def recurse(file):
        if is_folder(file):
            return concat(
                of([file]),
                list_folder_recursively(file),
            )
        else:
            return of([file])

    return list_folder(folder).pipe(
        flat_map(lambda files: from_list(files)),
        flat_map(recurse),
        reduce(lambda acc, files: acc + files, []),
    )
Example #18
def get_products(shop: Dict[str, str], search_term: str,
                 options) -> Observable:
    domain = re.findall(r"\.(.+)\.com", shop['url'])[0]
    print(f"Launching {domain}")

    browser = launch_browser(f"{shop['url']}{search_term}", options, shop)

    base_obs = rx.of(browser).pipe(
        ops.do_action(
            lambda el: print(f"Getting products prices from {domain}")),
        ops.flat_map(lambda browser: rx.from_(
            browser.find_elements_by_xpath(shop["xpath"]["parent"]))),
        ops.filter(lambda el: el.is_displayed()),
        ops.map(lambda el: (
            get_property(el, shop["xpath"]["product_name"]),
            get_property(el, shop["xpath"]["price"]),
        )),
        ops.filter(lambda el: el[0] and el[1]),
        ops.map(lambda el: {
            "name": el[0],
            "price": el[1]
        }),
        ops.map(lambda product: transform_price(product, shop["priceRegexp"])),
        ops.finally_action(lambda: browser.close()),
    )

    return base_obs
Example #19
    def __init__(self,
                 concurrency_per_group,
                 delay_seconds=0,
                 description=None):
        self._queue = Subject()
        self._description = description
        self.request_scheduler = ThreadPoolScheduler(concurrency_per_group)
        producer_scheduler = ThreadPoolScheduler(concurrency_per_group)

        def on_next(result):
            output = result['output']
            output.on_next({
                'value': result.get('value'),
                'completed': result.get('completed')
            })

        self._subscription = self._queue.pipe(
            observe_on(producer_scheduler), group_by(lambda r: r['group']),
            flat_map(lambda concurrency_group: concurrency_group.pipe(
                map(lambda r: r['work']),
                delay(delay_seconds),
                merge(max_concurrent=concurrency_per_group),
                merge_all(),
            )), take_until_disposed()).subscribe(
                on_next=on_next,
                on_error=lambda error: logging.exception(
                    'Error in {} request stream'.format(self)),
                scheduler=producer_scheduler)
Example #20
 def start(self):
     logging.debug("in MyObservable.start")
     rx.interval(1.0).pipe(
         ops.subscribe_on(self.scheduler),
         ops.flat_map(lambda i: self.fetch_subjects())).subscribe(
             on_next=lambda v: self.subject.on_next(v),
             on_error=lambda e: print_error("Error in interval loop", e))
Example #21
    def __init__(
        self, influxdb_client,
        write_options: WriteOptions = WriteOptions()) -> None:
        self._influxdb_client = influxdb_client
        self._write_service = WriteService(influxdb_client.api_client)
        self._write_options = write_options
        if self._write_options.write_type is WriteType.batching:
            # Define a Subject that listens for incoming data and produces writes into InfluxDB
            self._subject = Subject()

            # Define a scheduler that is used for processing incoming data - default singleton
            observable = self._subject.pipe(
                ops.observe_on(self._write_options.write_scheduler))
            self._disposable = observable \
                .pipe(  # Split incoming data to windows by batch_size or flush_interval
                    ops.window_with_time_or_count(count=write_options.batch_size,
                                                  timespan=timedelta(milliseconds=write_options.flush_interval)),
                    # Map incoming batch window in groups defined by 'organization', 'bucket' and 'precision'
                    ops.flat_map(lambda v: _window_to_group(v)),
                    # Write data into InfluxDB (with the possibility to retry if it fails)
                    ops.map(mapper=lambda batch: self._retryable(data=batch, delay=self._jitter_delay())),  #
                    ops.merge_all()) \
                .subscribe(self._on_next, self._on_error, self._on_complete)
        else:
            self._subject = None
            self._disposable = None
Example #22
def demo_flatmap2():
    '''map each integer to a range and flatten the results'''
    a = rx.of(1, 2, 3, 4)

    a.pipe(ops.flat_map(lambda x: range(0, x))
           #ops.flat_map(range)
           ).subscribe(print)
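For reference, this is what the pipeline above emits when subscribed synchronously (RxPY 3 assumed): flat_map accepts the plain range iterable and flattens each one in source order.

import rx
from rx import operators as ops

collected = []
rx.of(1, 2, 3, 4).pipe(
    ops.flat_map(lambda x: range(0, x)),   # each integer expands to 0..x-1
).subscribe(collected.append)

print(collected)   # [0, 0, 1, 0, 1, 2, 0, 1, 2, 3]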
Example #23
def word_counter(file_name):
    # count words using `group_by()`
    # tuple the word with the count
    return words_from_file(file_name).pipe(
        ops.group_by(lambda word: word),
        ops.flat_map(lambda grp: grp.pipe(ops.count(),
                                          ops.map(lambda ct: (grp.key, ct)))))
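words_from_file is defined elsewhere in that project; a self-contained sketch of the same group_by + flat_map + count pattern over an in-memory word list (hypothetical words, RxPY 3 assumed):

import rx
from rx import operators as ops

words = ['tin', 'foo', 'tin', 'bar', 'foo', 'tin']    # hypothetical input

rx.from_(words).pipe(
    ops.group_by(lambda word: word),                  # one inner observable per distinct word
    ops.flat_map(lambda grp: grp.pipe(
        ops.count(),                                  # count the items in this group
        ops.map(lambda ct: (grp.key, ct)))),          # pair the word with its count
).subscribe(print)
# ('tin', 3), ('foo', 2), ('bar', 1) -- one tuple per distinct word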
Example #24
def run():
    parser = argparse.ArgumentParser(
        prog='entityfactssheetsharvester',
        description=
        'Retrieves EntityFacts sheets from a given CSV with GND identifiers and returns them as line-delimited JSON records.',
        epilog=
        'example: entityfactssheetsharvester < [INPUT CSV FILE WITH GND IDENTIFIERS] > [PATH TO THE OUTPUT LINE-DELIMITED JSON RECORDS FILE]',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    args = parser.parse_args()

    if hasattr(args, 'help') and args.help:
        parser.print_usage(sys.stderr)
        exit(-1)

    source = create(push_input)

    all_in_one = source.pipe(
        op.map(lambda line: get_gnd_identifier(line)),
        op.map(lambda gnd_identifier: retrieve_entityfacts_sheet_obs(
            gnd_identifier)),
        op.map(lambda ef_sheet_tuple_obs: format_entityfacts_sheet_obs(
            ef_sheet_tuple_obs)),
        op.map(lambda flat_ef_sheet_json_tuple_obs:
               write_entityfacts_sheet_obs(flat_ef_sheet_json_tuple_obs)),
        op.flat_map(lambda x: x))

    all_in_one.subscribe(on_next=lambda gnd_identifier: eprint(
        "PROCESSED GND identifier '{0}': {1}".format(gnd_identifier,
                                                     current_thread().name)),
                         on_error=lambda e: eprint(e),
                         on_completed=lambda: eprint("PROCESS done!"),
                         scheduler=THREAD_POOL_SCHEDULER)
Example #25
        def create():
            def mapper(x_yy):
                x, yy = x_yy
                return yy.pipe(
                    ops.map(lambda y: '{}{}'.format(x.value, y.value)))

            return xs.pipe(
                ops.group_join(
                    ys,
                    lambda x: rx.timer(x.interval),
                    lambda y: rx.timer(y.interval).pipe(
                        ops.flat_map(
                            rx.throw(ex) if y.value == "tin" else rx.empty())),
                ),
                ops.flat_map(mapper),
            )
Example #26
def cartesian_product():
    """
    this method takes a stream of lists (--[a,b]--[1,2]-->),
    converts each list to a stream (--a--b--> --1--2-->),
    converts that into a stream containing one list of streams (--[--a--b--> , --1--2-->]-->),
    and converts the result into a cartesian product stream (--[a,1]--[a,2]--[b,1]--[b,2]-->)
    """
    def cartesian(sources):
        """
        this method takes a list of streams (--[--a--b--> , --1--2-->]-->)
        and converts it into a cartesian product stream (--[a,1]--[a,2]--[b,1]--[b,2]-->)
        """
        if len(sources) == 0:
            return empty()

        result = sources[0].pipe(ops.map(lambda s: [s]))

        def two_streams_product(stream2, stream1):
            product = stream1.pipe(
                ops.flat_map(
                    lambda s1: stream2.pipe(ops.map(lambda s2: s1 + [s2]))))
            return product

        for i in range(1, len(sources)):
            result = two_streams_product(sources[i], result)

        return result

    return pipe(ops.map(lambda _list: from_list(_list)), ops.to_list(),
                ops.flat_map(lambda i: cartesian(i)))
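To make the nested flat_map/map in two_streams_product concrete, here is a standalone two-stream product sketch (RxPY 3 assumed; of/from_list from the snippet above are replaced by rx.of):

import rx
from rx import operators as ops

letters = rx.of('a', 'b')
numbers = rx.of(1, 2)

letters.pipe(
    ops.map(lambda letter: [letter]),     # seed each accumulator as a one-element list
    ops.flat_map(lambda acc: numbers.pipe(
        ops.map(lambda n: acc + [n]))),   # re-run the second stream for every accumulator
).subscribe(print)
# ['a', 1], ['a', 2], ['b', 1], ['b', 2]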
Example #27
def getCategoryAndDependencies(selectedJobId):
    return rx.of(selectedJobId).pipe(
        ops.flat_map(
            lambda selectedJobId: getCategory(selectedJobId)
        ),
        ops.flat_map(
            lambda category: getTestruns(category)
        ),
        ops.flat_map(
            lambda testruns: rx.from_(testruns)
        ),
        ops.map(
            lambda testrun: getTestrunDependencies(testrun)
        ),
        ops.merge(max_concurrent=1),
    )
Example #28
 def factory():
     def projection(x):
         invoked[0] += 1
         if invoked[0] == 3:
             raise Exception(ex)
         return x
     return xs.pipe(ops.flat_map(projection))
Example #29
 def logs(self, number_of_lines: int) -> Observable[str]:
     return rx.from_callable(lambda: self._client.post_json({'uri': self._uri, 'numberOfLines': number_of_lines},
                                                            url_postfix="/skill/logs"), self._scheduler) \
         .pipe(
         ops.map(lambda r: json.loads(r.content)),
         ops.flat_map(lambda r: rx.from_iterable(r['logLines']))
     )
Example #30
def demo_flatmap3():
    '''flatten the lists emitted by the source'''
    a = rx.of([1, 2], [3, 4])

    a.pipe(ops.flat_map(lambda x: x)
           #ops.flat_map(range)
           ).subscribe(print)
Example #31
def execute_task(task):
    def start():
        task.start()
        return task.status()['id']

    def load_status():
        return task.status()

    def extract_state(status):
        state = status['state']
        if state == 'FAILED':
            return throw(ee.EEException(status.get('error_message')))
        else:
            return of(state)

    def monitor():
        def is_running(state):
            return state in [Task.State.UNSUBMITTED, Task.State.READY, Task.State.RUNNING]

        return interval(_MONITORING_FREQUENCY).pipe(
            flat_map(lambda _: execute(
                action=load_status,
                description='monitor task ' + str(task))
            ),
            flat_map(extract_state),
            distinct_until_changed(),
            take_while(is_running, inclusive=True)
        )

    return execute(start, description='start task ' + str(task)).pipe(
        flat_map(lambda _: monitor())
    )
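A self-contained sketch of the same polling shape with a stubbed state sequence (hypothetical states, RxPY 3 assumed), showing why take_while is called with inclusive=True: the first terminal state is still delivered before the stream completes.

import rx
from rx import operators as ops

states = ['READY', 'RUNNING', 'RUNNING', 'COMPLETED', 'NEVER_SEEN']   # stubbed poll results

def is_running(state):
    return state in ('READY', 'RUNNING')

rx.from_(states).pipe(                            # stands in for interval(...) + flat_map(load_status)
    ops.distinct_until_changed(),                 # drop repeated identical states
    ops.take_while(is_running, inclusive=True),   # emit through the first terminal state, then stop
).subscribe(print)
# READY
# RUNNING
# COMPLETED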
Example #32
 def create():
     pfh = self.test_obs.pipe(pairwise_buffer, ops.map(lambda x: x[0]))
     w = pfh.pipe(
         ops.window(
             self.test_obs.pipe(pairwise_buffer,
                                ops.filter(lambda x: x[0] != x[1]))))
     return w.pipe(ops.flat_map(lambda x: x.pipe(ops.to_list())))
Example #33
def _buffer_with_time(timespan: typing.RelativeTime, timeshift: typing.RelativeTime = None,
                      scheduler: typing.Scheduler = None) -> Callable[[Observable], Observable]:
    if not timeshift:
        timeshift = timespan

    return pipe(
        ops.window_with_time(timespan, timeshift, scheduler),
        ops.flat_map(lambda x: x.pipe(ops.to_iterable()))
    )
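This is essentially how the shipped buffer_with_time operator is composed from window_with_time plus flat_map/to_iterable. A rough usage sketch of the resulting behaviour (RxPY 3 assumed; the sleep only keeps the main thread alive for the timer-based scheduler):

import time
import rx
from rx import operators as ops

rx.interval(0.1).pipe(
    ops.buffer_with_time(1.0),   # collect roughly one second of ticks into each list
    ops.take(3),                 # prints three lists such as [0, 1, ..., 8]
).subscribe(print)

time.sleep(3.5)                  # keep the main thread alive until the buffers are printed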
Example #34
        def create():
            def mapper(x_yy):
                x, yy = x_yy
                return yy.pipe(ops.map(lambda y: '{}{}'.format(x.value, y.value)))

            return xs.pipe(
                ops.group_join(
                    ys,
                    lambda x: new_timer(xsd, x.interval, scheduler),
                    lambda y: new_timer(ysd, y.interval, scheduler),
                    ),
                ops.flat_map(mapper),
                )
Example #35
        def create():
            def mapper(xy):
                x, y = xy
                return "{}{}".format(x.value, y.value)

            return xs.pipe(
                ops.join(
                    ys,
                    lambda x: rx.timer(x.interval),
                    lambda y: rx.timer(y.interval).pipe(ops.flat_map(rx.throw(ex) if y.value == "tin" else rx.empty())),
                    ),
                ops.map(mapper),
                )
Example #36
        def create():
            def mapper(x_yy):
                x, yy = x_yy
                return yy.pipe(ops.map(lambda y: '{}{}'.format(x.value, y.value)))

            return xs.pipe(
                ops.group_join(
                    ys,
                    lambda x: rx.timer(x.interval).pipe(ops.filter(lambda _: False)),
                    lambda y: rx.timer(y.interval).pipe(ops.filter(lambda _: False)),
                    ),
                ops.flat_map(mapper),
                )
Example #37
    def buffer_with_count(source: Observable) -> Observable:
        nonlocal skip

        if skip is None:
            skip = count

        def mapper(value):
            return value.pipe(ops.to_iterable(), ops.map(list))

        def predicate(value):
            return len(value) > 0

        return source.pipe(ops.window_with_count(count, skip), ops.flat_map(mapper), ops.filter(predicate))
Example #38
    def test_flat_map_then_error_error(self):
        ex = 'ex'
        scheduler = TestScheduler()
        xs = scheduler.create_cold_observable(
                on_next(100, 4), on_next(200, 2), on_next(300, 3),
                on_next(400, 1), on_error(500, ex))
        ys = scheduler.create_cold_observable(
                on_next(50, "foo"), on_next(100, "bar"), on_next(150, "baz"),
                on_next(200, "qux"), on_error(250, ex))
        results = scheduler.start(lambda: xs.pipe(ops.flat_map(ys)))

        assert results.messages == [
                on_next(350, "foo"), on_next(400, "bar"), on_next(450, "baz"),
                on_next(450, "foo"), on_next(500, "qux"), on_next(500, "bar"),
                on_error(550, ex)]
        assert xs.subscriptions == [subscribe(200, 550)]
        assert ys.subscriptions == [
            subscribe(300, 550), subscribe(400, 550), subscribe(500, 550)]
Example #39
        def create():
            def right_duration_mapper(y):
                if len(y.value) >= 0:
                    raise Exception(ex)
                else:
                    return rx.empty()

            def mapper(x_yy):
                x, yy = x_yy
                return yy.pipe(ops.map(lambda y: '{}{}'.format(x.value, y.value)))

            return xs.pipe(
                ops.group_join(
                    ys,
                    lambda x: rx.timer(x.interval),
                    right_duration_mapper,
                    ),
                ops.flat_map(mapper),
                )
Example #40
    def open(self):
        print("WebSocket opened")

        # A Subject is both an observable and observer, so we can both subscribe
        # to it and also feed (on_next) it with new values
        self.subject = Subject()

        # Now we take on our magic glasses and project the stream of bytes into
        # a ...
        query = self.subject.pipe(
            # 1. stream of keycodes
            ops.map(lambda obj: obj["keycode"]),
            # 2. stream of windows (10 ints long)
            ops.window_with_count(10, 1),
            # 3. stream of booleans, True or False
            ops.flat_map(lambda win: win.pipe(ops.sequence_equal(codes))),
            # 4. stream of Trues
            ops.filter(lambda equal: equal)
        )
        # 5. we then subscribe to the Trues, and signal Konami! if we see any
        query.subscribe(lambda x: self.write_message("Konami!"))
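codes is defined elsewhere in the original tutorial; a hedged standalone sketch of the same sliding-window detection, driven by a plain Subject instead of a WebSocket (the keycodes below are the assumed Konami sequence):

import rx
from rx import operators as ops
from rx.subject import Subject

codes = [38, 38, 40, 40, 37, 39, 37, 39, 66, 65]   # up up down down left right left right B A

keys = Subject()
keys.pipe(
    ops.window_with_count(10, 1),                                   # sliding windows of 10 keycodes
    ops.flat_map(lambda win: win.pipe(ops.sequence_equal(codes))),  # compare each window to the target
    ops.filter(lambda equal: equal),                                # keep only the matches
).subscribe(lambda _: print("Konami!"))

for code in [13] + codes:   # a stray Enter followed by the full sequence
    keys.on_next(code)      # prints "Konami!" once the matching window completes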
Example #41
def _buffer(buffer_openings=None, buffer_closing_mapper=None) -> Callable[[Observable], Observable]:
    """Projects each element of an observable sequence into zero or more
    buffers.

    Args:
        buffer_openings -- Observable sequence whose elements denote the
            creation of windows.
        buffer_closing_mapper -- [optional] A function invoked to define
            the closing of each produced window. If a closing mapper
            function is specified for the first parameter, this parameter is
            ignored.

    Returns:
        A function that takes an observable source and returns an
        observable sequence of buffers.
    """

    return pipe(
        ops.window(buffer_openings, buffer_closing_mapper),
        ops.flat_map(pipe(ops.to_iterable(), ops.map(list)))
    )
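The flat_map(pipe(to_iterable, map(list))) step is what turns each emitted window back into a plain list. A minimal standalone sketch of just that collecting step (RxPY 3 assumed, with rx.of standing in for the window stream):

import rx
from rx import operators as ops
from rx import pipe

# A stream of three inner observables (the "windows"), flattened into plain lists:
rx.of(rx.of(1, 2), rx.of(3, 4), rx.of(5)).pipe(
    ops.flat_map(pipe(ops.to_iterable(), ops.map(list))),   # same collecting step as in _buffer
).subscribe(print)
# [1, 2]
# [3, 4]
# [5]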
Example #42
    def test_flat_map_then_complete_never(self):
        scheduler = TestScheduler()
        xs = scheduler.create_cold_observable(
                on_next(100, 4), on_next(200, 2), on_next(300, 3),
                on_next(400, 1), on_completed(500))
        ys = scheduler.create_cold_observable(
                on_next(50, "foo"), on_next(100, "bar"), on_next(150, "baz"),
                on_next(200, "qux"))
        results = scheduler.start(lambda: xs.pipe(ops.flat_map(ys)))

        assert results.messages == [
                on_next(350, "foo"), on_next(400, "bar"), on_next(450, "baz"),
                on_next(450, "foo"), on_next(500, "qux"), on_next(500, "bar"),
                on_next(550, "baz"), on_next(550, "foo"), on_next(600, "qux"),
                on_next(600, "bar"), on_next(650, "baz"), on_next(650, "foo"),
                on_next(700, "qux"), on_next(700, "bar"), on_next(750, "baz"),
                on_next(800, "qux")]
        assert xs.subscriptions == [subscribe(200, 700)]
        assert ys.subscriptions == [
                subscribe(300, 1000), subscribe(400, 1000),
                subscribe(500, 1000), subscribe(600, 1000)]
Example #43
 def factory():
     def projection(x):
         return rx.interval(10).pipe(ops.map_indexed(lambda a, b: x), ops.take(x))
     return xs.pipe(ops.flat_map(projection))
Example #44
def _buffer_with_time_or_count(timespan, count, scheduler=None) -> Callable[[Observable], Observable]:
    return pipe(
        ops.window_with_time_or_count(timespan, count, scheduler),
        ops.flat_map(lambda x: x.pipe(ops.to_iterable()))
    )
Example #45
import concurrent.futures
import time

import rx
from rx import operators as ops

seconds = [5, 1, 2, 4, 3]


def sleep(tm):
    time.sleep(tm)
    return tm


def output(result):
    print('%d seconds' % result)

with concurrent.futures.ProcessPoolExecutor(5) as executor:
    rx.from_(seconds).pipe(
        ops.flat_map(lambda s: executor.submit(sleep, s))
    ).subscribe(output)

# 1 seconds
# 2 seconds
# 3 seconds
# 4 seconds
# 5 seconds
Example #46
 def factory():
     return xs.pipe(ops.flat_map(lambda x: x))
Example #47
 def create():
     return xs.pipe(ops.flat_map(lambda x: x))
Example #48
 def factory():
     return xs.pipe(ops.flat_map(ys))
Example #49
 def create():
     def mapper(x):
         ys = [x] * x
         inners.append(ys)
         return ys
     return xs.pipe(ops.flat_map(mapper))
"""
Tests MergeMap from rxjs
https://github.com/ReactiveX/rxjs/blob/master/spec/operators/mergeMap-spec.ts

it should flat_map many regular interval inners
"""
with marbles_testing(timespan=1.0) as context:
    start, cold, hot, exp = context

    a = cold(' ----a---a----a----(a,|)                     ')
    b = cold('     ----1----b----(b,|)                     ')
    c = cold('                 -------c---c---c----c---(c,|)')
    d = cold('                         -------(d,|)        ')
    e1 = hot('-a---b-----------c-------d------------|      ')
    ex = exp('-----a---(a,1)(a,b)(a,b)c---c---(c,d)c---(c,|)')
    expected = ex

    observableLookup = {"a": a, "b": b, "c": c, "d": d}

    obs = e1.pipe(
        ops.flat_map(lambda value: observableLookup[value])
        )

    results = start(obs)
    assert results == expected

print('flat_map: results vs expected')
for r, e in zip(results, expected):
    print(r, e)