Example #1
def _window_to_group(self, value):
    return value.pipe(
        ops.to_iterable(),
        ops.map(lambda x: rx.from_iterable(x).pipe(
            ops.group_by(_group_by),
            ops.map(_group_to_batch),
            ops.merge_all())),
        ops.merge_all())
Example #2
def _window_to_group(value):
    return value.pipe(
        ops.to_iterable(),
        ops.map(lambda x: rx.from_iterable(x).pipe(
            # Group window by 'organization', 'bucket' and 'precision'
            ops.group_by(_group_by),
            # Create a batch (concatenate line protocols with \n)
            ops.map(_group_to_batch),
            ops.merge_all())),
        ops.merge_all())
Example #3
    def __init__(self,
                 concurrency_per_group,
                 delay_seconds=0,
                 description=None):
        self._queue = Subject()
        self._description = description
        self.request_scheduler = ThreadPoolScheduler(concurrency_per_group)
        producer_scheduler = ThreadPoolScheduler(concurrency_per_group)

        def on_next(result):
            output = result['output']
            output.on_next({
                'value': result.get('value'),
                'completed': result.get('completed')
            })

        self._subscription = self._queue.pipe(
            observe_on(producer_scheduler), group_by(lambda r: r['group']),
            flat_map(lambda concurrency_group: concurrency_group.pipe(
                map(lambda r: r['work']),
                delay(delay_seconds),
                merge(max_concurrent=concurrency_per_group),
                merge_all(),
            )), take_until_disposed()).subscribe(
                on_next=on_next,
                on_error=lambda error: logging.exception(
                    'Error in {} request stream'.format(self)),
                scheduler=producer_scheduler)
Example #4
    def __init__(
        self, influxdb_client,
        write_options: WriteOptions = WriteOptions()) -> None:
        self._influxdb_client = influxdb_client
        self._write_service = WriteService(influxdb_client.api_client)
        self._write_options = write_options
        if self._write_options.write_type is WriteType.batching:
            # Define a Subject that listens for incoming data and produces writes into InfluxDB
            self._subject = Subject()

            # Define a scheduler that is used for processing incoming data - defaults to a singleton
            observable = self._subject.pipe(
                ops.observe_on(self._write_options.write_scheduler))
            self._disposable = observable \
                .pipe(  # Split incoming data into windows by batch_size or flush_interval
                    ops.window_with_time_or_count(count=write_options.batch_size,
                                                  timespan=timedelta(milliseconds=write_options.flush_interval)),
                    # Map each incoming batch window into groups defined by 'organization', 'bucket' and 'precision'
                    ops.flat_map(lambda v: _window_to_group(v)),
                    # Write data into InfluxDB (with the possibility to retry if it fails)
                    ops.map(mapper=lambda batch: self._retryable(data=batch, delay=self._jitter_delay())),
                    ops.merge_all()) \
                .subscribe(self._on_next, self._on_error, self._on_complete)
        else:
            self._subject = None
            self._disposable = None
Example #5
def _merge(*args: Union[Observable, Iterable[Observable]]) -> Observable:
    sources = args[:]

    if isinstance(sources[0], Iterable):
        sources = sources[0]

    return rx.from_iterable(sources).pipe(ops.merge_all())
Example #6
        def create():
            def mapper(ys, i):
                def proj(y):
                    return "%s %s" % (i, y)

                return ys.pipe(ops.map(proj), ops.concat(rx.return_value('%s end' % i)))
            return xs.pipe(ops.window_with_time(100, 50), ops.map_indexed(mapper), ops.merge_all())
Example #7
def create():
    def mapper(w, i):
        return w.pipe(ops.map(lambda x: str(i) + ' ' + str(x)))

    return xs.pipe(
        ops.window(ys),
        ops.map_indexed(mapper),
        ops.merge_all(),
    )
Example #8
        def create():
            def projection(w, i):
                def inner_proj(x):
                    return "%s %s" % (i, x)

                return w.pipe(ops.map(inner_proj))

            return xs.pipe(ops.window_with_time_or_count(70, 3),
                           ops.map_indexed(projection), ops.merge_all())
Example #9
        def create():
            def mapper(w, i):
                def mapping(x):
                    return "%s %s" % (i, x)

                return w.pipe(ops.map(mapping))

            return xs.pipe(ops.window_with_count(3, 2),
                           ops.map_indexed(mapper), ops.merge_all())
Example #10
        def create():
            def mapper(w, i):
                return w.pipe(ops.map(lambda x: str(i) + ' ' + str(x)))

            return xs.pipe(
                ops.window(ys),
                ops.map_indexed(mapper),
                ops.merge_all(),
            )
Example #11
def list_files(config, dataset_key='voxceleb2_test_path'):

    files = rx.just(None).pipe(
        rs.with_latest_from(config),
        ops.starmap(lambda _, c: walk(c['config']['dataset'][dataset_key])),
        ops.merge_all(),
    )

    return files,
Example #12
def _flat_map_internal(source, mapper=None, mapper_indexed=None):
    def projection(x, i):
        mapper_result = mapper(x) if mapper else mapper_indexed(x, i)
        if isinstance(mapper_result, collections.abc.Iterable):
            result = from_(mapper_result)
        else:
            result = from_future(mapper_result) if is_future(
                mapper_result) else mapper_result
        return result

    return source.pipe(ops.map_indexed(projection), ops.merge_all())
Example #13
def __init__(self) -> None:
    self._subject = Subject()
    self._scheduler = ThreadPoolScheduler(max_workers=1)
    obs = self._subject.pipe(ops.observe_on(self._scheduler))
    self._disposable = obs \
        .pipe(ops.window_with_time_or_count(count=5, timespan=datetime.timedelta(milliseconds=10_000)),
              ops.flat_map(lambda x: self._window_to_group(x)),
              ops.map(mapper=lambda x: self._retryable(data=x, delay=self._jitter_delay(jitter_interval=1000))),
              ops.merge_all()) \
        .subscribe(self._result, self._error, self._on_complete)
Example #14
        def create():
            def closing():
                return rx.throw(ex)

            def mapper(w, i):
                return w.pipe(ops.map(lambda x: str(i) + ' ' + str(x)))

            return xs.pipe(
                ops.window(closing),
                ops.map_indexed(mapper),
                ops.merge_all(),
            )
Example #15
        def create():
            def closing(x):
                raise Exception(ex)

            def mapper(w, i):
                return w.pipe(ops.map(lambda x: str(i) + ' ' + str(x)))

            return xs.pipe(
                ops.window(ys, closing),
                ops.map_indexed(mapper),
                ops.merge_all(),
            )
Example #16
def _failed_transfers(store):
    processing_files = ReplaySubject()

    def transfer_files():
        state = store.getState()
        if state.processing:
            processing_files.on_next(state.processing)

    store.subscribe(transfer_files)
    return processing_files.pipe(
        operators.map(lambda paths: rx.from_iterable(paths)),
        operators.merge_all(), operators.flat_map(_transfer_file))
Example #17
        def create():
            def closing(x):
                raise Exception(ex)

            def mapper(w, i):
                return w.pipe(ops.map(lambda x: str(i) + ' ' + str(x)))

            return xs.pipe(
                    ops.window(ys, closing),
                    ops.map_indexed(mapper),
                    ops.merge_all(),
                    )
Example #18
        def create():
            def closing():
                return rx.throw(ex)

            def mapper(w, i):
                return w.pipe(ops.map(lambda x: str(i) + ' ' + str(x)))

            return xs.pipe(
                    ops.window(closing),
                    ops.map_indexed(mapper),
                    ops.merge_all(),
                    )
Example #19
    def __init__(
        self,
        influxdb_client,
        write_options: WriteOptions = WriteOptions(),
        point_settings: PointSettings = PointSettings()
    ) -> None:
        self._influxdb_client = influxdb_client
        self._write_service = WriteService(influxdb_client.api_client)
        self._write_options = write_options
        self._point_settings = point_settings

        if influxdb_client.default_tags:
            for key, value in influxdb_client.default_tags.items():
                self._point_settings.add_default_tag(key, value)

        if self._write_options.write_type is WriteType.batching:
            # Define a Subject that listens for incoming data and produces writes into InfluxDB
            self._subject = Subject()

            self._disposable = self._subject.pipe(
                # Split incoming data into windows by batch_size or flush_interval
                ops.window_with_time_or_count(count=write_options.batch_size,
                                              timespan=timedelta(milliseconds=write_options.flush_interval)),
                # Map each window into groups defined by 'organization', 'bucket' and 'precision'
                ops.flat_map(lambda window: window.pipe(
                    # Group window by 'organization', 'bucket' and 'precision'
                    ops.group_by(lambda batch_item: batch_item.key),
                    # Create a batch (concatenate line protocols with \n)
                    ops.map(lambda group: group.pipe(
                        ops.to_iterable(),
                        ops.map(lambda xs: _BatchItem(key=group.key, data=_body_reduce(xs), size=len(xs))))),
                    ops.merge_all())),
                # Write data into InfluxDB (with the possibility to retry if it fails)
                ops.map(mapper=lambda batch: self._retryable(data=batch, delay=self._jitter_delay())),
                ops.merge_all())\
                .subscribe(self._on_next, self._on_error, self._on_complete)

        else:
            self._subject = None
            self._disposable = None
Example #20
def get_authorization_stream(request: OIDCRequest) -> Observable:
    response_params: Dict[str, Any] = {}

    # yapf: disable
    return just(request).pipe(
        op.flat_map(call_async(validate_redirect_uri)),
        op.flat_map(call_async(validate_response_type)),
        op.flat_map(call_async(validate_scope)),
        op.flat_map(select_flows),
        op.merge_all(),
        op.do_action(lambda x: response_params.update(asdict(x))),
        op.last(),
        op.map(lambda x: AuthorizationResponse(**response_params)))
Example #21
        def create():
            def closing():
                curr = window[0]
                window[0] += 1
                return rx.timer(curr * 100)

            def mapper(w, i):
                return w.pipe(ops.map(lambda x: str(i) + ' ' + str(x)))

            return xs.pipe(
                ops.window(closing),
                ops.map_indexed(mapper),
                ops.merge_all(),
            )
Example #22
def _flat_map_internal(source, mapper=None, mapper_indexed=None):
    def projection(x, i):
        mapper_result = mapper(x) if mapper else mapper_indexed(x, i)
        if isinstance(mapper_result, collections.abc.Iterable):
            result = from_(mapper_result)
        else:
            result = from_future(mapper_result) if is_future(
                mapper_result) else mapper_result
        return result

    return source.pipe(
        ops.map_indexed(projection),
        ops.merge_all()
    )
Example #23
        def create():
            def closing():
                curr = window[0]
                window[0] += 1
                return rx.timer(curr * 100)

            def mapper(w, i):
                return w.pipe(ops.map(lambda x: str(i) + ' ' + str(x)))

            return xs.pipe(
                    ops.window(closing),
                    ops.map_indexed(mapper),
                    ops.merge_all(),
                    )
Example #24
        def create():
            def closings():
                w = window[0]
                window[0] += 1
                return rx.timer(w * 100)

            def mapper(w, i):
                return w.pipe(ops.map(lambda x: str(i) + ' ' + str(x)))

            return xs.pipe(
                ops.window(window_closing_mapper=closings),
                ops.map_indexed(mapper),
                ops.merge_all(),
            )
Example #25
        def create():
            def closings():
                w = window[0]
                window[0] += 1
                return rx.timer(w * 100)

            def mapper(w, i):
                return w.pipe(ops.map(lambda x: str(i) + ' ' + str(x)))

            return xs.pipe(
                    ops.window(window_closing_mapper=closings),
                    ops.map_indexed(mapper),
                    ops.merge_all(),
                    )
Example #26
    def __init__(self,
                 endpoint_clients: [typing.ServiceEndpointClient] = None,
                 request_correlator: typing.RequestCorrelator = None):

        if endpoint_clients is None:
            self.__endpoint_clients = []
        else:
            self.__endpoint_clients = endpoint_clients

        if request_correlator is None:
            self._request_correlator = DefaultRequestCorrelator()
        else:
            self._request_correlator = request_correlator

        self.__events = rx.from_iterable(
            map(lambda ep: ep.events,
                self.__endpoint_clients)).pipe(merge_all(), share())
Example #27
    def get_responses(self, request) -> rx.Observable:
        if self._request_correlator is None:
            raise Exception(
                'Please configure the ServiceBus with a RequestCorrelator using .correlate_requests_with()'
            )

        self._request_correlator.set_request_correlation_ids(request)

        endpoints = self.__can_handle(request)

        responses = rx.from_iterable(
            map(lambda ep: ep.get_responses(request), endpoints))

        return responses.pipe(
            merge_all(),
            rx.operators.filter(
                lambda r: self._request_correlator.are_correlated(request, r)),
            share())
Example #28
    def __init__(self, influxdb_client, write_options: WriteOptions = WriteOptions()) -> None:
        self._influxdb_client = influxdb_client
        self._write_service = WriteService(influxdb_client.api_client)
        self._write_options = write_options
        if self._write_options.write_type is WriteType.batching:
            self._subject = Subject()

            observable = self._subject.pipe(ops.observe_on(self._write_options.write_scheduler))
            self._disposable = observable \
                .pipe(ops.window_with_time_or_count(count=write_options.batch_size,
                                                    timespan=timedelta(milliseconds=write_options.flush_interval)),
                      ops.flat_map(lambda v: _window_to_group(v)),
                      ops.map(mapper=lambda x: self._retryable(data=x, delay=self._jitter_delay())),
                      ops.merge_all()) \
                .subscribe(self._on_next, self._on_error, self._on_complete)
        else:
            self._subject = None
            self._disposable = None
Example #29
def _merge(*args: Union[Observable, Iterable[Observable]]) -> Observable:
    """Merges all the observable sequences into a single observable
    sequence.

    1 - merged = rx.merge(xs, ys, zs)
    2 - merged = rx.merge([xs, ys, zs])

    Returns:
        The observable sequence that merges the elements of the
        observable sequences.
    """

    sources = args[:]

    if isinstance(sources[0], Iterable):
        sources = sources[0]

    return rx.from_iterable(sources).pipe(ops.merge_all())
Example #30
def create_source_observable(kafka_source, topic):
    if topic.timestamp_mapper is not None:  # pull mode
        return kafka_source.pipe(
            ops.filter(lambda i: i.topic == topic.name),  # ConsumerRecords
            ops.flat_map(lambda i: i.records.pipe(
                rxx.pullable.sorted_merge(
                    key_mapper=topic.timestamp_mapper,
                    lookup_size=topic.merge_lookup_depth,
                ),
                rxx.pullable.push(),
            )),
        )
    else:  # push mode
        return kafka_source.pipe(
            ops.filter(lambda i: i.topic == topic.name),  # ConsumerRecords
            ops.flat_map(lambda i: i.records.pipe(
                ops.merge_all(),
            )),
        )
Example #31
def _rx_mux(*xss: rx.Observable) -> rx.Observable:
    """Combines observables into single observable of indexed tuples.

    ```
    A:   --- A1 -------- A2 -- A3 ----------->
    B:   -------- B1 ----------------- B3 --->
                    [ rx_mux ]
    out: --- A1 - B1 --- A2 -- A3 ---- B3 --->
    ```

    The output events are of type `tuple[int, AOut | BOut]`,
    where the first item represents the stream index (A = 0, B = 1),
    and the second item holds the data.
    """

    def pair_index(i: int) -> Callable[[Any], Any]:
        def inner(x: Any) -> Tuple[int, Any]:
            return i, x

        return inner

    paired = [xs.pipe(ops.map(pair_index(i))) for i, xs in enumerate(xss)]
    return rx.from_iterable(paired).pipe(ops.merge_all())
Example #32
def create():
    def proj(w, i):
        return w.pipe(ops.map(lambda x: str(i) + ' ' + str(x)))

    return xs.pipe(ops.window_with_count(3, 2), ops.map_indexed(proj), ops.merge_all())
Example #33
def action1(scheduler, state):
    xs[0] = rx.from_iterable(["alpha", "apple", "beta", "bat", "gamma"]) \
        .pipe(ops.group_by(lambda s: s[0]),
              ops.map(lambda xs: xs.pipe(ops.to_iterable(), ops.map(list))),
              ops.merge_all(),
              )
Example #34
def create():
    return xs.pipe(ops.merge_all())
Example #35
        def create():
            def mapper(w, i):
                return w.pipe(ops.map(lambda x: "%s %s" % (i, x)))

            return xs.pipe(ops.window_with_time(100, 70), ops.map_indexed(mapper), ops.merge_all())
Example #36
def create():
    def mapper(w, i):
        def mapping(x):
            return "%s %s" % (i, x)

        return w.pipe(ops.map(mapping))

    return xs.pipe(ops.window_with_count(3, 2), ops.map_indexed(mapper), ops.merge_all())
Example #37
def _merge(*sources: Observable) -> Observable:
    return rx.from_iterable(sources).pipe(ops.merge_all())
Example #38
        def create():
            def proj(w, i):
                return w.pipe(ops.map(lambda x: str(i) + ' ' + str(x)))

            return xs.pipe(ops.window_with_count(3, 2), ops.map_indexed(proj),
                           ops.merge_all())
Example #39
def action1(scheduler, state):
    xs[0] = rx.from_iterable(["alpha", "apple", "beta", "bat", "gamma"]) \
        .pipe(ops.group_by(lambda s: s[0]),
              ops.map(lambda xs: xs.pipe(ops.to_iterable(), ops.map(list))),
              ops.merge_all(),
              )
Example #40
import rx
from rx import operators as ops

source1 = rx.interval(1.0).pipe(
    ops.map(lambda i: "Source 1: {0}".format(i))
)
source2 = rx.interval(0.5).pipe(
    ops.map(lambda i: "Source 2: {0}".format(i))
)
source3 = rx.interval(0.3).pipe(
    ops.map(lambda i: "Source 3: {0}".format(i))
)

rx.from_(
    [source1, source2, source3]
).pipe(
    ops.merge_all()
).subscribe(
    lambda s: print(s)
)

# keep application alive until user presses a key
input("Press any key to quit\n")


Example #41
def intervalRead(rate, fun) -> rx.Observable:
    loop = asyncio.get_event_loop()
    return rx.interval(rate).pipe(
        ops.map(lambda i: rx.from_future(loop.create_task(fun()))),
        ops.merge_all()
    )
Example #42
save_edges_in_db = partial(
    save_many_items,
    conn.get_database("planning").get_collection("edges2"))
save_nodes_in_db = partial(
    save_many_items,
    conn.get_database("planning").get_collection("nodes2"))

graph_type_map = {"source": "edge", "node_name": "node", "name": "graph"}
get_dict_type = partial(get_obj_type_from_type_map, graph_type_map)

edge_subject, node_subject, graph_subject = Subject(), Subject(), Subject()

processed_edges = edge_subject.pipe(
    op.filter(lambda edge_dic: not exists(edge_dic)),
    op.group_by(lambda dic: "".join(
        [str(v) for k, v in dic.items() if k not in ['level', 'type']])),
    op.map(lambda o: general_edge_grouper(o)), op.merge_all(),
    op.buffer_with_count(1000),
    op.map(lambda dict_list: save_edges_in_db(dict_list)),
    op.buffer_with_count(5), op.map(lambda futures: perform_futures(futures)),
    op.map(lambda results: [r.inserted_ids for r in results])).subscribe(dumb)

processed_nodes = node_subject.pipe(
    op.filter(lambda node_dic: not exists(node_dic)),
    op.group_by(lambda dic: "".join(
        [str(v) for k, v in dic.items() if k not in ['level']])),
    op.map(lambda o: general_node_grouper(o)), op.merge_all(),
    op.buffer_with_count(5000),
    op.map(lambda dict_list: save_nodes_in_db(dict_list)),
    op.buffer_with_count(5), op.map(lambda futures: perform_futures(futures)),
    op.map(lambda results: [r.inserted_ids for r in results])).subscribe(dumb)