def _window_to_group(self, value):
    return value.pipe(
        ops.to_iterable(),
        ops.map(lambda x: rx.from_iterable(x).pipe(
            # Group window by 'organization', 'bucket' and 'precision'
            ops.group_by(_group_by),
            # Create batch (concatenation of line protocols by \n)
            ops.map(_group_to_batch),
            ops.merge_all())),
        ops.merge_all())
Example #2
    def __init__(self,
                 concurrency_per_group,
                 delay_seconds=0,
                 description=None):
        self._queue = Subject()
        self._description = description
        self.request_scheduler = ThreadPoolScheduler(concurrency_per_group)
        producer_scheduler = ThreadPoolScheduler(concurrency_per_group)

        def on_next(result):
            output = result['output']
            output.on_next({
                'value': result.get('value'),
                'completed': result.get('completed')
            })

        self._subscription = self._queue.pipe(
            observe_on(producer_scheduler), group_by(lambda r: r['group']),
            flat_map(lambda concurrency_group: concurrency_group.pipe(
                map(lambda r: r['work']),
                delay(delay_seconds),
                merge(max_concurrent=concurrency_per_group),
                merge_all(),
            )), take_until_disposed()).subscribe(
                on_next=on_next,
                on_error=lambda error: logging.exception(
                    'Error in {} request stream'.format(self)),
                scheduler=producer_scheduler)
Example #3
def rmux_client(sources):
    response = sources.tcp_client.response.pipe(ops.share())
    tcp_connect = rx.just(tcp_client.Connect(
        host='127.0.0.1', port='8080'
    ))

    create_observable = response.pipe(
        ops.flat_map(lambda connection:
            rx.just({'what': 'subscribe', 'id': 42, 'name': '1234'}).pipe(
                ops.map(lambda i: json.dumps(i)),
                frame,
                ops.map(lambda j: tcp_client.Write(id=connection.id, data=j.encode()))
        ))
    )

    console = response.pipe(
        ops.flat_map(lambda connection: connection.observable.pipe(
            ops.map(lambda i: i.data.decode('utf-8')),
            unframe,
            ops.map(lambda i: json.loads(i)),
            ops.group_by(lambda i: i['id']),
            ops.flat_map(lambda subscription: subscription.pipe(
                ops.map(notification),
                ops.dematerialize(),
            ))
        )),
        ops.map(lambda i: "item: {}\n".format(i))
    )

    tcp_sink = rx.merge(tcp_connect, create_observable)

    return Sink(
        tcp_client=tcp_client.Sink(request=tcp_sink),
        stdout=stdout.Sink(data=console),
    )
Example #4
def word_counter(file_name):
    # count words using `group_by()`
    # tuple the word with the count
    return words_from_file(file_name).pipe(
        ops.group_by(lambda word: word),
        ops.flat_map(lambda grp: grp.pipe(ops.count(),
                                          ops.map(lambda ct: (grp.key, ct)))))
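A minimal way to exercise this example, assuming `words_from_file` emits each word of the file as a separate item (the helper below is a hypothetical stand-in, not part of the original snippet):

import re
import rx
from rx import operators as ops


def words_from_file(file_name):
    # Hypothetical helper: read the file eagerly, emit one word per item.
    with open(file_name) as f:
        return rx.from_iterable(re.findall(r'\w+', f.read().lower()))


word_counter('lorem.txt').subscribe(
    on_next=lambda pair: print('{}: {}'.format(*pair)))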
Example #5
        def action1(scheduler, state):
            c["outer"] = xs.pipe(
                ops.group_by(
                    lambda x: x.lower().strip(),
                    lambda x: x[::-1],
                ))

            return c["outer"]
Example #6
        def action1(scheduler, state):
            c["outer"] = xs.pipe(
                ops.group_by(
                    lambda x: x.lower().strip(),
                    lambda x: x[::-1],
                ))

            return c["outer"]
Example #7
        def factory():
            def key_mapper(x):
                key_invoked[0] += 1
                return x.lower().strip()

            return xs.pipe(
                ops.group_by(key_mapper, lambda x: x),
                ops.map(lambda g: g.key),
            )
Example #8
        def factory():
            def key_mapper(x):
                key_invoked[0] += 1
                return x.lower().strip()

            return xs.pipe(
                ops.group_by(key_mapper, lambda x: x),
                ops.map(lambda g: g.key),
            )
Example #9
        def subscription(scheduler, state):
            source = xs.pipe(
                ops.group_by(
                    key_mapper=lambda x: x % 2,
                    element_mapper=None,
                    subject_mapper=lambda: rx.subject.ReplaySubject(2),
                ))
            return source.subscribe(observer_groups, scheduler=scheduler)
Example #10
def _window_to_group(value):
    return value.pipe(
        ops.to_iterable(),
        ops.map(lambda x: rx.from_iterable(x).pipe(
            # Group window by 'organization', 'bucket' and 'precision'
            ops.group_by(_group_by),
            # Create batch (concatenation of line protocols by \n)
            ops.map(_group_to_batch),
            ops.merge_all())),
        ops.merge_all())
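This fragment relies on two helpers from the surrounding client code. A sketch consistent with the inlined pipeline in Example #21 below; `_BatchItem` and `_body_reduce` are that client's internals and are assumed here:

def _group_by(batch_item):
    # The key combines 'organization', 'bucket' and 'precision'.
    return batch_item.key


def _group_to_batch(group):
    # Collect the group's items and fold them into a single _BatchItem.
    return group.pipe(
        ops.to_iterable(),
        ops.map(lambda xs: _BatchItem(key=group.key,
                                      data=_body_reduce(xs),
                                      size=len(xs))))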
Example #11
    def test_groupby_count(self):
        res = []
        counts = rx.from_(range(10)).pipe(
            ops.group_by(lambda i: 'even' if i % 2 == 0 else 'odd'),
            ops.flat_map(lambda i: i.pipe(
                ops.count(),
                ops.map(lambda ii: (i.key, ii)),
            )))

        counts.subscribe(on_next=res.append)
        assert res == [('even', 5), ('odd', 5)]
Example #12
    def requestQuotes(self, tickers):
        logger.debug(f'QuoteObserver.requestQuotes({tickers})')
        self.quoteSubscription = create(
            lambda o, s: beginStreamingQuotes(tickers, o, s)).pipe(
                op.subscribe_on(config.pool_scheduler),
                op.observe_on(config.pool_scheduler),
                op.do_action(lambda q: logger.debug(f'QO: {q}')),
                op.group_by(lambda q: q['symbol']),
            ).subscribe(on_next=self.handleQuote,
                        on_error=lambda e: logger.debug(e),
                        on_completed=lambda: logger.debug(
                            'QuoteObserver subscription completed'))
Example #13
        def factory():
            def key_mapper(x):
                key_invoked[0] += 1
                return x.lower().strip()

            def element_mapper(x):
                ele_invoked[0] += 1
                return x[::-1]  # Yes, this is reverse string in Python

            return xs.pipe(
                ops.group_by(key_mapper, element_mapper),
                ops.map(lambda g: g.key),
            )
Example #14
        def factory():
            def key_mapper(x):
                key_invoked[0] += 1
                return x.lower().strip()

            def element_mapper(x):
                ele_invoked[0] += 1
                return x[::-1]

            return xs.pipe(
                ops.group_by(key_mapper, element_mapper),
                ops.map(lambda g: g.key),
            )
Example #15
        def factory():
            def key_mapper(x):
                key_invoked[0] += 1
                return x.lower().strip()

            def element_mapper(x):
                ele_invoked[0] += 1
                return x[::-1]

            return xs.pipe(
                ops.group_by(key_mapper, element_mapper),
                ops.map(lambda g: g.key),
            )
Example #16
def grouped_sample(
    key_mapper: ops.Mapper,
    sampler: Union[ops.timedelta, float, rx.Observable],
):
    """
    Combination of "group_by", "flat_map" and "sample", groups an observable sequence by the
    "key_mapper" function, maps the resulting observable sequences with the "sample" operator
    and flatten it into a single observable sequence.
    """
    return rx.pipe(
        ops.group_by(key_mapper),
        ops.flat_map(lambda x: x.pipe(ops.sample(sampler))),
    )
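A hypothetical usage sketch, throttling a stream of readings to at most one item per sensor every second (`readings` and its shape are illustrative):

import time

import rx
from rx import operators as ops

readings = rx.interval(0.1).pipe(
    ops.map(lambda i: {'sensor': i % 3, 'value': i}))

readings.pipe(
    grouped_sample(lambda r: r['sensor'], 1.0),
).subscribe(print)

time.sleep(3)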
Example #17
        def factory():
            def key_mapper(x):
                key_invoked[0] += 1
                return x.lower().strip()

            def element_mapper(x):
                ele_invoked[0] += 1
                return x[::-1]  # Yes, this is reverse string in Python

            return xs.pipe(
                ops.group_by(key_mapper, element_mapper),
                ops.map(lambda g: g.key),
            )
Example #18
def demo_group_by():
    '''group_by with a custom subject_mapper'''
    a = rx.from_([
        {'id': 1, 'name': 'aaa'},
        {'id': 2, 'name': 'bbb'},
        {'id': 1, 'name': 'aaa'},
        {'id': 1, 'name': 'aaa'},
        {'id': 2, 'name': 'aaa'},
    ])

    a.pipe(
        ops.group_by(lambda x: x['id'], lambda x: x['name'],
                     subject_mapper=lambda: rx.subject.ReplaySubject()),
        ops.to_iterable(),
    ).subscribe(print)
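Note that `subject_mapper` must be a factory invoked once per group (compare Example #9 above, which passes `lambda: rx.subject.ReplaySubject(2)`): `group_by` calls it to create each group's subject, so handing over a single `ReplaySubject` instance would fail.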
Example #19
    def _watch_heartbeat(
        self,
        key_mapper: Callable[[Any], str],
    ) -> Observable:
        """
        :returns: Observable[Tuple[str, bool]]
        """
        return rx.pipe(
            ops.group_by(key_mapper),
            ops.flat_map(lambda x: x.pipe(
                heartbeat(self.LIVELINESS),
                ops.map(lambda y: (x.key, y)),
            )),
        )
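`heartbeat` here is a project-specific operator that is not shown. One possible shape, sketched under the assumption that it emits True on each message and False after `liveliness` seconds of silence (not the project's actual implementation):

import rx
from rx import operators as ops


def heartbeat(liveliness):
    def _heartbeat(source):
        return source.pipe(
            ops.map(lambda _: True),
            # Each message restarts a timer that reports silence as False.
            ops.flat_map_latest(lambda alive: rx.concat(
                rx.of(alive),
                rx.timer(liveliness).pipe(ops.map(lambda _: False)))),
            ops.distinct_until_changed())
    return _heartbeat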
Example #20
    def __init__(self, concurrency_per_group, description=None):
        self.scheduler = ThreadPoolScheduler(concurrency_per_group)
        self._requests = Subject()
        self._output = ReplaySubject()
        self._description = description
        self._subscription = self._requests.pipe(
            group_by(lambda r: r['concurrency_group']),
            flat_map(lambda concurrency_group: concurrency_group.pipe(
                map(lambda r: r['request']),
                merge(max_concurrent=concurrency_per_group)))).subscribe(
                    on_next=lambda request: self._output.on_next(request),
                    on_error=lambda error: logging.exception(
                        'Error in {} request stream'.format(self)),
                    on_completed=lambda: logging.error(
                        '{} request stream unexpectedly completed'.format(self)),
                    scheduler=self.scheduler)
Example #21
    def __init__(
        self,
        influxdb_client,
        write_options: WriteOptions = WriteOptions(),
        point_settings: PointSettings = PointSettings()
    ) -> None:
        self._influxdb_client = influxdb_client
        self._write_service = WriteService(influxdb_client.api_client)
        self._write_options = write_options
        self._point_settings = point_settings

        if influxdb_client.default_tags:
            for key, value in influxdb_client.default_tags.items():
                self._point_settings.add_default_tag(key, value)

        if self._write_options.write_type is WriteType.batching:
            # Define a Subject that listens to incoming data and produces writes into InfluxDB
            self._subject = Subject()

            self._disposable = self._subject.pipe(
                # Split incoming data to windows by batch_size or flush_interval
                ops.window_with_time_or_count(count=write_options.batch_size,
                                              timespan=timedelta(milliseconds=write_options.flush_interval)),
                # Map window into groups defined by 'organization', 'bucket' and 'precision'
                ops.flat_map(lambda window: window.pipe(
                    # Group window by 'organization', 'bucket' and 'precision'
                    ops.group_by(lambda batch_item: batch_item.key),
                    # Create batch (concatenation of line protocols by \n)
                    ops.map(lambda group: group.pipe(
                        ops.to_iterable(),
                        ops.map(lambda xs: _BatchItem(key=group.key, data=_body_reduce(xs), size=len(xs))))),
                    ops.merge_all())),
                # Write data into InfluxDB (with the possibility to retry on failure)
                ops.map(mapper=lambda batch: self._retryable(data=batch, delay=self._jitter_delay())),
                ops.merge_all())\
                .subscribe(self._on_next, self._on_error, self._on_complete)

        else:
            self._subject = None
            self._disposable = None
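The same window-group-batch shape can be tried in isolation. A toy sketch with made-up data and no InfluxDB dependency:

import time
from datetime import timedelta

import rx
from rx import operators as ops

source = rx.interval(0.01).pipe(
    ops.map(lambda i: ('bucket-{}'.format(i % 2), 'line {}'.format(i))),
    ops.take(20))

source.pipe(
    # Close a window after 5 items or 100 ms, whichever comes first
    ops.window_with_time_or_count(count=5,
                                  timespan=timedelta(milliseconds=100)),
    ops.flat_map(lambda window: window.pipe(
        # Group each window by the bucket name
        ops.group_by(lambda item: item[0]),
        # Concatenate each group's lines into one batch per key
        ops.flat_map(lambda group: group.pipe(
            ops.to_iterable(),
            ops.map(lambda xs: (group.key,
                                '\n'.join(line for _, line in xs))),
        )),
    )),
).subscribe(print)

time.sleep(1)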
Example #22
    def __init__(self,
                 concurrency_per_group,
                 delay_seconds=0,
                 description=None):
        self.scheduler = ThreadPoolScheduler(concurrency_per_group)
        self.request_scheduler = ThreadPoolScheduler(10)
        self._requests = Subject()
        self._output_subject = Subject()
        self._output = self._output_subject.pipe(share())
        self._description = description
        self._subscription = self._requests.pipe(
            observe_on(self.scheduler),
            group_by(lambda r: r['concurrency_group']),
            flat_map(lambda concurrency_group: concurrency_group.pipe(
                map(lambda r: r['request']),
                merge(max_concurrent=concurrency_per_group),
                delay(delay_seconds))),
            take_until_disposed()).subscribe(
                on_next=lambda request: self._output_subject.on_next(request),
                on_error=lambda error: logging.exception(
                    'Error in {} request stream'.format(self)),
                on_completed=lambda: self.dispose(),
                scheduler=self.scheduler)
Example #23
files = ['test.csv', 'test2.csv']

source = rx.from_([rx.from_(read_csv(filename))
                   for filename in files]).pipe(ops.merge_all(),
                                                ops.map(row_to_dict))

published = source.pipe(ops.publish())


def print_row(row):
    print(
        'File: {filename} has {lines} lines and its size is {size} kb'.format(
            **row))


def print_group(group):
    return group.subscribe(print_row)


# Subscribe in two places. #1
maximum = published.pipe(ops.max(lambda a, b: a['lines'] - b['lines']))
maximum.subscribe(lambda row: print(
    '-- File with the most number of lines is "{filename}" with {lines} lines'.
    format(**row)))

# 2
published.pipe(ops.group_by(lambda row: row['size'])).subscribe(print_group)

# Nothing starts until connect() is called
published.connect()
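`publish()` returns a ConnectableObservable: the two subscriptions above only register themselves, and no file is read until `connect()` is called, at which point every row is multicast to both the `max` aggregate and the group printer.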
Example #24
    conn.get_database("planning").get_collection("graphs2"))
save_edges_in_db = partial(
    save_many_items,
    conn.get_database("planning").get_collection("edges2"))
save_nodes_in_db = partial(
    save_many_items,
    conn.get_database("planning").get_collection("nodes2"))

graph_type_map = {"source": "edge", "node_name": "node", "name": "graph"}
get_dict_type = partial(get_obj_type_from_type_map, graph_type_map)

edge_subject, node_subject, graph_subject = Subject(), Subject(), Subject()

processed_edges = edge_subject.pipe(
    op.filter(lambda edge_dic: not exists(edge_dic)),
    op.group_by(lambda dic: "".join(
        [str(v) for k, v in dic.items() if k not in ['level', 'type']])),
    op.map(lambda o: general_edge_grouper(o)), op.merge_all(),
    op.buffer_with_count(1000),
    op.map(lambda dict_list: save_edges_in_db(dict_list)),
    op.buffer_with_count(5), op.map(lambda futures: perform_futures(futures)),
    op.map(lambda results: [r.inserted_ids for r in results])).subscribe(dumb)

processed_nodes = node_subject.pipe(
    op.filter(lambda node_dic: not exists(node_dic)),
    op.group_by(lambda dic: "".join(
        [str(v) for k, v in dic.items() if k not in ['level']])),
    op.map(lambda o: general_node_grouper(o)), op.merge_all(),
    op.buffer_with_count(5000),
    op.map(lambda dict_list: save_nodes_in_db(dict_list)),
    op.buffer_with_count(5), op.map(lambda futures: perform_futures(futures)),
    op.map(lambda results: [r.inserted_ids for r in results])).subscribe(dumb)
Example #25
        def action1(scheduler, state):
            xs[0] = rx.from_iterable(["alpha", "apple", "beta", "bat", "gamma"]) \
                .pipe(ops.group_by(lambda s: s[0]),
                      ops.map(lambda xs: xs.pipe(ops.to_iterable(), ops.map(list))),
                      ops.merge_all(),
                      )
Example #26
rx.repeat_value()
rx.start()
rx.timer()

"""Mathematical"""
op.average()
op.concat()
op.count()
op.max()
op.min()
op.reduce()
op.sum()

"""Transformation"""
op.buffer()
op.group_by()
op.map()
op.scan()
# ...

"""Filtering"""
op.debounce()
op.distinct()
op.filter()
op.element_at()
op.first()
op.ignore_elements()
op.last()
op.skip()
op.skip_last()
op.take()
Example #27
import rx
import rx.operators as ops
from rx.subject import Subject


def wrap_items(i):
    # i is a GroupedObservable; label each item with the group's key.
    return i.pipe(ops.map(lambda j: 'obs {}: {}'.format(i.key, j)))


numbers = rx.from_([1, 2, 3, 4, 5, 6])
numbers.pipe(
    ops.group_by(lambda i: i % 2 == 0),
    ops.flat_map(wrap_items),
).subscribe(on_next=lambda i: print("on_next {}".format(i)),
            on_error=lambda e: print("on_error: {}".format(e)),
            on_completed=lambda: print("on_completed"))
Example #28
import rx
from rx import operators as ops

items = ["Alpha", "Beta", "Gamma", "Delta", "Epsilon"]

rx.from_(items).pipe(
    ops.group_by(lambda s: len(s)),
    ops.flat_map(lambda grp: grp.pipe(ops.to_list()))
).subscribe(
    lambda i: print(i)
)
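With RxPY 3's synchronous subscription, each list is emitted when its group completes at the end of the source, and groups complete in the order their keys first appeared, so this typically prints ['Alpha', 'Gamma', 'Delta'], then ['Beta'], then ['Epsilon'].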

Example #29
        def action1(scheduler, state):
            xs[0] = rx.from_iterable(["alpha", "apple", "beta", "bat", "gamma"]) \
                .pipe(ops.group_by(lambda s: s[0]),
                      ops.map(lambda xs: xs.pipe(ops.to_iterable(), ops.map(list))),
                      ops.merge_all(),
                      )
Example #30
from rx import from_, of, from_iterable, operators as ops, create

items = ["Alpha", "Beta", "Gamma", "Delta", "Epsilon"]

from_(items).pipe(ops.group_by(key_mapper=lambda s: len(s)),
                  ops.flat_map(lambda grp: grp.pipe(ops.to_list()))).subscribe(
                      lambda i: print(i))
Example #31
def key_selector(x):
    if x % 2 == 0:
        return 'even'
    return 'odd'


# The key of each group can be read from the group observable
def subscribe_group_observable(group_observable):
    def print_count(count):
        print(f'Group key={group_observable.key} contains {count} items')

    group_observable.pipe(
        ops.count()
    ).subscribe(print_count)


groups = rx.from_(range(3)).pipe(
    ops.group_by(key_selector)
)
groups.subscribe(subscribe_group_observable)


# Sample: A way to grab items and emit latest values at certain points
print('-- Sample')

# Emit on a 10 ms interval (the time unit is seconds)
rx.interval(0.01).pipe(
    # Run for 1 second
    ops.take_until(rx.timer(1)),
    # Emit the latest received value every 100 ms
    ops.sample(0.1)
).subscribe(print)
time.sleep(2)