Example #1
    def test_error(self):
        numbers = Subject()
        windows = Subject()

        expected_numbers = []
        expected_error = None
        actual_numbers = []
        actual_error = None

        def on_next(i):
            actual_numbers.append(i)

        def on_error(e):
            nonlocal actual_error
            actual_error = e

        numbers.pipe(ops.buffer(windows)).subscribe(on_next=on_next,
                                                    on_error=on_error)

        numbers.on_next(1)
        numbers.on_next(2)
        numbers.on_error(ValueError())

        self.assertIsInstance(actual_error, ValueError)
        self.assertEqual(expected_numbers, actual_numbers)
Example #2
    def test_nominal(self):
        numbers = Subject()
        windows = Subject()

        expected_numbers = [[1, 2], [3, 4, 5]]
        expected_error = None
        actual_numbers = []
        actual_error = None

        def on_next(i):
            actual_numbers.append(i)

        def on_error(e):
            nonlocal actual_error
            actual_error = e

        numbers.pipe(ops.buffer(windows)).subscribe(on_next=on_next,
                                                    on_error=on_error)

        numbers.on_next(1)
        numbers.on_next(2)
        windows.on_next(True)
        numbers.on_next(3)
        numbers.on_next(4)
        numbers.on_next(5)
        windows.on_next(True)

        self.assertEqual(None, actual_error)
        self.assertEqual(expected_numbers, actual_numbers)
Example #3
    def _buffer_until_complete(source: rx.Observable):
        def do_boundary():
            boundary.on_next(0)
            boundary.on_completed()

        boundary = subject.Subject()
        source.subscribe(on_completed=do_boundary)
        return operators.buffer(boundary)(source)
Example #4
    def __init__(self, func):
        self.values = Subject()
        self.boundaries = Subject()
        self.grouped_values = self.values.pipe(
            op.buffer(self.boundaries)
        )

        self.sub = self.grouped_values.subscribe(func)
Example #5
    def open(self):
        print("WebSocket opened")
        self.write_message("connection opened")

        def _send_response(x):
            print(x)
            self.write_message(json.dumps(x))

        def _on_error(ex):
            print(ex)

        self.subject = Subject()
        self.subject.pipe(buffer(rx.interval(5.0)), last(),
                          flat_map(self.get_data)).subscribe(
                              on_next=_send_response, on_error=_on_error)
Example #6
async def api():
    @copy_current_websocket_context
    async def ws_handler(val):
        await websocket.send(val)

    obs: Subject = app.output.port_data
    obs.pipe(ops.buffer(obs.pipe(ops.filter(b"\r".__eq__))),
             ops.map(lambda i: b"".join(i).decode())).subscribe(
                 on_next=lambda val: asyncio.get_event_loop().create_task(
                     ws_handler(val)),
                 on_error=lambda val: print(f"on_error {val}"),
                 on_completed=lambda: print("on_completed"),
             )

    while True:
        await waiter(1)
Example #7
scheduler = TestScheduler()
ts = 0.1
rx.from_marbles('--(a1)-(b2)---(c3)|', timespan=ts).subscribe(print_value)
rx.from_marbles('(a6)---(b5)(c4)|', timespan=ts).subscribe(print_value)
time.sleep(2)
print('--')

# Create an observable using interval
rx.interval(0.3).pipe(ops.take_until(rx.timer(3))).subscribe(print_value)
time.sleep(4)
print('--')

# Buffer
print('-- buffer')
rx.from_(range(2000)).pipe(ops.buffer(
    rx.interval(0.001))).subscribe(on_next=lambda buffer: print(
        '# of items in buffer {}'.format(len(buffer))))
time.sleep(2)

print('-- buffer with count')
rx.from_(range(10)).pipe(ops.buffer_with_count(3)).subscribe(print_value)

print('-- buffer with time')
rx.interval(1).pipe(
    ops.take_until(rx.timer(10)),
    ops.buffer_with_time(3),
).subscribe(print_value)
time.sleep(12)
print('--')
Example #8
"""this operator will collect all the values, from the source observable and emit them at regular intervals once
the given boundary condition is satisfied

:parameter
    boundaries: the input observable that will decide when to stop so that the collected values are emitted

:return
    the return value is observable, that will have all the values collected from source observable based
    and that is time duration is decided by the observable taken.

example)
"""
from rx import of, interval, operators as op
import numpy as np
of(np.arange(1, 10000000)) \
    .pipe(
    op.map(lambda n: (n * (n + 1)) / 2),
    # op.filter(lambda x: x % 3 != 0),
    op.buffer(interval(0.001))
).subscribe(lambda x: print("the element is {}".format(x)))
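The snippet above pushes a single NumPy array through the pipeline, so the boundary-driven flushing described in the docstring is hard to see there. Below is a minimal sketch with independent source and boundary subjects; the names source and boundary are illustrative, not part of the original code.

from rx import operators as op
from rx.subject import Subject

source = Subject()
boundary = Subject()

# buffer(boundary): values from `source` are collected and flushed as a
# list each time `boundary` emits
source.pipe(op.buffer(boundary)).subscribe(
    on_next=lambda chunk: print("flushed: {}".format(chunk)))

source.on_next(1)
source.on_next(2)
boundary.on_next(0)   # flushed: [1, 2]
source.on_next(3)
boundary.on_next(0)   # flushed: [3]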
Example #9
    def create():
        return xs.pipe(ops.buffer(ys))
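This fragment is the kind of observable factory handed to a marble test. Below is a sketch of the scaffolding it is typically embedded in, using RxPY's rx.testing helpers; the xs/ys definitions and their timings are assumptions, not taken from the original test.

import rx.operators as ops
from rx.testing import TestScheduler, ReactiveTest

on_next = ReactiveTest.on_next
on_completed = ReactiveTest.on_completed

scheduler = TestScheduler()
# hot source and hot boundary observables with virtual-time notifications
xs = scheduler.create_hot_observable(
    on_next(210, 1), on_next(220, 2), on_completed(250))
ys = scheduler.create_hot_observable(
    on_next(215, "boundary"), on_completed(260))

def create():
    return xs.pipe(ops.buffer(ys))

# start() subscribes at virtual time 200, runs the clock, and records the
# notifications emitted by the buffered stream
results = scheduler.start(create)
print(results.messages)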
Example #10
    "edge": edge_subject,
    "node": node_subject,
    "graph": graph_subject
}
local_subscriber = partial(subscriber, subscribe_map)

# base_obs = rx.from_(open("streamTest.txt"))
base_obs = rx.from_(sys.stdin)

c = ConnectableObservable(base_obs, Subject())
dict_delimiter_subject = Subject()
ti = time.time()
c.pipe(op.filter(lambda line: '}' in line or '{' in line),
       op.map(lambda line: True)).subscribe(dict_delimiter_subject)

c.pipe(
    op.buffer(dict_delimiter_subject),
    op.skip(1),
    op.map(lambda lines: "".join(lines).replace('"', '\"')),
    op.filter(lambda line: '}' not in line),
    op.map(lambda line: "{}{}".format(line, "}")),
    op.map(lambda json_str: json.loads(json_str)),
    op.map(lambda dic: dict_to_dict_with_set(dic)),
    # op.take(20),
    op.group_by(lambda dic: get_dict_type(dic)),
).subscribe(local_subscriber)

print("Start stream time: {}".format(str(time.time() - ti)))
c.connect()
print("Finish time: {}".format(str(time.time() - ti)))
Example #11
    def __init__(self, options: ContextOptions = None):
        if options is None:
            options = ContextOptions()

        self.client = deepkit.client.Client(options)
        deepkit.globals.last_context = self
        self.log_lock = Lock()
        self.defined_metrics = {}
        self.log_subject = Subject()
        self.metric_subject = Subject()
        self.speed_report_subject = Subject()
        self.shutting_down = False

        atexit.register(self.shutdown)
        self.wait_for_connect()

        self.last_iteration_time = 0
        self.last_batch_time = 0
        self.job_iteration = 0
        self.job_iterations = 0
        self.seconds_per_iteration = 0
        self.seconds_per_iterations = []
        self.debugger_controller = None

        if deepkit.utils.in_self_execution():
            self.job_controller = JobController()

        self.debugger_controller = JobDebuggerController()

        def on_connect(connected):
            if connected:
                if deepkit.utils.in_self_execution():
                    asyncio.run_coroutine_threadsafe(
                        self.client.register_controller(
                            'job/' + self.client.job_id, self.job_controller),
                        self.client.loop)

                asyncio.run_coroutine_threadsafe(
                    self.client.register_controller(
                        'job/' + self.client.job_id + '/debugger',
                        self.debugger_controller), self.client.loop)

        self.client.connected.subscribe(on_connect)

        def on_metric(data: List):
            if len(data) == 0: return

            packed = {}
            for d in data:
                if d['id'] not in packed:
                    packed[d['id']] = b''

                packed[d['id']] += d['row']

            for i, v in packed.items():
                self.client.job_action(
                    'channelData', [i, base64.b64encode(v).decode('utf8')])

        self.metric_subject.pipe(buffer(interval(1))).subscribe(on_metric)

        def on_speed_report(rows):
            # only keep the latest value, once per second
            if len(rows) == 0: return
            self.client.job_action('streamFile', [
                '.deepkit/speed.metric',
                base64.b64encode(rows[-1]).decode('utf8')
            ])

        self.speed_report_subject.pipe(buffer(
            interval(1))).subscribe(on_speed_report)

        if deepkit.utils.in_self_execution():
            # otherwise the CLI handles output logging
            def on_log(data: List):
                if len(data) == 0: return
                packed = ''
                for d in data:
                    packed += d

                self.client.job_action('log', ['main_0', packed])

            self.log_subject.pipe(buffer(interval(1))).subscribe(on_log)

            if len(deepkit.globals.last_logs.getvalue()) > 0:
                self.log_subject.on_next(deepkit.globals.last_logs.getvalue())

        if deepkit.utils.in_self_execution():
            # otherwise the CLI handles output logging
            p = psutil.Process()

            def on_hardware_metrics(dummy):
                net = psutil.net_io_counters()
                disk = psutil.disk_io_counters()
                data = struct.pack(
                    '<BHdHHffff',
                    1,
                    0,
                    time.time(),
                    int(((p.cpu_percent(interval=None) / 100) /
                         psutil.cpu_count()) *
                        65535),  # stretch to max precision of uint16
                    int((p.memory_percent() / 100) *
                        65535),  # stretch to max precision of uint16
                    float(net.bytes_recv),
                    float(net.bytes_sent),
                    float(disk.write_bytes),
                    float(disk.read_bytes),
                )

                self.client.job_action('streamFile', [
                    '.deepkit/hardware/main_0.hardware',
                    base64.b64encode(data).decode('utf8')
                ])

            interval(1).subscribe(on_hardware_metrics)
Example #12
rx.range()
rx.repeat_value()
rx.start()
rx.timer()

"""Mathematical"""
op.average()
op.concat()
op.count()
op.max()
op.min()
op.reduce()
op.sum()

"""Transformation"""
op.buffer()
op.group_by()
op.map()
op.scan()
# ...

"""Filtering"""
op.debounce()
op.distinct()
op.filter()
op.element_at()
op.first()
op.ignore_elements()
op.last()
op.skip()
op.skip_last()
Example #13
import rx
import rx.operators as ops
from rx.subject import Subject
import time
import threading

numbers = Subject()
windows = Subject()
numbers.pipe(ops.buffer(windows)).subscribe(
    on_next=lambda i: print("on_next {}".format(i)),
    on_error=lambda e: print("on_error: {}".format(e)),
    on_completed=lambda: print("on_completed"))

numbers.on_next(1)
numbers.on_next(2)
windows.on_next(True)
numbers.on_next(3)
numbers.on_next(4)
numbers.on_next(5)
windows.on_next(True)
Example #14
    def __init__(self, config_path: str):
        deepkit.globals.last_context = self
        self.log_lock = Lock()
        self.defined_metrics = {}
        self.log_subject = Subject()
        self.metric_subject = Subject()
        self.speed_report_subject = Subject()

        self.client = deepkit.client.Client(config_path)
        self.wait_for_connect()
        atexit.register(self.shutdown)

        self.last_iteration_time = 0
        self.last_batch_time = 0
        self.job_iteration = 0
        self.job_iterations = 0
        self.seconds_per_iteration = 0
        self.seconds_per_iterations = []
        self.debugger_controller = None

        if deepkit.utils.in_self_execution():
            self.job_controller = JobController()

        self.debugger_controller = JobDebuggerController()

        def on_connect(connected):
            if connected:
                if deepkit.utils.in_self_execution():
                    asyncio.run_coroutine_threadsafe(
                        self.client.register_controller(
                            'job/' + self.client.job_id, self.job_controller),
                        self.client.loop)

                asyncio.run_coroutine_threadsafe(
                    self.client.register_controller(
                        'job/' + self.client.job_id + '/debugger',
                        self.debugger_controller), self.client.loop)

        self.client.connected.subscribe(on_connect)

        def on_log(data: List):
            if len(data) == 0: return
            packed = ''
            for d in data:
                packed += d

            self.client.job_action('log', ['main_0', packed])

        self.log_subject.pipe(buffer(interval(1))).subscribe(on_log)

        if len(deepkit.globals.last_logs.getvalue()) > 0:
            self.log_subject.on_next(deepkit.globals.last_logs.getvalue())

        def on_metric(data: List):
            if len(data) == 0: return
            packed = {}

            for d in data:
                if d['id'] not in packed:
                    packed[d['id']] = []

                packed[d['id']].append(d['row'])

            for i, v in packed.items():
                self.client.job_action('channelData', [i, v])

        self.metric_subject.pipe(buffer(interval(1))).subscribe(on_metric)

        def on_speed_report(rows):
            # only keep the latest value, once per second
            if len(rows) == 0: return
            self.client.job_action('streamJsonFile',
                                   ['.deepkit/speed.csv', [rows[-1]]])

        self.speed_report_subject.pipe(buffer(
            interval(1))).subscribe(on_speed_report)

        p = psutil.Process()
        self.client.job_action('streamJsonFile', [
            '.deepkit/hardware/main_0.csv',
            [[
                'time', 'cpu', 'memory', 'network_rx', 'network_tx',
                'block_write', 'block_read'
            ]]
        ])

        def on_hardware_metrics(dummy):
            net = psutil.net_io_counters()
            disk = psutil.disk_io_counters()
            data = [
                time.time(),
                (p.cpu_percent(interval=None) / 100) / psutil.cpu_count(),
                p.memory_percent() / 100,
                net.bytes_recv,
                net.bytes_sent,
                disk.write_bytes,
                disk.read_bytes,
            ]
            self.client.job_action('streamJsonFile',
                                   ['.deepkit/hardware/main_0.csv', [data]])

        interval(1).subscribe(on_hardware_metrics)