Example 1
from datetime import timedelta

import rx
from rx.operators import take_while

# InMemoryDataEntryService and InMemoryAnnotationService are assumed to be
# defined in the example's own module.


async def demo():
    one_second = timedelta(seconds=1)
    max_field_count = 1000
    fail_every = 5
    data_entry_service = InMemoryDataEntryService(
        field_count=max_field_count,
        create_interval=timedelta(milliseconds=1),
        fail_annotate_every=fail_every,
    )
    annotation_service = InMemoryAnnotationService(
        publish_annotation=data_entry_service.annotate_field,
        republish_interval=one_second,
        fail_annotate_every=fail_every,
        fail_ack_every=fail_every,
    )
    data_entry_service.start_publications(
        publish=annotation_service.annotate,
        acknowledge=annotation_service.acknowledge,
        interval=one_second,
    )

    created_field_count = 0
    acked_annotation_count = 0
    unannotated_field_count = 0
    unacked_annotation_count = 0

    def consistent(_=None) -> bool:
        return (created_field_count == max_field_count
                and unannotated_field_count == 0
                and unacked_annotation_count == 0)

    def inconsistent(value) -> bool:
        return not consistent(value)

    def eval_state(counter: int):
        nonlocal created_field_count
        nonlocal acked_annotation_count
        nonlocal unannotated_field_count
        nonlocal unacked_annotation_count
        created_field_count = data_entry_service.field_count
        acked_annotation_count = len(annotation_service.acknowledged_annotations)
        unacked_annotation_count = len(
            annotation_service.unacknowledged_annotations)
        unannotated_field_count = len(data_entry_service.unannotated_fields)

        print(f"Iteration: {counter + 1}")
        print(f"Field Count: {created_field_count}")
        print(f"Acked Annotation Count: {acked_annotion_count}")
        print(f"Unannotated Field Count: {unannotated_field_count}")
        print(f"Unacked Annotation Count: {unacked_annotion_count}")
        print("")

        if consistent():
            print("---------------")
            print("Eventually has arrived :)")
            print("---------------")

    ticks = rx.interval(timedelta(seconds=1))
    composed = ticks.pipe(take_while(inconsistent))
    composed.subscribe(eval_state)
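
The last three lines are the heart of the example: rx.interval supplies one tick per second, and take_while(inconsistent) keeps the subscription alive only until the services converge. A minimal, self-contained sketch of the same convergence-polling pattern (the mutable flag is a hypothetical stand-in for the service state above):

from datetime import timedelta

import rx
from rx.operators import take_while

state = {"consistent": False}  # flipped to True elsewhere once the system converges

rx.interval(timedelta(seconds=1)).pipe(
    take_while(lambda _: not state["consistent"])
).subscribe(
    on_next=lambda i: print(f"iteration {i + 1}"),
    on_completed=lambda: print("Eventually has arrived :)"))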
Example 2
def collect(messages: Observable[IbApiMessage]) -> Observable[List[Position]]:
    return messages.pipe(
        _.filter(lambda m: _is_position(m) or _is_position_end(m)),
        _.take_while(lambda m: not _is_position_end(m)),
        _.map(_unpack_position),
        _.reduce(lambda positions, position: [*positions, position], []),
    )
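
The same sentinel pattern recurs in Examples 7 and 10 below: narrow the stream to the relevant messages, cut it with take_while when the end marker arrives (the marker itself is dropped, since inclusive defaults to False), and fold what remains with reduce. A generic sketch of that shape, with hypothetical is_data and is_end predicates:

from rx import operators as ops

def collect_until_sentinel(messages, is_data, is_end):
    # is_data / is_end are hypothetical predicates over the message type.
    return messages.pipe(
        ops.filter(lambda m: is_data(m) or is_end(m)),
        ops.take_while(lambda m: not is_end(m)),  # drops the end marker itself
        ops.reduce(lambda acc, m: [*acc, m], []),
    )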
Example 3
def light_state_equals(light_id: str, light_final_state: str,
                       test_context: Data.TestContext,
                       loop: asyncio.AbstractEventLoop,
                       awaitables: List[RxObservable]):
    assert isinstance(light_id, str)
    assert isinstance(light_final_state, str)

    def take_while_state(payload: Structs.s_lights_state) -> bool:
        return payload.newState != light_final_state

    timeout_sec = 10.0
    lightbulb: Data.Lightbulb = test_context.lightbulbs[light_id]

    observable: RxObservable = lightbulb.light_state.pipe(
        RxOp.timeout(timeout_sec),
        RxOp.observe_on(scheduler=AsyncIOScheduler(loop)),
        RxOp.take_while(take_while_state, inclusive=True),
    )

    observable.subscribe(on_next=lambda i: print(f"on_next: {i}"),
                         on_error=lambda e: print(f"on_error: {e}"),
                         on_completed=lambda: print("on_completed"),
                         scheduler=AsyncIOScheduler(loop))

    awaitables.append(observable)
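
Example 3 hinges on inclusive=True: the first element that fails the predicate (the light's final state) is still emitted before the stream completes. A minimal illustration of the flag:

import rx
from rx import operators as ops

# With inclusive=True the first failing element (3) is emitted as well.
rx.from_([1, 2, 3, 4]).pipe(
    ops.take_while(lambda i: i < 3, inclusive=True)
).subscribe(on_next=print, on_completed=lambda: print("on_completed"))
# prints 1, 2, 3 and then "on_completed"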
Example 4
        def factory():
            def predicate(x):
                nonlocal invoked

                invoked += 1
                return is_prime(x)
            return xs.pipe(ops.take_while(predicate))
Example 5
def _solve(print=print):
    total = rx.from_iterable(primes()).pipe(
        ops.take_while(lambda p: p < 2_000_000),
        ops.sum(),
    ).run()
    print(f'The sum of primes below 2m: {total}')
    return True
Example 7
def collect(messages: Observable[IbApiMessage],
            request_id: int) -> Observable[AccountSummary]:
    return messages.pipe(
        _.filter(
            lambda m: _is_account_summary(m) or _is_account_summary_end(m)),
        _.filter(lambda m: _request_id(m) == request_id),
        _.take_while(lambda m: not _is_account_summary_end(m)),
        _.map(_unpack_account_summary),
        _.reduce(lambda summary, data: _add_data_to_summary(data, summary),
                 AccountSummary()))
Example 8
    def __init__(self, client: MQTTClientWrapper, name="ds18", config=None):
        super().__init__(client, None, config=config, name=name)
        self.topic_base = self.config["ds18"]["topicBase"]
        self.topic = self.topic_base + "/temperature/+"
        self.can_subscribe.pipe(
            ops.take_while(
                lambda x: not x
            ),  # while we cannot subscribe yet, keep taking (should not happen)
        ).subscribe(lambda x: self.subscribe_on_broker(self.topic))
        self.sensors = self.config["ds18"]["sensors"]
Example 9
        def factory():
            def predicate(x):
                nonlocal invoked

                invoked += 1
                if invoked == 3:
                    raise Exception(ex)

                return is_prime(x)
            return xs.pipe(ops.take_while(predicate))
Example 10
def collect(
        messages: Observable[IbApiMessage], request_id: int,
        data_type: types.HistoricalDataType) -> Observable[List[types.BarData]]:
    return messages.pipe(
        _.filter(
            lambda m: _is_historical_data(m) or _is_historical_data_end(m)),
        _.filter(lambda m: _request_id(m) == request_id),
        _.take_while(lambda m: not _is_historical_data_end(m)),
        _.map(lambda m: _unpack_historical_data(m, data_type)),
        _.reduce(lambda bars, bar: [*bars, bar], []),
    )
Example 12
    def download(destination_file):
        return of(create_downloader(destination_file)).pipe(
            # Ensure the destination directory exists before downloading.
            do_action(lambda _: os.makedirs(destination_file, exist_ok=True)),
            flat_map(lambda downloader: forever().pipe(
                map(lambda _: next_chunk(downloader)),
                take_while(lambda progress: progress < 1, inclusive=True),
                map(
                    lambda progress: {
                        'progress': progress,
                        'downloaded_bytes': int(int(file['size']) * progress),
                        'file': file
                    }))))
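
Note that inclusive=True is what lets the chunk whose progress reaches 1 through: without it the stream would complete on the final chunk without ever reporting 100% progress.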
Example 13
    def enqueue(self,
                observable: Observable,
                group: str = 'default-group',
                retries: int = 0,
                description: Optional[str] = None) -> Observable:
        def log_status(status):
            logging.debug(
                str({
                    'WorkQueue': str(self),
                    'group': group,
                    status: description
                }))

        log_status('ENQUEUED')
        output = Subject()
        errors = Subject()
        output_finalized = Subject()

        def handle_error(e, _):
            log_status('FAILED')
            errors.on_next(e)
            return empty()

        def set_output_finalized():
            output_finalized.on_next(True)

        work = of(True).pipe(
            do_action(lambda _: log_status('STARTED')),
            take_until(output_finalized),
            flat_map(lambda _: observable.pipe(
                map(lambda value: of({
                    'value': value,
                    'output': output
                })),
                retry_with_backoff(
                    retries=retries,
                    description='{}.enqueue(group={}, description={})'.format(
                        self, group, description)),
                catch(handler=handle_error), take_until(output_finalized),
                take_until_disposed())),
            concat(of(of({
                'completed': True,
                'output': output
            }))), finally_action(lambda: log_status('COMPLETED')))

        self._queue.on_next({'work': work, 'group': group})

        return output.pipe(observe_on(self.request_scheduler),
                           throw_when(errors),
                           take_while(lambda r: not r.get('completed')),
                           map(lambda r: r.get('value')),
                           finally_action(set_output_finalized))
Example 14
def run(onComplete=lambda: None):
    limit = 10 ** 22
    optimal_thread_count = multiprocessing.cpu_count()
    # Note: pool_scheduler is created but never used in this snippet.
    pool_scheduler = ThreadPoolScheduler(optimal_thread_count)

    count = rx.from_iterable(pandigital_step_numbers()).pipe(
        ops.take_while(lambda n: n < limit),
        ops.count(),
    ).run()

    onComplete(count)
Example 15
    def monitor():
        def is_running(state):
            return state in [Task.State.UNSUBMITTED, Task.State.READY, Task.State.RUNNING]

        return interval(_MONITORING_FREQUENCY).pipe(
            flat_map(lambda _: execute(
                action=load_status,
                description='monitor task ' + str(task))
            ),
            flat_map(extract_state),
            distinct_until_changed(),
            take_while(is_running, inclusive=True)
        )
Example 16
    def get_pipeline(seed: int, name: str, print_output=False) -> rx.pipe:
        """Returns an rx pipeline that parses the raw output from MCERD
        into dictionaries.

        Each dictionary contains the same keys. If certain value cannot be
        parsed from the output (i.e. the raw line does not contain it),
        either the value from the previous dictionary is carried over or a
        default value is used.

        Args:
            seed: seed used in the MCERD process
            name: name of the process (usually the name of the recoil element)
            print_output: whether output is printed to console
        """
        # TODO add handling for fatal error messages
        return rx.pipe(
            ops.map(lambda x: x.strip()),
            MCERD._conditional_printer(
                print_output, f"simulation process with seed {seed}."),
            observing.reduce_while(
                reducer=str_reducer,
                start_from=lambda x: x == MCERD._INIT_STARTS,
                end_at=lambda x: x == MCERD._INIT_ENDS),
            observing.reduce_while(
                reducer=str_reducer,
                start_from=lambda x: x.startswith(MCERD._FINAL_STARTS),
                end_at=lambda x: x.startswith(MCERD._FINAL_ENDS)),
            ops.scan(lambda acc, x: {
                MCERD.PRESIM:
                acc[MCERD.PRESIM] and x != MCERD.PRESIM_FINISHED,
                **parse_raw_output(x,
                                   end_at=lambda y: y.startswith(MCERD._FINAL_STARTS))
            },
                     seed={MCERD.PRESIM: True}),
            ops.scan(lambda acc, x: dict_accumulator(acc,
                                                     x,
                                                     default={
                                                         MCERD.SEED: seed,
                                                         MCERD.NAME: name,
                                                         MCERD.MSG: "",
                                                         MCERD.IS_RUNNING: True
                                                     }),
                     seed={
                         MCERD.CALCULATED: 0,
                         MCERD.TOTAL: 0,
                         MCERD.PERCENTAGE: 0
                     }),
            ops.take_while(lambda x: x[MCERD.IS_RUNNING], inclusive=True))
Example 17
        def subscriptionFunction(observer, scheduler):
            message = RxImpMessage(topic, 0, RxImpMessage.STATE_SUBSCRIBE,
                                   json.dumps(payload))

            publisher: Subject = Subject()
            lock = Lock()
            currentCount = 0
            queue = []

            def orderingSubscriber(msg: RxImpMessage):
                nonlocal currentCount
                nonlocal queue
                with lock:
                    currentCount += 1
                    queue.append(msg)
                    queue.sort(key=lambda x: x.count)
                    toNext = [m for m in queue if m.count < currentCount]
                    queue = [m for m in queue if m.count >= currentCount]
                    for m in toNext:
                        publisher.on_next(m)

            def isRelevant(msg: RxImpMessage):
                return msg.rx_state in (RxImpMessage.STATE_COMPLETE,
                                        RxImpMessage.STATE_ERROR,
                                        RxImpMessage.STATE_NEXT)

            secondSubscription: Disposable = self._in.pipe(
                filter(lambda x: x.id == message.id),
                filter(lambda x: isRelevant(x)),
                map(lambda x: self._checkError(x)),
            ).subscribe(on_next=lambda x: orderingSubscriber(x),
                        on_error=lambda err: publisher.on_error(err))

            subscription: Disposable = publisher.pipe(
                take_while(lambda x: self._checkNotComplete(x)),
                map(lambda x: json.loads(x.payload)),
            ).subscribe(observer)
            self._out.on_next(message)

            def signalUnsubscribe():
                msg = RxImpMessage(message.topic,
                                   0,
                                   RxImpMessage.STATE_DISPOSE,
                                   None,
                                   id=message.id)
                secondSubscription.dispose()
                subscription.dispose()
                self._out.on_next(msg)

            return lambda: signalUnsubscribe()
Example 18
        def download_from_drive(destination_file):
            def create_downloader():
                request = get_service(credentials).files().get_media(
                    fileId=f['id'])
                return MediaIoBaseDownload(fd=destination_file,
                                           request=request,
                                           chunksize=CHUNK_SIZE)

            downloader = create_downloader()
            return forever().pipe(
                map(lambda _: next_chunk(downloader)),
                take_while(lambda progress: progress < 1, inclusive=True),
                map(
                    lambda progress: {
                        'downloaded_bytes': int(total_bytes * progress),
                        'total_bytes': total_bytes,
                        'file': f
                    }))
Example 19
        def download_from_drive(destination_file):
            def create_downloader():
                request = get_service(credentials).files().get_media(
                    fileId=f['id'])
                return MediaIoBaseDownload(fd=destination_file,
                                           request=request,
                                           chunksize=CHUNK_SIZE)

            downloader = create_downloader()
            return forever().pipe(
                map(lambda _: next_chunk(downloader)),
                take_while(lambda p: p < 1, inclusive=True),
                flat_map(lambda p: progress(
                    default_message=
                    'Downloaded {downloaded_files} of {total_files} files ({downloaded} of {total})',
                    message_key='tasks.drive.download_folder',
                    downloaded_bytes=int(total_bytes * p),
                    downloaded=format_bytes(int(total_bytes * p)),
                    total_bytes=total_bytes,
                    total=format_bytes(total_bytes),
                    file=f)))
Example 20
    def running_check(process: subprocess.Popen, first_check: float,
                      interval: float) -> rx.Observable:
        """Periodically checks if the given process is running.

        Args:
            process: process to be monitored
            first_check: seconds until the first check
            interval: interval between each check

        Return:
            rx.Observable that fires dictionaries after each check
        """
        return rx.timer(first_check, interval).pipe(
            # TODO change this to run at an increasing interval, i.e:
            #       - first check after 0.0 seconds,
            #       - second check after 0.2 seconds,
            #       - third after 1.0, ... etc.
            #   MCERD is likely to crash early (?) so it makes sense to
            #   run the check more frequently at the beginning.
            ops.map(lambda _: {MCERD.IS_RUNNING: MCERD.is_running(process)}),
            ops.take_while(lambda x: x[MCERD.IS_RUNNING], inclusive=True))
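
As in Example 16, the take_while here is inclusive, so the first dictionary reporting MCERD.IS_RUNNING as False is still emitted and subscribers observe the terminal state before the stream completes.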
Example 21

"""Error Handling"""
op.catch()
op.retry()

"""Utility"""
op.delay()
op.materialize()
op.time_interval()
op.timeout()
op.timestamp()

"""Conditional and Boolean"""
op.all()
op.contains()
op.default_if_empty()
op.sequence_equal()
op.skip_until()
op.skip_while()
op.take_until()
op.take_while()

"""Connectable"""
op.publish()
op.ref_count()
op.replay()

"""Combining"""
op.combine_latest()
op.merge()
op.start_with()
op.zip()
Example 22

import rx
import rx.operators as ops

numbers = rx.from_([1, 2, 3, 4])

numbers.pipe(ops.take_while(lambda i: i < 2)).subscribe(
    on_next=lambda i: print("on_next {}".format(i)),
    on_error=lambda e: print("on_error: {}".format(e)),
    on_completed=lambda: print("on_completed"))
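
Only 1 makes it through, because 2 already fails the predicate; the output is:

on_next 1
on_completed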
Example 23
import rx
from rx import operators as ops


rx.from_(
    [2, 5, 21, 5, 2, 1, 5, 63, 127, 12]
).pipe(
    ops.take_while(lambda i: i < 100)
).subscribe(
    on_next=lambda i: print(i),
    on_completed=lambda: print("Done!")
)
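
Here 127 is the first value to fail the predicate, so neither it nor the trailing 12 is emitted; the output is 2, 5, 21, 5, 2, 1, 5, 63 followed by "Done!".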

Example 24
    def run(self,
            print_output=True,
            ct: Optional[CancellationToken] = None,
            poll_interval=10,
            first_check=0.2,
            max_time=None,
            ct_check=0.2) -> rx.Observable:
        """Starts the MCERD process.

        Args:
            print_output: whether MCERD output is also printed to console
            ct: token that is checked periodically to see if
                the simulation should be stopped.
            poll_interval: seconds between each check to see if the simulation
                process is still running.
            first_check: seconds until the first time mcerd is polled.
            max_time: maximum running time in seconds.
            ct_check: how often cancellation is checked in seconds.

        Return:
            observable stream where each item is a dictionary. All dictionaries
            contain the same keys.
        """
        # Create files necessary to run MCERD
        self.create_mcerd_files()
        cmd = self.get_command()
        ct = ct or CancellationToken()

        process = subprocess.Popen(cmd,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE,
                                   cwd=gf.get_bin_dir(),
                                   universal_newlines=True)

        errs = rx.from_iterable(iter(process.stderr.readline, ""))
        outs = rx.from_iterable(iter(process.stdout.readline, ""))

        is_running = MCERD.running_check(process, first_check, poll_interval)
        cancellation = MCERD.cancellation_check(process, ct_check, ct)

        if max_time is not None:
            timeout = MCERD.timeout_check(process, max_time, ct)
        else:
            timeout = rx.empty()

        thread_count = multiprocessing.cpu_count()
        pool_scheduler = ThreadPoolScheduler(thread_count)

        merged = rx.merge(errs, outs).pipe(
            ops.subscribe_on(pool_scheduler),
            MCERD.get_pipeline(self._seed,
                               self._rec_filename,
                               print_output=print_output),
            ops.combine_latest(rx.merge(is_running, cancellation, timeout)),
            ops.starmap(
                lambda x, y: {
                    **x,
                    **y, MCERD.IS_RUNNING:
                    x[MCERD.IS_RUNNING] and y[MCERD.IS_RUNNING]
                }),
            ops.take_while(lambda x: x[MCERD.IS_RUNNING], inclusive=True),
        )

        # on_completed does not get called if the take_while condition is
        # inclusive, so this is a quick fix to get the files deleted.
        # TODO surely there is a way to get on_completed called?
        def del_if_not_running(x):
            if not x[MCERD.IS_RUNNING]:
                self.delete_unneeded_files()

        return merged.pipe(
            ops.do_action(on_next=del_if_not_running,
                          on_error=lambda _: self.delete_unneeded_files(),
                          on_completed=self.delete_unneeded_files))
Example 25
    def enqueue(self,
                observable: Observable,
                group: str = 'default-group',
                retries: int = 0,
                description: Optional[str] = None):
        description = description or str(observable)
        key = '{}({})'.format(description, random.random())

        def log_status(status):
            logging.debug(
                str({
                    'WorkQueue': str(self),
                    'group': group,
                    'key': key,
                    status: description
                }))

        log_status('ENQUEUED')
        error: Optional[Exception] = None

        def handle_error(e):
            log_status('FAILED')
            nonlocal error
            error = e
            return of({'key': key, 'error': e})

        def throw_if_error(r):
            if error:
                return throw(error)
            else:
                return of(r)

        request_disposed = Subject()

        def dispose_request():
            request_disposed.on_next(True)

        request = of(True).pipe(
            do_action(lambda _: log_status('STARTED')),
            flat_map(
                lambda _: observable.pipe(
                    map(lambda value: {
                        'key': key,
                        'value': value
                    }),
                    retry_with_backoff(retries=retries,
                                       description=
                                       '{}.enqueue(group={}, description={})'.
                                       format(self, group, description)),
                    catch(handler=lambda e, o: handle_error(e)),
                    take_until(request_disposed),
                    take_until_disposed(),
                ), ),
            concat(
                of({
                    'key': key,
                    'complete': True
                }).pipe(do_action(lambda _: log_status('COMPLETED')))),
        )
        result_stream = self._output.pipe(
            observe_on(self.request_scheduler),
            filter(lambda r: r['key'] == key),
            flat_map(lambda r: throw_if_error(r)),
            take_while(lambda r: not r.get('complete')),
            flat_map(lambda r: of(r.get('value'))),
            finally_action(dispose_request))
        self._requests.on_next({
            'request': request,
            'concurrency_group': group
        })
        return result_stream
Example 26
    def enqueue(self,
                observable: Observable,
                group: str = None,
                retries: int = 0,
                description: Optional[str] = None):
        # Provide a function returning a callable?

        description = description or str(observable)
        key = '{}({})'.format(description, random.random())

        def log_status(status):
            logging.debug(
                str({
                    'WorkQueue': str(self),
                    'group': group,
                    'key': key,
                    status: description
                }))

        log_status('ENQUEUED')
        error: Optional[Exception] = None

        def handle_error(e):
            log_status('FAILED')
            nonlocal error
            error = e
            return of({'key': key, 'error': e})

        def throw_if_error(request):
            if error:
                return throw(error)
            else:
                return of(request)

        def extract_value(value):
            if isinstance(value, Observable):
                return value
            else:
                return of(value)

        request = of(True).pipe(
            do_action(lambda _: log_status('STARTED')),
            flat_map(lambda _: observable.pipe(
                flat_map(extract_value),
                map(lambda value: {
                    'key': key,
                    'value': value
                }),
                retry_with_backoff(
                    retries=retries,
                    description='{}.enqueue(group={}, description={})'.format(
                        self, group, description)),
                catch(handler=lambda e, o: handle_error(e)),
            )),
            concat(
                of({
                    'key': key,
                    'complete': True
                }).pipe(do_action(lambda _: log_status('COMPLETED')))))
        result_stream = self._output.pipe(
            filter(lambda request: request['key'] == key),
            flat_map(lambda request: throw_if_error(request)),
            take_while(lambda request: not request.get('complete')),
            flat_map(lambda request: of(request.get('value'))))
        self._requests.on_next({
            'request': request,
            'concurrency_group': group
        })
        return result_stream