Example #1
    def partition(source: Observable) -> List[Observable]:
        """The partially applied `partition` operator.

        Returns two observables which partition the observations of the
        source by the given function. The first will trigger
        observations for those values for which the predicate returns
        true. The second will trigger observations for those values
        where the predicate returns false. The predicate is executed
        once for each subscribed observer. Both also propagate all
        error observations arising from the source and each completes
        when the source completes.

        Args:
            source: Source observable to partition.

        Returns:
            A list of observables. The first triggers when the
            predicate returns True, and the second triggers when the
            predicate returns False.
        """

        published = source.pipe(ops.publish(), ops.ref_count())
        return [
            published.pipe(ops.filter(predicate)),
            published.pipe(ops.filter(lambda x: not predicate(x)))
        ]
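The function above is the partially applied form behind RxPY's `partition` operator. A minimal usage sketch of the public operator it backs (the even/odd predicate is just an illustration, not from the original):

import rx
from rx import operators as ops

evens, odds = rx.from_([1, 2, 3, 4]).pipe(
    ops.partition(lambda x: x % 2 == 0))
evens.subscribe(lambda x: print('even:', x))
odds.subscribe(lambda x: print('odd:', x))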
Example #2
def main():
    xs = rx.from_marbles("1-2-3-4-5-6-7-9-|").pipe(ops.publish())
    xs.pipe(ops.map(failing), ops.retry()).subscribe(print)

    xs.connect()  # Must connect. Cannot use ref_count() with publish()

    time.sleep(5)
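This snippet assumes a mapper named `failing` that raises for some values, so that ops.retry() re-subscribes to the published source. A minimal sketch of that missing context (the definition below is an assumption, not from the original project):

import time

import rx
from rx import operators as ops


def failing(x):
    # assumed behaviour: fail on one marble value so that retry() kicks in
    if x == '4':
        raise ValueError(x)
    return x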
Example #3
    def _tee_map(source):
        if isinstance(source, rs.MuxObservable):
            connectable = source.pipe(
                ops.publish(),
                rs.cast_as_mux_connectable(),
            )

        else:
            connectable = source.pipe(ops.publish())

        return _process_many(
            *[arg(connectable) for arg in args],
            connectable=connectable,
            zip=zip,
            combine=combine,
        )
Example #5
def run():
    parser = argparse.ArgumentParser(
        prog='entityfactspicturesmetadataharvester',
        description=
        'Reads depiction information (images URLs) from given EntityFacts sheets (as line-delimited JSON records) and retrieves the (Wikimedia Commons file) metadata of these pictures (as line-delimited JSON records).',
        epilog=
        'example: entityfactspicturesmetadataharvester < [INPUT LINE-DELIMITED JSON FILE WITH ENTITYFACTS SHEETS] > [OUTPUT PICTURES METADATA LINE-DELIMITED JSON FILE]',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    args = parser.parse_args()

    if hasattr(args, 'help') and args.help:
        parser.print_usage(sys.stderr)
        exit(-1)

    source = rx.create(push_input)

    source_connectable_obs = source.pipe(op.publish())

    # Wikimedia Commons file metadata harvesting
    do_harvesting(source_connectable_obs, get_metadata_url,
                  METADATA_CONTENT_TYPE,
                  WIKIMEDIA_COMMONS_FILE_METADATA_HARVESTING,
                  METADATA_THREAD_POOL_SCHEDULER)

    source_connectable_obs.connect()
Example #6
    def _train_test_split(source):
        def partition(acc, i):
            if acc is None:
                index = 1
                sampling = sampling_size
            else:
                index = acc[0]
                sampling = acc[1]
            if test_modulus == 0:
                is_test = False
            else:
                is_test = index % test_modulus == 0

            sampling -= 1
            if sampling == 0:
                index += 1
                sampling = sampling_size

            return (index, sampling, i, is_test)

        published = source.pipe(ops.publish(), ops.ref_count())

        return [
            published.pipe(
                ops.scan(partition, seed=None),
                ops.filter(lambda i: i[3] is False),
                ops.map(lambda i: i[2]),
            ),
            published.pipe(
                ops.scan(partition, seed=None),
                ops.filter(lambda i: i[3] is True),
                ops.map(lambda i: i[2]),
            )
        ]
Example #7
        def create():
            def mapper(ys):
                return ys.pipe(
                        ops.zip(ys),
                        ops.map(sum),
                        )

            return xs.pipe(ops.publish(mapper=mapper))
Example #8
        def create():
            def mapper(_xs):
                return _xs.pipe(
                        ops.zip(_xs.pipe(ops.skip(1))),
                        ops.map(sum)
                        )

            return xs.pipe(ops.publish(mapper))
Example #11
def to_hot_observable(
    # subject_factory: Callable[[Optional[rx.typing.Scheduler]], rx.typing.Subject[T2, T3]],
    base_stream: rx.typing.Observable[T1],
) -> rx.typing.Observable[T1]:

    return cast(rx.Observable, base_stream).pipe(
        ops.publish(),
        ops.ref_count(),
    )
Example #12
    def test_publish_multipleconnections(self):
        xs = rx.never()
        ys = xs.pipe(ops.publish())
        connection1 = ys.connect()
        connection2 = ys.connect()
        assert connection1 == connection2
        connection1.dispose()
        connection2.dispose()
        connection3 = ys.connect()
        assert connection1 != connection3
        connection3.dispose()
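The test documents connect() semantics: calling connect() on an already-connected ConnectableObservable returns the existing connection, and a genuinely new connection is only created after the previous one has been disposed.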
Example #14
    def __init__(self, scheduler=None):
        self._observable = rx.interval(
            ObserveConfig.interval,
            scheduler).pipe(ops.map(lambda dummy: get_merge_requests()),
                            ops.retry(), ops.publish(), ops.ref_count())

        self._ready_to_merge = self._observable.pipe(
            ops.map(lambda requests: next((request for request in requests if
                                           is_ready_to_merge(request)), None)),
            ops.start_with(None), ops.distinct_until_changed())

        self._ready_to_merge.subscribe(lambda ready_to_merge: logging.info(
            'Ready to merge: ' + str(ready_to_merge)))

        voted_merge_requests = self._observable.pipe(
            ops.map(_to_voted_merge_requests_set))
        self._new_votes_merge_requests = voted_merge_requests.pipe(
            ops.skip(1), ops.zip(voted_merge_requests),
            ops.map(lambda zipped: zipped[0] - zipped[1]), ops.filter(len),
            ops.map(_to_merge_requests))

        self._new_votes_merge_requests.pipe(
            ops.map(lambda diff_set:
                    [merge_request.get_iid() for merge_request in diff_set])
        ).subscribe(
            lambda ids: logging.info(f'New votes for merge requests: {ids}'))

        awards = self._new_votes_merge_requests.pipe(ops.map(_to_awards_set),
                                                     ops.publish(),
                                                     ops.ref_count(),
                                                     ops.start_with(set()))
        self._new_awards = awards.pipe(
            ops.skip(1), ops.zip(awards),
            ops.map(lambda zipped: zipped[0] - zipped[1]), ops.filter(len),
            ops.flat_map(lambda diff_set: rx.from_iterable(diff_set)),
            ops.map(lambda award_key: award_key.award))

        self._new_awards.subscribe(
            lambda new_award: logging.info('New award: ' + str(new_award)))
Example #15
    def _init_signals(self, incoming_msgs):
        def _is_signal(msg: Message) -> bool:
            return msg.type == DBUS_CONSTANTS.MESSAGE_TYPE_SIGNAL

        def _parse_signal(msg: Message) -> UnresolvedSignal:
            id_ = msg.sender
            service_names = self._get_service_names_of_id(id_)
            return UnresolvedSignal(service_names, id_,
                                    str(msg.path), msg.interface, msg.member,
                                    list(msg.objects))

        return incoming_msgs.pipe(op.filter(_is_signal), op.map(_parse_signal),
                                  op.publish(), op.ref_count())
Example #16
    def __init__(self, inObs: Observable, outSubject: Subject):
        """
        Parameters
        ----------
        inObs : Observable<bytes>
            Observable that the instance subscribes to in order to receive data packets. It should emit objects of type bytes.
        outSubject : Subject<bytes>
            Subscribe to the outSubject to publish messages (i.e. send them to the receiver).
        """
        super().__init__()
        self._in: Observable = inObs.pipe(map(self._mapIncoming), publish())
        self._in.connect()
        self._out = Subject()
        self._out.pipe(map(self._mapOutgoing)).subscribe(outSubject)
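Note that connect() is invoked in the constructor, so `self._in` starts consuming `inObs` immediately; observers that subscribe later will miss any packets mapped before their subscription.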
Example #17
def test_ref_count():
    print('rx.interval(1).pipe(ops.publish(), ops.ref_count())')
    obs = rx.interval(1).pipe(ops.publish(), ops.ref_count())

    input('Press Enter to subscribe...\n')
    unsub = obs.subscribe(lambda x: print(x))

    input('Press Enter to dispose...\n')
    unsub.dispose()

    input('Press Enter to subscribe again...\n')
    unsub = obs.subscribe(lambda x: print(x))

    input('Press Enter to finish...\n')
    unsub.dispose()
Example #18
def run():
    parser = argparse.ArgumentParser(
        prog='entityfactspicturesharvester',
        description=
        'Reads depiction information (images URLs) from given EntityFacts sheets (as line-delimited JSON records) and retrieves and stores the pictures and thumbnails contained in this information.',
        epilog=
        'example: entityfactspicturesharvester < [INPUT LINE-DELIMITED JSON FILE WITH ENTITYFACTS SHEETS]',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    optional_arguments = parser._action_groups.pop()

    required_arguments = parser.add_argument_group('required arguments')
    required_arguments.add_argument(
        '-entityfacts-pictures-dir',
        type=str,
        help=
        'The directory, where the images and thumbnails from the depiction information in the EntityFacts sheets should be stored',
        dest='entityfacts_pictures_directory',
        required=True)

    parser._action_groups.append(optional_arguments)

    args = parser.parse_args()

    if hasattr(args, 'help') and args.help:
        parser.print_usage(sys.stderr)
        exit(-1)

    entityfacts_pictures_directory = args.entityfacts_pictures_directory

    source = rx.create(push_input)

    depiction_json_connectable_obs = source.pipe(
        op.map(lambda line: get_depiction_json(line)),
        op.filter(lambda value: value is not None), op.publish())

    # picture harvesting
    do_harvesting(depiction_json_connectable_obs, get_picture_url,
                  PICTURE_CONTENT_TYPE, entityfacts_pictures_directory,
                  PICTURE_THREAD_POOL_SCHEDULER)

    # thumbnail harvesting
    do_harvesting(depiction_json_connectable_obs, get_thumbnail_url,
                  THUMBNAIL_CONTENT_TYPE, entityfacts_pictures_directory,
                  THUMBNAIL_THREAD_POOL_SCHEDULER)

    depiction_json_connectable_obs.connect()
Example #19
    def __init__(
        self,
        connection: Connection,
        event_loop: AbstractEventLoop = asyncio.get_event_loop()
    ) -> None:
        """
        DO NOT USE DIRECTLY
        use one of the new_XXX static methods
        """
        super().__init__()

        connection.attach_asyncio(event_loop)

        self._connection = connection
        self._event_loop = event_loop
        self._scheduler = AsyncIOThreadSafeScheduler(event_loop)
        self._disposables = []
        self._own_service_names = set([])  # TODO: make BehaviorSubject?
        self._ve_properties = {}  # TODO: make BehaviorSubject?

        # If you are confused by publish/refcount, just ignore them.
        # It's an optimization technique with no influence on business logic.
        # The code would run perfectly fine without them.

        incoming_msgs = self._observe_messages(connection).pipe(op.publish())

        self._signals = self._init_signals(incoming_msgs)
        self._method_calls = self._init_method_calls(incoming_msgs)

        self._init_bus_item_calls(self._method_calls)

        # TODO: this could be made async...
        # TODO: there is a small chance that a nameownerchanged is fired while we populate the list...
        self._service_name_of_id = {
            self._get_id_of_service(sn): sn
            for sn in self._get_service_names()
        }

        self.observe_service_added, self.observe_service_removed, self.observe_online_services = \
            self._init_remote_services()  # depends on self._signals

        self._disposables.append(incoming_msgs.connect(
            self._scheduler))  # use scheduler (asyncio) to dispatch
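The pattern here: publish() ensures the underlying message source is subscribed exactly once, `_init_signals` and `_init_method_calls` each filter the same shared `incoming_msgs`, and connect(self._scheduler) starts dispatch on the asyncio scheduler only after every consumer is wired up.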
Example #20
import rx
from rx import operators as ops

source = rx.from_(["Alpha", "Beta", "Gamma", "Delta",
                   "Epsilon"]).pipe(ops.publish())

source.subscribe(lambda s: print("Subscriber 1: {0}".format(s)))
source.subscribe(lambda s: print("Subscriber 2: {0}".format(s)))

source.connect()
Example #21
from rx import interval, operators as ops
import time

source = interval(1).pipe(ops.publish())

source.subscribe(lambda s: print("Subscriber 1: {0}".format(s)))
source.connect()

# sleep 5 seconds, then add another subscriber
time.sleep(5)
source.subscribe(lambda s: print("Subscriber 2: {0}".format(s)))

input("Press any key to exit\n")
Example #22
def main(cfg=Configuration):
    pygame.init()
    background = display.set_mode((cfg.S_WIDTH, cfg.S_HEIGHT))
    background.fill(cfg.S_COLOR)
    display.set_caption(cfg.S_CAPTION)
    draw.polygon(background, cfg.B_LINE_COLOR, points=cfg.W_VERTICES, width=1)

    event_loop = EventLoop(cfg.FPS)
    event_stream = create(event_loop).pipe(ops.publish())

    heap = Heap(background)
    floor = Boundary(cfg.FLOOR_DIM, heap)
    right_wall = Boundary(cfg.W_RIGHT_DIM, heap)
    left_wall = Boundary(cfg.W_LEFT_DIM, heap)

    shapes = yield_random([
        shpf for shpn, shpf in tile_shapes.__dict__.items() if 'make' in shpn
    ])
    surfaces = yield_random_surface(
        cfg.ASSET_PATH,
        [path for path in os.listdir('./assets') if 'brick.png' in path],
        brick_dim=cfg.BRICK_DIM)

    tile = Tile(background)
    initial_rect = pygame.Rect((cfg.W_CENTER, cfg.CEIL), cfg.BRICK_DIM)
    bricks = brick_factory(surfaces, shapes, cfg.FPS, initial_rect)
    tile_factory: Observer = TileFactory(tile, bricks)

    layers = sprite.Group()
    layers.add(*[
        Layer((cfg.W_LEFT, cfg.CEIL + i * cfg.BRICK_HEIGHT), cfg.L_DIMS, i)
        for i in range(cfg.DROP_HEIGHT)
    ])

    # convenience variables for collision detection
    left, bottom, right = ((-1, 0), (0, 1), (1, 0))

    event_stream.pipe(
        ops.filter(lambda x: x.event.type == pygame.QUIT))\
        .subscribe(on_next=lambda x: sys.exit())

    event_stream.pipe(
        ops.filter(lambda x: x.event.type == NEWTILE)) \
        .subscribe(tile_factory)

    event_stream.pipe(
        ops.filter(lambda x: x.event.type == NEWTILE)) \
        .subscribe(on_next=handle_fall(tile, cfg.FALL_SPEED))

    event_stream.pipe(
        ops.filter(lambda x: x.event.type == NEWTILE),
        ops.map(lambda x: layers),
        ops.map(heap.did_collide((0, 0))),
        ops.map(reduce_dictionary_by_value(
            lambda arr: len(arr) == cfg.L_NUM_BRICKS)),
        ops.filter(lambda x: bool(x)),
        ops.map(lambda x: (x, heap))) \
        .subscribe(on_next=handle_remove_bricks)

    event_stream.pipe(
        ops.filter(lambda x: x.event.type == pygame.KEYDOWN),
        ops.filter(lambda x: x.event.key == pygame.K_s)) \
        .subscribe(on_next=handle_fall(tile, cfg.DIVE_SPEED))

    event_stream.pipe(
        ops.filter(lambda x: x.event.type == pygame.KEYDOWN),
        ops.filter(lambda x: x.event.key == pygame.K_a),
        ops.map(lambda x: tile),
        ops.filter(lambda x: not bool(heap.did_collide(left)(x))))\
        .subscribe(on_next=handle_move(-cfg.HORIZONTAL_SPEED))

    event_stream.pipe(
        ops.filter(lambda x: x.event.type == pygame.KEYDOWN),
        ops.filter(lambda x: x.event.key == pygame.K_d),
        ops.map(lambda x: tile),
        ops.filter(lambda x: not bool(heap.did_collide(right)(x))))\
        .subscribe(on_next=handle_move(cfg.HORIZONTAL_SPEED))

    event_stream.pipe(
        ops.filter(lambda x: x.event.type == pygame.KEYDOWN),
        ops.filter(lambda x: x.event.key == pygame.K_SPACE))\
        .subscribe(on_next=handle_rotation((cfg.W_LEFT, cfg.CEIL), cfg.BRICK_WIDTH, tile, heap))

    event_stream.pipe(
        ops.filter(lambda x: x.event.type == pygame.KEYUP)) \
        .subscribe(on_next=handle_fall(tile, cfg.FALL_SPEED))

    event_stream.pipe(
        ops.map(lambda x: tile),
        ops.filter(lambda x: bool(heap.did_collide(bottom)(x)))) \
        .subscribe(on_next=handle_landing(heap))

    event_stream.pipe(
        ops.map(lambda x: tile),
        ops.filter(lambda x: bool(heap.did_collide(right)(x))),
        ops.filter(lambda x: x.v_x > 0)) \
        .subscribe(on_next=handle_stop)

    event_stream.pipe(
        ops.map(lambda x: tile),
        ops.filter(lambda x: bool(heap.did_collide(left)(x))),
        ops.filter(lambda x: x.v_x < 0)) \
        .subscribe(on_next=handle_stop)

    event_stream.pipe(
        ops.map(lambda x: x.frame)) \
        .subscribe(on_next=lambda x: [actor.update(x) for actor in (tile, heap)])

    event_stream.subscribe(on_next=print)

    event.post(event.Event(NEWTILE))
    event_stream.connect()
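Every game rule above is an independent filtered subscription on one published event stream; connect() is deliberately the last call, after all rules are wired and the initial NEWTILE event is posted, so every subscription observes the stream from the very first event.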
Example #23
import rx
from rx import operators as ops


def row_to_dict(row):
    return {
        'id': int(row[0]),
        'filename': row[1],
        'lines': int(row[2]),
        'size': int(row[3])
    }


files = ['test.csv', 'test2.csv']
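# read_csv is assumed here (defined elsewhere in the original project) to yield the parsed rows of the named CSV file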

source = rx.from_([rx.from_(read_csv(filename))
                   for filename in files]).pipe(ops.merge_all(),
                                                ops.map(row_to_dict))

published = source.pipe(ops.publish())


def print_row(row):
    print(
        'File: {filename} has {lines} lines and its size is {size} kb'.format(
            **row))


def print_group(group):
    return group.subscribe(print_row)


# Subscribe in two places. #1
maximum = published.pipe(ops.max(lambda a, b: a['lines'] - b['lines']))
maximum.subscribe(lambda row: print(
    # the message text is an assumption; the original snippet is truncated here
    'Largest file: {filename} with {lines} lines'.format(**row)))

published.connect()
Example #24
def main(
    base_dir: Path = "",
    storage_dir: Path = "",
    filter_regex: str = r".*(?:jpg|JPG|JPEG|jpeg)$",
    copy_only: bool = False,
    dry_run: bool = False,
) -> None:
    """Organise image files from one location to another.

    This application allows you to specify a base directory from which to
    recursively search for image files, and to organise those files, based on
    the date they were taken,into a collection of year and month folders.  The
    application will respect albums which already exist based on the presence
    of an album name.

    By setting the --copy-only flag, this application will copy, rather than
    the default move, files when organising them.

    Arguments:
        base_dir: The location from which the application should search for
            image files.

        storage_dir: The location from which the application should create the
            archive of organised files.

        filter_regex: The python Regular Expression used to select files to
            operate on.

        copy_only: A flag to request that we make copies of files, rather than
            moving them.

        dry_run: A flag to print proposed changes only, don't actually do
            anything.

    """
    operation_complete = Event()
    operation_failed = Event()

    if not storage_dir:
        storage_dir = base_dir

    worker_pool = ThreadPoolScheduler(3)
    failed_results: List[FailedTarget] = []

    # Use this to pull errors out of the stream.
    failed_record_filter = partial(filter_errors,
                                   error_collection=failed_results)

    # Identify targets
    file_listing_shared = get_files(base_dir, filter_regex, worker_pool).pipe(
        operators.filter(failed_record_filter),
        operators.publish(),
    )

    # Load targets from disk
    loaded_files = load_file_content(file_listing_shared).pipe(
        operators.filter(failed_record_filter), )

    enriched_files = loaded_files.pipe(
        generate_file_metadata,
        operators.filter(failed_record_filter),
        generate_image_metadata,
        operators.filter(failed_record_filter),
    )
    #  hashed_files = generate_file_metadata(file_listing)
    #  files_with_metadata = generate_image_metadata(hashed_files)

    files_with_move_path = generate_move_path(
        enriched_files,
        storage_dir).pipe(operators.filter(failed_record_filter), )

    if dry_run:
        files_with_move_path.subscribe(
            on_next=dry_run_print,
            on_error=lambda err: handle_error(err, operation_failed),
            on_completed=operation_complete.set,
        )

        file_listing_shared.connect()

        while not any(
            (operation_complete.is_set(), operation_failed.is_set())):
            typer.echo("Waiting for processing to complete.", err=True)
            sleep(1)

        typer.echo(
            f"Encountered {len(failed_results)} Records that failed to process:"
        )
        for fail in failed_results:
            typer.secho(fail, fg=typer.colors.RED)

        typer.echo("Operation completed.")

        return

    moved_files = files_with_move_path.pipe(
        operators.map(
            lambda target: fo.migrate_file_target(target, copy_only)),
        operators.filter(failed_record_filter),
        operators.map(fo.clear_empty_directories),
        operators.filter(failed_record_filter),
    )

    moved_files.subscribe(
        on_next=lambda target: typer.echo(
            f"{'Copied' if copy_only else 'Moved'} "
            f"{target.file_path} to {target.target_move_path}.", ),
        on_error=lambda err: handle_error(err, operation_complete),
        on_completed=operation_complete.set,
    )

    file_listing_shared.connect()

    while not any((operation_complete.is_set(), operation_failed.is_set())):
        typer.echo("Waiting for processing to complete.", err=True)
        sleep(1)

    typer.echo("Operation completed.")

    typer.echo(
        f"Encountered {len(failed_results)} Records that failed to process:")
    for fail in failed_results:
        typer.secho(fail, fg=typer.colors.RED)
Example #25
import rx
from rx import operators as ops

numbers = rx.from_iterable(i for i in range(10)).pipe(ops.publish())

numbers.pipe(ops.filter(lambda x: x % 2 == 0)).subscribe(
    lambda x: print(f"Even:   {x}"))

numbers.pipe(ops.filter(lambda x: x % 2 == 1)).subscribe(
    lambda x: print(f"Odd:    {x}"))

numbers.pipe(ops.filter(lambda x: x % 3 == 0)).subscribe(
    lambda x: print(f"Triple: {x}"))

numbers.subscribe(lambda x: print("---------"))
numbers.connect()
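Because the source is published, all four subscriptions share a single pass over the range(10) generator, and nothing is emitted until connect() is called.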
Example #26
import rx
import rx.operators as ops

numbers = rx.from_([1, 2, 3])
pub_numbers = numbers.pipe(ops.publish())

pub_numbers.subscribe(
    on_next=lambda i: print("item: {}".format(i)),
    on_error=lambda e: print("error: {}".format(e)),
    on_completed=lambda: print("completed")
)

pub_numbers.subscribe(
    on_next=lambda i: print("item: {}".format(i)),
    on_error=lambda e: print("error: {}".format(e)),
    on_completed=lambda: print("completed")
)

pub_numbers.connect()
Example #27
    def action0(scheduler, state):
        ys[0] = xs.pipe(ops.publish())
Example #28
import rx
from rx import operators as ops
from random import randint


three_emissions = rx.range(0, 3)

three_emissions_ints = three_emissions.pipe(
    ops.map(lambda i: randint(1, 10000)),
    ops.publish()
)

three_emissions_ints.subscribe(lambda s: print("Subscriber 1: {0}".format(s)))
three_emissions_ints.subscribe(lambda s: print("Subscriber 2: {0}".format(s)))

three_emissions_ints.connect()

# The random numbers are generated before publish, so both subscribers share the same values.
Example #30
import rx
from rx import operators as ops
import time


source = rx.interval(1.0).pipe(ops.publish())

source.subscribe(lambda s: print("Subscriber 1: {0}".format(s)))
source.connect()

# sleep 5 seconds, then add another subscriber
time.sleep(5)
source.subscribe(lambda s: print("Subscriber 2: {0}".format(s)))

input("Press any key to exit\n")

Example #31
import rx
from rx import operators as ops
from random import randint


three_emissions = rx.range(0, 3).pipe(ops.publish())

three_emissions_ints = three_emissions.pipe(
    ops.map(lambda i: randint(1, 10000))
)

three_emissions_ints.subscribe(lambda s: print("Subscriber 1: {0}".format(s)))
three_emissions_ints.subscribe(lambda s: print("Subscriber 2: {0}".format(s)))

three_emissions.connect()

# Subscribers 1 and 2 receive different random numbers.
# Is there a way for them to receive the same random numbers?
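# (Yes: apply publish() after the map, as in Example #28, so the mapped values themselves are multicast.)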
Example #32
    checkers_observable = rx.empty()

    if settings.INSTANCE_ON_AWS:
        logger.info(f"instance_type: {get_instance_type()}"
                    )  # Assumes being run on AWS EC2 instance
        if args.is_spot_instance or settings.AWS_ENABLE_SPOTINSTANCE_STATE_LOGGING:
            logger.info(f"Start spot_instance_observable monitoring...")
            spot_instance_observable = spot_instance_check_observable()
            checkers_observable = checkers_observable.pipe(
                ops.merge(spot_instance_observable))
    elif args.is_spot_instance or settings.AWS_ENABLE_SPOTINSTANCE_STATE_LOGGING:
        logger.warning(
            f'"--spot-instance" flag or AWS_ENABLE_SPOTINSTANCE_STATE_LOGGING envar given, '
            f"but INSTANCE_ON_AWS == False, logging NOT performed!")

    checkers_observable.pipe(ops.publish()).connect(scheduler=scheduler)
    checkers_observable.subscribe(CheckersObserver())

    input_values = None
    if args.inputs:
        input_values = [v.strip() for v in args.inputs.split(",")]

    summary = execute_prediction(
        predictor=external_predictor,
        input_ctx_manager=args.input_ctx_manager,
        input_settings=ctxmgr_settings["input_settings"],
        output_ctx_manager=args.output_ctx_manager,
        output_settings=ctxmgr_settings["output_settings"],
        inputs=input_values,
    )
    logger.info(f"execution summary: {summary}")
from rx import interval, operators as ops
import time

source = interval(1).pipe(ops.publish(), ops.ref_count())

source.subscribe(lambda s: print("Subscriber 1: {0}".format(s)))

# sleep 5 seconds, then add another subscriber
time.sleep(5)
source.subscribe(lambda s: print("Subscriber 2: {0}".format(s)))

input("Press any key to exit\n")
op.catch()
op.retry()

"""Utility"""
op.delay()
op.materialize()
op.time_interval()
op.timeout()
op.timestamp()

"""Conditional and Boolean"""
op.all()
op.contains()
op.default_if_empty()
op.sequence_equal()
op.skip_until()
op.skip_while()
op.take_until()
op.take_while()

"""Connectable"""
op.publish()
op.ref_count()
op.replay()

"""Combining"""
op.combine_latest()
op.merge()
op.start_with()
op.zip()
Example #35
import rx
from rx import operators as ops
import time

source = rx.interval(1.0).pipe(ops.publish(), ops.ref_count())

source.subscribe(lambda s: print("Subscriber 1: {0}".format(s)))

# sleep 5 seconds, then add another subscriber
time.sleep(5)
source.subscribe(lambda s: print("Subscriber 2: {0}".format(s)))

input("Press any key to exit\n")