Example #1
# Imports needed by this RxPY 3 excerpt
from typing import Any, Callable

from rx import operators as ops, pipe
from rx.core import Observable
from rx.internal.utils import NotSet


def _reduce(accumulator: Callable[[Any, Any], Any], seed: Any = NotSet) -> Callable[[Observable], Observable]:
    """Applies an accumulator function over an observable sequence,
    returning the result of the aggregation as a single element in the
    result sequence. The specified seed value is used as the initial
    accumulator value.

    For aggregation behavior with incremental intermediate results, see
    `scan()`.

    Examples:
        >>> res = reduce(lambda acc, x: acc + x)
        >>> res = reduce(lambda acc, x: acc + x, 0)

    Args:
        accumulator: An accumulator function to be
            invoked on each element.
        seed: Optional initial accumulator value.

    Returns:
        An operator function that takes an observable source and returns
        an observable sequence containing a single element with the
        final accumulator value.
    """


    if seed is not NotSet:
        initial = ops.start_with(seed)
        scanner = ops.scan(accumulator, seed=seed)

        return pipe(scanner, initial, ops.last())

    return pipe(ops.scan(accumulator), ops.last())
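A quick usage sketch (not from the original file) of the public ops.reduce operator that this helper implements; note that passing a seed also makes empty sources yield a value:

import rx
from rx import operators as ops

rx.of(1, 2, 3, 4).pipe(
    ops.reduce(lambda acc, x: acc + x),
).subscribe(print)  # 10

rx.empty().pipe(
    ops.reduce(lambda acc, x: acc + x, 0),
).subscribe(print)  # 0: with a seed, an empty source still emits the seed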
Example #3
    def _train_test_split(source):
        def partition(acc, i):
            # Accumulator state for ops.scan: (index, samples left in the
            # current block, item, is_test). The seed is None, so the first
            # call initializes the counters.
            if acc is None:
                index = 1
                sampling = sampling_size
            else:
                index = acc[0]
                sampling = acc[1]
            # Every test_modulus-th block is routed to the test set.
            if test_modulus == 0:
                is_test = False
            else:
                is_test = index % test_modulus == 0

            sampling -= 1
            if sampling == 0:
                index += 1
                sampling = sampling_size

            return (index, sampling, i, is_test)

        published = source.pipe(ops.publish(), ref_count())

        return [
            published.pipe(
                ops.scan(partition, seed=None),
                ops.filter(lambda i: i[3] is False),
                ops.map(lambda i: i[2]),
            ),
            published.pipe(
                ops.scan(partition, seed=None),
                ops.filter(lambda i: i[3] is True),
                ops.map(lambda i: i[2]),
            )
        ]
Example #4
def _scan(source):
    if isinstance(source, rs.MuxObservable):
        return scan_mux(accumulator, seed, reduce)(source)
    else:
        _seed = seed() if callable(seed) else seed
        if reduce is False:
            return rx.pipe(
                ops.scan(accumulator, _seed),
                ops.default_if_empty(default_value=_seed),
            )(source)
        else:
            return rx.pipe(
                ops.scan(accumulator, _seed),
                ops.last_or_default(default_value=_seed),
            )(source)
Example #5
    def get_pipeline(seed: int, name: str, print_output=False) -> rx.pipe:
        """Returns an rx pipeline that parses the raw output from MCERD
        into dictionaries.

        Each dictionary contains the same keys. If a certain value cannot
        be parsed from the output (i.e. the raw line does not contain it),
        either the value from the previous dictionary is carried over or a
        default value is used.

        Args:
            seed: seed used in the MCERD process
            name: name of the process (usually the name of the recoil element)
            print_output: whether output is printed to console
        """
        # TODO add handling for fatal error messages
        return rx.pipe(
            ops.map(lambda x: x.strip()),
            MCERD._conditional_printer(
                print_output, f"simulation process with seed {seed}."),
            observing.reduce_while(
                reducer=str_reducer,
                start_from=lambda x: x == MCERD._INIT_STARTS,
                end_at=lambda x: x == MCERD._INIT_ENDS),
            observing.reduce_while(
                reducer=str_reducer,
                start_from=lambda x: x.startswith(MCERD._FINAL_STARTS),
                end_at=lambda x: x.startswith(MCERD._FINAL_ENDS)),
            ops.scan(lambda acc, x: {
                MCERD.PRESIM:
                acc[MCERD.PRESIM] and x != MCERD.PRESIM_FINISHED,
                **parse_raw_output(x,
                                   end_at=lambda y: y.startswith(MCERD._FINAL_STARTS))
            },
                     seed={MCERD.PRESIM: True}),
            ops.scan(lambda acc, x: dict_accumulator(acc,
                                                     x,
                                                     default={
                                                         MCERD.SEED: seed,
                                                         MCERD.NAME: name,
                                                         MCERD.MSG: "",
                                                         MCERD.IS_RUNNING: True
                                                     }),
                     seed={
                         MCERD.CALCULATED: 0,
                         MCERD.TOTAL: 0,
                         MCERD.PERCENTAGE: 0
                     }),
            ops.take_while(lambda x: x[MCERD.IS_RUNNING], inclusive=True))
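A minimal, self-contained sketch (not part of the MCERD code) of the carry-over technique the pipeline above relies on: ops.scan merges each parsed dict into the previous one, so keys missing from a given line keep their most recent value:

import rx
from rx import operators as ops

updates = [{"calculated": 1}, {"total": 10}, {"calculated": 2}]
rx.from_iterable(updates).pipe(
    ops.scan(lambda acc, x: {**acc, **x}, seed={"calculated": 0, "total": 0}),
).subscribe(print)
# {'calculated': 1, 'total': 0}
# {'calculated': 1, 'total': 10}
# {'calculated': 2, 'total': 10}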
Example #6
    def _export_year(geometry, year_start, year_end, export_description,
                     year_dir):
        stack = _create_stack(geometry, year_start, year_end)
        initial_progress = of({
            'exported': 0,
            'stack_bytes': 0,
            'dates_bytes': 0,
            'downloaded': 0,
            'processed': 0
        })

        def aggregate_downloaded_bytes(p):
            return {
                'exported': p['exported'],
                'downloaded': p['downloaded'],
                'downloaded_bytes': p['stack_bytes'] + p['dates_bytes'],
                'processed': p['processed']
            }

        return concat(
            initial_progress,
            merge(
                _export_and_download_stack(stack, export_description,
                                           year_dir),
                _export_and_download_dates(stack, export_description,
                                           year_dir)), _process_year(year_dir),
            of({'processed': 1})).pipe(scan(lambda acc, p: {
                **acc,
                **p
            }, {}), map(aggregate_downloaded_bytes))
Example #7
    def __init__(self, error_handler: ErrorHandler) -> None:
        if error_handler is None:
            raise ValueError("Argument 'error_handler' is required.")

        super().__init__()

        self._error_handler = error_handler

        self._added_window = Subject()
        self._removed_window = Subject()

        changed_window = rx.merge(
            self._added_window.pipe(ops.map(lambda v: (v, True))),
            self._removed_window.pipe(ops.map(lambda v: (v, False))))

        def on_window_change(windows: Tuple[Window, ...], event: Tuple[Window, bool]):
            (window, added) = event

            if added and window not in windows:
                return windows + (window,)
            elif not added and window in windows:
                return tuple(c for c in windows if c is not window)

            return windows  # ignore duplicate events so the scan state stays a tuple

        # noinspection PyTypeChecker
        self.windows = changed_window.pipe(
            ops.scan(on_window_change, ()), ops.start_with(()), ops.distinct_until_changed())
Example #8
    def __init__(
        self,
        add=None,
        remove=None,
        reindex=None,
        clear=None,
        edit_at_index=None,
        initial_state=None,
    ):
        self.add = Subject() if add is None else add
        self.remove = Subject() if remove is None else remove
        self.reindex = Subject() if reindex is None else reindex
        self.clear = Subject() if clear is None else clear
        self.edit_at_index = (
            Subject() if edit_at_index is None else edit_at_index)

        self.initial_state = pr.v() if initial_state is None else initial_state

        self.history = rx.merge(self.add, self.remove, self.reindex,
                                self.clear, self.edit_at_index)

        self._values = self.history.pipe(
            ops.scan(self.reduce_list_state, self.initial_state))
        self.values = self._values
        self.values_with_history = self._values.pipe(
            ops.zip(self.history),
            # ops.replay(buffer_size=1)
        )
Example #9
    def average(source: Observable) -> Observable:
        """Partially applied average operator.

        Computes the average of an observable sequence of values that
        are in the sequence or obtained by invoking a transform
        function on each element of the input sequence if present.

        Examples:
            >>> res = average(source)

        Args:
            source: Source observable to average.

        Returns:
            An observable sequence containing a single element with the
            average of the sequence of values.
        """

        if key_mapper:
            return source.pipe(operators.map(key_mapper), operators.average())

        def accumulator(prev, cur):
            return AverageValue(sum=prev.sum + cur, count=prev.count + 1)

        def mapper(s):
            if s.count == 0:
                raise Exception('The input sequence was empty')

            return s.sum / float(s.count)

        seed = AverageValue(sum=0, count=0)
        return source.pipe(operators.scan(accumulator, seed), operators.last(),
                           operators.map(mapper))
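For reference, the public operator built from this helper can be used like so (a small sketch, not from the original file):

import rx
from rx import operators

rx.of(1, 2, 3, 4).pipe(operators.average()).subscribe(print)  # 2.5
rx.of("a", "bb", "ccc").pipe(
    operators.average(lambda s: len(s))).subscribe(print)     # 2.0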
Example #10
        def create():
            def func(acc, x):
                if acc is None:
                    acc = 0
                return acc + x

            return xs.pipe(_.scan(func))
Example #12
def accumulate_and_clear(values: Observable, clear: Observable) -> Observable:
    return merge_either(values, clear.pipe(ops.map(lambda _: True))).pipe(
        ops.scan(
            lambda collected, either: [] if either[1] else collected + [either[0]],
            [],
        ))
Example #13
    def __init__(
        self,
        columns,
        add_row=None,
        remove_row=None,
        edit_row_at_index=None,
        clear=None,
        initial_rows=None,
    ):
        self.add_row = Subject() if add_row is None else add_row
        self.remove_row = Subject() if remove_row is None else remove_row
        self.clear = Subject() if clear is None else clear
        self.edit_row_at_index = (
            Subject() if edit_row_at_index is None else edit_row_at_index)

        self.initial_rows = pr.v() if initial_rows is None else initial_rows
        self.initial_columns = pr.v(*columns)

        self.history = rx.merge(self.add_row, self.remove_row, self.clear,
                                self.edit_row_at_index)
        self._values_with_columns = self.history.pipe(
            ops.scan(
                self.reduce_table_state,
                (self.initial_rows, self.initial_columns),
            ))

        self.values = self._values_with_columns.pipe(ops.map(lambda x: x[0]))
        self.columns = self._values_with_columns.pipe(ops.map(lambda x: x[1]))
        self.values_with_history = self.values.pipe(ops.zip(self.history))
Example #14
def reactive_frame(initial: Optional[pd.DataFrame] = None,
                   mutate=False) -> Tuple[Subject, rx.Observable]:
    """
    Creates a pair of observables, a subject that allows generating a stream of data, and
    a DataFrame accumulator computed from the data. This can operate in either mutable (where the
    data frame is modified rather than concatenated with each new point) or immutable (default) operation.

    In concurrent applications, you should be wary of setting `mutable=True`, unless you are okay
    with the data being updated behind your back. That being said, this option is more performant as pandas
    does not need to make a new copy of the frame with every push of data onto the stream.

    Args:
        initial (pd.DataFrame): Initial data frame which can be used to populate the types and column names.
        mutate (bool): Whether to modify or concat (make new copy) new data onto the accumulated DataFrame.

    Returns:
        A tuple of an rx.Subject and an rx.Observable providing the raw value
        and accumulated value streams respectively.
    """
    subject = Subject()

    def append_to_frame(old_frame: pd.DataFrame, new_item: Dict[str, Any]):
        if mutate:
            old_frame.loc[len(old_frame)] = new_item
            return old_frame

        return pd.concat([old_frame, pd.DataFrame([new_item])],
                         ignore_index=True)

    # Default to an empty frame: ops.scan would otherwise use None as the seed
    # and crash on the first item when no initial frame is given.
    accumulated = subject.pipe(
        ops.scan(append_to_frame, pd.DataFrame() if initial is None else initial))

    return subject, accumulated
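A usage sketch, assuming the reactive_frame defined above is in scope; each value pushed onto the subject appends a row to the accumulated frame:

import pandas as pd

subject, frames = reactive_frame(pd.DataFrame(columns=["x", "y"]))
frames.subscribe(lambda df: print(df.tail(1)))

subject.on_next({"x": 1, "y": 2})  # prints the one-row frame
subject.on_next({"x": 3, "y": 4})  # prints the frame's new last row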
Example #15
def _flag_start_end(start_condition, end_condition):
    """Helper operator that adds two boolean flags to each item depending on
    whether the conditions are met.
    """
    # The seed matches the shape of the emitted 3-tuple (value, started, ended).
    return ops.scan(
        lambda acc, x:
        (x, start_condition(x) or acc[1] and not acc[2], end_condition(x)),
        seed=(None, False, False))
Example #16
def download_directory(file: dict,
                       destination: str,
                       matching: str = None,
                       delete_after_download: bool = False) -> Observable:
    destination = os.path.abspath(destination)

    def get_destination(f):
        relative_path = f['path'][len(file['path']):]
        next_destination = '{}{}{}'.format(
            destination, '' if destination[-1] == '/' else '/', relative_path)
        return next_destination

    def initial_stats(files):
        return {
            'progress': 0,
            'total_files': len(files),
            'total_bytes': sum([int(f.get('size', 0)) for f in files]),
            'downloaded_files': 0,
            'downloaded_bytes': 0
        }

    def update_stats(stats, download):
        downloaded_files = stats['downloaded_files'] + (
            0 if download['progress'] < 1 else 1)
        downloaded_bytes = stats['downloaded_bytes'] + download[
            'downloaded_bytes']
        progress = downloaded_bytes / stats['total_bytes']
        return {
            'progress': progress,
            'total_files': stats['total_files'],
            'total_bytes': stats['total_bytes'],
            'downloaded_files': downloaded_files,
            'downloaded_bytes': downloaded_bytes
        }

    def is_file_matching(f):
        return not matching or fnmatch.fnmatch(f['path'], matching)

    def delete_downloaded(downloaded):
        if delete_after_download:
            return delete_file(downloaded['file']).pipe(
                map(lambda _: downloaded))
        else:
            return of(downloaded)

    def filter_files(files):
        return [f for f in files if not is_folder(f) and is_file_matching(f)]

    if is_folder(file):
        return list_folder_recursively(file).pipe(
            map(lambda files: filter_files(files)),
            flat_map(lambda files: of(True).pipe(
                flat_map(lambda _: of(*files).pipe(
                    flat_map(lambda f: download_file(f, get_destination(f))),
                    flat_map(delete_downloaded))),
                scan(update_stats, initial_stats(files)))))
    else:
        return download_file(file, destination)
Example #17
def _pid(process):
    return process.pipe(
        # trace_observable("pid"),
        ops.with_latest_from(setpoint),
        # trace_observable("pid2"),
        ops.scan(_pid_step, seed=None),
        # trace_observable("pid3"),
        ops.map(lambda i: i.control_value),
    )
Example #18
    def start_simulation(self):
        """Calls ElementSimulation's start method.
        """
        # Ask the user whether they want to overwrite old simulation results
        # (if they exist) or continue from them
        status = self.element_simulation.get_current_status()

        if status[ElementSimulation.STATE] == SimulationState.DONE:
            reply = QtWidgets.QMessageBox.question(
                self, "Confirmation",
                "Do you want to continue this simulation?\n\n"
                "If you do, old simulation results will be preserved.\n"
                "Otherwise they will be deleted.", QtWidgets.QMessageBox.Yes
                | QtWidgets.QMessageBox.No | QtWidgets.QMessageBox.Cancel,
                QtWidgets.QMessageBox.Cancel)
            if reply == QtWidgets.QMessageBox.Cancel:
                return  # If clicked Cancel don't start simulation
            elif reply == QtWidgets.QMessageBox.No:
                use_old_erd_files = False
            else:
                use_old_erd_files = True
        elif status[ElementSimulation.STATE] == SimulationState.NOTRUN:
            use_old_erd_files = False
        else:
            self.mcerd_error = "Simulation currently running. Cannot start a " \
                               "new one."
            self.mcerd_error_lbl.show()
            return

        self.mcerd_error_lbl.hide()

        # Lock full edit
        self.element_simulation.lock_edit()
        if self.recoil_dist_widget.current_element_simulation is \
           self.element_simulation:
            self.recoil_dist_widget.full_edit_on = False
            self.recoil_dist_widget.update_plot()

        self.finished_processes = 0, self.process_count
        self.remove_progress_bars()

        observable = self.element_simulation.start(
            self.process_count,
            use_old_erd_files=use_old_erd_files,
            ion_division=self._ion_division)
        if observable is not None:
            self.__unsub = observable.pipe(
                ops.scan(
                    lambda acc, x: {
                        **x,
                        "started": x[MCERD.IS_RUNNING] and not acc["started"],
                    },
                    seed={"started": False})).subscribe(self)
        else:
            self.mcerd_error = "Could not start simulation. Check that there " \
                               "is no other simulation running for this " \
                               "recoil element"
            self.mcerd_error_lbl.show()
Example #19
    def _export_geometries():
        def aggregate_progress(progresses, count):
            p = _sum_dicts(progresses.values(), excluded_keys=['geometry'])
            exported = round(100 * p['exported'] / count)
            downloaded = round(100 * p['downloaded'] / count)
            downloaded_bytes = format_bytes(p['downloaded_bytes'])
            processed = round(100 * p['processed'] / count)
            return progress(
                default_message='Exported {}%, Downloaded {}% ({}), Processed {}%'.format(
                    exported, downloaded, downloaded_bytes, processed
                ),
                message_key='tasks.retrieve.time_series_to_sepal.progress',
                exported=exported,
                downloaded=downloaded,
                downloaded_bytes=downloaded_bytes,
                processed=processed
            )

        features_collection = _to_features_collection(region)

        def export_geometry(geometry, i, geometry_count):
            geometry_description = str(i + 1).zfill(len(str(geometry_count)))
            return defer(
                lambda _: _export_geometry(
                    geometry,
                    geometry_description=geometry_description
                )
            )

        return concat(
            progress(
                default_message='Tiling AOI...',
                message_key='tasks.retrieve.time_series_to_sepal.tiling'
            ),
            _extract_feature_indexes(features_collection).pipe(
                flat_map(
                    lambda feature_indexes: _to_geometries(features_collection, feature_indexes).pipe(
                        flat_map(
                            lambda geometries: concat(
                                *[
                                    export_geometry(geometry, i, len(feature_indexes))
                                    for i, geometry in enumerate(geometries)
                                ]
                            )
                        ),
                        scan(lambda acc, p: {**acc, p['geometry']: p}, {}),
                        flat_map(lambda progresses: aggregate_progress(
                            progresses,
                            count=len(feature_indexes) * len(year_ranges)
                        ))
                    )
                )
            )
        )
Example #20
def reduce_while(reducer, start_from, end_at):
    """Reduces items into a single item while a condition is met.

    Args:
        reducer: function that takes as an input the last output of itself and
            an item and returns a combined result.
        start_from: function that takes an item and returns a boolean
        end_at: function that takes an item and returns a boolean
    """
    return ops.pipe(
        _flag_start_end(start_from, end_at),
        ops.scan(lambda acc, x: (reducer(acc[0], x[0]) if acc[1] else x[0],
                                 x[1] and not x[2])),
        ops.filter(lambda x: not x[1]),
        ops.map(lambda x: x[0]))
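A usage sketch, assuming _flag_start_end from Example #15 is in scope: items between the markers are folded into one result, while everything outside the window passes through untouched:

import rx

rx.from_iterable(["noise", "BEGIN", "a", "b", "END", "tail"]).pipe(
    reduce_while(
        reducer=lambda acc, x: acc + " " + x,
        start_from=lambda x: x == "BEGIN",
        end_at=lambda x: x == "END",
    )
).subscribe(print)
# noise
# BEGIN a b END
# tail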
Example #21
from datetime import datetime

import pandas as pd
import rx
from rx import operators


def data_framer(window_length=None, auto_timestamp=False):
    """
    Accept an incoming pandas DataFrame, append it to the internal buffer and feed it on.
    :param auto_timestamp: automatically set the incoming DataFrame's timestamp to the current time.
    :param window_length: if set, only the last window_length rows are kept in the buffer.
    :return: an rx operator pipeline.
    """
    def reducer(acc: pd.DataFrame, new: pd.DataFrame):
        if auto_timestamp:
            new["timestamp"] = datetime.now()
        # DataFrame.append was removed in pandas 2.0; pd.concat is the equivalent.
        updated = pd.concat([acc, new])
        if window_length:
            return updated[-window_length:]
        else:
            return updated

    return rx.pipe(operators.scan(reducer, pd.DataFrame()))
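A usage sketch (not from the original source): feeding frames through a Subject and keeping a rolling window of the most recent rows:

from rx.subject import Subject

ticks = Subject()
windowed = ticks.pipe(data_framer(window_length=100, auto_timestamp=True))
windowed.subscribe(lambda df: print(df.shape))

ticks.on_next(pd.DataFrame({"price": [101.5]}))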
Example #22
    def _export_year(geometry, year_start, year_end, export_description,
                     year_dir):
        stack = _create_stack(geometry, year_start, year_end)
        ee.InitializeThread(credentials)
        if not stack.bandNames().size().getInfo():
            logging.info('No data between {} and {}'.format(
                year_start, year_end))
            return of({
                'exported': 1,
                'downloaded': 1,
                'downloaded_bytes': 0,
                'processed': 1
            })
        initial_progress = of({
            'exported': 0,
            'stack_bytes': 0,
            'dates_bytes': 0,
            'downloaded': 0,
            'processed': 0
        })

        def aggregate_downloaded_bytes(p):
            return {
                'exported': p['exported'],
                'downloaded': p['downloaded'],
                'downloaded_bytes': p['stack_bytes'] + p['dates_bytes'],
                'processed': p['processed']
            }

        return concat(
            initial_progress,
            merge(
                _export_and_download_stack(stack, export_description,
                                           year_dir),
                _export_and_download_dates(stack, export_description,
                                           year_dir)), _process_year(year_dir),
            of({'processed': 1})).pipe(scan(lambda acc, p: {
                **acc,
                **p
            }, {}), map(aggregate_downloaded_bytes))
Example #24
def test_tuple_with_previous_using_scan():
    rx.from_iterable(range(10)).pipe(
        ops.start_with((-1, -1)),
        ops.scan(lambda tup, y: (y, tup[0])),
        ops.skip(2),
    ).subscribe(lambda x: print(x))
Example #25
import time
from random import random

import rxbp
import rx

from rxbp import op
from rx import operators as rxop

# number of samples that are sent through the test stream
n_samples = int(1e6)

# batch size used only in rxbp
batch_size = 100

# rxbp test stream
start = time.time()
rxbp.range(n_samples, batch_size=batch_size).pipe(
    op.map(lambda _: 2 * random() - 1),
    op.scan(lambda acc, v: acc + v, initial=0),
).subscribe()
print('to scan over {} it takes rxbp = {}s'.format(n_samples,
                                                   time.time() - start))

# rx test stream
start = time.time()
rx.from_(range(n_samples)).pipe(
    rxop.map(lambda _: 2 * random() - 1),
    rxop.scan(lambda acc, v: acc + v, 0),
).subscribe()
print('to scan over {} it takes rx = {}s'.format(n_samples,
                                                 time.time() - start))
Example #26
def create_store(initial_state: Optional[ReduxRootState] = None) -> ReduxRootStore:  # pylint: disable=too-many-locals
    """ Constructs a new store that can handle feature modules.

        Args:
            initial_state: optional initial state of the store, will typically be the empty dict

        Returns:
            An implementation of the store
    """

    # current reducer
    reducer: Reducer = identity_reducer

    def replace_reducer(new_reducer: Reducer) -> None:
        """ Callback that replaces the current reducer

            Args:
                new_reducer: the new reducer

        """
        nonlocal reducer
        reducer = new_reducer

    # subject used to dispatch actions
    actions = Subject()

    # the shared action observable
    actions_ = actions.pipe(op.share())

    _dispatch = actions.on_next

    # our current state
    state = BehaviorSubject(initial_state if initial_state else {})

    # shutdown trigger
    done_ = Subject()

    # The set of known modules, to avoid cycles and duplicate registration
    modules: MutableMapping[str, ReduxFeatureModule] = {}

    # Sequence of added modules
    module_subject = Subject()

    # Subscribe to the resolved modules
    module_ = module_subject.pipe(op.distinct(select_id), op.share())

    # Build the reducers
    reducer_ = module_.pipe(
        op.filter(has_reducer),
        op.scan(reduce_reducers, {}),
        op.map(combine_reducers),
        op.map(replace_reducer),
    )

    # Build the epic
    epic_ = module_.pipe(
        op.map(select_epic),
        op.filter(bool),
        op.map(normalize_epic)
    )

    # Root epic that combines all of the incoming epics
    def root_epic(
        action_: Observable, state_: Observable
    ) -> Observable:
        """ Implementation of the root epic. If listens for new epics
            to come in and automatically subscribes.

            Args:
                action_: the action observable
                state_: the state observable

            Returns
                The observable of resulting actions
        """
        return epic_.pipe(
            op.flat_map(run_epic(action_, state_)),
            op.map(_dispatch)
        )

    # notifications about new feature states
    new_module_ = module_.pipe(
        op.map(select_id),
        op.map(create_action(INIT_ACTION)),
        op.map(_dispatch),
    )

    def _add_feature_module(module: ReduxFeatureModule):
        """ Registers a new feature module

            Args:
                module: the new feature module

        """
        module_id = select_id(module)
        if module_id not in modules:
            modules[module_id] = module
            for dep in select_dependencies(module):
                _add_feature_module(dep)
            module_subject.on_next(module)

    # all state
    internal_ = merge(root_epic(actions_, state), reducer_, new_module_).pipe(
        op.ignore_elements()
    )

    def _as_observable() -> Observable:
        """ Returns the state as an observable

            Returns:
                the observable
        """
        return state

    def _on_completed() -> None:
        """ Triggers the done event """
        done_.on_next(None)

    merge(actions_, internal_).pipe(
        op.map(lambda action: reducer(state.value, action)),
        op.take_until(done_),
    ).subscribe(state, logger.error)

    return ReduxRootStore(
        _as_observable, _dispatch, _add_feature_module, _dispatch, _on_completed
    )
Example #27
rx.timer()

"""Mathematical"""
op.average()
op.concat()
op.count()
op.max()
op.min()
op.reduce()
op.sum()

"""Transformation"""
op.buffer()
op.group_by()
op.map()
op.scan()
# ...

"""Filtering"""
op.debounce()
op.distinct()
op.filter()
op.element_at()
op.first()
op.ignore_elements()
op.last()
op.skip()
op.skip_last()
op.take()
op.take_last()
# ...
Example #28
def create():
    return xs.pipe(_.scan(lambda acc, x: acc + x, seed=seed))
Example #29
def create():
    # scan() takes the accumulator first, then the optional seed
    return xs.pipe(_.scan(lambda acc, x: acc + x, seed))
Example #30
# Example of summing all the values in a data stream
import rx
from rx import operators as op

# Set up a source
observable = rx.from_list([2, 3, 5, 7])
# Apply sum function to initial source
observable2 = observable.pipe(op.sum())
# Subscribe to the result generated by sum
observable2.subscribe(lambda v: print(v))

print('-' * 20)

# Rolling or incremental sum
rx.from_([2, 3, 5, 7]).pipe(
    op.scan(lambda subtotal, i: subtotal + i)).subscribe(lambda v: print(v))

# Output:
# 17
# --------------------
# 2
# 5
# 10
# 17
Example #31
def create():
    return never().pipe(_.scan(lambda acc, x: acc + x))
Example #32
def create():
    return xs.pipe(_.scan(lambda acc, x: acc + x))
Example #35
def create():
    def func(acc, x):
        return acc + x
    return never().pipe(_.scan(seed=seed, accumulator=func))
Example #37
"""this operator will apply an accumulator function to the values coming
from the source observable and return an observable with new values.

:parameter
    accumulator_func: This function is applied to all the values
    from the source observable.

    seed: [optional] The initial value to be used inside in accumulator_func.

:return
    This operator will return an observable that will have new values based
    on the accumulator function applied on each value of the source observable
"""

from rx import of, operators as op

acc_x = lambda acc, x: acc + x
of(1, 2, 3, 4, 5, 6, 7, 8, 90).pipe(op.scan(
    acc_x, 0)).subscribe(lambda x: print("this element is {}".format(x)))

# result
# this element is 1
# this element is 3
# this element is 6
# this element is 10
# this element is 15
# this element is 21
# this element is 28
# this element is 36
# this element is 126