Example #1
    def __init__(self, context: BlenderContext) -> None:
        super().__init__(context)

        self._position = Subject()
        self._activeInputs = Subject()

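        # Scale the raw position by the window size and publish it as a Point.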
        # noinspection PyTypeChecker
        self.position = self._position.pipe(
            ops.distinct_until_changed(),
            ops.map(lambda v: tuple(
                p * s for p, s in zip(v, context.window_size.tuple))),
            ops.map(Point.from_tuple), ops.share())

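        # Map logical mouse buttons to the corresponding BGE event codes.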
        codes = {
            MouseButton.LEFT: bge.events.LEFTMOUSE,
            MouseButton.MIDDLE: bge.events.MIDDLEMOUSE,
            MouseButton.RIGHT: bge.events.RIGHTMOUSE
        }

        def pressed(e: SCA_InputEvent) -> bool:
            return KX_INPUT_ACTIVE in e.status or KX_INPUT_JUST_ACTIVATED in e.status

        def value_for(button: MouseButton) -> Observable:
            code = codes[button]

            return self._activeInputs.pipe(
                ops.start_with({}),
                ops.map(lambda i: code in i and pressed(i[code])),
                ops.map(lambda v: button if v else 0))

        # noinspection PyTypeChecker
        self.buttons = rx.combine_latest(
            *[value_for(b) for b in MouseButton]).pipe(
                ops.map(lambda v: reduce(lambda a, b: a | b, v)),
                ops.distinct_until_changed(), ops.share())
Example #2
def parse_config(file_data):
    return file_data.pipe(
        ops.map(lambda i: json.loads(
            i,
            object_hook=lambda d: namedtuple('x', d.keys())(*d.values()))),
        ops.share(),
    )
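
A minimal usage sketch of the parse_config above (not part of the original example; the config keys are invented for illustration). Each JSON document in the stream becomes a namedtuple, so fields can be read as attributes:

import rx

file_data = rx.just('{"samplerate": 44100, "bitdepth": 16}')
parse_config(file_data).subscribe(
    lambda cfg: print(cfg.samplerate, cfg.bitdepth))  # -> 44100 16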
Example #3
def model_publisher(scheduler, sources):
    file_source = sources.file.response.pipe(ops.share())

    # kafka driver bootstrap. fixme
    kafka_source = sources.kafka.response.pipe(
        ops.do_action(print),
        ops.replay(),
        ops.ref_count(),
    )
    kafka_source.subscribe()

    config, config_read_request, http_request = read_config_from_args(
        sources.argv.argv,
        file_source,
        sources.http.response,
        scheduler=scheduler)

    config = config.pipe(ops.first())

    kafka_request = config.pipe(ops.map(lambda c: create_model_topics(c)), )

    return ModelPublisherSink(
        file=file.Sink(request=rx.merge(config_read_request)),
        http=http.Sink(request=http_request),
        kafka=kafka.Sink(request=kafka_request),
    )
Example #4
def rmux_client(sources):
    response = sources.tcp_client.response.pipe(ops.share())
    tcp_connect = rx.just(tcp_client.Connect(
        host='127.0.0.1', port='8080'
    ))

    create_observable = response.pipe(
        ops.flat_map(lambda connection: 
            rx.just({'what': 'subscribe', 'id':42, 'name': '1234'}).pipe(
                ops.map(lambda i: json.dumps(i)),
                frame,
                ops.map(lambda j: tcp_client.Write(id=connection.id, data=j.encode()))
        ))
    )

    console = response.pipe(
        ops.flat_map(lambda connection: connection.observable.pipe(
            ops.map(lambda i: i.data.decode('utf-8')),
            unframe,
            ops.map(lambda i: json.loads(i)),
            ops.group_by(lambda i: i['id']),
            ops.flat_map(lambda subscription: subscription.pipe(
                ops.map(notification),
                ops.dematerialize(),
            ))
        )),
        ops.map(lambda i: "item: {}\n".format(i))
    )

    tcp_sink = rx.merge(tcp_connect, create_observable)

    return Sink(
        tcp_client=tcp_client.Sink(request=tcp_sink),
        stdout=stdout.Sink(data=console),
    )
Example #5
  def start(self) -> rx.Observable:
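    # Emit one tick per frame (period = 1 / fps seconds); share() lets several consumers reuse the same timer.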
    self._frames = rx \
      .interval(1 / self._fps, scheduler=rx.scheduler.TimeoutScheduler()) \
      .pipe(
        ops.share(),
      )

    return self._frames
Example #6
def parse_config(config_data):
    ''' Takes a stream with the content of the configuration file as input
    and returns a (hot) stream of arguments.
    '''
    config = config_data.pipe(
        ops.filter(lambda i: i.id == "config"), ops.flat_map(lambda i: i.data),
        ops.map(lambda i: json.loads(
            i, object_hook=lambda d: namedtuple('x', d.keys())(*d.values()))),
        ops.share())

    return config
Example #7
    def to_rx(self, *inputs: rx.Observable) -> rx.Observable:
        """Produces output observable from input observables.

        Zips together the input observables,
        puts the resulting observable on the correct scheduler,
        and then runs `forward`.
        The output observable is multicast so it can be reused
        without worrying about recomputation.
        """
        self._check_num_inputs(len(inputs),
                               check_nodes=self._is_used_in_static_graph)
        observable = _zip_observables(*inputs)
        observable = observable.pipe(
            self._forward_to_rx_op(),
            ops.share(),
        )
        return observable
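
A small self-contained sketch (names assumed, not taken from the original module) of why the trailing ops.share() matters: every subscriber attaches to the same multicast pipeline, so upstream work runs once per item rather than once per subscriber.

from rx import operators as ops
from rx.subject import Subject

source = Subject()
calls = []

def double_and_count(x):
    calls.append(x)            # record each upstream evaluation
    return x * 2

shared = source.pipe(
    ops.map(double_and_count),
    ops.share(),
)

shared.subscribe(lambda v: print("A:", v))
shared.subscribe(lambda v: print("B:", v))

source.on_next(1)   # both A and B print 2
print(len(calls))   # 1: the map ran once, not once per subscriber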
Example #8
    def __init__(self,
                 endpoint_clients: [typing.ServiceEndpointClient] = None,
                 request_correlator: typing.RequestCorrelator = None):

        if endpoint_clients is None:
            self.__endpoint_clients = []
        else:
            self.__endpoint_clients = endpoint_clients

        if request_correlator is None:
            self._request_correlator = DefaultRequestCorrelator()
        else:
            self._request_correlator = request_correlator

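        # Merge the per-endpoint event streams into a single shared observable.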
        self.__events = rx.from_iterable(
            map(lambda ep: ep.events,
                self.__endpoint_clients)).pipe(merge_all(), share())
Example #9
def tweets_for(topics):
    def observe_tweets(observer, scheduler):
        class TweetListener(StreamListener):
            def on_data(self, data):
                observer.on_next(data)
                return True

            def on_error(self, status):
                observer.on_error(status)

        # This handles Twitter authentication and the connection to the Twitter Streaming API
        l = TweetListener()
        auth = OAuthHandler(consumer_key, consumer_secret)
        auth.set_access_token(access_token, access_token_secret)
        stream = Stream(auth, l)
        stream.filter(track=topics)

    return create(observe_tweets).pipe(ops.share())
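
Hypothetical usage of tweets_for (the topics are illustrative, and the Tweepy credentials must be defined as in the example above). Because of share(), any additional subscribers would reuse the same underlying Twitter connection instead of opening a new one:

tweets = tweets_for(['python', 'rxpy'])
tweets.subscribe(lambda raw: print(raw[:80]))  # raw JSON payloads from the streaming API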
Example #10
    def get_responses(self, request) -> rx.Observable:
        if self._request_correlator is None:
            raise Exception(
                'Please configure the ServiceBus with a RequestCorrelator using .correlate_requests_with()'
            )

        self._request_correlator.set_request_correlation_ids(request)

        endpoints = self.__can_handle(request)

        responses = rx.from_iterable(
            map(lambda ep: ep.get_responses(request), endpoints))

        return responses.pipe(
            merge_all(),
            rx.operators.filter(
                lambda r: self._request_correlator.are_correlated(request, r)),
            share())
Example #11
def makinage(aio_scheduler, sources):
    def on_error(e):
        raise e

    config, read_request, http_request = read_config_from_args(
        sources.argv.argv,
        sources.file.response,
        sources.http.response,
        scheduler=aio_scheduler
    )

    first_config = rx.concat(config.pipe(ops.take(1),), rx.never())

    kafka_source = sources.kafka.response.pipe(
        trace_observable("kafka source1"),
        ops.replay(),
        ops.ref_count(),
        trace_observable("kafka source2"),
    )
    kafka_source.subscribe(on_error=on_error)

    kafka_request = first_config.pipe(
        ops.flat_map(lambda i: create_operators(
            i, config,
            kafka_source,
            sources.kafka.feedback.pipe(ops.share()),
        )),
        ops.subscribe_on(aio_scheduler),
        trace_observable("makinage"),
    )

    '''
    config.pipe(ops.subscribe_on(aio_scheduler)).subscribe(
        on_next=print,
        on_error=print,
    )
    '''

    return MakiNageSink(
        file=file.Sink(request=read_request),
        http=http.Sink(request=http_request),
        kafka=kafka.Sink(request=kafka_request),
    )
Example #12
 def __init__(self,
              concurrency_per_group,
              delay_seconds=0,
              description=None):
     self.scheduler = ThreadPoolScheduler(concurrency_per_group)
     self.request_scheduler = ThreadPoolScheduler(10)
     self._requests = Subject()
     self._output_subject = Subject()
     self._output = self._output_subject.pipe(share())
     self._description = description
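     # Group requests by 'concurrency_group'; each group runs at most
     # concurrency_per_group requests at a time, delayed by delay_seconds.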
     self._subscription = self._requests.pipe(
         observe_on(self.scheduler),
         group_by(lambda r: r['concurrency_group']),
         flat_map(lambda concurrency_group: concurrency_group.pipe(
             map(lambda r: r['request']),
             merge(max_concurrent=concurrency_per_group),
             delay(delay_seconds))),
         take_until_disposed()).subscribe(
             on_next=lambda request: self._output_subject.on_next(request),
             on_error=lambda error: logging.exception(
                 'Error in {} request stream'.format(self)),
             on_completed=lambda: self.dispose(),
             scheduler=self.scheduler)
Example #13
import rx
from rx import operators as ops
import time

# source = rx.interval(1.0).pipe(
#     ops.publish(),
#     ops.ref_count()
# )

source = rx.interval(1.0).pipe(ops.share())

source.subscribe(lambda s: print("Subscriber 1: {0}".format(s)))

# sleep 5 seconds, then add another subscriber
time.sleep(5)
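# Subscriber 2 only receives values emitted after it subscribes;
# share() multicasts the live sequence but does not replay earlier items.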
source.subscribe(lambda s: print("Subscriber 2: {0}".format(s)))

input("Press any key to exit\n")
Example #14
 def get_trained_obs(self):
     return self.dataSetFolderSubject.pipe(
         async_switch_map(self.handleFolder),
         ops.do_action(self.set_learner),
         ops.flat_map_latest(lambda event: self.trainFirstStage),
         async_switch_map(self.train_stage_1), ops.share())
Example #15
 def to_rx(self, *inputs: rx.Observable) -> rx.Observable:
     # NOTE If Module is not attached to graph, do not check len(inputs)
     # TODO Does it make sense for module to be detached from graph?
     if self._is_used_in_static_graph:
         self._check_num_inputs(len(inputs), check_nodes=True)
     return self.produce().pipe(ops.share())
Example #16
    def __init__(self,
                 endpoints: [typing.ServiceEndpoint] = None,
                 endpoint_clients: [typing.ServiceEndpointClient] = None,
                 request_correlator: typing.RequestCorrelator = DefaultRequestCorrelator()):

        super().__init__(endpoint_clients, request_correlator)

        if endpoints is None:
            self.__endpoints = []
        else:
            self.__endpoints = endpoints

        self.__requests = rx.from_iterable(map(lambda ep: ep.requests, self.__endpoints)).pipe(merge_all(), share())
        self.__commands = rx.from_iterable(map(lambda ep: ep.commands, self.__endpoints)).pipe(merge_all(), share())
Example #17
        def create_route_observable():
            def on_route_subscribe(observer, scheduler):
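                # Capture the observer so code outside this subscription can push routes into the stream.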
                nonlocal route_observer
                route_observer = observer

            return rx.create(on_route_subscribe).pipe(ops.share())
Example #18
def audio_encoder(sources):
    # Parse configuration
    parser = create_arg_parser()

    parsed_argv = sources.argv.argv.pipe(
        ops.skip(1),
        argparse.parse(parser),
        ops.filter(lambda i: i.key == 'config'),
        ops.subscribe_on(aio_scheduler),
        ops.share(),
    )

    # monitor and parse config file
    monitor_init = parsed_argv.pipe(
        ops.flat_map(lambda i: rx.from_([
            inotify.AddWatch(
                id='config', path=i.value, flags=aionotify.Flags.MODIFY),
            inotify.Start(),
        ])))

    config_update = sources.inotify.response.pipe(
        ops.debounce(5.0, scheduler=aio_scheduler),
        ops.map(lambda i: True),
        ops.start_with(True),
    )

    read_request, read_response = rx.combine_latest(
        parsed_argv, config_update).pipe(
            ops.starmap(
                lambda config, _: file.Read(id='config', path=config.value)),
            file.read(sources.file.response),
        )

    config = read_response.pipe(
        ops.filter(lambda i: i.id == "config"),
        ops.flat_map(lambda i: i.data),
        parse_config,
    )

    # Transcode request handling
    encode_init = config.pipe(
        ops.map(lambda i: i.encode),
        ops.distinct_until_changed(),
        ops.map(lambda i: encoder.Configure(samplerate=i.samplerate,
                                            bitdepth=i.bitdepth)),
    )

    encode_request = sources.httpd.route.pipe(
        ops.filter(lambda i: i.id == 'flac_transcode'),
        ops.flat_map(lambda i: i.request),
        ops.flat_map(lambda i: rx.just(i, encode_scheduler)),
        ops.map(lambda i: encoder.EncodeMp3(
            id=i.context, data=i.data, key=i.match_info['key'])),
    )
    encoder_request = rx.merge(encode_init, encode_request)

    # store encoded file
    store_requests = sources.encoder.response.pipe(
        ops.observe_on(s3_scheduler),
        ops.map(lambda i: s3.UploadObject(
            key=i.key + '.flac',
            data=i.data,
            id=i.id,
        )),
    )

    # acknowledge http request
    http_response = sources.s3.response.pipe(
        ops.map(lambda i: httpd.Response(
            data='ok'.encode('utf-8'),
            context=i.id,
        )))

    # http server
    http_init = config.pipe(
        ops.take(1),
        ops.flat_map(lambda i: rx.from_([
            httpd.Initialize(request_max_size=0),
            httpd.AddRoute(
                methods=['POST'],
                path=r'/api/transcode/v1/flac/{key:[a-zA-Z0-9-\._]*}',
                id='flac_transcode',
            ),
            httpd.StartServer(host=i.server.http.host, port=i.server.http.port
                              ),
        ])),
    )
    http = rx.merge(http_init, http_response)

    # s3 database
    s3_init = config.pipe(
        ops.take(1),
        ops.map(lambda i: s3.Configure(
            access_key=i.s3.access_key,
            secret_key=i.s3.secret_key,
            bucket=i.s3.bucket,
            endpoint_url=i.s3.endpoint_url,
            region_name=i.s3.region_name,
        )),
    )

    # merge sink requests
    file_requests = read_request
    s3_requests = rx.merge(s3_init, store_requests)

    return Sink(
        encoder=encoder.Sink(request=encoder_request),
        s3=s3.Sink(request=s3_requests),
        file=file.Sink(request=file_requests),
        httpd=httpd.Sink(control=http),
        inotify=inotify.Sink(request=monitor_init),
    )
Example #19
def create_store(initial_state: Optional[ReduxRootState] = None) -> ReduxRootStore:  # pylint: disable=too-many-locals
    """ Constructs a new store that can handle feature modules.

        Args:
            initial_state: optional initial state of the store, will typically be the empty dict

        Returns:
            An implementation of the store
    """

    # current reducer
    reducer: Reducer = identity_reducer

    def replace_reducer(new_reducer: Reducer) -> None:
        """ Callback that replaces the current reducer

            Args:
                new_reducer: the new reducer

        """
        nonlocal reducer
        reducer = new_reducer

    # subject used to dispatch actions
    actions = Subject()

    # the shared action observable
    actions_ = actions.pipe(op.share())

    _dispatch = actions.on_next

    # our current state
    state = BehaviorSubject(initial_state if initial_state else {})

    # shutdown trigger
    done_ = Subject()

    # The set of known modules, to avoid cycles and duplicate registration
    modules: MutableMapping[str, ReduxFeatureModule] = {}

    # Sequence of added modules
    module_subject = Subject()

    # Subscribe to the resolved modules
    module_ = module_subject.pipe(op.distinct(select_id), op.share())

    # Build the reducers
    reducer_ = module_.pipe(
        op.filter(has_reducer),
        op.scan(reduce_reducers, {}),
        op.map(combine_reducers),
        op.map(replace_reducer),
    )

    # Build the epic
    epic_ = module_.pipe(
        op.map(select_epic),
        op.filter(bool),
        op.map(normalize_epic)
    )

    # Root epic that combines all of the incoming epics
    def root_epic(
        action_: Observable, state_: Observable
    ) -> Observable:
        """ Implementation of the root epic. If listens for new epics
            to come in and automatically subscribes.

            Args:
                action_: the action observable
                state_: the state observable

            Returns:
                The observable of resulting actions
        """
        return epic_.pipe(
            op.flat_map(run_epic(action_, state_)),
            op.map(_dispatch)
        )

    # notifications about new feature states
    new_module_ = module_.pipe(
        op.map(select_id),
        op.map(create_action(INIT_ACTION)),
        op.map(_dispatch),
    )

    def _add_feature_module(module: ReduxFeatureModule):
        """ Registers a new feature module

            Args:
                module: the new feature module

        """
        module_id = select_id(module)
        if module_id not in modules:
            modules[module_id] = module
            for dep in select_dependencies(module):
                _add_feature_module(dep)
            module_subject.on_next(module)

    # all state
    internal_ = merge(root_epic(actions_, state), reducer_, new_module_).pipe(
        op.ignore_elements()
    )

    def _as_observable() -> Observable:
        """ Returns the state as an observable

            Returns:
                the observable
        """
        return state

    def _on_completed() -> None:
        """ Triggers the done event """
        done_.on_next(None)

    merge(actions_, internal_).pipe(
        op.map(lambda action: reducer(state.value, action)),
        op.take_until(done_),
    ).subscribe(state, logger.error)

    return ReduxRootStore(
        _as_observable, _dispatch, _add_feature_module, _dispatch, _on_completed
    )
Example #20
 def produce(self):
     return rx.interval(1).pipe(
         ops.do_action(lambda x: print(f"\n{get_time():.1f}  Frame {x}\n")),
         ops.map(lambda _: Tensor((224, 224, 3), "uint8")),
         ops.share(),
     )