Code example #1
File: client.py Project: dayfine/ibapi-rxpy
    def __init__(self, ip_addr: str, port: int, client_id: int):
        self._messages = ReplaySubject()
        self._message_wrapper = message_wrapper.IbApiMessageWrapper(
            self._messages)
        self._eclient = client.EClient(self._message_wrapper)

        self._eclient.connect(ip_addr, port, client_id)
        self._thread = threading.Thread(target=self._eclient.run)
        self._thread.start()
Code example #2
def _failed_transfers(store):
    processing_files = ReplaySubject()

    def transfer_files():
        state = store.getState()
        if (state.processing):
            processing_files.on_next(state.processing)

    store.subscribe(transfer_files)
    return processing_files.pipe(
        operators.map(lambda paths: rx.from_iterable(paths)),
        operators.merge_all(), operators.flat_map(_transfer_file))
Code example #3
File: bindingOperators.py Project: aguil/RxPython
def replay(self,
           bufferSize=sys.maxsize,
           window=sys.maxsize,
           selector=None,
           scheduler=Scheduler.currentThread):
    assert isinstance(self, Observable)
    assert isinstance(scheduler, Scheduler)

    if selector is None:
        return self.multicast(ReplaySubject(bufferSize, window, scheduler))
    else:
        assert callable(selector)

        return self.multicastIndividual(
            lambda: ReplaySubject(bufferSize, window, scheduler), selector)
Code example #4
    def __init__(self, namespace: str, use_repeaters=True, networks=[]):
        super().__init__(namespace)

        self.__networks = networks
        self.__reachable_peers: Set[InstanceReference] = set()
        self.__resource_subjects: Dict[bytes, ReplaySubject] = {}
        self.__peer_subjects: Dict[InstanceReference, ReplaySubject] = {}
        self.__has_aip_group_peers = ReplaySubject()

        if (len(networks) == 0):
            port = 5156
            while True:
                try:
                    network = IPv4("0.0.0.0", port)
                    self.__networks.append(network)
                    break
                except Exception as e:
                    if (port >= 9000):
                        raise e

                    port += 1

        self.__muxer = MX2()

        for network in self.__networks:
            network.bring_up()
            self.__muxer.register_network(network)

        self.__discoverer = AIP(self.__muxer)
        self.__instance = self.__muxer.create_instance(self.namespace)
        self.__transport = STP(self.__muxer, self.__instance)
        self.__path_finder = RPP(self.__muxer, self.__discoverer)

        self.__path_finder.add_instance(self.__instance.reference)

        self.__instance.incoming_greeting.subscribe(self.__received_greeting)
        self.__transport.incoming_stream.subscribe(self.__new_stream)

        for network in self.__networks:
            self.__discoverer.add_network(network)

        self.__info = ApplicationInformation.from_instance(self.__instance)

        # Add the application to the discoverer
        self.__discoverer.add_application(self.__info).subscribe(
            self.__new_aip_app_peer)
Code example #5
File: workqueue.py Project: muhammadsyukril/sepal
    def __init__(self, concurrency_per_group, description=None):
        self.scheduler = ThreadPoolScheduler(concurrency_per_group)
        self._requests = Subject()
        self._output = ReplaySubject()
        self._description = description
        self._subscription = self._requests.pipe(
            group_by(lambda r: r['concurrency_group']),
            flat_map(lambda concurrency_group: concurrency_group.pipe(
                map(lambda r: r['request']),
                merge(max_concurrent=concurrency_per_group)))).subscribe(
                    on_next=lambda request: self._output.on_next(request),
                    on_error=lambda error: logging.exception(
                        'Error in {} request stream'.format(self)),
                    on_completed=lambda: logging.error(
                        '{} request stream unexpectedly completed'.format(self)),
                    scheduler=self.scheduler)
Code example #6
    def __get_instance_subject(self, ref: InstanceReference) -> Subject:
        # Do we have it?
        if (ref in self.__peer_subjects):
            # Yes
            return self.__peer_subjects[ref]

        # No, create it
        subject = ReplaySubject()
        self.__peer_subjects[ref] = subject
        return subject
Code example #7
def select(selector: Mapper[T1, T2]
           ) -> Callable[[Observable], Observable]:
    """ Reactive operator that applies a selector
        and shares the result across multiple subscribers

        Args:
            selector: the selector function

        Returns:
            The reactive operator
    """
    return pipe(
        op.map(selector),
        op.distinct_until_changed(comparer=is_),
        op.multicast(subject=ReplaySubject(1)),
        op.ref_count(),
    )
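
Below is a minimal usage sketch (hypothetical, not part of the original project), assuming RxPY 3 with the imports the operator above relies on (rx.pipe, rx.operators as op, operator.is_, rx.subject.ReplaySubject) plus rx.subject.Subject as a source. Because the multicast subject is a ReplaySubject(1), a late subscriber immediately receives the most recent mapped value:

source = Subject()
doubled = source.pipe(select(lambda x: x * 2))

doubled.subscribe(lambda v: print('first:', v))
source.on_next(1)                                 # first: 2
doubled.subscribe(lambda v: print('second:', v))  # second: 2, replayed from the ReplaySubject(1) buffer
source.on_next(2)                                 # first: 4, then second: 4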
Code example #8
def _replay(
    mapper: Optional[Mapper] = None,
    buffer_size: Optional[int] = None,
    window: Optional[typing.RelativeTime] = None,
    scheduler: Optional[Scheduler] = None
) -> Callable[[Observable], Union[Observable, ConnectableObservable]]:
    """Returns an observable sequence that is the result of invoking the
    mapper on a connectable observable sequence that shares a single
    subscription to the underlying sequence replaying notifications
    subject to a maximum time length for the replay buffer.

    This operator is a specialization of Multicast using a
    ReplaySubject.

    Examples:
        >>> res = replay(buffer_size=3)
        >>> res = replay(buffer_size=3, window=500)
        >>> res = replay(None, 3, 500)
        >>> res = replay(lambda x: x.take(6).repeat(), 3, 500)

    Args:
        mapper: [Optional] Selector function which can use the multicasted
            source sequence as many times as needed, without causing
            multiple subscriptions to the source sequence. Subscribers to
            the given source will receive all the notifications of the
            source subject to the specified replay buffer trimming policy.
        buffer_size: [Optional] Maximum element count of the replay
            buffer.
        window: [Optional] Maximum time length of the replay buffer.
        scheduler: [Optional] Scheduler the observers are invoked on.

    Returns:
        An observable sequence that contains the elements of a
    sequence produced by multicasting the source sequence within a
    mapper function.
    """

    if mapper:

        def subject_factory(scheduler):
            return ReplaySubject(buffer_size, window, scheduler)

        return ops.multicast(subject_factory=subject_factory, mapper=mapper)

    return ops.multicast(ReplaySubject(buffer_size, window, scheduler))
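
For context, a short sketch of how the no-mapper branch is typically consumed (hypothetical usage under RxPY 3, not taken from the original module): multicast returns a ConnectableObservable, so nothing reaches the ReplaySubject until connect() subscribes it to the source.

import rx
from rx import operators as ops

published = rx.of(1, 2, 3).pipe(ops.replay(buffer_size=2))  # ConnectableObservable
published.connect()          # the ReplaySubject subscribes to the source and buffers the last two values
published.subscribe(print)   # a late subscriber still receives the buffered 2 and 3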
Code example #9
    def find_resource_peers(self, resource: bytes) -> Subject:
        # Do we already have a subject for this query?
        if (resource not in self.__resource_subjects):
            # Create one
            self.__resource_subjects[resource] = ReplaySubject()

        # Prepare a function to make the resource request
        def find_peers(has_group_peers):
            # Create a query for the resource
            query = self.__discoverer.find_application_resource(
                self.__info, resource)

            # Subscribe to the query's answer
            query.answer.subscribe(
                lambda x: self.__found_resource_instance(x, resource))

        # When we are in a position to ask group peers, do it
        self.__has_aip_group_peers.pipe(take(1)).subscribe(find_peers)

        # Return the resource subject
        return self.__resource_subjects[resource]
Code example #10
class DefaultInstanceManager(InstanceManager):
    def __init__(self, namespace: str, use_repeaters=True, networks=[]):
        super().__init__(namespace)

        self.__networks = networks
        self.__reachable_peers: Set[InstanceReference] = set()
        self.__resource_subjects: Dict[bytes, ReplaySubject] = {}
        self.__peer_subjects: Dict[InstanceReference, ReplaySubject] = {}
        self.__has_aip_group_peers = ReplaySubject()

        if (len(networks) == 0):
            port = 5156
            while True:
                try:
                    network = IPv4("0.0.0.0", port)
                    self.__networks.append(network)
                    break
                except Exception as e:
                    if (port >= 9000):
                        raise e

                    port += 1

        self.__muxer = MX2()

        for network in self.__networks:
            network.bring_up()
            self.__muxer.register_network(network)

        self.__discoverer = AIP(self.__muxer)
        self.__instance = self.__muxer.create_instance(self.namespace)
        self.__transport = STP(self.__muxer, self.__instance)
        self.__path_finder = RPP(self.__muxer, self.__discoverer)

        self.__path_finder.add_instance(self.__instance.reference)

        self.__instance.incoming_greeting.subscribe(self.__received_greeting)
        self.__transport.incoming_stream.subscribe(self.__new_stream)

        for network in self.__networks:
            self.__discoverer.add_network(network)

        self.__info = ApplicationInformation.from_instance(self.__instance)

        # Add the application to the discoverer
        self.__discoverer.add_application(self.__info).subscribe(
            self.__new_aip_app_peer)

    def establish_stream(self,
                         peer: InstanceReference,
                         *,
                         in_reply_to=None) -> Subject:
        # Settle on a reply
        reply = in_reply_to or b"\x00" * 16

        # Ask the transport to establish a stream
        return self.__transport.initialise_stream(peer, in_reply_to=reply)

    def find_resource_peers(self, resource: bytes) -> Subject:
        # Do we already have a subject for this query?
        if (resource not in self.__resource_subjects):
            # Create one
            self.__resource_subjects[resource] = ReplaySubject()

        # Prepare a function to make the resource request
        def find_peers(has_group_peers):
            # Create a query for the resource
            query = self.__discoverer.find_application_resource(
                self.__info, resource)

            # Subscribe to the query's answer
            query.answer.subscribe(
                lambda x: self.__found_resource_instance(x, resource))

        # When we are in a position to ask group peers, do it
        self.__has_aip_group_peers.pipe(take(1)).subscribe(find_peers)

        # Return the resource subject
        return self.__resource_subjects[resource]

    @property
    def resources(self) -> Set[bytes]:
        return self.__info.resources

    def __new_aip_app_peer(self, instance):
        # Query for application instances
        self.__discoverer.find_application_instance(self.__info).subscribe(
            self.__found_instance)

        # We now have an AIP group peer
        self.__has_aip_group_peers.on_next(True)

    def __found_instance(self, instance_info: InstanceInformation):
        # Is this peer already reachable?
        if (instance_info.instance_reference in self.__reachable_peers):
            # Don't harass it
            return

        # Inquire about the peer
        subject = self.__muxer.inquire(self.__instance,
                                       instance_info.instance_reference,
                                       instance_info.connection_methods)

        # Handle timeouts
        subject.subscribe(on_error=lambda x: self.__greeting_timeout(
            instance_info.instance_reference))

    def __found_resource_instance(self, instance_info: InstanceInformation,
                                  resource: bytes):
        # Get the resource subject
        resource_subject = self.__resource_subjects[resource]

        # Get the instance subject
        instance_subject = self.__get_instance_subject(
            instance_info.instance_reference)

        # Notify resource subject when instance subject is reachable
        instance_subject.subscribe(resource_subject.on_next)

        # Handle new instance
        self.__found_instance(instance_info)

    def __received_greeting(self, instance: InstanceReference):
        # Have we already marked this instance as reachable?
        if (instance in self.__reachable_peers):
            # Don't notify app again
            return

        # Add to reachable peers
        self.__reachable_peers.add(instance)

        # Notify instance subject
        self.__get_instance_subject(instance).on_next(instance)

        # Notify the app
        self.new_peer.on_next(instance)

    def __new_stream(self, stream):
        # Notify app of new stream
        self.new_stream.on_next(stream)

    def __get_instance_subject(self, ref: InstanceReference) -> Subject:
        # Do we have it?
        if (ref in self.__peer_subjects):
            # Yes
            return self.__peer_subjects[ref]

        # No, create it
        subject = ReplaySubject()
        self.__peer_subjects[ref] = subject
        return subject

    def __greeting_timeout(self, target: InstanceReference):
        # Have we already found this peer?
        if (target in self.__instance.reachable_peers
                or not self.use_repeaters):
            return

        # Did not receive greeting from instance, ask for paths via repeaters
        query = self.__path_finder.find_path(target)

        def handle_route(paths):
            # Have we already found this peer?
            if (target in self.__instance.reachable_peers):
                return

            # We have a path, inquire
            self.__muxer.inquire_via_paths(self.__instance, target, paths)

        # Subscribe to answers
        query.subscribe(handle_route)
Code example #11
import time
from sys import stderr

from rx import create, interval
from rx.core import Observer
from rx.disposable import Disposable
from rx.operators import *
from rx.scheduler.scheduler import Scheduler
from rx.scheduler import ThreadPoolScheduler
from rx.subject import ReplaySubject

flow = ReplaySubject(None)

pool = ThreadPoolScheduler(1)


def on_well(e):
    if e == "Well":
        flow.on_next("E!")


flow.pipe(subscribe_on(pool), retry(3), do_action(on_next=on_well)).subscribe(
    on_next=lambda s: print(s), on_error=lambda e: print(e, file=stderr))

while True:
    flow.on_next("Hi")
    time.sleep(1)
    flow.on_next("Well")
    time.sleep(1)
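
A sketch of the behaviour this script should show under RxPY 3 semantics (my reading, not output captured from the original):

# Each loop iteration prints "Hi", then "Well", then "E!": do_action invokes
# on_well for every element, and on_well pushes "E!" back into the same
# ReplaySubject the pipeline is subscribed to, so it is echoed once more.
# Because buffer_size is None (effectively unbounded), any subscriber added
# later would also receive every value emitted so far.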
Code example #12
    def action1(scheduler, state=None):
        subject[0] = ReplaySubject(sys.maxsize, 100, scheduler)
Code example #13
    def action1(scheduler, state=None):
        subject[0] = ReplaySubject(scheduler=scheduler)
Code example #14
File: subject.py Project: jinpanji/python-study
from rx.subject import Subject, AsyncSubject, BehaviorSubject, ReplaySubject

# A Subject is both an Observer and an Observable

print('--------Subject---------')
subject = Subject()
subject.on_next(1)
subject.subscribe(lambda i: print(i))
subject.on_next(2)
subject.on_next(3)
subject.on_next(4)
subject.on_completed()

# ReplaySubject caches all values; if a buffer size is given, only the most recent values are kept
print('--------ReplaySubject---------')
subject = ReplaySubject()
subject.on_next(1)
subject.subscribe(lambda i: print(i))
subject.on_next(2)
subject.on_next(3)
subject.on_next(4)
subject.on_completed()

# BehaviorSubject caches the last emitted value, unless the Observable has already completed
print('--------BehaviorSubject---------')
subject = BehaviorSubject(0)
subject.on_next(1)
subject.on_next(2)
subject.subscribe(lambda i: print(i))
subject.on_next(3)
subject.on_next(4)
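
For reference, the output this demo should print, derived from RxPY 3 subject semantics (not captured from the original script):

# --------Subject---------
# 2
# 3
# 4
# --------ReplaySubject---------
# 1
# 2
# 3
# 4
# --------BehaviorSubject---------
# 2
# 3
# 4

Subject drops the 1 emitted before subscription, ReplaySubject replays it, and BehaviorSubject replays only the latest value (2) at the moment of subscription.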
Code example #15
    def subject_factory(scheduler):
        return ReplaySubject(buffer_size, window, scheduler)
Code example #16
File: client.py Project: dayfine/ibapi-rxpy
class IbApiClient(object):
    def __init__(self, ip_addr: str, port: int, client_id: int):
        self._messages = ReplaySubject()
        self._message_wrapper = message_wrapper.IbApiMessageWrapper(
            self._messages)
        self._eclient = client.EClient(self._message_wrapper)

        self._eclient.connect(ip_addr, port, client_id)
        self._thread = threading.Thread(target=self._eclient.run)
        self._thread.start()

    def _next_valid_id(self) -> int:
        self._eclient.reqIds(-1)  # Argument is ignored
        return self._messages.pipe(
            _.first(lambda m: m.type == IbApiMessageType.NEXT_VALID_ID),
            _.map(lambda m: m.payload[0])).run()

    def get_account_summary(self) -> account_summary.AccountSummary:
        request_id = self._next_valid_id()
        self._eclient.reqAccountSummary(
            request_id, "All", account_summary_tags.AccountSummaryTags.AllTags)

        with ExitStack() as stack:
            stack.callback(
                lambda: self._eclient.cancelAccountSummary(request_id))
            obs = account_summary.collect(self._messages, request_id)
            return obs.run()

    def get_open_orders(self) -> List[open_orders.OpenOrder]:
        # reqAllOpenOrders is used instead of reqOpenOrders to include manual
        # orders. Also, reqAllOpenOrders does not initiate a subscription.
        self._eclient.reqAllOpenOrders()
        obs = open_orders.collect(self._messages)
        return obs.run()

    def get_positions(self) -> List[position.Position]:
        self._eclient.reqPositions()
        with ExitStack() as stack:
            stack.callback(self._eclient.cancelPositions)
            obs = position.collect(self._messages)
            return obs.run()

    def place_order(self, contract: contract.Contract, order: order.Order):
        self._eclient.placeOrder(self._next_valid_id(), contract, order)

    def cancel_order(self, order_id: int):
        self._eclient.cancelOrder(order_id)

    def get_historical_data(self, contract: contract.Contract,
                            data_options: HistoricalDataOptions):
        HistoricalDataOptions.validate(data_options)
        if data_options.stream:
            raise ValueError(
                'get_historical_data should not be called with |options.stream| = True'
            )

        request_id = self._next_valid_id()
        end_datetime_str = data_options.end_datetime.strftime(
            '%Y%m%d %H:%M:%S') if data_options.end_datetime else ''
        self._eclient.reqHistoricalData(
            request_id,
            contract,
            endDateTime=end_datetime_str,
            durationStr=data_options.duration.as_string(),
            barSizeSetting=data_options.bar_size.as_string(),
            whatToShow=data_options.type.name,
            useRTH=data_options.use_rth,
            formatDate=data_options.format_time.value,
            keepUpToDate=False,
            chartOptions=None)
        obs = historical_data.collect(self._messages, request_id,
                                      data_options.type)
        return obs.run()
Code example #17
File: workqueue.py Project: muhammadsyukril/sepal
class WorkQueue(object):
    def __init__(self, concurrency_per_group, description=None):
        self.scheduler = ThreadPoolScheduler(concurrency_per_group)
        self._requests = Subject()
        self._output = ReplaySubject()
        self._description = description
        self._subscription = self._requests.pipe(
            group_by(lambda r: r['concurrency_group']),
            flat_map(lambda concurrency_group: concurrency_group.pipe(
                map(lambda r: r['request']),
                merge(max_concurrent=concurrency_per_group)))).subscribe(
                    on_next=lambda request: self._output.on_next(request),
                    on_error=lambda error: logging.exception(
                        'Error in {} request stream'.format(self)),
                    on_completed=lambda: logging.error(
                        '{} request stream unexpectedly completed'.format(self)),
                    scheduler=self.scheduler)

    def enqueue(self,
                observable: Observable,
                group: str = None,
                retries: int = 0,
                description: str = None):
        # Provide a function returning a callable?

        description = description or str(Observable)
        key = '{}({})'.format(description, random.random())

        def log_status(status):
            logging.debug(
                str({
                    'WorkQueue': str(self),
                    'group': group,
                    'key': key,
                    status: description
                }))

        log_status('ENQUEUED')
        error: Optional[Exception] = None

        def handle_error(e):
            log_status('FAILED')
            nonlocal error
            error = e
            return of({'key': key, 'error': e})

        def throw_if_error(request):
            if error:
                return throw(error)
            else:
                return of(request)

        def extract_value(value):
            if type(value) == Observable:
                return value
            else:
                return of(value)

        request = of(True).pipe(
            do_action(lambda _: log_status('STARTED')),
            flat_map(lambda _: observable.pipe(
                flat_map(extract_value),
                map(lambda value: {
                    'key': key,
                    'value': value
                }),
                retry_with_backoff(
                    retries=retries,
                    description='{}.enqueue(group={}, description={})'.format(
                        self, group, description)),
                catch(handler=lambda e, o: handle_error(e)),
            )),
            concat(
                of({
                    'key': key,
                    'complete': True
                }).pipe(do_action(lambda _: log_status('COMPLETED')))))
        result_stream = self._output.pipe(
            filter(lambda request: request['key'] == key),
            flat_map(lambda request: throw_if_error(request)),
            take_while(lambda request: not request.get('complete')),
            flat_map(lambda request: of(request.get('value'))))
        self._requests.on_next({
            'request': request,
            'concurrency_group': group
        })
        return result_stream

    def dispose(self):
        if self._subscription:
            self._subscription.dispose()

    def __str__(self):
        return 'WorkQueue({})'.format(
            self._description) if self._description else super().__str__()
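
A minimal usage sketch (hypothetical, assuming RxPY 3 and the retry_with_backoff helper this module already imports; the group name 'demo' is arbitrary): work is submitted as an observable, and results come back through the shared ReplaySubject keyed by the generated request key.

import time
import rx

queue = WorkQueue(concurrency_per_group=2, description='demo-queue')
queue.enqueue(rx.of(42), group='demo').subscribe(print)  # prints 42 once the pool has run the request
time.sleep(1)  # give the ThreadPoolScheduler a moment to process before tearing down
queue.dispose()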