Example #1
class CexGetOrderBookTask(object):
  def __init__(self, injector, target):
    self.client = injector.cex_client
    self.channels = injector.channels
    self.target = target
    self.async_result = AsyncResult()

  def execute(self):
    gevent.spawn(self.get_orderbook)
    return self.async_result

  def get_orderbook(self):
    try:
      _, equity_pair = self.target.orderbook_key.split(':')
      channel = self.channels.get_channel_or_none('cex')
      orderbook = None
      if channel is not None:
        # If a channel is available, try fetching the orderbook through it.
        orderbook = channel.get_orderbook_or_none(equity_pair=equity_pair, depth=20)
      if orderbook is None:
        # If the orderbook could not be fetched through the channel, fall back to HTTP.
        orderbook = self.client.get_orderbook(equity_pair=equity_pair, depth=20)
      self.async_result.set(orderbook)
    except Exception as e:
      self.async_result.set_exception(e)
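A minimal, self-contained sketch of the pattern above (the FetchTask class and fetch_fn callable are hypothetical, not part of the example): a worker greenlet fills the AsyncResult, and the caller blocks on get() until either set() or set_exception() is called.

import gevent
from gevent.event import AsyncResult

class FetchTask(object):
    """Hypothetical task mirroring the spawn-and-fill pattern above."""
    def __init__(self, fetch_fn):
        self.fetch_fn = fetch_fn
        self.async_result = AsyncResult()

    def execute(self):
        # Spawn the work and hand the AsyncResult back immediately.
        gevent.spawn(self._run)
        return self.async_result

    def _run(self):
        try:
            self.async_result.set(self.fetch_fn())
        except Exception as e:
            self.async_result.set_exception(e)

result = FetchTask(lambda: {'bids': [], 'asks': []}).execute()
print(result.get(timeout=5))  # blocks the calling greenlet until the result is ready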
Example #2
class OkexGetOrderBookTask(object):
    def __init__(self, injector, target):
        self.client = injector.okex_client
        self.logger = injector.logger
        self.target = target
        self.async_result = AsyncResult()

    def execute(self):
        gevent.spawn(self.get_orderbook)
        return self.async_result

    def get_orderbook(self):
        try:
            if self.target.type == 'spot':
                _, equity_pair = self.target.orderbook_key.split(':')
                orderbook = self.client.get_spot_orderbook(
                    equity_pair=equity_pair)
                self.async_result.set(orderbook)
            elif self.target.type == 'future':
                equity_pair = self.target.equity_pair
                contract_type = self.target.contract_type
                orderbook = self.client.get_future_orderbook(
                    equity_pair=equity_pair, contract_type=contract_type)
                self.async_result.set(orderbook)
        except Exception as e:
            self.async_result.set_exception(e)
Example #3
class DAOChallenger(object):

    request_timeout = 8.

    def __init__(self, chainservice, proto):
        self.chainservice = chainservice
        self.config = chainservice.config['eth']['block']
        self.proto = proto
        self.deferred = None
        gevent.spawn(self.run)

    def run(self):
        self.deferred = AsyncResult()
        self.proto.send_getblockheaders(self.config['DAO_FORK_BLKNUM'], 1, 0)
        try:
            dao_headers = self.deferred.get(block=True,
                                            timeout=self.request_timeout)
            log.debug("received DAO challenge answer",
                      proto=self.proto,
                      answer=dao_headers)
            result = len(dao_headers) == 1 and \
                    dao_headers[0].hash == self.config['DAO_FORK_BLKHASH'] and \
                    dao_headers[0].extra_data == self.config['DAO_FORK_BLKEXTRA']
            self.chainservice.on_dao_challenge_answer(self.proto, result)
        except gevent.Timeout:
            log.debug('challenge dao timed out', proto=self.proto)
            self.chainservice.on_dao_challenge_answer(self.proto, False)

    def receive_blockheaders(self, proto, blockheaders):
        log.debug('blockheaders received', proto=proto, num=len(blockheaders))
        if proto != self.proto:
            return
        self.deferred.set(blockheaders)
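A hedged sketch of the timeout handling used above: AsyncResult.get() raises gevent.Timeout if nothing sets the result in time, which the challenger treats as a failed answer (the simulated late reply below is made up).

import gevent
from gevent.event import AsyncResult

answer = AsyncResult()
gevent.spawn_later(2.0, answer.set, ['simulated-header'])  # reply arrives too late

try:
    headers = answer.get(block=True, timeout=0.5)  # mirrors request_timeout above
except gevent.Timeout:
    headers = None  # no answer in time; the example above reports failure here
print(headers)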
Example #4
    def send_async(
        self,
        receiver_address: Address,
        queue_name: bytes,
        message: Message,
    ):
        if not self._running:
            return
        self.log.info(
            'SEND ASYNC',
            receiver_address=to_normalized_address(receiver_address),
            message=message,
            queue_name=queue_name,
        )
        if not is_binary_address(receiver_address):
            raise ValueError('Invalid address {}'.format(
                pex(receiver_address)))

        # These are not protocol messages, but transport specific messages
        if isinstance(message, (Delivered, Ping, Pong)):
            raise ValueError(
                'Do not use send_async for {} messages'.format(
                    message.__class__.__name__), )

        message_id = message.message_identifier
        async_result = AsyncResult()
        if isinstance(message, Processed):
            async_result.set(
                True)  # processed messages shouldn't get a Delivered reply
            self._send_immediate(receiver_address,
                                 json.dumps(message.to_dict()))
        else:
            self._messageids_to_asyncresult[message_id] = async_result
            self._send_with_retry(receiver_address, async_result,
                                  json.dumps(message.to_dict()))
Example #5
    def register_secret(self, secret: typing.Secret):
        secrethash = sha3(secret)
        if self.check_registered(secrethash):
            log.info(
                'secret already registered',
                node=pex(self.node_address),
                contract=pex(self.address),
                secrethash=encode_hex(secrethash),
            )
            return

        log.info(
            'registerSecret called',
            node=pex(self.node_address),
            contract=pex(self.address),
        )

        if secret not in self.open_secret_transactions:
            secret_registry_transaction = AsyncResult()
            self.open_secret_transactions[secret] = secret_registry_transaction
            try:
                transaction_hash = self._register_secret(secret)
            except Exception as e:
                secret_registry_transaction.set_exception(e)
                raise
            else:
                secret_registry_transaction.set(transaction_hash)
            finally:
                self.open_secret_transactions.pop(secret, None)
        else:
            transaction_hash = self.open_secret_transactions[secret].get()
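The interesting idiom above is deduplicating in-flight work: the first caller stores an AsyncResult in open_secret_transactions and performs the transaction, while concurrent callers for the same secret just block on that result. A minimal sketch of the idea (the call_once/do_work names are hypothetical):

from gevent.event import AsyncResult

open_calls = {}

def call_once(key, do_work):
    if key not in open_calls:
        result = AsyncResult()
        open_calls[key] = result
        try:
            value = do_work()
        except Exception as e:
            result.set_exception(e)  # wake up waiters with the same error
            raise
        else:
            result.set(value)
        finally:
            open_calls.pop(key, None)
        return value
    # Another greenlet is already doing the work for this key; wait for its result.
    return open_calls[key].get()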
Example #6
    def test__control_flow_cancelled_call(self):
        svc = self._make_service()
        p = IonProcessThread(name=sentinel.name, listeners=[], service=svc)
        p.start()
        p.get_ready_event().wait(timeout=5)
        self.addCleanup(p.stop)

        # put a call in that will never finish
        waitar = AsyncResult()      # test specific, wait for this to indicate we're being processed/hung
        callar = AsyncResult()      # test specific, an ar that is just waited on by the spin call (eventually set in this test)
        def spin(inar, outar):
            outar.set(True)
            inar.wait()

        ar = p._routing_call(spin, MagicMock(), callar, waitar)

        # schedule a second call that we're going to cancel
        futurear = AsyncResult()
        ar2 = p._routing_call(futurear.set, MagicMock(), sentinel.val)

        # wait until we get notice we're being processed
        waitar.get(timeout=2)

        # cancel the SECOND call
        p.cancel_or_abort_call(ar2)

        # prove we didn't interrupt the current proc by allowing it to continue
        callar.set()
        ar.get(timeout=2)

        # now the second proc will get queued and never called because it is cancelled
        self.assertRaises(Timeout, futurear.get, timeout=2)
        self.assertTrue(ar2.ready())
Example #7
class InstrumentAgentEventSubscribers(object):
    """
    Create subscribers for agent and driver events.
    """
    log.info("Start event subscribers")
    def __init__(self, instrument_agent_resource_id = None):
        # Start event subscribers, add stop to cleanup.
        self.no_events = None
        self.events_received = []
        self.async_event_result = AsyncResult()
        self.event_subscribers = []

        def consume_event(*args, **kwargs):
            log.debug('#**#**# Event subscriber (consume_event) received ION event: args=%s, kwargs=%s, event=%s.',
                str(args), str(kwargs), str(args[0]))

            log.debug("self.no_events = " + str(self.no_events))
            log.debug("self.event_received = " + str(self.events_received))

            self.events_received.append(args[0])
            if self.no_events and self.no_events == len(self.events_received):
                log.debug("CALLING self.async_event_result.set()")
                self.async_event_result.set()


        self.event_subscribers = EventSubscriber(
            event_type='ResourceAgentEvent', callback=consume_event,
            origin=instrument_agent_resource_id)
        self.event_subscribers.start()
        self.event_subscribers._ready_event.wait(timeout=5)
Example #8
    def __init__(self, rpc_destination, s_stomp, timeout=3600,
                 init_timeout=None):
        """
        Initialize an RpcProxy with a remote RpcProvider at rpc_destination
        through a SynchronousStomp object (s_stomp). If timeout is not None
        (default one hour) if any conversation blocks for that amount of time
        before receiving a reply, the call will raise a gevent.Timeout.
        """
        self._rpc_destination = rpc_destination
        self._s_stomp = s_stomp
        self._services = {}
        self._class = None
        self._timeout = timeout

        # Initialize the alternate proxy table
        self_result = AsyncResult()
        self_result.set(self)
        self._timeout_proxies = {timeout: self_result}

        # n.b. After this point, no new attributes may be added
        conv = self._make_conversation()
        msg = self._build_rpc_request('.list_services')
        # This will use self._timeout if init_timeout is None
        reply = conv.reply(msg, timeout=init_timeout)

        # Store the new representation
        self._services = reply['services']
        self._docs = reply['services_doc']
        self._class = reply.get('name', None)
Example #9
def fake_async(obj):
    """
    For mocking RPC functions which will be called async
    """
    asr = AsyncResult()
    asr.set(obj)
    return asr
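A hedged usage sketch: because the AsyncResult is already set, a mocked RPC method returns a value that can be fetched immediately (the rpc/get_status names below are made up; fake_async is assumed to be in scope from the example above).

from unittest import mock

rpc = mock.Mock()
rpc.get_status.return_value = fake_async({'healthy': True})

assert rpc.get_status().get(timeout=1) == {'healthy': True}  # returns without blocking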
Example #10
    def send_async(
        self,
        receiver_address: Address,
        queue_name: bytes,
        message: Message,
    ):
        if not self._running:
            return
        self.log.info(
            'SEND ASYNC',
            receiver_address=to_normalized_address(receiver_address),
            message=message,
            queue_name=queue_name,
        )
        if not is_binary_address(receiver_address):
            raise ValueError('Invalid address {}'.format(pex(receiver_address)))

        # These are not protocol messages, but transport specific messages
        if isinstance(message, (Delivered, Ping, Pong)):
            raise ValueError(
                'Do not use send_async for {} messages'.format(message.__class__.__name__),
            )

        message_id = message.message_identifier
        async_result = AsyncResult()
        if isinstance(message, Processed):
            async_result.set(True)  # processed messages shouldn't get a Delivered reply
            self._send_immediate(receiver_address, json.dumps(message.to_dict()))
        else:
            self._messageids_to_asyncresult[message_id] = async_result
            self._send_with_retry(receiver_address, async_result, json.dumps(message.to_dict()))
Example #12
class LogListenerTask(Task):
    def __init__(self, filter_, callback, contract_translator):
        super(LogListenerTask, self).__init__()

        self.filter_ = filter_
        self.callback = callback
        self.contract_translator = contract_translator

        self.stop_event = AsyncResult()
        self.sleep_time = 0.5

    def _run(self):  # pylint: disable=method-hidden
        stop = None

        while stop is None:
            filter_changes = self.filter_.changes()

            for log_event in filter_changes:
                event = self.contract_translator.decode_event(
                    log_event['topics'],
                    log_event['data'],
                )

                if event is not None:
                    originating_contract = log_event['address']
                    self.callback(originating_contract, event)

            stop = self.stop_event.wait(self.sleep_time)

    def stop(self):
        self.stop_event.set(True)
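The stop_event above doubles as an interruptible sleep: wait(sleep_time) returns None after the timeout so the polling loop continues, while stop() sets a value so the next wait() returns it and the loop exits. A minimal sketch of that idiom (the poller body is a stand-in for processing filter changes):

import gevent
from gevent.event import AsyncResult

stop_event = AsyncResult()

def poller():
    while stop_event.wait(0.5) is None:  # returns None until stop_event is set
        print('polling...')              # stand-in for processing filter changes

worker = gevent.spawn(poller)
gevent.sleep(1.2)
stop_event.set(True)  # the next wait() returns True and the loop ends
worker.join()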
Example #14
class TestAsyncResult(object):
    def __init__(self):
        self.event = AsyncResult()
 
    def run(self):
        producers = [gevent.spawn(self._producer, i) for i in xrange(3)]
        consumers = [gevent.spawn(self._consumer, i) for i in xrange(3)]
        tasks     = []
        tasks.extend(producers)
        tasks.extend(consumers)
        gevent.joinall(tasks)
 
    def _producer(self, pid):
        print("I'm producer %d and now I don't want consume to do something" % (pid,))
        sleeptime = random.randint(5, 10) * 0.01
        print("Sleeping time is %f" % (sleeptime, ))
        gevent.sleep(sleeptime)
        print("I'm producer %d and now consumer could do something." % (pid,))
        self.event.set('producer pid %d' % (pid, ))
        
    def _consumer(self, pid):
        print("I'm consumer %d and now I'm waiting for producer" % (pid,))
        gevent.sleep(random.randint(0, 5) * 0.01)
        value = self.event.wait()
        print("I'm consumer %d. Value is %r and now I can do something" % (pid, value))
Example #15
    def pozt(self, LocalMod):
        rvData = ""
        try:
            data = LocalMod["json"]
            dataEncoded = json.dumps(data)
            #req = self.http.request('POST', self.JITHashServer, body=dataEncoded)
            req = self.http.urlopen('POST',
                                    self.JITHashServer,
                                    headers=self.headers,
                                    body=dataEncoded)
            #response = self.http.urlopen(req)
            #rvData = req.data
        except HTTPError as inst:
            if inst.code == 204:
                return rvData
        except:
            print("{}{}".format(fg("red"),
                                "SERVER FAILED DESPITE MULTIPLE ATTEMPTS"))
            print("{}{}{}[{}]".format(fg("navajo_white_1"), "Exception ",
                                      fg("light_magenta"),
                                      str(sys.exc_info()[0])))
            for x in sys.exc_info():
                print("{}{}".format(fg("hot_pink_1b"), x))
        finally:
            a = AsyncResult()
            a.set(req.data)
            LocalMod["resp"] = a.get(block=True)
            req.release_conn()
            self.output(LocalMod)

        return LocalMod
Example #16
def _event():
    # greenlet communication primitives
    e = Event()
    ae = AsyncResult()
    e.wait()  # block until another greenlet calls set()
    e.set()  # wake up waiters
    ae.wait()  # block until another greenlet calls set()
    ae.set('')  # wake up waiters; can also carry a value
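A hedged sketch of the difference the snippet above hints at: an Event only wakes its waiters, while an AsyncResult wakes them with a value (the waiter and payload below are made up).

import gevent
from gevent.event import Event, AsyncResult

e = Event()
ar = AsyncResult()

def waiter():
    e.wait()                                   # wakes up, but carries no payload
    print('event fired, payload:', ar.wait())  # wakes up together with a value

g = gevent.spawn(waiter)
e.set()
ar.set('hello')
g.join()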
Example #17
class WSGIServer(pywsgi.WSGIServer):
    def __init__(self, app):
        pywsgi.WSGIServer.__init__(self, ("127.0.0.1", 0), app)
        self.localport = AsyncResult()
        
    def start_accepting(self):
        self.localport.set(self.socket.getsockname()[1])
        pywsgi.WSGIServer.start_accepting(self)
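Here the AsyncResult publishes the server's ephemeral port once the listening socket exists, so a test can block until the server is actually accepting. A hedged usage sketch (the trivial app below is made up; the WSGIServer subclass above is assumed to be in scope):

import gevent

def app(environ, start_response):
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return [b'ok']

server = WSGIServer(app)
gevent.spawn(server.serve_forever)
port = server.localport.get(timeout=5)  # blocks until start_accepting() publishes the port
print('listening on 127.0.0.1:%d' % port)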
Example #19
class GConnection(Async):
    def __init__(self, *args, **kwargs):
        """
        This class is a 'GEvent'-optimized subclass of libcouchbase
        which uses the underlying IOPS structures and the gevent
        event primitives to make efficient use of coroutine switching.
        """
        super(GConnection, self).__init__(IOPS(), *args, **kwargs)

    def _do_ctor_connect(self):
        if self.connected:
            return

        self._connect()
        self._evconn = AsyncResult()
        self._conncb = self._on_connected
        self._evconn.get()
        self._evconn = None

    def _on_connected(self, err):
        if err:
            self._evconn.set_exception(err)
        else:
            self._evconn.set(None)

    def _waitwrap(self, cbasync):
        cur_thread = getcurrent()
        cbasync.callback = cur_thread.switch
        cbasync.errback = lambda r, x, y, z: cur_thread.throw(x, y, z)

        return get_hub().switch()

    def _meth_factory(meth, name):
        def ret(self, *args, **kwargs):
            return self._waitwrap(meth(self, *args, **kwargs))

        return ret

    def _http_request(self, **kwargs):
        res = super(GConnection, self)._http_request(**kwargs)
        if kwargs.get('chunked', False):
            return res  #views

        e = Event()
        res._callback = lambda x, y: e.set()

        e.wait()

        res._maybe_raise()
        return res

    def query(self, *args, **kwargs):
        kwargs['itercls'] = GView
        ret = super(GConnection, self).query(*args, **kwargs)
        ret.start()
        return ret

    locals().update(Async._gen_memd_wrappers(_meth_factory))
Example #20
class GConnection(Async):
    def __init__(self, *args, **kwargs):
        """
        This class is a 'GEvent'-optimized subclass of libcouchbase
        which uses the underlying IOPS structures and the gevent
        event primitives to make efficient use of coroutine switching.
        """
        experimental.enabled_or_raise()
        super(GConnection, self).__init__(IOPS(), *args, **kwargs)

    def _do_ctor_connect(self):
        if self.connected:
            return

        self._connect()
        self._evconn = AsyncResult()
        self._conncb = self._on_connected
        self._evconn.get()
        self._evconn = None

    def _on_connected(self, err):
        if err:
            self._evconn.set_exception(err)
        else:
            self._evconn.set(None)

    def _waitwrap(self, cbasync):
        cur_thread = getcurrent()
        cbasync.callback = cur_thread.switch
        cbasync.errback = lambda r, x, y, z: cur_thread.throw(x, y, z)

        return get_hub().switch()

    def _meth_factory(meth, name):
        def ret(self, *args, **kwargs):
            return self._waitwrap(meth(self, *args, **kwargs))
        return ret

    def _http_request(self, **kwargs):
        res = super(GConnection, self)._http_request(**kwargs)
        if kwargs.get('chunked', False):
            return res #views

        e = Event()
        res._callback = lambda x, y: e.set()

        e.wait()

        res._maybe_raise()
        return res

    def query(self, *args, **kwargs):
        kwargs['itercls'] = GView
        ret = super(GConnection, self).query(*args, **kwargs)
        ret.start()
        return ret

    locals().update(Async._gen_memd_wrappers(_meth_factory))
Example #21
    def get_dirty_values_async_result(self):
        if self.mode == 'r':
            log.warn('Coverage not open for writing: mode=%s', self.mode)
            from gevent.event import AsyncResult
            ret = AsyncResult()
            ret.set(True)
            return ret

        return self._persistence_layer.get_dirty_values_async_result()
Example #22
class DiskSubscription(object):
    """A Subscription used by a PubSub channel backed by DiskCache

    Attributes:
        channel (str): The channel name associated with this subscription
        _listener (generator): The generator which yields the new values in the channel
        _result (AsyncResult): The result of the latest value in the channel
        _sync (Event): A synchronization event used to block the channel from yielding new values

    Args:
        channel (str): The channel name associated with this subscription
    """

    def __init__(self, channel):
        self.channel = channel
        self._listener = None
        self._result = AsyncResult()
        self._sync = Event()

    def listen(self):
        """Listen for updates in this channel

        Returns:
            (generator): A generator which yields new values in the channel
        """
        if self._listener is None:
            self._listener = self._listen()
        return self._listener

    def _listen(self):
        """Listen for updates in this channel and yield the results

        Yields:
            The new values in this channel
        """
        while True:
            self._sync.wait()
            result = self._result.get()
            if result == unsubscribe_message:
                break
            else:
                yield result
                sleep(0)

    def push(self, value):
        """Push a new value to the channel

        Args:
            value: The value to push to the channel
        """
        if self._listener is not None:
            self._result.set(value)
            sleep(0)
            self._sync.set()
            sleep(0)
            self._sync.clear()
Example #23
    def handshake(self):
        """returns None on error or (node_host, node_port)
        """
        data = {}
        data["name"] = base64.standard_b64encode(
            login.encrypt('frontend', socket.gethostname()))
        data["system"] = platform.system()
        data["machine"] = platform.machine()
        data["platform"] = platform.platform()
        data["release"] = platform.release()
        data["version"] = platform.version()

        result = AsyncResult()
        rid = str(id(result))
        self.login_results[rid] = result
        key = login.generate_backend_key()
        from .. import patch
        payload = {
            'id': rid,
            'version': proto.VERSION,
            'branch': patch.config.branch,
            'commit_id': patch.core_source.version,
            'l': login.get('login'),
            'system': data,
        }
        message_key = proto.pack_message('backend',
                                         'api.set_key',
                                         payload=dict(key=key),
                                         encrypt="rsa")
        message = proto.pack_message('backend', 'api.login', payload=payload)

        try:
            self.send_message(message_key)
            self.send_message(message)
        except AttributeError:
            result.set([False, 'Client login error'])
            return
        try:
            result = result.get(timeout=20)
        except gevent.Timeout:
            result = ["False", "Login timed out"]
        finally:
            try:
                del self.login_results[rid]
            except KeyError:
                pass

        if not result[0]:
            log.error('login failed: {}'.format(result[1]))
            if result[1] == 'Invalid Login Credentials':
                self.connect_retry = 0
                login.logout()
            return False
        return True
Example #24
 def exchange_async(self, type_, base_amount, quote_amount, self_address, target_address, identifier):
     result_async = AsyncResult()
     key = hash(str(type_) + str(base_amount) + str(quote_amount) + str(self_address) + str(target_address) + str(
         identifier))
     if key in self.expected:
         self.expected[key][0].set(True)
         result_async.set(True)
         del self.expected[key]
     else:
         result_async.set(False)
     return result_async
Example #25
        def wrapper(*args, **kwargs):
            if not ThreadPool.isMainThread():
                return ThreadPool.main_loop.call(wrapper, *args, **kwargs)

            if self.ignore_class:
                key = func  # Unique key only by function and class object
            elif self.ignore_args:
                key = (func, args[0]
                       )  # Unique key only by function and class object
            else:
                key = (func, tuple(args), str(kwargs)
                       )  # Unique key for function including parameters
            if key in self.threads:  # Thread already running (if using blocking mode)
                if self.queue:
                    self.queued = True
                thread = self.threads[key]
                if self.blocking:
                    if self.queued:
                        res = thread.get()  # Blocking until its finished
                        if key in self.threads:
                            return self.threads[key].get(
                            )  # Queue finished since started running
                        self.queued = False
                        return wrapper(*args,
                                       **kwargs)  # Run again after the end
                    else:
                        return thread.get()  # Return the value

                else:  # No blocking
                    if thread.ready():  # Its finished, create a new
                        thread = gevent.spawn(func, *args, **kwargs)
                        self.threads[key] = thread
                        return thread
                    else:  # Still running
                        return thread
            else:  # Thread not running
                if self.blocking:  # Wait for finish
                    asyncres = AsyncResult()
                    self.threads[key] = asyncres
                    try:
                        res = func(*args, **kwargs)
                        asyncres.set(res)
                        self.cleanup(key, asyncres)
                        return res
                    except Exception as err:
                        asyncres.set_exception(err)
                        self.cleanup(key, asyncres)
                        raise (err)
                else:  # No blocking just return the thread
                    thread = gevent.spawn(func, *args,
                                          **kwargs)  # Spawning new thread
                    thread.link(lambda thread: self.cleanup(key, thread))
                    self.threads[key] = thread
                    return thread
Example #26
class DataMonitor(object):
    _STOP_REQUEST = object()

    def __init__(self, client, path, callback, args, kwargs):
        self.client = client
        self.path = path
        self.callback = callback
        self.args = args
        self.kwargs = kwargs
        self.started = AsyncResult()
        self.queue = Queue()
        self._delay = 1.343
        self.max_delay = 180

    def _monitor(self):
        """Run the monitoring loop."""
        def watcher(event):
            self.queue.put(event)

        while True:
            try:
                data, stat = self.client.get(self.path, watcher)
            except zookeeper.NoNodeException:
                if not self.started.ready():
                    self.started.set(None)
                gevent.sleep(1)
                continue
            except (zookeeper.ConnectionLossException,
                    zookeeper.SessionExpiredException,
                    zookeeper.InvalidStateException), err:
                if not self.started.ready():
                    self.started.set_exception(err)
                    break
                logging.error("got %r while monitoring %s", str(err),
                              self.path)
                gevent.sleep(self._delay)
                self._delay += self._delay * random.random()
                self._delay = min(self._delay, self.max_delay)
                continue
            except Exception, err:
                if not self.started.ready():
                    self.started.set_exception(err)
                    break
                raise

            self.callback(data, *self.args, **self.kwargs)

            if not self.started.ready():
                self.started.set(None)

            event = self.queue.get()
            if event is self._STOP_REQUEST:
                break
Example #27
class AsyncResultWaiter(object):
    """
    Class that makes waiting for an async result notification easy.
    Creates a subscriber for a generated token name, which can be handed to the async provider.
    The provider then publishes the result to the token name when ready.
    The caller can wait for the result or timeout.
    """
    def __init__(self, process=None):
        self.process = process

        self.async_res = AsyncResult()
        self.wait_name = "asyncresult_" + create_simple_unique_id()
        if self.process:
            self.wait_name = self.wait_name + "_" + self.process.id
        # TODO: Use same mechanism as pooled RPC response endpoint (without the request)
        self.wait_sub = Subscriber(from_name=self.wait_name,
                                   callback=self._result_callback,
                                   auto_delete=True)
        self.activated = False

    def activate(self):
        if self.activated:
            raise BadRequest("Already active")
        self.listen_gl = spawn(self.wait_sub.listen
                               )  # This initializes and activates the listener
        self.wait_sub.get_ready_event().wait(timeout=1)
        self.activated = True

        return self.wait_name

    def _result_callback(self, msg, headers):
        log.debug("AsyncResultWaiter: received message")
        self.async_res.set(msg)

    def await(self, timeout=None, request_id=None):
        try:
            result = self.async_res.get(timeout=timeout)
            if request_id and isinstance(
                    result,
                    AsyncResultMsg) and result.request_id != request_id:
                log.warn("Received result for different request: %s", result)
                result = None

        except gevent.Timeout:
            raise Timeout("Timeout in AsyncResultWaiter name={}".format(
                self.wait_name))
        finally:
            self.wait_sub.deactivate()
            self.wait_sub.close()
            self.listen_gl.join(timeout=1)
            self.activated = False

        return result
Example #28
class ReadyCounter(object):
    def __init__(self):
        self.count = 0
        self.event = AsyncResult()

    def add(self):
        self.count += 1

    def ready(self):
        self.count -= 1
        if self.count == 0:
            self.event.set()
Example #30
class ESCostCollector(object):
    def __init__(self):
        self.accumulated = 0
        self.price = AsyncResult()

    def add(self, result):
        self.accumulated += result['took']

    def finalize(self):
        self.price.set(self.accumulated)

    def get_price(self):
        return self.price.get()
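A hedged usage sketch: one greenlet accumulates per-query costs and publishes the total through the AsyncResult, while another blocks on get_price() (the 'took' values are made up; ESCostCollector is assumed to be in scope from the example above).

import gevent

collector = ESCostCollector()

def aggregate():
    for took in (12, 7, 30):        # stand-ins for Elasticsearch query responses
        collector.add({'took': took})
    collector.finalize()            # publishes the accumulated total

gevent.spawn(aggregate)
print(collector.get_price())        # blocks until finalize(); prints 49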
Example #31
 def test_server_rejects_bad_version_info(self):
     server_done = AsyncResult()
     client = self.client_cxn
     def server_thread():
         socket, addr = self.server_socket.accept()
         server = ServerConnection(socket, addr)
         try:
             server.recv_message()
             raise AssertionError("expected exception not raised")
         except ConnectionError, e:
             assert_contains(str(e), "VERSION_MISMATCH")
         assert not server.connected()
         server_done.set(True)
Example #33
class PageProxy(object):
    """A page in a wiki

    The page may not be loaded when this object is created; accessing its
    attributes may block.

    A page is true in a boolean context if it exists on the wiki.
    (Note that the usage in a bool context may also block.)
    """
    def __init__(self, cache, title):
        self.title = title
        self.cache = cache
        self._result = AsyncResult()
        self.edits = {}

    def _set_result(self, contents, page_info):
        self._contents = contents
        self.page_info = page_info
        self._result.set()

    @property
    def contents(self):
        """Return true if the page exists on the wiki"""
        self._result.get()
        return self._contents

    @property
    def exists(self):
        """Return true if the page exists on the wiki"""
        return self.contents is not None

    @property
    def text(self):
        """Return the contents of the page; raise ValueError if page missing"""
        self._result.get()
        if self.exists:
            return self.contents
        else:
            raise ValueError('Page does not exist')

    def __bool__(self):
        return self.exists
    __nonzero__ = __bool__

    def edit(self, text, section=None):
        self._result.get()
        if not self.page_info['edittoken']:
            raise ValueError('This Page is not editable')
        else:
            self.edits[section] = text
            EditRequest(self.cache, self).go()
Example #34
class LogListenerTask(Task):
    def __init__(self, listener_name, filter_, callback, contract_translator,
                 events_poll_timeout=DEFAULT_EVENTS_POLL_TIMEOUT):
        super(LogListenerTask, self).__init__()

        self.listener_name = listener_name
        self.filter_ = filter_
        self.callback = callback
        self.contract_translator = contract_translator

        self.stop_event = AsyncResult()
        self.sleep_time = events_poll_timeout

        # exposes the AsyncResult timer, this allows us to raise the timeout
        # inside this Task to force an update:
        #
        #   task.kill(task.timeout)
        #
        self.timeout = None

    def __repr__(self):
        return '<LogListenerTask {}>'.format(self.listener_name)

    def _run(self):  # pylint: disable=method-hidden
        stop = None

        while stop is None:
            filter_changes = self.filter_.changes()

            for log_event in filter_changes:
                log.debug('New Events', task=self.listener_name)

                event = self.contract_translator.decode_event(
                    log_event['topics'],
                    log_event['data'],
                )

                if event is not None:
                    originating_contract = log_event['address']

                    try:
                        self.callback(originating_contract, event)
                    except:
                        log.exception('unexpected exception on log listener')

            self.timeout = Timeout(self.sleep_time)  # wait() will call cancel()
            stop = self.stop_event.wait(self.timeout)

    def stop(self):
        self.stop_event.set(True)
Example #35
class ReusableCursor(Greenlet):
    def __init__(self, pool, key, sql, values):
        super(ReusableCursor, self).__init__(self.work)
        self.pool = pool
        self.key = self._formatted_info = key
        self.sql = sql
        self.values = values
        self.offset = 0
        self.queue = Queue()
        self._count = AsyncResult()
        self.last_access = time.time()
        self.idle = False
        self.listeners = []
        self.window = config.xgetint('web', 'query-reusable-window', 30)

    @property
    def count(self):
        return self._count.get()

    def work(self):
        try:
            with self.pool.connection() as conn:
                cur = conn.cursor('_cur')
                cur.execute(self.sql, self.values)
                logging.debug(cur.query)
                cur_tmp = conn.cursor()
                cur_tmp.execute('MOVE ALL FROM _cur')
                self._count.set(int(cur_tmp.statusmessage.split()[-1]))
                cur_tmp.close()
                cur.scroll(0, 'absolute')
                while True:
                    if not self.queue.qsize():
                        self.idle = True
                        for l in self.listeners:
                            spawn(l.onIdle, self)
                    result, limit, offset = self.queue.get(timeout=self.window)
                    self.idle = False
                    if limit is None:
                        raise Killed(result)
                    if self.offset != offset:
                        cur.scroll(offset, 'absolute')
                    data = cur.fetchmany(limit)
                    self.offset = offset + limit
                    result.set(data)
                    self.last_access = time.time()
        except Empty:
            pass
        except Killed, k:
            k.result.set()
        finally:
Example #36
class Future:
    def __init__(self):
        self.result = AsyncResult()

    def set(self, value):
        self.result.set(value)

    def get(self):
        return self.result.get()

    def on_ready(self, func):
        while not self.result.ready():
            gevent.sleep(0)
        func(self.result.get())
Example #37
class Task(object):
  def __init__(self, _id, func, args, kwargs):
    self.id = _id
    self.result = AsyncResult()
    self.func = func
    self.args = args
    self.kwargs = kwargs

  def execute(self):
    try:
      self.result.set(self.func(*self.args, **self.kwargs))
    except Exception, e:
      log.exception('Task.execute: execution failed.')
      self.result.set_exception(e)
Example #38
class AnswerListener(Listener):
    next_result = None
    def on_message(self, message):
        if isinstance(message.result, OperationResult):
            self.handle_message(message.result)
    def expect_message(self):
        if self.next_result:
            raise ApeException('already waiting for a result!')
        self.next_result = AsyncResult()
        return self.next_result
    def handle_message(self, result):
        if self.next_result:
            self.next_result.set(result)
            self.next_result = None
Example #39
    def handshake(self):
        """returns None on error or (node_host, node_port)
        """
        data = {}
        data["name"] = base64.standard_b64encode(login.encrypt('frontend', socket.gethostname()))
        data["system"] = platform.system()
        data["machine"] = platform.machine()
        data["platform"] = platform.platform()
        data["release"] = platform.release()
        data["version"] = platform.version()

        result = AsyncResult()
        rid = str(id(result))
        self.login_results[rid] = result
        key = login.generate_backend_key()
        from .. import patch
        payload = {
            'id': rid,
            'version': proto.VERSION,
            'branch': patch.config.branch,
            'commit_id': patch.core_source.version,
            'l': login.get('login'),
            'system': data,
        }
        message_key = proto.pack_message('backend', 'api.set_key', payload=dict(key=key), encrypt="rsa")
        message = proto.pack_message('backend', 'api.login', payload=payload)

        try:
            self.send_message(message_key)
            self.send_message(message)
        except AttributeError:
            result.set([False, 'Client login error'])
            return
        try:
            result = result.get(timeout=20)
        except gevent.Timeout:
            result = ["False", "Login timed out"]
        finally:
            try:
                del self.login_results[rid]
            except KeyError:
                pass

        if not result[0]:
            log.error('login failed: {}'.format(result[1]))
            if result[1] == 'Invalid Login Credentials':
                self.connect_retry = 0
                login.logout()
            return False
        return True
Example #40
class Task(object):
    def __init__(self, _id, func, args, kwargs):
        self.id = _id
        self.result = AsyncResult()
        self.func = func
        self.args = args
        self.kwargs = kwargs

    def execute(self):
        try:
            self.result.set(self.func(*self.args, **self.kwargs))
        except Exception, e:
            log.exception('Task.execute: execution failed.')
            self.result.set_exception(e)
Example #41
class InstrumentAgentEventSubscribers(object):
    """
    Create subscribers for agent and driver events.
    """

    log.info("Start event subscribers")

    def __init__(self, instrument_agent_resource_id=None):
        # Start event subscribers, add stop to cleanup.
        self.no_events = None
        self.events_received = []
        self.async_event_result = AsyncResult()
        self.event_subscribers = []

        def consume_event(*args, **kwargs):
            log.debug(
                "#**#**# Event subscriber (consume_event) recieved ION event: args=%s, kwargs=%s, event=%s.",
                str(args),
                str(kwargs),
                str(args[0]),
            )

            log.debug("self.no_events = " + str(self.no_events))
            log.debug("self.event_received = " + str(self.events_received))

            self.events_received.append(args[0])
            if self.no_events and self.no_events == len(self.events_received):
                log.debug("CALLING self.async_event_result.set()")
                self.async_event_result.set()

        self.event_subscribers = EventSubscriber(
            event_type="ResourceAgentEvent", callback=consume_event, origin=instrument_agent_resource_id
        )
        self.event_subscribers.start()
        self.event_subscribers._ready_event.wait(timeout=5)

    def clear_events(self):
        """
        Reset event counter
        """
        self.events_received = []

    def stop(self):
        try:
            self.event_subscribers.stop()
        except Exception as ex:
            log.warn("Failed to stop event subscriber gracefully (%s)" % ex)

        self.event_subscribers = []
Example #42
class ClientMessage(object):
    """RPC client message that is waiting for a result"""

    queue = None

    def __init__(self, client):
        self.client = client
        self.uuid = uuid4().bytes
        self.last = time.time()
        self.result = AsyncResult()

    def init_stream(self):
        if self.queue is None:
            self.result.set(None)
            self.queue = Queue()
Example #43
class _SyncCall(object):
    def __init__(self, name, *args, **kwargs):
        self.name = name
        self._args = args
        self._kwargs = kwargs
        self._result = AsyncResult()

    def wait(self, timeout):
        return self._result.get(timeout=timeout)

    def execute(self, target):
        try:
            function = getattr(target, self.name)
            self._result.set(function(*self._args, **self._kwargs))
        except Exception as error:
            self._result.set_exception(error)
Example #44
class RunnableTest(Runnable):
    def __init__(self):
        super().__init__()

    def start(self):
        self._stop_event = AsyncResult()
        super().start()

    def _run(self, *args: Any, **kwargs: Any) -> None:
        while self._stop_event and self._stop_event.wait(0.5) is not True:
            gevent.sleep(0.1)
        return

    def stop(self):
        if self._stop_event:
            self._stop_event.set(True)
Example #45
    def test_heartbeat_failure(self):
        self.patch_cfg('pyon.ion.process.CFG', {'cc':{'timeout':{'heartbeat_proc_count_threshold':2, 'heartbeat':1.0}}})

        svc = self.container.proc_manager.procs[self.pid]
        ip = svc._process
        stopar = AsyncResult()
        self.container.proc_manager.add_proc_state_changed_callback(lambda *args: stopar.set(args))

        noticear = AsyncResult()        # notify us when the call has been made
        ar = ip._routing_call(svc.takes_too_long, None, noticear=noticear)

        noticear.get(timeout=10)        # wait for the call to be made

        # heartbeat a few times so we trigger the failure soon
        for x in xrange(2):
            ip.heartbeat()

        # wait for ip thread to kick over
        ip._ctrl_thread.join(timeout=5)

        # now wait for notice proc got canned
        stopargs = stopar.get(timeout=5)

        self.assertEquals(stopargs, (svc, ProcessStateEnum.FAILED, self.container))

        # should've shut down, no longer in container's process list
        self.assertEquals(len(self.container.proc_manager.procs), 0)
Example #46
class FutureResult(object):
    """
    Future results for asynchronous operations.
    """
    def __init__(self):
        self._result = AsyncResult()
        self.created_at = time.time()

    def get(self, timeout=None):
        return self._result.get(block=True, timeout=timeout)

    def set(self, value):
        self._result.set(value)

    def set_exception(self, exception):
        self._result.set_exception(exception)
Example #47
    def _sync_call(self, func, cb_arg, *args, **kwargs):
        """
        Functionally similar to the generic blocking_cb but with error support that's Channel specific.
        """
        ar = AsyncResult()

        def cb(*args, **kwargs):
            ret = list(args)
            if len(kwargs): ret.append(kwargs)
            ar.set(ret)

        eb = lambda ch, *args: ar.set(TransportError("_sync_call could not complete due to an error (%s)" % args))

        kwargs[cb_arg] = cb
        with self._push_close_cb(eb):
            func(*args, **kwargs)
            ret_vals = ar.get(timeout=10)

        if isinstance(ret_vals, TransportError):

            # mark this channel as poison, do not use again!
            # don't test for type here, we don't want to have to import PyonSelectConnection
            if hasattr(self._client.transport, 'connection') and hasattr(self._client.transport.connection, 'mark_bad_channel'):
                self._client.transport.connection.mark_bad_channel(self._client.channel_number)
            else:
                log.warn("Could not mark channel # (%s) as bad, Pika could be corrupt", self._client.channel_number)

            raise ret_vals

        if len(ret_vals) == 0:
            return None
        elif len(ret_vals) == 1:
            return ret_vals[0]
        return tuple(ret_vals)
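The pattern above adapts a callback-style API into a blocking call: the callback fills the AsyncResult, and the caller blocks on get() with a timeout. A minimal, self-contained sketch of the same idea (the fire_callback_later API is hypothetical):

import gevent
from gevent.event import AsyncResult

def fire_callback_later(callback):
    """Stand-in for a callback-style API that replies later."""
    gevent.spawn_later(0.1, callback, 'done')

def blocking_call():
    ar = AsyncResult()
    fire_callback_later(ar.set)  # the callback simply fills the AsyncResult
    return ar.get(timeout=10)    # block the calling greenlet until the callback fires

print(blocking_call())  # prints 'done'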
Example #48
    def test__control_flow_expired_call(self):
        svc = self._make_service()
        p = IonProcessThread(name=sentinel.name, listeners=[], service=svc)
        p.start()
        p.get_ready_event().wait(timeout=5)
        self.addCleanup(p.stop)

        def make_call(call, ctx, val):
            ar = p._routing_call(call, ctx, val)
            return ar.get(timeout=10)

        ctx = { 'reply-by' : 0 }        # no need for real time, as it compares by CURRENT >= this value
        futurear = AsyncResult()
        with patch('pyon.ion.process.greenlet') as gcm:
            waitar = AsyncResult()
            gcm.getcurrent().kill.side_effect = lambda *a, **k: waitar.set()

            ar = p._routing_call(futurear.set, ctx, sentinel.val)

            waitar.get(timeout=10)

            # futurear is not set
            self.assertFalse(futurear.ready())

            # neither is the ar we got back from routing_call
            self.assertFalse(ar.ready())

            # we should've been killed, though
            self.assertEquals(gcm.getcurrent().kill.call_count, 1)
            self.assertIsInstance(gcm.getcurrent().kill.call_args[1]['exception'], IonTimeout)

        # put a new call through (to show unblocked)
        futurear2 = AsyncResult()
        ar2 = p._routing_call(futurear2.set, MagicMock(), sentinel.val2)
        ar2.get(timeout=2)
Example #49
class InstrumentAgentEventSubscribers(object):
    """
    Create subscribers for agent and driver events.
    """
    log.info("Start event subscribers")

    def __init__(self, instrument_agent_resource_id=None):
        # Start event subscribers, add stop to cleanup.
        self.no_events = None
        self.events_received = []
        self.async_event_result = AsyncResult()
        self.event_subscribers = []

        def consume_event(*args, **kwargs):
            log.debug(
                '#**#**# Event subscriber (consume_event) received ION event: args=%s, kwargs=%s, event=%s.',
                str(args), str(kwargs), str(args[0]))

            log.debug("self.no_events = " + str(self.no_events))
            log.debug("self.event_received = " + str(self.events_received))

            self.events_received.append(args[0])
            if self.no_events and self.no_events == len(self.events_received):
                log.debug("CALLING self.async_event_result.set()")
                self.async_event_result.set()

        self.event_subscribers = EventSubscriber(
            event_type='ResourceAgentEvent',
            callback=consume_event,
            origin=instrument_agent_resource_id)
        self.event_subscribers.start()
        self.event_subscribers._ready_event.wait(timeout=5)

    def clear_events(self):
        """
        Reset event counter
        """

        self.events_received = []

    def stop(self):
        try:
            self.event_subscribers.stop()
        except Exception as ex:
            log.warn("Failed to stop event subscriber gracefully (%s)" % ex)

        self.event_subscribers = []
Example #50
class G(object):

    def __init__(self, pool_size=300, timeout=1):
        self.pool_size = pool_size
        self.timeout = timeout

        logging.basicConfig(format='%(message)s')
        self.logger = logging.getLogger(name='g')
        self.logger.setLevel(logging.INFO)

        self.pool = Pool(self.pool_size)
        self.async_result = AsyncResult()

    def url(self, ip, params=None):
        if params:
            suffix = '/search?q=%s' % urllib.quote_plus(' '.join(params))
        else:
            suffix = ''
        return 'http://%s%s' % (ip, suffix)

    def worker(self, ip):
        if not self.async_result.ready():
            try:
                resp = requests.head(self.url(ip), timeout=self.timeout)
            except requests.exceptions.RequestException:
                pass  # ignore timeouts, connection errors
            else:
                if resp.status_code == requests.codes.ok:
                    if not self.async_result.ready():
                        self.async_result.set(ip)

    def run(self):
        # read and parse ip json file
        gdir = os.path.dirname(__file__)
        path = os.path.join(gdir, 'ips.json')
        ips = json.load(open(path))
        # start a thread for pool
        gevent.spawn(self.pool.map, self.worker, ips)
        # block main thread for the 1st available IP
        try:
            ip = self.async_result.get(timeout=5)
        except gevent.timeout.Timeout:
            self.logger.info('Timeout (5s).')
        else:
            self.pool.kill()
            self.logger.info(ip)
            webbrowser.open(self.url(ip, params=sys.argv[1:]))
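The idiom above is a "first responder wins" race: many workers probe in parallel, only the first success sets the AsyncResult, and the main greenlet blocks on get() with a timeout before killing the rest of the pool. A minimal sketch (the probe function is a stand-in for the HTTP check):

import random
import gevent
from gevent.event import AsyncResult
from gevent.pool import Pool

winner = AsyncResult()

def probe(candidate):
    gevent.sleep(random.random())  # stand-in for a network round trip
    if not winner.ready():         # only the first finisher publishes a result
        winner.set(candidate)

pool = Pool(10)
gevent.spawn(pool.map, probe, ['a', 'b', 'c', 'd'])
try:
    print('first responder:', winner.get(timeout=5))
finally:
    pool.kill()                    # the race is decided; stop the remaining probes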
Example #51
 def taker_commit_async(self, offer):
     # type: (OfferDeprecated) -> AsyncResult
     result = AsyncResult()
     success = self._commitment_service_global.try_take_offer(self, offer.offer_id)
     if success is False:
         result.set(None)
         return result
     offer_msg = self.create_offer_msg(offer)
     commitment_msg = messages.TakerCommitment(offer.offer_id, offer_msg.hash, offer.timeout_date, 42)
     self._sign(commitment_msg)
     commitment_proof_msg = messages.CommitmentProof(commitment_msg.signature)
     self._global_sign(commitment_proof_msg)
     proven_commitment_msg = messages.ProvenCommitment(commitment_msg, commitment_proof_msg)
     self._sign(proven_commitment_msg)
     result.set(proven_commitment_msg)
     self.message_broker.broadcast(self.create_taken(offer.offer_id))
     return result
Example #52
class KorbitGetOrderBookTask(object):
    def __init__(self, injector, target):
        self.client = injector.korbit_client
        self.target = target
        self.async_result = AsyncResult()

    def execute(self):
        gevent.spawn(self.get_orderbook)
        return self.async_result

    def get_orderbook(self):
        try:
            _, equity_pair = self.target.orderbook_key.split(':')
            orderbook = self.client.get_orderbook(equity_pair=equity_pair)
            self.async_result.set(orderbook)
        except Exception as e:
            self.async_result.set_exception(e)
Example #53
    def get_dirty_values_async_result(self):
        return_now = False
        if self.mode == 'r':
            log.warn('PersistenceLayer not open for writing: mode=%s', self.mode)
            return_now = True

        if self.brick_dispatcher is None:
            log.debug('\'brick_dispatcher\' is None')
            return_now = True

        if return_now:
            from gevent.event import AsyncResult
            ret = AsyncResult()
            ret.set(True)
            return ret

        return self.brick_dispatcher.get_dirty_values_async_result()
Example #55
    def register_secret_batch(self, secrets: List[typing.Secret]):
        secrets_to_register = list()
        secrethashes_to_register = list()
        secrethashes_not_sent = list()
        secret_registry_transaction = AsyncResult()

        for secret in secrets:
            secrethash = sha3(secret)
            secrethash_hex = encode_hex(secrethash)

            is_register_needed = (not self.check_registered(secrethash) and
                                  secret not in self.open_secret_transactions)
            if is_register_needed:
                secrets_to_register.append(secret)
                secrethashes_to_register.append(secrethash_hex)
                self.open_secret_transactions[
                    secret] = secret_registry_transaction
            else:
                secrethashes_not_sent.append(secrethash_hex)

        log_details = {
            'node': pex(self.node_address),
            'contract': pex(self.address),
            'secrethashes': secrethashes_to_register,
            'secrethashes_not_sent': secrethashes_not_sent,
        }

        if not secrets_to_register:
            log.debug('registerSecretBatch skipped', **log_details)
            return

        log.debug('registerSecretBatch called', **log_details)

        try:
            transaction_hash = self._register_secret_batch(secrets_to_register)
        except Exception as e:
            log.critical('registerSecretBatch failed', **log_details)
            secret_registry_transaction.set_exception(e)
            raise
        else:
            log.info('registerSecretBatch successful', **log_details)
            secret_registry_transaction.set(transaction_hash)
        finally:
            for secret in secrets_to_register:
                self.open_secret_transactions.pop(secret, None)
Example #56
class AsyncResultCollector(object):
    def __init__(self, callNum):
        self.log = logging.getLogger("{module}.{name}".format(
            module=self.__class__.__module__, name=self.__class__.__name__))
        self.callNum = callNum
        self.results = {}
        self.ready = False
        self.exception = None
        self.asyncResult = AsyncResult()

    def return_response(self):
        if self.exception:
            raise self.exception

        if len(list(self.results.values())) > 1:
            return self.results
        else:
            key, value = self.results.popitem()
            return value

    def get(self, block=True, timeout=None):
        if len(list(self.results.values())) == self.callNum:
            return self.return_response()

        try:
            self.log.debug("Waiting for result in blocking call")
            self.asyncResult.get(timeout=timeout)
        except gevent.timeout.Timeout as e:
            return None
        return self.return_response()

    def set_exception(self, node, e):
        self.exception = e
        self.results[node] = e

        if len(list(self.results.values())) == self.callNum:
            self.ready = True
            self.asyncResult.set()

    def set(self, node, msg):
        self.results[node] = msg

        if len(list(self.results.values())) == self.callNum:
            self.ready = True
            self.asyncResult.set()
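A hedged usage sketch of the fan-in pattern above: each responding node calls set(), and get() unblocks once callNum results have arrived (the node names and payloads are made up; AsyncResultCollector is assumed to be in scope from the example above).

import gevent

collector = AsyncResultCollector(callNum=2)

gevent.spawn_later(0.1, collector.set, 'node-a', {'status': 'ok'})
gevent.spawn_later(0.2, collector.set, 'node-b', {'status': 'ok'})

print(collector.get(timeout=5))  # returns the {node: message} dict once both results are in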
Example #58
	def run(self):
		self.init_state()
		self.prompt()
		if self.input is None:
			self.p_gen = tokizer(self._do_parse,self.job,self.stop_client)
			return
		syn = AsyncResult()
		self.start_job("job",self._run,syn)
		self.p_gen = tokizer(self._do_parse,self.job)
		syn.set(None)

		try:
			e = self.job.get()
			if isinstance(e,BaseException):
				reraise(e)
		except StopParsing:
			pass

		self.p_gen.exit()
Example #60
 def _cancel_consumer(self, channel, consumer_tag):
     fut = AsyncResult()
     self._consumer_cancel_fut = fut
     channel.basic_cancel(
         consumer_tag=consumer_tag,
         callback=lambda _: fut.set(True),
     )
     fut.get()
     self._consumer_cancel_fut = None
     self._pika_consumers[channel.channel_number].remove(consumer_tag)