Example #1
class LogListenerTask(Task):
    def __init__(self, filter_, callback, contract_translator):
        super(LogListenerTask, self).__init__()

        self.filter_ = filter_
        self.callback = callback
        self.contract_translator = contract_translator

        self.stop_event = AsyncResult()
        self.sleep_time = 0.5

    def _run(self):  # pylint: disable=method-hidden
        stop = None

        while stop is None:
            filter_changes = self.filter_.changes()

            for log_event in filter_changes:
                event = self.contract_translator.decode_event(
                    log_event['topics'],
                    log_event['data'],
                )

                if event is not None:
                    originating_contract = log_event['address']
                    self.callback(originating_contract, event)

            stop = self.stop_event.wait(self.sleep_time)

    def stop(self):
        self.stop_event.set(True)
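The stop_event here doubles as an interruptible sleep: AsyncResult.wait(timeout) returns None when the timeout elapses and the stored value once set() is called, so the loop wakes immediately on stop(). A minimal, self-contained sketch of the same pattern (PollerTask and its print are illustrative, not part of the example above):

import gevent
from gevent.event import AsyncResult

class PollerTask(object):
    def __init__(self, sleep_time=0.5):
        self.stop_event = AsyncResult()
        self.sleep_time = sleep_time

    def _run(self):
        stop = None
        while stop is None:
            print('polling...')  # one unit of work per iteration
            # sleeps up to sleep_time, but returns True as soon as stop() fires
            stop = self.stop_event.wait(self.sleep_time)

    def stop(self):
        self.stop_event.set(True)

task = PollerTask(0.1)
glet = gevent.spawn(task._run)
gevent.sleep(0.35)  # let it poll a few times
task.stop()         # wait() now returns True and the loop exits
glet.join()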
Example #2
    def setUp(self):
        self._start_container()

        self.container.start_rel_from_url('res/deploy/r2deploy.yml')

        self.RR   = ResourceRegistryServiceClient(node=self.container.node)
        self.IMS  = InstrumentManagementServiceClient(node=self.container.node)
        self.DAMS = DataAcquisitionManagementServiceClient(node=self.container.node)
        self.DP   = DataProductManagementServiceClient(node=self.container.node)
        self.PSC  = PubsubManagementServiceClient(node=self.container.node)
        self.PDC  = ProcessDispatcherServiceClient(node=self.container.node)
        self.DSC  = DatasetManagementServiceClient()
        self.IDS  = IdentityManagementServiceClient(node=self.container.node)
        self.RR2  = EnhancedResourceRegistryClient(self.RR)


        # Use the network definition provided by RSN OMS directly.
        rsn_oms = CIOMSClientFactory.create_instance(DVR_CONFIG['oms_uri'])
        self._network_definition = RsnOmsUtil.build_network_definition(rsn_oms)
        # get serialized version for the configuration:
        self._network_definition_ser = NetworkUtil.serialize_network_definition(self._network_definition)
        if log.isEnabledFor(logging.TRACE):
            log.trace("NetworkDefinition serialization:\n%s", self._network_definition_ser)


        self._async_data_result = AsyncResult()
        self._data_subscribers = []
        self._samples_received = []
        self.addCleanup(self._stop_data_subscribers)

        self._async_event_result = AsyncResult()
        self._event_subscribers = []
        self._events_received = []
        self.addCleanup(self._stop_event_subscribers)
        self._start_event_subscriber()
Example #3
    def send_async(
        self,
        receiver_address: Address,
        queue_name: bytes,
        message: Message,
    ):
        if not self._running:
            return
        self.log.info(
            'SEND ASYNC',
            receiver_address=to_normalized_address(receiver_address),
            message=message,
            queue_name=queue_name,
        )
        if not is_binary_address(receiver_address):
            raise ValueError('Invalid address {}'.format(pex(receiver_address)))

        # These are not protocol messages, but transport specific messages
        if isinstance(message, (Delivered, Ping, Pong)):
            raise ValueError(
                'Do not use send_async for {} messages'.format(message.__class__.__name__),
            )

        message_id = message.message_identifier
        async_result = AsyncResult()
        if isinstance(message, Processed):
            async_result.set(True)  # processed messages shouldn't get a Delivered reply
            self._send_immediate(receiver_address, json.dumps(message.to_dict()))
        else:
            self._messageids_to_asyncresult[message_id] = async_result
            self._send_with_retry(receiver_address, async_result, json.dumps(message.to_dict()))
Example #4
 def request(self, service_name, data=""):
     """
     这里将数据写到缓冲队列,然后让当前请求协程在event上等待
     """
     if self._is_closed:
         logging.error("request in a close cliet")
         raise Exception("connection closed")
     rid = self.rid
     req = (service_name, data, rid)
     req_data = cp_dumps(req)
     req_data_len = len(req_data)
     head = s_pack("i", req_data_len)
     self._send_data.extend((head, req_data))
     if not self._write_open:
         self._write()                                          # try an immediate write to reduce latency
         self._start_write()
     event = AsyncResult()
     self._events[rid] = event
     try:
         out = event.get(timeout=DEFAULT_REQUEST_TIMEOUT)       # wait on this event, blocking the current greenlet
         if self._is_closed:
             raise Exception("connection disconnected")
         return out
     finally:
         del self._events[rid]
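The rid-to-AsyncResult table only works if a reader greenlet resolves the pending events. A hedged sketch of that counterpart, which is not shown in the example; the _on_response name and the (rid, payload) reply shape are assumptions, only self._events comes from the code above:

 def _on_response(self, rid, payload):
     # resolve the AsyncResult the request() greenlet is blocked on;
     # event.get() above then returns `payload`
     event = self._events.get(rid)
     if event is not None:
         event.set(payload)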
Example #5
 def test_result_cb_error(self):
     result = AsyncResult()
     DNSResolver._result_cb(result, 13, pycares.errno.ARES_ENOTFOUND)
     with self.assertRaises(DNSError) as cm:
         result.get()
     self.assertEqual('Domain name not found [ARES_ENOTFOUND]',
                      str(cm.exception))
Example #6
def mediated_transfer(initiator_app, target_app, asset, amount, identifier=None):
    """ Nice to read shortcut to make a MediatedTransfer.

    The secret will be revealed and the apps will be synchronized.
    """
    # pylint: disable=too-many-arguments

    assetmanager = initiator_app.raiden.managers_by_asset_address[asset]
    has_channel = initiator_app.raiden.address in assetmanager.partneraddress_channel

    # api.transfer() would do a DirectTransfer
    if has_channel:
        transfermanager = assetmanager.transfermanager
        # Explicitly call the default identifier creation since this mock
        # function here completely skips the `transfer_async()` call.
        if not identifier:
            identifier = transfermanager.create_default_identifier(target_app.raiden.address)

        result = AsyncResult()
        task = StartMediatedTransferTask(
            transfermanager,
            amount,
            identifier,
            target_app.raiden.address,
            result,
        )
        task.start()
        result.wait()
    else:
        initiator_app.raiden.api.transfer(
            asset,
            amount,
            target_app.raiden.address,
            identifier
        )
Example #7
    def request_many(self, reqs):
        """
        salvo simultaneous query many service and get result
        """
        local_quests = {}
        for idx, req in enumerate(reqs):
            ar = AsyncResult()
            qid = self.next_qid()
            self.emit_query(qid, *req)
            self._query_list[qid] = ar
            local_quests[qid] = (idx, ar)

        for qid, (idx, ar) in local_quests.items():
            # each AsyncResult always receives its own response, even if
            # other queries were answered first
            msg = ar.get()
            if msg:
                if msg.status:
                    yield ProxyError(qid, msg.status, msg.params), idx
                else:
                    yield msg.data, idx
Example #8
class InstrumentAgentEventSubscribers(object):
    """
    Create subscribers for agent and driver events.
    """
    log.info("Start event subscribers")
    def __init__(self, instrument_agent_resource_id = None):
        # Start event subscribers, add stop to cleanup.
        self.no_events = None
        self.events_received = []
        self.async_event_result = AsyncResult()
        self.event_subscribers = []

        def consume_event(*args, **kwargs):
            log.debug('#**#**# Event subscriber (consume_event) received ION event: args=%s, kwargs=%s, event=%s.',
                str(args), str(kwargs), str(args[0]))

            log.debug("self.no_events = " + str(self.no_events))
            log.debug("self.events_received = " + str(self.events_received))

            self.events_received.append(args[0])
            if self.no_events and self.no_events == len(self.events_received):
                log.debug("CALLING self.async_event_result.set()")
                self.async_event_result.set()


        self.event_subscribers = EventSubscriber(
            event_type='ResourceAgentEvent', callback=consume_event,
            origin=instrument_agent_resource_id)
        self.event_subscribers.start()
        self.event_subscribers._ready_event.wait(timeout=5)
Example #9
    def __init__(self, rpc_destination, s_stomp, timeout=3600,
                 init_timeout=None):
        """
        Initialize an RpcProxy with a remote RpcProvider at rpc_destination
        through a SynchronousStomp object (s_stomp). If timeout is not None
        (default one hour) if any conversation blocks for that amount of time
        before receiving a reply, the call will raise a gevent.Timeout.
        """
        self._rpc_destination = rpc_destination
        self._s_stomp = s_stomp
        self._services = {}
        self._class = None
        self._timeout = timeout

        # Initialize the alternate proxy table
        self_result = AsyncResult()
        self_result.set(self)
        self._timeout_proxies = {timeout: self_result}

        # n.b. After this point, no new attributes may be added
        conv = self._make_conversation()
        msg = self._build_rpc_request('.list_services')
        # This will use self._timeout if init_timeout is None
        reply = conv.reply(msg, timeout=init_timeout)

        # Store the new representation
        self._services = reply['services']
        self._docs = reply['services_doc']
        self._class = reply.get('name', None)
Example #10
def actor_exec(node, fn, *args, **kwargs):
    ret = AsyncResult()

    class ExecActor(Actor):
        def run(self):
            ret.set(fn(*args, **kwargs))

    node.spawn(ExecActor)
    return ret.get()
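Called like a regular function, actor_exec turns the actor's asynchronous run() into a synchronous call. A hypothetical usage, assuming node is a spawn-capable node object from the surrounding framework:

value = actor_exec(node, pow, 2, 10)  # blocks until ExecActor sets the result
assert value == 1024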
Example #11
    def setUp(self):
        """
        Initialize test members.
        Start port agent.
        Start container and client.
        Start streams and subscribers.
        Start agent, client.
        """

        TrhphTestCase.setUp(self)

        # Start port agent, add stop to cleanup.
        self._pagent = None
        self._start_pagent()
        self.addCleanup(self._stop_pagent)

        # Start container.
        self._start_container()

        # Bring up services in a deploy file (no need to message)
        self.container.start_rel_from_url("res/deploy/r2dm.yml")

        # Start data suscribers, add stop to cleanup.
        # Define stream_config.
        self._no_samples = None
        self._async_data_result = AsyncResult()
        self._data_greenlets = []
        self._stream_config = {}
        self._samples_received = []
        self._data_subscribers = []
        self._start_data_subscribers()
        self.addCleanup(self._stop_data_subscribers)

        # Start event subscribers, add stop to cleanup.
        self._no_events = None
        self._async_event_result = AsyncResult()
        self._events_received = []
        self._event_subscribers = []
        self._start_event_subscribers()
        self.addCleanup(self._stop_event_subscribers)

        # Create agent config.
        agent_config = {
            "driver_config": DVR_CONFIG,
            "stream_config": self._stream_config,
            "agent": {"resource_id": IA_RESOURCE_ID},
            "test_mode": True,
        }

        # Start instrument agent.
        self._ia_pid = None
        log.debug("TestInstrumentAgent.setup(): starting IA.")
        container_client = ContainerAgentClient(node=self.container.node, name=self.container.name)
        self._ia_pid = container_client.spawn_process(name=IA_NAME, module=IA_MOD, cls=IA_CLS, config=agent_config)
        log.info("Agent pid=%s.", str(self._ia_pid))

        # Start a resource agent client to talk with the instrument agent.
        self._ia_client = None
        self._ia_client = ResourceAgentClient(IA_RESOURCE_ID, process=FakeProcess())
        log.info("Got ia client %s.", str(self._ia_client))
Example #12
def fake_async(obj):
    """
    For mocking RPC functions which will be called async
    """
    asr = AsyncResult()
    asr.set(obj)
    return asr
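Because the AsyncResult is already resolved, test code can stub out an asynchronous RPC without any scheduling. A small usage sketch; client.fetch_async is a hypothetical method name:

from unittest import mock

client = mock.Mock()
client.fetch_async.return_value = fake_async({'status': 'ok'})

# code under test that calls client.fetch_async(...).get() now returns
# immediately with the canned value instead of blocking on the network
assert client.fetch_async('key').get() == {'status': 'ok'}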
Example #13
        def rpc(request=None, conn=None, connections=None, timeout=None,
                **kwargs):
            request = request or request_class(**kwargs)

            meta = self.meta
            meta.Clear()
            meta.service_method = service_method
            meta.packet_type = packet_type
            if connections is not None:
                for conn in connections:
                    conn.send(conn.pack_meta(meta, request))
            else:
                conn = conn or self.conn or \
                    self.connection_pool.get_connection()
                assert conn, conn
                if require_response:
                    meta.transmission_id = conn.transmission_id
                conn.transmission_id += 1
                conn.send(conn.pack_meta(meta, request))

                if hasattr(conn, 'release'):
                    conn.release()
                if not require_response:
                    return

                async_result = AsyncResult()
                conn.transmissions[meta.transmission_id] = async_result
                return async_result.get(timeout=timeout or self.timeout)
Example #14
    def multipublish(self, topic, messages, block=True, timeout=None,
                     raise_error=True):
        """Publish an iterable of messages to the given topic.

        :param topic: the topic to publish to

        :param messages: iterable of bytestrings to publish

        :param block: wait for a connection to become available before
            publishing the message. If block is `False` and no connections
            are available, :class:`~gnsq.errors.NSQNoConnections` is raised

        :param timeout: if timeout is a positive number, it blocks at most
            ``timeout`` seconds before raising
            :class:`~gnsq.errors.NSQNoConnections`

        :param raise_error: if ``True``, it blocks until a response is received
            from the nsqd server, and any error response is raised. Otherwise
            an :class:`~gevent.event.AsyncResult` is returned
        """
        result = AsyncResult()
        conn = self._get_connection(block=block, timeout=timeout)

        try:
            self._response_queues[conn].append(result)
            conn.multipublish(topic, messages)
        finally:
            self._put_connection(conn)

        if raise_error:
            return result.get()

        return result
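A hedged usage sketch of the two modes; producer stands in for whatever gnsq object exposes multipublish() as above. With raise_error left True the call blocks and raises on an error response; with raise_error=False the caller receives the AsyncResult and decides when to block:

messages = [b'event-1', b'event-2', b'event-3']

producer.multipublish('topic', messages)  # blocks, raises on an error response

result = producer.multipublish('topic', messages, raise_error=False)
response = result.get(timeout=5)  # re-raises any stored error response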
Example #15
 def test_run_multiple(self):
     result1 = AsyncResult()
     result2 = AsyncResult()
     env1 = Envelope('*****@*****.**', ['*****@*****.**'])
     env1.parse(b'From: [email protected]\r\n\r\ntest test\r\n')
     env2 = Envelope('*****@*****.**', ['*****@*****.**'])
     env2.parse(b'From: [email protected]\r\n\r\ntest test\r\n')
     queue = BlockingDeque()
     queue.append((result1, env1))
     queue.append((result2, env2))
     self.sock.recv(IsA(int)).AndReturn(b'220 Welcome\r\n')
     self.sock.sendall(b'EHLO test\r\n')
     self.sock.recv(IsA(int)).AndReturn(b'250-Hello\r\n250 PIPELINING\r\n')
     self.sock.sendall(b'MAIL FROM:<*****@*****.**>\r\nRCPT TO:<*****@*****.**>\r\nDATA\r\n')
     self.sock.recv(IsA(int)).AndReturn(b'250 Ok\r\n250 Ok\r\n354 Go ahead\r\n')
     self.sock.sendall(b'From: [email protected]\r\n\r\ntest test\r\n.\r\n')
     self.sock.recv(IsA(int)).AndReturn(b'250 Ok\r\n')
     self.sock.sendall(b'MAIL FROM:<*****@*****.**>\r\nRCPT TO:<*****@*****.**>\r\nDATA\r\n')
     self.sock.recv(IsA(int)).AndReturn(b'250 Ok\r\n250 Ok\r\n354 Go ahead\r\n')
     self.sock.sendall(b'From: [email protected]\r\n\r\ntest test\r\n.\r\n')
     self.sock.recv(IsA(int)).AndReturn(b'250 Ok\r\n')
     self.sock.sendall(b'QUIT\r\n')
     self.sock.recv(IsA(int)).AndReturn(b'221 Goodbye\r\n')
     self.sock.close()
     self.mox.ReplayAll()
     client = SmtpRelayClient('addr', queue, socket_creator=self._socket_creator, ehlo_as='test', idle_timeout=0.0)
     client._run()
     self.assertEqual({'*****@*****.**': Reply('250', 'Ok')}, result1.get_nowait())
     self.assertEqual({'*****@*****.**': Reply('250', 'Ok')}, result2.get_nowait())
Example #16
    def wait_event(self, event, timeout=None, raises=False):
        """
        Blocks until the event fires and returns its arguments

        :param event: event identifier
        :param timeout: (optional)(default:None) seconds to wait
        :type timeout: :class:`int`
        :param raises: (optional)(default:False) on timeout: if ``False``, return ``None``; if ``True``, raise ``gevent.Timeout``
        :type raises: :class:`bool`
        :return: returns event arguments in tuple
        :rtype: :class:`None`, or :class:`tuple`
        :raises: ``gevent.Timeout``

        Handling timeout

        .. code:: python

            args = ee.wait_event('my event', timeout=5)
            if args is None:
                print "Timeout!"

        """
        result = AsyncResult()
        self.once(event, result)

        try:
            return result.get(True, timeout)
        except gevent.Timeout:
            self.remove_listener(event, result)

            if raises:
                raise
            else:
                return None
Example #17
    def test_heartbeat_listener_dead(self):
        mocklistener = Mock(spec=ProcessRPCServer)
        svc = self._make_service()
        p = IonProcessThread(name=sentinel.name, listeners=[mocklistener], service=svc)
        readyev = Event()
        readyev.set()
        mocklistener.get_ready_event.return_value = readyev

        def fake_listen(evout, evin):
            evout.set(True)
            evin.wait()

        listenoutev = AsyncResult()
        listeninev = Event()

        mocklistener.listen = lambda *a, **kw: fake_listen(listenoutev, listeninev)

        p.start()
        p.get_ready_event().wait(timeout=5)
        p.start_listeners()

        listenoutev.wait(timeout=5)         # wait for listen loop to start

        self.addCleanup(listeninev.set)     # makes listen loop fall out on shutdown
        self.addCleanup(p.stop)

        listeninev.set()                    # stop the listen loop
        p.thread_manager.children[1].join(timeout=5)        # wait for listen loop to terminate

        hb = p.heartbeat()

        self.assertEquals((False, True, True), hb)
        self.assertEquals(0, p._heartbeat_count)
        self.assertIsNone(p._heartbeat_op)
Example #18
    def test_heartbeat_current_op_over_limit(self):
        self.patch_cfg('pyon.ion.process.CFG', {'container':{'timeout':{'heartbeat_proc_count_threshold':2}}})

        svc = self._make_service()
        p = IonProcessThread(name=sentinel.name, listeners=[], service=svc)
        p.start()
        p.get_ready_event().wait(timeout=5)
        p._ctrl_thread.ev_exit.set()            # prevent heartbeat loop in proc's target

        def fake_op(evout, evin):
            evout.set(True)
            evin.wait()

        listenoutev = AsyncResult()
        listeninev = Event()

        self.addCleanup(listeninev.set)     # allow graceful termination
        self.addCleanup(p.stop)

        ar = p._routing_call(fake_op, None, listenoutev, listeninev)

        listenoutev.wait(timeout=5)         # wait for ctrl thread to run our op

        # make sure it's over the threshold
        for x in xrange(3):
            hb = p.heartbeat()

        self.assertEquals((True, True, False), hb)
Example #19
    def test_heartbeat_with_current_op_multiple_times(self):
        svc = self._make_service()
        p = IonProcessThread(name=sentinel.name, listeners=[], service=svc)
        p.start()
        p.get_ready_event().wait(timeout=5)
        p._ctrl_thread.ev_exit.set()            # prevent heartbeat loop in proc's target

        def fake_op(evout, evin):
            evout.set(True)
            evin.wait()

        listenoutev = AsyncResult()
        listeninev = Event()

        self.addCleanup(listeninev.set)     # allow graceful termination
        self.addCleanup(p.stop)

        ar = p._routing_call(fake_op, None, listenoutev, listeninev)

        listenoutev.wait(timeout=5)         # wait for ctrl thread to run our op

        for x in xrange(5):
            hb = p.heartbeat()

        self.assertEquals((True, True, True), hb)
        self.assertEquals(5, p._heartbeat_count)
        self.assertEquals(ar, p._heartbeat_op)
Example #20
    def test_unknown_error(self):

        # Unhandled exceptions get handled and then converted to ContainerErrors
        svc = self._make_service()
        p = IonProcessThread(name=sentinel.name, listeners=[], service=svc)
        p.start()
        p.get_ready_event().wait(timeout=5)
        self.addCleanup(p.stop)

        def proc_call():
            raise self.ExpectedError("didn't find it")

        def client_call(p=None, ar=None):
            try:
                ca = p._routing_call(proc_call, None)
                ca.get(timeout=5)

            except IonException as e:
                ar.set(e)

        ar = AsyncResult()
        gl_call = spawn(client_call, p=p, ar=ar)

        e = ar.get(timeout=5)

        self.assertIsInstance(e, ContainerError)
        self.assertEquals(len(p._errors), 1)
Example #21
    def test_heartbeat_with_listeners(self):
        mocklistener = Mock(spec=ProcessRPCServer)
        svc = self._make_service()
        p = IonProcessThread(name=sentinel.name, listeners=[mocklistener], service=svc)
        readyev = Event()
        readyev.set()
        mocklistener.get_ready_event.return_value = readyev

        def fake_listen(evout, evin):
            evout.set(True)
            evin.wait()

        listenoutev = AsyncResult()
        listeninev = Event()

        mocklistener.listen = lambda *a, **kw: fake_listen(listenoutev, listeninev)

        p.start()
        p.get_ready_event().wait(timeout=5)
        p.start_listeners()

        listenoutev.wait(timeout=5)         # wait for listen loop to start

        self.addCleanup(listeninev.set)     # makes listen loop fall out on shutdown
        self.addCleanup(p.stop)

        # now test heartbeat!
        hb = p.heartbeat()

        self.assertEquals((True, True, True), hb)
        self.assertEquals(0, p._heartbeat_count)
        self.assertIsNone(p._heartbeat_op)
Example #22
    def test_quit_stops_timers(self):

        ar = AsyncResult()
        def cb(*args, **kwargs):
            ar.set(args)

            self.interval_timer_count += 1

        event_origin = "test_quitter"
        sub = EventSubscriber(event_type="TimerEvent", callback=cb, origin=event_origin)
        sub.start()
        self.addCleanup(sub.stop)

        tid = self.ssclient.create_interval_timer(start_time="now",
                                                  interval=1,
                                                  event_origin=event_origin)

        # wait until at least one scheduled message
        ar.get(timeout=5)

        # shut it down!
        p = self.container.proc_manager.procs_by_name['scheduler']
        self.container.terminate_process(p.id)

        # assert empty
        self.assertEquals(p.schedule_entries, {})
Example #23
    def test_known_error(self):

        # IonExceptions and TypeErrors get forwarded back intact
        svc = self._make_service()
        p = IonProcessThread(name=sentinel.name, listeners=[], service=svc)
        p.start()
        p.get_ready_event().wait(timeout=5)
        self.addCleanup(p.stop)

        def proc_call():
            raise NotFound("didn't find it")

        def client_call(p=None, ar=None):
            try:
                ca = p._routing_call(proc_call, None)
                ca.get(timeout=5)

            except IonException as e:
                ar.set(e)

        ar = AsyncResult()
        gl_call = spawn(client_call, p=p, ar=ar)

        e = ar.get(timeout=5)

        self.assertIsInstance(e, NotFound)
Example #24
    def test_zero_max_size(self):
        q = queue.Channel()

        def sender(evt, q):
            q.put('hi')
            evt.set('done')

        def receiver(evt, q):
            x = q.get()
            evt.set(x)

        e1 = AsyncResult()
        e2 = AsyncResult()

        p1 = gevent.spawn(sender, e1, q)
        gevent.sleep(0.001)
        self.assert_(not e1.ready())
        p2 = gevent.spawn(receiver, e2, q)
        self.assertEquals(e2.get(), 'hi')
        self.assertEquals(e1.get(), 'done')
        timeout = gevent.Timeout.start_new(0)
        try:
            gevent.joinall([p1, p2])
        finally:
            timeout.cancel()
Example #25
  def START(self, datatype, value):
    '''
    Start video method
    '''
    
    # Logger
    logger = logging.getLogger("AceClient_START")
    
    self._result = AsyncResult()
    self._urlresult = AsyncResult()
    
    if datatype.lower() == 'pid':
      self._write(AceMessage.request.START('PID', {'content_id': value}))
    elif datatype.lower() == 'torrent':
      self._write(AceMessage.request.START('TORRENT', {'url': value}))
      
    try:
      if not self._result.get(timeout = self._resulttimeout):
        errmsg = "START error!"
        logger.error(errmsg)
        raise AceException(errmsg)
    except gevent.Timeout:
      errmsg = "START timeout!"
      logger.error(errmsg)
      raise AceException(errmsg)
Example #26
    def _sync_call(self, func, cb_arg, *args, **kwargs):
        """
        Functionally similar to the generic blocking_cb but with error support that's Channel specific.
        """
        ar = AsyncResult()

        def cb(*args, **kwargs):
            ret = list(args)
            if len(kwargs): ret.append(kwargs)
            ar.set(ret)

        def eb(ch, code, text):
            ar.set(ChannelError("_sync_call could not complete due to an error (%d): %s" % (code, text)))

        kwargs[cb_arg] = cb
        with self.push_closed_error_callback(eb):
            func(*args, **kwargs)
            ret_vals = ar.get(timeout=10)

        if isinstance(ret_vals, ChannelError):
            raise ret_vals

        if len(ret_vals) == 0:
            return None
        elif len(ret_vals) == 1:
            return ret_vals[0]
        return tuple(ret_vals)
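Examples #26 and #28 are the same recipe with different error types: hand a callback that fills an AsyncResult to a callback-style API, then block on get(). A stripped-down, generic version of the recipe, assuming nothing beyond gevent itself; blocking_cb and the open_channel usage are illustrative:

from gevent.event import AsyncResult

def blocking_cb(func, cb_arg, *args, **kwargs):
    # pass ar.set as the callback: whatever single value the API calls it
    # with becomes the return value of this function
    ar = AsyncResult()
    kwargs[cb_arg] = ar.set
    func(*args, **kwargs)
    return ar.get(timeout=10)  # raises if ar.set_exception() was used instead

# e.g. for an API shaped like connection.open_channel(on_open=callback):
# channel = blocking_cb(connection.open_channel, 'on_open')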
Example #27
    def test_two_waiters_one_dies(self):

        def waiter(q, evt):
            evt.set(q.get())

        def do_receive(q, evt):
            timeout = gevent.Timeout.start_new(0, RuntimeError())
            try:
                try:
                    result = q.get()
                    evt.set(result)
                except RuntimeError:
                    evt.set('timed out')
            finally:
                timeout.cancel()

        q = queue.Queue()
        dying_evt = AsyncResult()
        waiting_evt = AsyncResult()
        gevent.spawn(do_receive, q, dying_evt)
        gevent.spawn(waiter, q, waiting_evt)
        gevent.sleep(0.01)
        q.put('hi')
        self.assertEquals(dying_evt.get(), 'timed out')
        self.assertEquals(waiting_evt.get(), 'hi')
Example #28
    def _sync_call(self, func, cb_arg, *args, **kwargs):
        """
        Functionally similar to the generic blocking_cb but with error support that's Channel specific.
        """
        ar = AsyncResult()

        def cb(*args, **kwargs):
            ret = list(args)
            if len(kwargs): ret.append(kwargs)
            ar.set(ret)

        eb = lambda ch, *args: ar.set(TransportError("_sync_call could not complete due to an error (%s)" % args))

        kwargs[cb_arg] = cb
        with self._push_close_cb(eb):
            func(*args, **kwargs)
            ret_vals = ar.get(timeout=10)

        if isinstance(ret_vals, TransportError):

            # mark this channel as poison, do not use again!
            # don't test for type here, we don't want to have to import PyonSelectConnection
            if hasattr(self._client.transport, 'connection') and hasattr(self._client.transport.connection, 'mark_bad_channel'):
                self._client.transport.connection.mark_bad_channel(self._client.channel_number)
            else:
                log.warn("Could not mark channel # (%s) as bad, Pika could be corrupt", self._client.channel_number)

            raise ret_vals

        if len(ret_vals) == 0:
            return None
        elif len(ret_vals) == 1:
            return ret_vals[0]
        return tuple(ret_vals)
Example #29
class TestAsyncResult(object):
    def __init__(self):
        self.event = AsyncResult()
 
    def run(self):
        producers = [gevent.spawn(self._producer, i) for i in xrange(3)]
        consumers = [gevent.spawn(self._consumer, i) for i in xrange(3)]
        tasks     = []
        tasks.extend(producers)
        tasks.extend(consumers)
        gevent.joinall(tasks)
 
    def _producer(self, pid):
        print("I'm producer %d and now I don't want consume to do something" % (pid,))
        sleeptime = random.randint(5, 10) * 0.01
        print("Sleeping time is %f" % (sleeptime, ))
        gevent.sleep(sleeptime)
        print("I'm producer %d and now consumer could do something." % (pid,))
        self.event.set('producer pid %d' % (pid, ))
        
    def _consumer(self, pid):
        print("I'm consumer %d and now I'm waiting for producer" % (pid,))
        gevent.sleep(random.randint(0, 5) * 0.01)
        value = self.event.wait()
        print("I'm consumer %d. Value is %r and now I can do something" % (pid, value))
Example #30
 def call(self, protoname, msg):
     session = self._get_session()
     pack = proto.request(protoname, msg, session)
     ev = AsyncResult()
     self._sessions[session] = ev
     self._send(pack)
     return ev.get()
Example #31
import gevent
from gevent.event import AsyncResult

a = AsyncResult()


def setter():
    gevent.sleep(3)
    a.set()


def waiter():
    a.get()
    print('I live!')


gevent.joinall([
    gevent.spawn(setter),
    gevent.spawn(waiter),
])
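The same shape also carries data and errors: set(value) is what every get() returns, and set_exception(exc) makes every get() raise. A small variation on the snippet above, using only gevent itself:

import gevent
from gevent.event import AsyncResult

result = AsyncResult()

def setter():
    gevent.sleep(0.1)
    result.set('hello')  # or result.set_exception(RuntimeError('boom'))

def waiter():
    print(result.get())  # prints 'hello'; would raise if an exception was set

gevent.joinall([
    gevent.spawn(setter),
    gevent.spawn(waiter),
])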
Example #32
    def token_swap_async(self, identifier, maker_token, maker_amount,
                         maker_address, taker_token, taker_amount,
                         taker_address):
        """ Start a token swap operation by sending a MediatedTransfer with
        `maker_amount` of `maker_token` to `taker_address`. Only proceed when a
        new valid MediatedTransfer is received with `taker_amount` of
        `taker_token`.
        """
        if not isaddress(maker_token):
            raise InvalidAddress(
                'Address for maker token is not in expected binary format in token swap'
            )
        if not isaddress(maker_address):
            raise InvalidAddress(
                'Address for maker is not in expected binary format in token swap'
            )

        if not isaddress(taker_token):
            raise InvalidAddress(
                'Address for taker token is not in expected binary format in token swap'
            )
        if not isaddress(taker_address):
            raise InvalidAddress(
                'Address for taker is not in expected binary format in token swap'
            )

        channelgraphs = self.raiden.token_to_channelgraph

        if taker_token not in channelgraphs:
            log.error('Unknown token {}'.format(pex(taker_token)))
            return

        if maker_token not in channelgraphs:
            log.error('Unknown token {}'.format(pex(maker_token)))
            return

        token_swap = TokenSwap(
            identifier,
            maker_token,
            maker_amount,
            maker_address,
            taker_token,
            taker_amount,
            taker_address,
        )

        async_result = AsyncResult()
        task = MakerTokenSwapTask(
            self.raiden,
            token_swap,
            async_result,
        )
        task.start()

        # the maker is expecting the taker transfer
        key = SwapKey(
            identifier,
            taker_token,
            taker_amount,
        )
        self.raiden.swapkey_to_greenlettask[key] = task
        self.raiden.swapkey_to_tokenswap[key] = token_swap

        return async_result
Example #33
class LNLSDiffractometer(GenericDiffractometer):
    """
    Descript. :
    """
    def __init__(self, *args):
        """
        Descript. :
        """
        GenericDiffractometer.__init__(self, *args)

    def init(self):
        """
        Descript. :
        """
        # self.image_width = 100
        # self.image_height = 100

        GenericDiffractometer.init(self)
        # Bzoom: 1.86 um/pixel (or 0.00186 mm/pixel) at minimum zoom
        self.x_calib = 0.00186
        self.y_calib = 0.00186
        self.last_centred_position = [318, 238]

        self.pixels_per_mm_x = 1.0 / self.x_calib
        self.pixels_per_mm_y = 1.0 / self.y_calib
        if "zoom" not in self.motor_hwobj_dict.keys():
            self.motor_hwobj_dict["zoom"] = self.getObjectByRole("zoom")
        calibration_x = self.zoom.getProperty("mm_per_pixel_x")
        calibration_y = self.zoom.getProperty("mm_per_pixel_y")
        self.zoom_calibration_x = ast.literal_eval(calibration_x)
        self.zoom_calibration_y = ast.literal_eval(calibration_y)

        self.beam_position = [318, 238]

        self.current_phase = GenericDiffractometer.PHASE_CENTRING

        self.cancel_centring_methods = {}
        self.current_motor_positions = {
            "phiy": 1.0,
            "sampx": 0.0,
            "sampy": -1.0,
            "zoom": 8.53,
            "focus": -0.42,
            "phiz": 1.1,
            "phi": 311.1,
            "kappa": 11,
            "kappa_phi": 22.0,
        }
        #self.move_motors(self._get_random_centring_position())

        self.current_state_dict = {}
        self.centring_status = {"valid": False}
        self.centring_time = 0

        # self.image_width = 400
        # self.image_height = 400

        self.mount_mode = self.getProperty("sample_mount_mode")
        if self.mount_mode is None:
            self.mount_mode = "manual"

        self.equipment_ready()

        # TODO FFS get this cleared up - one function, one name
        self.getPositions = self.get_positions
        #self.moveMotors = self.move_motors

        self.connect(self.motor_hwobj_dict["phi"], "positionChanged",
                     self.phi_motor_moved)
        self.connect(self.motor_hwobj_dict["phiy"], "positionChanged",
                     self.phiy_motor_moved)
        self.connect(self.motor_hwobj_dict["phiz"], "positionChanged",
                     self.phiz_motor_moved)
        self.connect(self.motor_hwobj_dict["kappa"], "positionChanged",
                     self.kappa_motor_moved)
        self.connect(
            self.motor_hwobj_dict["kappa_phi"],
            "positionChanged",
            self.kappa_phi_motor_moved,
        )
        self.connect(self.motor_hwobj_dict["sampx"], "positionChanged",
                     self.sampx_motor_moved)
        self.connect(self.motor_hwobj_dict["sampy"], "positionChanged",
                     self.sampy_motor_moved)

    def getStatus(self):
        """
        Descript. :
        """
        return "ready"

    def execute_server_task(self, method, timeout=30, *args):
        return

    def in_plate_mode(self):
        return self.mount_mode == "plate"

    def use_sample_changer(self):
        return self.mount_mode == "sample_changer"

    def is_reversing_rotation(self):
        return True

    def get_grid_direction(self):
        """
        Descript. :
        """
        return self.grid_direction

    def manual_centring(self):
        """
        Descript. :
        """
        for click in range(3):
            self.user_clicked_event = AsyncResult()
            x, y = self.user_clicked_event.get()
            if click < 2:
                self.motor_hwobj_dict["phi"].set_value_relative(90)
        self.last_centred_position[0] = x
        self.last_centred_position[1] = y
        centred_pos_dir = self._get_random_centring_position()
        return centred_pos_dir

    def automatic_centring(self):
        """Automatic centring procedure"""
        centred_pos_dir = self._get_random_centring_position()
        self.emit("newAutomaticCentringPoint", centred_pos_dir)
        return centred_pos_dir

    def _get_random_centring_position(self):
        """Get random centring result for current positions"""

        # Names of motors to vary during centring
        vary_motor_names = ("sampx", "sampy", "phiy")

        # Range of random variation
        var_range = 0.08

        # absolute value limit for varied motors
        var_limit = 2.0

        result = self.current_motor_positions.copy()
        for tag in vary_motor_names:
            val = result.get(tag)
            if val is not None:
                random_num = random.random()
                var = (random_num - 0.5) * var_range
                val += var
                if abs(val) > var_limit:
                    val *= 1 - var_range / var_limit
                result[tag] = val
        #
        return result

    def is_ready(self):
        """
        Descript. :
        """
        return True

    def isValid(self):
        """
        Descript. :
        """
        return True

    def invalidate_centring(self):
        """
        Descript. :
        """
        if self.current_centring_procedure is None and self.centring_status["valid"]:
            self.centring_status = {"valid": False}
            # self.emitProgressMessage("")
            self.emit("centringInvalid", ())

    def get_centred_point_from_coord(self, x, y, return_by_names=None):
        """
        Descript. :
        """
        centred_pos_dir = self._get_random_centring_position()
        return centred_pos_dir

    def get_calibration_data(self, offset):
        """
        Descript. :
        """
        # return (1.0 / self.x_calib, 1.0 / self.y_calib)
        return (1.0 / self.x_calib, 1.0 / self.y_calib)

    def refresh_omega_reference_position(self):
        """
        Descript. :
        """
        return

    # def get_omega_axis_position(self):
    #     """
    #     Descript. :
    #     """
    #     return self.current_positions_dict.get("phi")

    def beam_position_changed(self, value):
        """
        Descript. :
        """
        self.beam_position = value

    def get_current_centring_method(self):
        """
        Descript. :
        """
        return self.current_centring_method

    def motor_positions_to_screen(self, centred_positions_dict):
        """
        Descript. :
        """
        return self.last_centred_position[0], self.last_centred_position[1]

    def moveToCentredPosition(self, centred_position, wait=False):
        """
        Descript. :
        """
        try:
            return self.move_to_centred_position(centred_position)
        except BaseException:
            logging.exception("Could not move to centred position")

    def phi_motor_moved(self, pos):
        """
        Descript. :
        """
        self.current_motor_positions["phi"] = pos
        self.emit("phiMotorMoved", pos)

    def phiy_motor_moved(self, pos):
        self.current_motor_positions["phiy"] = pos

    def phiz_motor_moved(self, pos):
        self.current_motor_positions["phiz"] = pos

    def sampx_motor_moved(self, pos):
        self.current_motor_positions["sampx"] = pos

    def sampy_motor_moved(self, pos):
        self.current_motor_positions["sampy"] = pos

    def kappa_motor_moved(self, pos):
        """
        Descript. :
        """
        self.current_motor_positions["kappa"] = pos
        if time.time() - self.centring_time > 1.0:
            self.invalidate_centring()
        self.emit_diffractometer_moved()
        self.emit("kappaMotorMoved", pos)

    def kappa_phi_motor_moved(self, pos):
        """
        Descript. :
        """
        self.current_motor_positions["kappa_phi"] = pos
        if time.time() - self.centring_time > 1.0:
            self.invalidate_centring()
        self.emit_diffractometer_moved()
        self.emit("kappaPhiMotorMoved", pos)

    def refresh_video(self):
        """
        Descript. :
        """
        self.emit("minidiffStateChanged", "testState")
        if HWR.beamline.beam:
            HWR.beamline.beam.beam_pos_hor_changed(300)
            HWR.beamline.beam.beam_pos_ver_changed(200)

    def start_auto_focus(self):
        """
        Descript. :
        """
        return

    def calculate_move_to_beam_pos(self, x, y):
        """
        Descript. : calculate motor positions to put sample on the beam.
        Returns: dict of motor positions
        """
        # Update beam position
        self.beam_position[0], self.beam_position[1] = \
            HWR.beamline.beam.get_beam_position_on_screen()

        print("moving to beam position: %d %d" % (
            self.beam_position[0],
            self.beam_position[1],
        ))

        # Set velocity of omega to move during centring
        #self.set_omega_default_velocity()

        # Set scale of pixels per mm according to current zoom
        #self.pixels_per_mm_x = self.motor_zoom_hwobj.getPixelsPerMm(0)
        #self.pixels_per_mm_y = self.motor_zoom_hwobj.getPixelsPerMm(1)

        # Get clicked position of mouse pointer
        #self.user_clicked_event = AsyncResult()
        #x, y = self.user_clicked_event.get()
        # Last clicked position
        self.last_centred_position[0] = x
        self.last_centred_position[1] = y

        # Get current value of involved motors
        omega_pos = self.motor_hwobj_dict["phi"].get_value()
        # For now, phiz refers to gonio x motor
        goniox_pos = self.motor_hwobj_dict["phiz"].get_value()
        sampx_pos = self.motor_hwobj_dict["sampx"].get_value()
        sampy_pos = self.motor_hwobj_dict["sampy"].get_value()

        # Pixels to move axis X of whole goniometer
        import math
        drx_goniox = abs(-x - y + 1152) / math.sqrt(2)
        if y <= (-x + 1152):
            dir_goniox = 1
        else:
            dir_goniox = -1
        move_goniox = dir_goniox * drx_goniox
        # mm to move
        move_goniox = move_goniox / self.pixels_per_mm_x

        # Move absolute
        move_goniox += goniox_pos

        # Calculate new position of X
        dry_samp = abs(x - y - 128) / math.sqrt(2)
        if (y >= x - 128):
            dir_samp = 1
        else:
            dir_samp = -1
        move_samp = dir_samp * dry_samp
        print('move_samp = ' + str(move_samp))

        move_sampx = (math.sin(math.radians(omega_pos)) * move_samp)
        # print("math.cos(math.radians(omega_pos)): ", math.cos(math.radians(omega_pos)))
        # print("self.beam_position[1]: ", self.beam_position[1])
        # print("float(last_centred_position[1])", float(last_centred_position[1]))
        # print("move_sampx = (math.cos(math.radians(omega_pos)) * (self.beam_position[1] - float(last_centred_position[1]))): ", move_sampx)
        #move_sampx = move_sampx / self.pixels_per_mm_x
        move_sampx = (move_sampx / self.pixels_per_mm_x) * -1
        # print("move_sampx = move_sampx / self.pixels_per_mm_x: ", move_sampx)
        # Move absolute
        move_sampx += sampx_pos
        # print("move_sampx += sampx_pos: ", move_sampx)

        # Calculate new position of Y
        move_sampy = (math.cos(math.radians(omega_pos)) * move_samp)
        # print("math.sin(math.radians(omega_pos)): ", math.sin(math.radians(omega_pos)))
        # print("self.beam_position[1]: ", self.beam_position[1])
        # print("float(last_centred_position[1])", float(last_centred_position[1]))
        # print("move_sampy = (math.sin(math.radians(omega_pos)) * (self.beam_position[1] - float(last_centred_position[1]))): ", move_sampy)
        move_sampy = (move_sampy / self.pixels_per_mm_y)
        print('move_sampy = ' + str(move_sampy))
        #move_sampy = move_sampy / self.pixels_per_mm_y
        # print("move_sampy = move_sampy / self.pixels_per_mm_y: ", move_sampy)
        # Move absolute
        move_sampy += sampy_pos
        # print("move_sampy += sampy_pos: ", move_sampy)
        centred_pos_dir = {
            'phiz': move_goniox,
            'sampx': move_sampx,
            'sampy': move_sampy
        }
        print('Target pos = ' + str(centred_pos_dir))
        return centred_pos_dir

    def move_to_beam(self, x, y, omega=None):
        """
        Descript. : function to create a centring point based on all motors
                    positions.
        """

        centred_pos_dir = self.calculate_move_to_beam_pos(x, y)
        print('Moving motors to beam...')
        self.move_to_motors_positions(centred_pos_dir, wait=True)
        return centred_pos_dir

    def move_to_coord(self, x, y, omega=None):
        """
        Descript. : function to create a centring point based on all motors
                    positions.
        """
        warnings.warn("Deprecated method, call move_to_beam instead",
                      DeprecationWarning)
        return self.move_to_beam(x, y, omega)

    def start_move_to_beam(self, coord_x=None, coord_y=None, omega=None):
        """
        Descript. :
        """
        self.last_centred_position[0] = coord_x
        self.last_centred_position[1] = coord_y
        self.centring_time = time.time()
        curr_time = time.strftime("%Y-%m-%d %H:%M:%S")
        self.centring_status = {
            "valid": True,
            "startTime": curr_time,
            "endTime": curr_time,
        }
        motors = self.get_positions()
        motors["beam_x"] = 0.1
        motors["beam_y"] = 0.1
        self.last_centred_position[0] = coord_x
        self.last_centred_position[1] = coord_y
        self.centring_status["motors"] = motors
        self.centring_status["valid"] = True
        self.centring_status["angleLimit"] = False
        self.emit_progress_message("")
        self.accept_centring()
        self.current_centring_method = None
        self.current_centring_procedure = None

    def update_values(self):
        self.emit("zoomMotorPredefinedPositionChanged", None, None)
        omega_ref = [0, 238]
        self.emit("omegaReferenceChanged", omega_ref)

    def move_kappa_and_phi(self, kappa, kappa_phi):
        return

    def get_osc_max_speed(self):
        return 66

    def get_osc_limits(self):
        if self.in_plate_mode():
            return (170, 190)
        else:
            return (-360, 360)

    def get_scan_limits(self, speed=None, num_images=None, exp_time=None):
        if self.in_plate_mode():
            return (170, 190)
        else:
            return (-360, 360)

    def get_osc_dynamic_limits(self):
        """Returns dynamic limits of oscillation axis"""
        return (0, 20)

    def get_scan_dynamic_limits(self, speed=None):
        return (-360, 360)

    def move_omega_relative(self, relative_angle):
        self.motor_hwobj_dict["phi"].syncMoveRelative(relative_angle, 5)

    def set_phase(self, phase, timeout=None):
        self.current_phase = str(phase)
        self.emit("minidiffPhaseChanged", (self.current_phase, ))

    def get_point_from_line(self, point_one, point_two, index, images_num):
        return point_one.as_dict()

    @property
    def zoom(self):
        """
        Override method.
        """
        return self.motor_hwobj_dict.get("zoom")

    def get_zoom_calibration(self):
        """Returns tuple with current zoom calibration (px per mm)."""
        zoom_enum = self.zoom.get_value()  # Get current zoom enum
        zoom_enum_str = zoom_enum.name  # as str
        self.x_calib = self.zoom_calibration_x.get(zoom_enum_str)
        self.y_calib = self.zoom_calibration_y.get(zoom_enum_str)
        try:
            float(self.x_calib)
            float(self.y_calib)
            self.pixels_per_mm_x = 1.0 / self.x_calib
            self.pixels_per_mm_y = 1.0 / self.y_calib
        except Exception as e:
            print("[Zoom] Error on calibration: " + str(e))
        return (self.pixels_per_mm_x, self.pixels_per_mm_y)

    def get_pixels_per_mm(self):
        """
        Override method.
        """
        pixels_per_mm_x, pixels_per_mm_y = self.get_zoom_calibration()
        return (pixels_per_mm_x, pixels_per_mm_y)

    def update_zoom_calibration(self):
        """
        Override method.
        """
        self.emit("pixelsPerMmChanged",
                  ((self.pixels_per_mm_x, self.pixels_per_mm_y)))
Example #34
class AlarmTask(gevent.Greenlet):
    """ Task to notify when a block is mined. """
    def __init__(self, chain):
        super().__init__()
        self.callbacks = list()
        self.stop_event = AsyncResult()
        self.chain = chain
        self.last_block_number = None
        self.response_queue = Queue()

        # TODO: Start with a larger wait_time and decrease it as the
        # probability of a new block increases.
        self.wait_time = 0.5
        self.last_loop = time.time()

    def register_callback(self, callback):
        """ Register a new callback.

        Note:
            The callback will be executed in the AlarmTask context and for
            this reason it should not block, otherwise we can miss block
            changes.
        """
        if not callable(callback):
            raise ValueError('callback is not a callable')

        self.callbacks.append(callback)

    def remove_callback(self, callback):
        """Remove callback from the list of callbacks if it exists"""
        if callback in self.callbacks:
            self.callbacks.remove(callback)

    def _run(self):  # pylint: disable=method-hidden
        self.last_block_number = self.chain.block_number()
        log.debug('starting block number', block_number=self.last_block_number)

        sleep_time = 0
        while self.stop_event.wait(sleep_time) is not True:
            loop_start = time.time()
            try:
                self.poll_for_new_block()
            except RaidenShuttingDown:
                break

            # we want this task to iterate in the tick of `wait_time`, so take
            # into account how long we spent executing one tick.
            self.last_loop = time.time()
            work_time = self.last_loop - loop_start
            if work_time > self.wait_time:
                log.warning(
                    'alarm loop is taking longer than the wait time',
                    work_time=work_time,
                    wait_time=self.wait_time,
                )
                sleep_time = 0.001
            else:
                sleep_time = self.wait_time - work_time

        # stopping
        self.callbacks = list()

    def poll_for_new_block(self):
        chain_id = self.chain.network_id
        current_block = self.chain.block_number()

        if current_block > self.last_block_number + 1:
            difference = current_block - self.last_block_number - 1
            log.error('alarm missed %s blocks' % (difference),
                      current_block=current_block)

        if current_block != self.last_block_number:
            log.debug(
                'new block',
                number=current_block,
                timestamp=self.last_loop,
            )

            self.last_block_number = current_block
            remove = list()
            for callback in self.callbacks:
                result = callback(current_block, chain_id)
                if result is REMOVE_CALLBACK:
                    remove.append(callback)

            for callback in remove:
                self.callbacks.remove(callback)

    def stop_async(self):
        self.stop_event.set(True)
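Shutdown mirrors Example #1: stop_async() sets the AsyncResult, so the next stop_event.wait(sleep_time) returns True and the while loop exits. A hedged usage sketch; StubChain is illustrative, and actually running it still requires the module's own log and RaidenShuttingDown imports:

class StubChain(object):
    network_id = 1
    def block_number(self):
        return 42

alarm = AlarmTask(StubChain())
alarm.start()        # gevent.Greenlet.start() schedules _run()
gevent.sleep(1.2)    # let it run a couple of poll iterations
alarm.stop_async()   # stop_event.wait() now returns True
alarm.join()         # wait for _run() to unwind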
Example #35
 def takes_too_long(self, noticear=None):
     if noticear is not None:
         noticear.set(True)
     ar = AsyncResult()
     ar.wait()    # never set, so this blocks forever (hence the name)
Example #36
 def test_multiple_listeners_error_unlink_AsyncResult_rawlink(self):
     e = AsyncResult()
     gevent.spawn(e.set, 6)
     self._test_multiple_listeners_error_unlink(e, e.rawlink)
Example #37
 def leave_async(self, only_receiving=True):
     """ Async version of `leave()` """
     leave_result = AsyncResult()
     gevent.spawn(self.leave, only_receiving).link(leave_result)
     return leave_result
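This works because a gevent AsyncResult is itself usable as a link target: when the spawned greenlet finishes, its return value (or exception) is copied into the result. The idiom in isolation:

import gevent
from gevent.event import AsyncResult

def work():
    gevent.sleep(0.1)
    return 'done'

result = AsyncResult()
gevent.spawn(work).link(result)  # AsyncResult.__call__ copies value/exception
print(result.get())              # 'done'; raises instead if work() raised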
Example #38
    def direct_transfer_async(self, token_address, amount, target, identifier):
        """ Do a direct tranfer with target.

        Direct transfers are non cancellable and non expirable, since these
        transfers are a signed balance proof with the transferred amount
        incremented.

        Because the transfer is non cancellable, there is a level of trust with
        the target. After the message is sent the target is effectively paid
        and then it is not possible to revert.

        The async result will be set to False iff there is no direct channel
        with the target or the payer does not have balance to complete the
        transfer, otherwise because the transfer is non expirable the async
        result *will never be set to False* and if the message is sent it will
        hang until the target node acknowledge the message.

        This transfer should be used as an optimization, since only two packets
        are required to complete the transfer (from the payer's perspective),
        whereas the mediated transfer requires 6 messages.
        """
        graph = self.token_to_channelgraph[token_address]
        direct_channel = graph.partneraddress_to_channel.get(target)

        direct_channel_with_capacity = (direct_channel
                                        and direct_channel.can_transfer and
                                        amount <= direct_channel.distributable)

        if direct_channel_with_capacity:
            direct_transfer = direct_channel.create_directtransfer(
                amount, identifier)
            self.sign(direct_transfer)
            direct_channel.register_transfer(
                self.get_block_number(),
                direct_transfer,
            )

            direct_transfer_state_change = ActionTransferDirect(
                identifier,
                amount,
                token_address,
                direct_channel.partner_state.address,
            )
            # TODO: add the transfer sent event
            state_change_id = self.transaction_log.log(
                direct_transfer_state_change)

            # TODO: This should be set once the direct transfer is acknowledged
            transfer_success = EventTransferSentSuccess(identifier)
            self.transaction_log.log_events(state_change_id,
                                            [transfer_success],
                                            self.get_block_number())

            async_result = self.protocol.send_async(
                direct_channel.partner_state.address,
                direct_transfer,
            )

        else:
            async_result = AsyncResult()
            async_result.set(False)

        return async_result
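Since both branches return an AsyncResult, callers can treat the happy path and the no-channel/no-capacity path uniformly. A hedged caller-side sketch; raiden_api and the argument values are placeholders:

result = raiden_api.direct_transfer_async(token_address, 10, target, identifier=1)
if result.wait(timeout=30) is False:
    print('no direct channel with enough capacity for this transfer')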
Example #39
class TestAgentConnectionFailures(IonIntegrationTestCase):
    """
    Test cases for instrument agent class. Functions in this class provide
    instrument agent integration tests and provide a tutorial on use of
    the agent setup and interface.
    """

    ############################################################################
    # Setup, teardown.
    ############################################################################

    def setUp(self):
        """
        Set up driver integration support.
        Start port agent, add port agent cleanup.
        Start container.
        Start deploy services.
        Define agent config, start agent.
        Start agent client.
        """
        self._ia_client = None

        # Start container.
        log.info('Starting capability container.')
        self._start_container()

        # Bring up services in a deploy file (no need to message)
        log.info('Starting deploy services.')
        self.container.start_rel_from_url('res/deploy/r2deploy.yml')

        log.info('Creating driver integration test support:')
        log.info('driver uri: %s', DRV_URI)
        log.info('device address: %s', DEV_ADDR)
        log.info('device port: %s', DEV_PORT)
        log.info('log delimiter: %s', DELIM)
        log.info('work dir: %s', WORK_DIR)
        self._support = DriverIntegrationTestSupport(None, None, DEV_ADDR,
                                                     DEV_PORT, DATA_PORT,
                                                     CMD_PORT, PA_BINARY,
                                                     DELIM, WORK_DIR)

        # Start port agent, add stop to cleanup.
        self._start_pagent()
        self.addCleanup(self._support.stop_pagent)

        log.info('building stream configuration')
        # Setup stream config.
        self._build_stream_config()

        # Start a resource agent client to talk with the instrument agent.
        log.info('starting IA process')
        self._ia_client = start_instrument_agent_process(
            self.container, self._stream_config)
        self.addCleanup(self._verify_agent_reset)
        log.info('test setup complete')

    ###############################################################################
    # Port agent helpers.
    ###############################################################################

    def _start_pagent(self):
        """
        Construct and start the port agent.
        """

        port = self._support.start_pagent()
        log.info('Port agent started at port %i', port)

        # Configure driver to use port agent port number.
        DVR_CONFIG['comms_config'] = {
            'addr': 'localhost',
            'port': port,
            'cmd_port': CMD_PORT
        }

    def _verify_agent_reset(self):
        """
        Check agent state and reset if necessary.
        This is called if a test fails and reset hasn't occurred.
        """
        if self._ia_client is None:
            return

        state = self._ia_client.get_agent_state(timeout=120.1)
        if state != ResourceAgentState.UNINITIALIZED:
            cmd = AgentCommand(command=ResourceAgentEvent.RESET)
            retval = self._ia_client.execute_agent(cmd, timeout=300)

    ###############################################################################
    # Event helpers.
    ###############################################################################

    def _start_event_subscriber(self, type='ResourceAgentEvent', count=0):
        """
        Start a subscriber to the instrument agent events.
        @param type The type of event to catch.
        @param count Trigger the async event result when the number of events received reaches this value.
        """
        def consume_event(*args, **kwargs):
            log.info('Test received ION event: args=%s, kwargs=%s, event=%s.',
                     str(args), str(kwargs), str(args[0]))
            self._events_received.append(args[0])
            if self._event_count > 0 and \
                self._event_count == len(self._events_received):
                self._async_event_result.set()

        # Event array and async event result.
        self._event_count = count
        self._events_received = []
        self._async_event_result = AsyncResult()

        self._event_subscriber = EventSubscriber(event_type=type,
                                                 callback=consume_event,
                                                 origin=IA_RESOURCE_ID)
        self._event_subscriber.start()
        self._event_subscriber._ready_event.wait(timeout=5)

    def _stop_event_subscriber(self):
        """
        Stop event subscribers on cleanup.
        """
        self._event_subscriber.stop()
        self._event_subscriber = None

    ###############################################################################
    # Data stream helpers.
    ###############################################################################

    def _build_stream_config(self):
        """
        """
        # Create a pubsub client to create streams.
        pubsub_client = PubsubManagementServiceClient(node=self.container.node)
        dataset_management = DatasetManagementServiceClient()

        encoder = IonObjectSerializer()

        # Create streams and subscriptions for each stream named in driver.
        self._stream_config = {}

        stream_name = 'parsed'
        param_dict_name = 'ctd_parsed_param_dict'
        pd_id = dataset_management.read_parameter_dictionary_by_name(
            param_dict_name, id_only=True)
        stream_def_id = pubsub_client.create_stream_definition(
            name=stream_name, parameter_dictionary_id=pd_id)
        stream_def = pubsub_client.read_stream_definition(stream_def_id)
        stream_def_dict = encoder.serialize(stream_def)
        pd = stream_def.parameter_dictionary
        stream_id, stream_route = pubsub_client.create_stream(
            name=stream_name,
            exchange_point='science_data',
            stream_definition_id=stream_def_id)
        stream_config = dict(routing_key=stream_route.routing_key,
                             exchange_point=stream_route.exchange_point,
                             stream_id=stream_id,
                             parameter_dictionary=pd,
                             stream_def_dict=stream_def_dict)
        self._stream_config[stream_name] = stream_config

        stream_name = 'raw'
        param_dict_name = 'ctd_raw_param_dict'
        pd_id = dataset_management.read_parameter_dictionary_by_name(
            param_dict_name, id_only=True)
        stream_def_id = pubsub_client.create_stream_definition(
            name=stream_name, parameter_dictionary_id=pd_id)
        stream_def = pubsub_client.read_stream_definition(stream_def_id)
        stream_def_dict = encoder.serialize(stream_def)
        pd = stream_def.parameter_dictionary
        stream_id, stream_route = pubsub_client.create_stream(
            name=stream_name,
            exchange_point='science_data',
            stream_definition_id=stream_def_id)
        stream_config = dict(routing_key=stream_route.routing_key,
                             exchange_point=stream_route.exchange_point,
                             stream_id=stream_id,
                             parameter_dictionary=pd,
                             stream_def_dict=stream_def_dict)
        self._stream_config[stream_name] = stream_config

    def _start_data_subscribers(self, count, raw_count):
        """
        """
        # Create a pubsub client to create streams.
        pubsub_client = PubsubManagementServiceClient(node=self.container.node)

        # Create streams and subscriptions for each stream named in driver.
        self._data_subscribers = []
        self._samples_received = []
        self._raw_samples_received = []
        self._async_sample_result = AsyncResult()
        self._async_raw_sample_result = AsyncResult()

        # A callback for processing subscribed-to data.
        def recv_data(message, stream_route, stream_id):
            log.info('Received parsed data on %s (%s,%s)', stream_id,
                     stream_route.exchange_point, stream_route.routing_key)
            self._samples_received.append(message)
            if len(self._samples_received) == count:
                self._async_sample_result.set()

        def recv_raw_data(message, stream_route, stream_id):
            log.info('Received raw data on %s (%s,%s)', stream_id,
                     stream_route.exchange_point, stream_route.routing_key)
            self._raw_samples_received.append(message)
            if len(self._raw_samples_received) == raw_count:
                self._async_raw_sample_result.set()

        from pyon.util.containers import create_unique_identifier

        stream_name = 'parsed'
        parsed_config = self._stream_config[stream_name]
        stream_id = parsed_config['stream_id']
        exchange_name = create_unique_identifier("%s_queue" % stream_name)
        self._purge_queue(exchange_name)
        sub = StandaloneStreamSubscriber(exchange_name, recv_data)
        sub.start()
        self._data_subscribers.append(sub)
        sub_id = pubsub_client.create_subscription(name=exchange_name,
                                                   stream_ids=[stream_id],
                                                   timeout=120.2)
        pubsub_client.activate_subscription(sub_id, timeout=120.3)
        sub.subscription_id = sub_id  # Bind the subscription to the standalone subscriber (easier cleanup, not good in real practice)

        stream_name = 'raw'
        parsed_config = self._stream_config[stream_name]
        stream_id = parsed_config['stream_id']
        exchange_name = create_unique_identifier("%s_queue" % stream_name)
        self._purge_queue(exchange_name)
        sub = StandaloneStreamSubscriber(exchange_name, recv_raw_data)
        sub.start()
        self._data_subscribers.append(sub)
        sub_id = pubsub_client.create_subscription(name=exchange_name,
                                                   stream_ids=[stream_id],
                                                   timeout=120.4)
        pubsub_client.activate_subscription(sub_id, timeout=120.5)
        sub.subscription_id = sub_id  # Bind the subscription to the standalone subscriber (easier cleanup, not good in real practice)

    def _purge_queue(self, queue):
        xn = self.container.ex_manager.create_xn_queue(queue)
        xn.purge()

    def _stop_data_subscribers(self):
        for subscriber in self._data_subscribers:
            pubsub_client = PubsubManagementServiceClient()
            if hasattr(subscriber, 'subscription_id'):
                try:
                    pubsub_client.deactivate_subscription(
                        subscriber.subscription_id, timeout=120.6)
                except:
                    pass
                pubsub_client.delete_subscription(subscriber.subscription_id,
                                                  timeout=120.7)
            subscriber.stop()

    ###############################################################################
    # Socket listen.
    ###############################################################################

    def _socket_listen(self, s, prompt, timeout):

        buf = ''
        starttime = time.time()
        while True:
            try:
                buf += s.recv(1024)
                print '##### Listening, got: %s' % buf
                if prompt and buf.find(prompt) != -1:
                    break
            except:
                gevent.sleep(1)

            finally:
                delta = time.time() - starttime
                if delta > timeout:
                    break
        return buf

    ###############################################################################
    # Assert helpers.
    ###############################################################################

    def assertSampleDict(self, val):
        """
        Verify the value is a sample dictionary for the sbe37.
        """
        # AgentCommandResult.result['parsed']
        """
        {'quality_flag': 'ok', 'preferred_timestamp': 'driver_timestamp',
        'stream_name': 'parsed', 'pkt_format_id': 'JSON_Data',
        'pkt_version': 1, 'values':
        [{'value_id': 'temp', 'value': 21.4894},
        {'value_id': 'conductivity', 'value': 13.22157},
        {'value_id': 'pressure', 'value': 146.186}],
        'driver_timestamp': 3556901018.170206}
        """

        self.assertIsInstance(val, dict)
        self.assertTrue(val.has_key('values'))
        values_list = val['values']
        self.assertTrue(isinstance(values_list, list))
        self.assertTrue(len(values_list) == 3)

        ids = ['temp', 'conductivity', 'pressure']
        ids_found = []

        for x in values_list:
            self.assertTrue(x.has_key('value_id'))
            self.assertTrue(x.has_key('value'))
            ids_found.append(x['value_id'])
            self.assertTrue(isinstance(x['value'], float))

        self.assertItemsEqual(ids, ids_found)

        self.assertTrue(val.has_key('driver_timestamp'))
        # Avoid shadowing the time module imported at module level.
        timestamp = val['driver_timestamp']
        self.assertTrue(isinstance(timestamp, float))

    def assertParamDict(self, pd, all_params=False):
        """
        Verify all device parameters exist and are correct type.
        """
        if all_params:
            self.assertEqual(set(pd.keys()), set(PARAMS.keys()))
            for (key, type_val) in PARAMS.iteritems():
                if type_val == list or type_val == tuple:
                    self.assertTrue(isinstance(pd[key], (list, tuple)))
                else:
                    self.assertTrue(isinstance(pd[key], type_val))

        else:
            for (key, val) in pd.iteritems():
                self.assertTrue(PARAMS.has_key(key))
                self.assertTrue(isinstance(val, PARAMS[key]))

    def assertParamVals(self, params, correct_params):
        """
        Verify parameters take the correct values.
        """
        self.assertEqual(set(params.keys()), set(correct_params.keys()))
        for (key, val) in params.iteritems():
            correct_val = correct_params[key]
            if isinstance(val, float):
                # Verify to 1% of the larger value.
                max_val = max(abs(val), abs(correct_val))
                self.assertAlmostEqual(val, correct_val, delta=max_val * .01)

            elif isinstance(val, (list, tuple)):
                # list of tuple.
                self.assertEqual(list(val), list(correct_val))

            else:
                # int, bool, str.
                self.assertEqual(val, correct_val)

    ###############################################################################
    # Tests.
    ###############################################################################

    def test_lost_connection(self):
        """
        test_lost_connection
        """

        # Set up a subscriber to collect command events.
        self._start_event_subscriber('ResourceAgentConnectionLostErrorEvent',
                                     1)
        self.addCleanup(self._stop_event_subscriber)

        # Start in uninitialized.
        state = self._ia_client.get_agent_state()
        self.assertEqual(state, ResourceAgentState.UNINITIALIZED)

        # Initialize the agent.
        cmd = AgentCommand(command=ResourceAgentEvent.INITIALIZE)
        retval = self._ia_client.execute_agent(cmd)
        state = self._ia_client.get_agent_state()
        self.assertEqual(state, ResourceAgentState.INACTIVE)

        # Activate.
        cmd = AgentCommand(command=ResourceAgentEvent.GO_ACTIVE)
        retval = self._ia_client.execute_agent(cmd)
        state = self._ia_client.get_agent_state()
        self.assertEqual(state, ResourceAgentState.IDLE)

        # Go into command mode.
        cmd = AgentCommand(command=ResourceAgentEvent.RUN)
        retval = self._ia_client.execute_agent(cmd)
        state = self._ia_client.get_agent_state()
        self.assertEqual(state, ResourceAgentState.COMMAND)

        # Start streaming.
        cmd = AgentCommand(command=SBE37ProtocolEvent.START_AUTOSAMPLE)
        retval = self._ia_client.execute_resource(cmd)

        # Wait for a while, collect some samples.
        gevent.sleep(10)

        # Blow the port agent out from under the agent.
        self._support.stop_pagent()

        # Loop until we resynchronize to LOST_CONNECTION/DISCONNECTED.
        # The test will time out if this doesn't occur.
        while True:
            state = self._ia_client.get_agent_state()
            if state == ResourceAgentState.LOST_CONNECTION:
                break
            else:
                gevent.sleep(1)

        # Verify the driver has transitioned to disconnected
        while True:
            state = self._ia_client.get_resource_state()
            if state == DriverConnectionState.DISCONNECTED:
                break
            else:
                gevent.sleep(1)

        # Make sure the lost connection error event arrives.
        self._async_event_result.get(timeout=CFG.endpoint.receive.timeout)
        self.assertEqual(len(self._events_received), 1)

        cmd = AgentCommand(command=ResourceAgentEvent.RESET)
        retval = self._ia_client.execute_agent(cmd)
        state = self._ia_client.get_agent_state()
        self.assertEqual(state, ResourceAgentState.UNINITIALIZED)

    #@unittest.skip('Fails on buildbot for some god unknown reason.')
    def test_autoreconnect(self):
        """
        test_autoreconnect
        """
        # Set up a subscriber to collect command events.
        self._start_event_subscriber('ResourceAgentConnectionLostErrorEvent',
                                     1)
        self.addCleanup(self._stop_event_subscriber)

        # Start in uninitialized.
        state = self._ia_client.get_agent_state()
        self.assertEqual(state, ResourceAgentState.UNINITIALIZED)

        # Initialize the agent.
        cmd = AgentCommand(command=ResourceAgentEvent.INITIALIZE)
        retval = self._ia_client.execute_agent(cmd)
        state = self._ia_client.get_agent_state()
        self.assertEqual(state, ResourceAgentState.INACTIVE)

        # Activate.
        cmd = AgentCommand(command=ResourceAgentEvent.GO_ACTIVE)
        retval = self._ia_client.execute_agent(cmd)
        state = self._ia_client.get_agent_state()
        self.assertEqual(state, ResourceAgentState.IDLE)

        # Go into command mode.
        cmd = AgentCommand(command=ResourceAgentEvent.RUN)
        retval = self._ia_client.execute_agent(cmd)
        state = self._ia_client.get_agent_state()
        self.assertEqual(state, ResourceAgentState.COMMAND)

        def poll_func(test):
            cmd = AgentCommand(command=SBE37ProtocolEvent.ACQUIRE_SAMPLE)
            while True:
                try:
                    gevent.sleep(.5)
                    test._ia_client.execute_resource(cmd)
                except IonException as ex:
                    # This exception could be ResourceException (broken pipe)
                    # Timeout or Conflict
                    log.info('#### pre shutdown exception: %s, %s',
                             str(type(ex)), str(ex))
                    break

            while True:
                try:
                    gevent.sleep(.5)
                    test._ia_client.execute_resource(cmd)
                    log.info('#### post shutdown got new sample.')
                    break
                except IonException as ex:
                    # This should be conflict.
                    log.info('#### post shutdown exception: %s, %s',
                             str(type(ex)), str(ex))

        timeout = gevent.Timeout(600)
        timeout.start()
        try:

            # Start the command greenlet and let poll for a bit.
            gl = gevent.spawn(poll_func, self)
            gevent.sleep(20)

            # Blow the port agent out from under the agent.
            self._support.stop_pagent()

            # Wait for a while, the supervisor is restarting the port agent.
            gevent.sleep(10)
            self._support.start_pagent()

            # Wait for the device to connect and start sampling again.
            gl.join()
            gl = None
            timeout.cancel()

        except (Exception, gevent.Timeout) as ex:
            if gl:
                gl.kill()
                gl = None
            self.fail('Could not reconnect to device: %s, %s' %
                      (str(type(ex)), str(ex)))

    def test_connect_failed(self):
        """
        test_connect_failed
        """
        # Stop the port agent.
        self._support.stop_pagent()

        # Sleep a bit.
        gevent.sleep(3)

        # Start in uninitialized.
        state = self._ia_client.get_agent_state()
        self.assertEqual(state, ResourceAgentState.UNINITIALIZED)

        # Initialize the agent.
        cmd = AgentCommand(command=ResourceAgentEvent.INITIALIZE)
        retval = self._ia_client.execute_agent(cmd)
        state = self._ia_client.get_agent_state()
        self.assertEqual(state, ResourceAgentState.INACTIVE)

        # Activate. This should fail because there is no port agent to connect to.
        cmd = AgentCommand(command=ResourceAgentEvent.GO_ACTIVE)
        with self.assertRaises(ResourceError):
            retval = self._ia_client.execute_agent(cmd)

        state = self._ia_client.get_agent_state()
        self.assertEqual(state, ResourceAgentState.INACTIVE)

    def test_get_set_alerts(self):
        """
        test_get_set_alerts
        Test specific of get/set alerts, including using result of get to
        set later.
        """
        state = self._ia_client.get_agent_state()
        self.assertEqual(state, ResourceAgentState.UNINITIALIZED)

        cmd = AgentCommand(command=ResourceAgentEvent.INITIALIZE)
        retval = self._ia_client.execute_agent(cmd)
        state = self._ia_client.get_agent_state()
        self.assertEqual(state, ResourceAgentState.INACTIVE)

        retval = self._ia_client.get_agent(['alerts'])['alerts']
        self.assertItemsEqual(retval, [])

        alert_def1 = {
            'name': 'temp_warning_interval',
            'stream_name': 'parsed',
            'description': 'Temperature is above normal range.',
            'alert_type': StreamAlertType.WARNING,
            'aggregate_type': AggregateStatusType.AGGREGATE_DATA,
            'value_id': 'temp',
            'lower_bound': None,
            'lower_rel_op': None,
            'upper_bound': 10.5,
            'upper_rel_op': '<',
            'alert_class': 'IntervalAlert'
        }

        alert_def2 = {
            'name': 'temp_alarm_interval',
            'stream_name': 'parsed',
            'description': 'Temperature is way above normal range.',
            'alert_type': StreamAlertType.WARNING,
            'aggregate_type': AggregateStatusType.AGGREGATE_DATA,
            'value_id': 'temp',
            'lower_bound': None,
            'lower_rel_op': None,
            'upper_bound': 15.5,
            'upper_rel_op': '<',
            'alert_class': 'IntervalAlert'
        }
        """
        Interval alerts are returned from get like this:
        (value and status fields describe state of the alert)
        {
        'name': 'temp_warning_interval',
        'stream_name': 'parsed',
        'description': 'Temperature is above normal range.',
        'alert_type': 1,
        'aggregate_type': 2,
        'value_id': 'temp',
        'lower_bound': None,
        'lower_rel_op': None,
        'upper_bound': 10.5,
        'upper_rel_op': '<',
        'alert_class': 'IntervalAlert',

        'status': None,
        'value': None
        }
        """

        alert_def3 = {
            'name': 'late_data_warning',
            'stream_name': 'parsed',
            'description': 'Expected data has not arrived.',
            'alert_type': StreamAlertType.WARNING,
            'aggregate_type': AggregateStatusType.AGGREGATE_COMMS,
            'time_delta': 180,
            'alert_class': 'LateDataAlert'
        }
        """
        Late data alerts are returned from get like this:
        (value and status fields describe state of the alert)
        {
        'name': 'late_data_warning',
        'stream_name': 'parsed',
        'description': 'Expected data has not arrived.',
        'alert_type': 1,
        'aggregate_type': 1,
        'value_id': None,
        'time_delta': 180,
        'alert_class': 'LateDataAlert',
        
        'status': None,
        'value': None
        }
        """
        """
        [
            {'status': None,
            'alert_type': 1,
            'name': 'temp_warning_interval',
            'upper_bound': 10.5,
            'lower_bound': None,
            'aggregate_type': 2,
            'alert_class': 'IntervalAlert',
            'value': None,
            'value_id': 'temp',
            'lower_rel_op': None,
            'upper_rel_op': '<',
            'description': 'Temperature is above normal range.'},
            {'status': None,
            'alert_type': 1,
            'name': 'temp_alarm_interval',
            'upper_bound': 15.5,
            'lower_bound': None,
            'aggregate_type': 2,
            'alert_class': 'IntervalAlert',
            'value': None,
            'value_id': 'temp',
            'lower_rel_op': None,
            'upper_rel_op': '<',
            'description': 'Temperature is way above normal range.'},
            {'status': None,
             'stream_name': 'parsed',
             'alert_type': 1,
             'name': 'late_data_warning',
             'aggregate_type': 1,
             'alert_class': 'LateDataAlert',
             'value': None,
             'time_delta': 180,
             'description': 'Expected data has not arrived.'}
        ]
        """

        orig_alerts = [alert_def1, alert_def2, alert_def3]
        self._ia_client.set_agent({'alerts': orig_alerts})

        retval = self._ia_client.get_agent(['alerts'])['alerts']
        self.assertTrue(len(retval) == 3)
        alerts = retval

        self._ia_client.set_agent({'alerts': ['clear']})
        retval = self._ia_client.get_agent(['alerts'])['alerts']
        self.assertItemsEqual(retval, [])

        self._ia_client.set_agent({'alerts': alerts})
        retval = self._ia_client.get_agent(['alerts'])['alerts']
        self.assertTrue(len(retval) == 3)

        count = 0
        for x in retval:
            x.pop('status')
            x.pop('value')
            for y in orig_alerts:
                if x['name'] == y['name']:
                    count += 1
                    self.assertItemsEqual(x.keys(), y.keys())
        self.assertEquals(count, 3)

        cmd = AgentCommand(command=ResourceAgentEvent.RESET)
        retval = self._ia_client.execute_agent(cmd)
        state = self._ia_client.get_agent_state()
        self.assertEqual(state, ResourceAgentState.UNINITIALIZED)
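
The _start_event_subscriber helper above shows a recurring gevent testing pattern: a callback accumulates events and fires an AsyncResult once the expected count arrives, while the test blocks on get() with a timeout. A self-contained sketch of just that pattern (the names are illustrative, not part of the test suite):

import gevent
from gevent.event import AsyncResult

received = []
done = AsyncResult()
expected = 3

def consume_event(event):
    received.append(event)
    if len(received) == expected:
        done.set()

for i in range(expected):
    gevent.spawn(consume_event, i)

done.get(timeout=5)  # raises gevent.Timeout if the events never arrive
assert len(received) == expected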
Example #40
0
 def __init__(self):
     self.accumulated = 0
     self.price = AsyncResult()
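
AsyncResult is a write-once container: one greenlet blocks on get() while another delivers the value with set(). A minimal sketch of how the price attribute above might be used (assumed usage, not taken from the original class):

import gevent
from gevent.event import AsyncResult

price = AsyncResult()

def quote():
    gevent.sleep(0.1)
    price.set(42.0)

gevent.spawn(quote)
print(price.get())  # blocks until quote() calls set(), then prints 42.0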
Example #41
0
class DiffractometerMockup(GenericDiffractometer):
    """
    Descript. :
    """
    def __init__(self, *args):
        """
        Descript. :
        """
        GenericDiffractometer.__init__(self, *args)

    def init(self):
        """
        Descript. :
        """
        # self.image_width = 100
        # self.image_height = 100

        GenericDiffractometer.init(self)
        self.x_calib = 0.000444
        self.y_calib = 0.000446
        self.last_centred_position = [318, 238]

        self.pixels_per_mm_x = 1.0 / self.x_calib
        self.pixels_per_mm_y = 1.0 / self.y_calib
        self.beam_position = [318, 238]

        self.current_phase = GenericDiffractometer.PHASE_CENTRING

        self.cancel_centring_methods = {}
        self.current_motor_positions = {
            "phiy": 1.0,
            "sampx": 0.0,
            "sampy": -1.0,
            "zoom": 8.53,
            "focus": -0.42,
            "phiz": 1.1,
            "phi": 311.1,
            "kappa": 11,
            "kappa_phi": 22.0,
        }
        self.move_motors(self._get_random_centring_position())

        self.current_state_dict = {}
        self.centring_status = {"valid": False}
        self.centring_time = 0

        # self.image_width = 400
        # self.image_height = 400

        self.mount_mode = self.getProperty("sample_mount_mode")
        if self.mount_mode is None:
            self.mount_mode = "manual"

        self.equipment_ready()

        self.connect(self.motor_hwobj_dict["phi"], "valueChanged",
                     self.phi_motor_moved)
        self.connect(self.motor_hwobj_dict["phiy"], "valueChanged",
                     self.phiy_motor_moved)
        self.connect(self.motor_hwobj_dict["phiz"], "valueChanged",
                     self.phiz_motor_moved)
        self.connect(self.motor_hwobj_dict["kappa"], "valueChanged",
                     self.kappa_motor_moved)
        self.connect(
            self.motor_hwobj_dict["kappa_phi"],
            "valueChanged",
            self.kappa_phi_motor_moved,
        )
        self.connect(self.motor_hwobj_dict["sampx"], "valueChanged",
                     self.sampx_motor_moved)
        self.connect(self.motor_hwobj_dict["sampy"], "valueChanged",
                     self.sampy_motor_moved)

    def getStatus(self):
        """
        Descript. :
        """
        return "ready"

    def execute_server_task(self, method, timeout=30, *args):
        return

    def in_plate_mode(self):
        return self.mount_mode == "plate"

    def use_sample_changer(self):
        return self.mount_mode == "sample_changer"

    def is_reversing_rotation(self):
        return True

    def get_grid_direction(self):
        """
        Descript. :
        """
        return self.grid_direction

    def manual_centring(self):
        """
        Descript. :
        """
        for click in range(3):
            self.user_clicked_event = AsyncResult()
            x, y = self.user_clicked_event.get()
            if click < 2:
                self.motor_hwobj_dict["phi"].set_value_relative(90)
        self.last_centred_position[0] = x
        self.last_centred_position[1] = y
        centred_pos_dir = self._get_random_centring_position()
        return centred_pos_dir

    def automatic_centring(self):
        """Automatic centring procedure"""
        centred_pos_dir = self._get_random_centring_position()
        self.emit("newAutomaticCentringPoint", centred_pos_dir)
        return centred_pos_dir

    def _get_random_centring_position(self):
        """Get random centring result for current positions"""

        # Names of motors to vary during centring
        vary_actuator_names = ("sampx", "sampy", "phiy")

        # Range of random variation
        var_range = 0.08

        # absolute value limit for varied motors
        var_limit = 2.0

        result = self.current_motor_positions.copy()
        for tag in vary_actuator_names:
            val = result.get(tag)
            if val is not None:
                random_num = random.random()
                var = (random_num - 0.5) * var_range
                val += var
                if abs(val) > var_limit:
                    val *= 1 - var_range / var_limit
                result[tag] = val
        #
        return result

    def is_ready(self):
        """
        Descript. :
        """
        return True

    def is_valid(self):
        """
        Descript. :
        """
        return True

    def invalidate_centring(self):
        """
        Descript. :
        """
        if self.current_centring_procedure is None and self.centring_status[
                "valid"]:
            self.centring_status = {"valid": False}
            # self.emitProgressMessage("")
            self.emit("centringInvalid", ())

    def get_centred_point_from_coord(self, x, y, return_by_names=None):
        """
        Descript. :
        """
        centred_pos_dir = self._get_random_centring_position()
        return centred_pos_dir

    def get_calibration_data(self, offset):
        """
        Descript. :
        """
        # return (1.0 / self.x_calib, 1.0 / self.y_calib)
        return (1.0 / self.x_calib, 1.0 / self.y_calib)

    def refresh_omega_reference_position(self):
        """
        Descript. :
        """
        return

    # def get_omega_axis_position(self):
    #     """
    #     Descript. :
    #     """
    #     return self.current_positions_dict.get("phi")

    def beam_position_changed(self, value):
        """
        Descript. :
        """
        self.beam_position = value

    def get_current_centring_method(self):
        """
        Descript. :
        """
        return self.current_centring_method

    def motor_positions_to_screen(self, centred_positions_dict):
        """
        Descript. :
        """
        return self.last_centred_position[0], self.last_centred_position[1]

    def moveToCentredPosition(self, centred_position, wait=False):
        """
        Descript. :
        """
        try:
            return self.move_to_centred_position(centred_position)
        except Exception:
            logging.exception("Could not move to centred position")

    def phi_motor_moved(self, pos):
        """
        Descript. :
        """
        self.current_motor_positions["phi"] = pos
        self.emit("phiMotorMoved", pos)

    def phiy_motor_moved(self, pos):
        self.current_motor_positions["phiy"] = pos

    def phiz_motor_moved(self, pos):
        self.current_motor_positions["phiz"] = pos

    def sampx_motor_moved(self, pos):
        self.current_motor_positions["sampx"] = pos

    def sampy_motor_moved(self, pos):
        self.current_motor_positions["sampy"] = pos

    def kappa_motor_moved(self, pos):
        """
        Descript. :
        """
        self.current_motor_positions["kappa"] = pos
        if time.time() - self.centring_time > 1.0:
            self.invalidate_centring()
        self.emit_diffractometer_moved()
        self.emit("kappaMotorMoved", pos)

    def kappa_phi_motor_moved(self, pos):
        """
        Descript. :
        """
        self.current_motor_positions["kappa_phi"] = pos
        if time.time() - self.centring_time > 1.0:
            self.invalidate_centring()
        self.emit_diffractometer_moved()
        self.emit("kappaPhiMotorMoved", pos)

    def refresh_video(self):
        """
        Descript. :
        """
        self.emit("minidiffStateChanged", "testState")
        if HWR.beamline.beam:
            HWR.beamline.beam.beam_pos_hor_changed(300)
            HWR.beamline.beam.beam_pos_ver_changed(200)

    def start_auto_focus(self):
        """
        Descript. :
        """
        return

    def move_to_beam(self, x, y, omega=None):
        """
        Descript. : function to create a centring point based on all motors
                    positions.
        """

        print(("moving to beam position: %d %d" %
               (self.beam_position[0], self.beam_position[1])))

    def move_to_coord(self, x, y, omega=None):
        """
        Descript. : function to create a centring point based on all motors
                    positions.
        """
        warnings.warn("Deprecated method, call move_to_beam instead",
                      DeprecationWarning)
        return self.move_to_beam(x, y, omega)

    def start_move_to_beam(self, coord_x=None, coord_y=None, omega=None):
        """
        Descript. :
        """
        self.last_centred_position[0] = coord_x
        self.last_centred_position[1] = coord_y
        self.centring_time = time.time()
        curr_time = time.strftime("%Y-%m-%d %H:%M:%S")
        self.centring_status = {
            "valid": True,
            "startTime": curr_time,
            "endTime": curr_time,
        }
        motors = self.get_positions()
        motors["beam_x"] = 0.1
        motors["beam_y"] = 0.1
        self.last_centred_position[0] = coord_x
        self.last_centred_position[1] = coord_y
        self.centring_status["motors"] = motors
        self.centring_status["valid"] = True
        self.centring_status["angleLimit"] = False
        self.emit_progress_message("")
        self.accept_centring()
        self.current_centring_method = None
        self.current_centring_procedure = None

    def re_emit_values(self):
        self.emit("zoomMotorPredefinedPositionChanged", None, None)
        omega_ref = [0, 238]
        self.emit("omegaReferenceChanged", omega_ref)

    def move_kappa_and_phi(self, kappa, kappa_phi):
        return

    def get_osc_max_speed(self):
        return 66

    def get_osc_limits(self):
        if self.in_plate_mode():
            return (170, 190)
        else:
            return (-360, 360)

    def get_scan_limits(self, speed=None, num_images=None, exp_time=None):
        if self.in_plate_mode():
            return (170, 190)
        else:
            return (-360, 360)

    def get_osc_dynamic_limits(self):
        """Returns dynamic limits of oscillation axis"""
        return (0, 20)

    def get_scan_dynamic_limits(self, speed=None):
        return (-360, 360)

    def move_omega_relative(self, relative_angle):
        self.motor_hwobj_dict["phi"].set_value_relative(relative_angle, 5)

    def set_phase(self, phase, timeout=None):
        self.current_phase = str(phase)
        self.emit("minidiffPhaseChanged", (self.current_phase, ))

    def get_point_from_line(self, point_one, point_two, index, images_num):
        return point_one.as_dict()
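
The manual_centring method above blocks on a fresh AsyncResult for each click, and the GUI wakes it by delivering the coordinates with set(). A reduced sketch of that hand-off (image_clicked is hypothetical here):

import gevent
from gevent.event import AsyncResult

user_clicked_event = AsyncResult()

def image_clicked(x, y):
    # Called from the GUI side; wakes the waiting centring greenlet.
    user_clicked_event.set((x, y))

gevent.spawn_later(0.1, image_clicked, 318, 238)
x, y = user_clicked_event.get()  # blocks until the click arrives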
Example #42
0
 def __init__(self, request, asynccall):
     # requests and responses
     self.ioRequest = request
     self.ioResult = AsyncResult()
     self.ioCall = asynccall
Example #43
0
 def get_dirty_values_async_result(self):
     from gevent.event import AsyncResult
     ret = AsyncResult()
     ret.set(True)
     return ret
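
Returning an already-set AsyncResult, as above, gives callers a uniform wait-style API even when the answer is known synchronously. A short sketch of why that works:

from gevent.event import AsyncResult

ret = AsyncResult()
ret.set(True)
assert ret.ready()        # already resolved
assert ret.get() is True  # returns without blocking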
Example #44
0
 def test_can_wrap_existing_async_result(self):
     async_result = AsyncResult()
     future = GeventFuture(async_result)
     self.assertEquals(async_result, future.async_result)
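
The test above only requires that the future expose the AsyncResult it wraps. A minimal sketch of such a wrapper, assuming it simply delegates to the underlying result (this is not the actual GeventFuture implementation):

from gevent.event import AsyncResult

class GeventFuture(object):
    def __init__(self, async_result=None):
        self.async_result = async_result or AsyncResult()

    def result(self, timeout=None):
        return self.async_result.get(timeout=timeout)

async_result = AsyncResult()
future = GeventFuture(async_result)
assert future.async_result is async_result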
Example #45
0
    def _start_services(self):
        from raiden.ui.console import Console
        from raiden.api.python import RaidenAPI

        config = deepcopy(App.DEFAULT_CONFIG)
        if self._options.get('extra_config', dict()):
            merge_dict(config, self._options['extra_config'])
            del self._options['extra_config']
        self._options['config'] = config

        if self._options['showconfig']:
            print('Configuration Dump:')
            dump_config(config)
            dump_cmd_options(self._options)
            dump_module('settings', settings)
            dump_module('constants', constants)

        # this catches exceptions raised when waiting for the stalecheck to complete
        try:
            app_ = run_app(**self._options)
        except (EthNodeCommunicationError, RequestsConnectionError):
            print(ETHEREUM_NODE_COMMUNICATION_ERROR)
            sys.exit(1)
        except RuntimeError as e:
            click.secho(str(e), fg='red')
            sys.exit(1)
        except EthNodeInterfaceError as e:
            click.secho(str(e), fg='red')
            sys.exit(1)

        tasks = [app_.raiden]  # RaidenService takes care of Transport and AlarmTask

        domain_list = []
        if self._options['rpccorsdomain']:
            if ',' in self._options['rpccorsdomain']:
                for domain in self._options['rpccorsdomain'].split(','):
                    domain_list.append(str(domain))
            else:
                domain_list.append(str(self._options['rpccorsdomain']))

        self._raiden_api = RaidenAPI(app_.raiden)

        if self._options['rpc']:
            rest_api = RestAPI(self._raiden_api)
            (api_host, api_port) = split_endpoint(self._options['api_address'])
            api_server = APIServer(
                rest_api,
                config={'host': api_host, 'port': api_port},
                cors_domain_list=domain_list,
                web_ui=self._options['web_ui'],
                eth_rpc_endpoint=self._options['eth_rpc_endpoint'],
            )

            try:
                api_server.start()
            except APIServerPortInUseError:
                click.secho(
                    f'ERROR: API Address {api_host}:{api_port} is in use. '
                    f'Use --api-address <host:port> to specify a different port.',
                    fg='red',
                )
                sys.exit(1)

            print(
                'The Raiden API RPC server is now running at http://{}:{}/.\n\n'
                'See the Raiden documentation for all available endpoints at\n'
                'http://raiden-network.readthedocs.io/en/stable/rest_api.html'.format(
                    api_host,
                    api_port,
                ),
            )
            tasks.append(api_server)

        if self._options['console']:
            console = Console(app_)
            console.start()
            tasks.append(console)

        # spawn a greenlet to handle the version checking
        version = get_system_spec()['raiden']
        tasks.append(gevent.spawn(check_version, version))

        # spawn a greenlet to handle the gas reserve check
        tasks.append(gevent.spawn(check_gas_reserve, app_.raiden))
        # spawn a greenlet to handle the periodic check for the network id
        tasks.append(gevent.spawn(
            check_network_id,
            app_.raiden.chain.network_id,
            app_.raiden.chain.client.web3,
        ))

        # run the startup hook before blocking for shutdown

        self._startup_hook()

        # wait for interrupt
        event = AsyncResult()

        def sig_set(sig=None, _frame=None):
            event.set(sig)

        gevent.signal(signal.SIGQUIT, sig_set)
        gevent.signal(signal.SIGTERM, sig_set)
        gevent.signal(signal.SIGINT, sig_set)

        # quit if any task exits, successfully or not
        for task in tasks:
            task.link(event)

        try:
            event.get()
            print('Signal received. Shutting down ...')
        except (EthNodeCommunicationError, RequestsConnectionError):
            print(ETHEREUM_NODE_COMMUNICATION_ERROR)
            sys.exit(1)
        except RaidenError as ex:
            click.secho(f'FATAL: {ex}', fg='red')
        except Exception as ex:
            file = NamedTemporaryFile(
                'w',
                prefix=f'raiden-exception-{datetime.utcnow():%Y-%m-%dT%H-%M}',
                suffix='.txt',
                delete=False,
            )
            with file as traceback_file:
                traceback.print_exc(file=traceback_file)
                click.secho(
                    f'FATAL: An unexpected exception occurred. '
                    f'A traceback has been written to {traceback_file.name}\n'
                    f'{ex}',
                    fg='red',
                )
        finally:
            self._shutdown_hook()

            def stop_task(task):
                try:
                    if isinstance(task, Runnable):
                        task.stop()
                    else:
                        task.kill()
                finally:
                    task.get()  # re-raise

            gevent.joinall(
                [gevent.spawn(stop_task, task) for task in tasks],
                app_.config.get('shutdown_timeout', settings.DEFAULT_SHUTDOWN_TIMEOUT),
                raise_error=True,
            )

        return app_
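
_start_services above waits on a single AsyncResult that is fired either by a Unix signal handler or by any linked task exiting, whichever comes first. A condensed sketch of that shutdown pattern (using the same gevent.signal call as the code above):

import signal
import gevent
from gevent.event import AsyncResult

event = AsyncResult()

def sig_set(sig=None, _frame=None):
    event.set(sig)

gevent.signal(signal.SIGTERM, sig_set)  # a signal fires the event...

task = gevent.spawn(gevent.sleep, 0.1)
task.link(event)                        # ...and so does task exit

event.get()  # returns on the first of: SIGTERM or task completion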
Example #46
0
 def leave_async(self):
     """ Async version of `leave()`
     """
     leave_result = AsyncResult()
     gevent.spawn(self.leave).link(leave_result)
     return leave_result
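
leave_async above turns a blocking method into an asynchronous one by spawning it and linking the greenlet to an AsyncResult; get() then yields the return value, or re-raises the greenlet's exception. A runnable sketch with a stand-in leave():

import gevent
from gevent.event import AsyncResult

def leave():
    gevent.sleep(0.1)
    return 'left'

def leave_async():
    leave_result = AsyncResult()
    gevent.spawn(leave).link(leave_result)
    return leave_result

assert leave_async().get() == 'left'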
Example #47
0
    def init(self):
        """
        Descript. :
        """
        self.x_calib = 0.000444
        self.y_calib = 0.000446

        self.pixels_per_mm_x = 1.0 / self.x_calib
        self.pixels_per_mm_y = 1.0 / self.y_calib
        self.beam_position = [200, 200]

        self.centring_methods = {
            DiffractometerMockup.MANUAL3CLICK_MODE: self.start_3Click_centring,
            DiffractometerMockup.C3D_MODE: self.start_automatic_centring
        }
        self.cancel_centring_methods = {}
        self.current_positions_dict = {
            'phiy': 0,
            'phiz': 0,
            'sampx': 0,
            'sampy': 0,
            'zoom': 0,
            'phi': 17.6,
            'focus': 0,
            'kappa': 0,
            'kappa_phi': 0,
            'beam_x': 0,
            'beam_y': 0
        }
        self.centring_status = {"valid": False}
        self.centring_time = 0
        self.user_confirms_centring = True
        self.user_clicked_event = AsyncResult()
        self.image_width = 400
        self.image_height = 400
        self.equipmentReady()

        self.kappaMotor = self.getDeviceByRole('kappa')
        self.kappaPhiMotor = self.getDeviceByRole('kappa_phi')

        if self.kappaMotor is not None:
            self.connect(self.kappaMotor, "positionChanged",
                         self.kappa_motor_moved)
        else:
            logging.getLogger("HWR").error(
                'MiniDiff: kappa motor is not defined')

        if self.kappaPhiMotor is not None:
            self.connect(self.kappaPhiMotor, 'positionChanged',
                         self.kappa_phi_motor_moved)
        else:
            logging.getLogger("HWR").error(
                'MiniDiff: kappa phi motor is not defined')

        self.beam_info_hwobj = HardwareRepository.HardwareRepository().\
                                getHardwareObject(self.getProperty("beam_info"))
        if self.beam_info_hwobj is not None:
            self.connect(self.beam_info_hwobj, 'beamPosChanged',
                         self.beam_position_changed)
        else:
            logging.getLogger("HWR").debug('Minidiff: Beaminfo is not defined')

        try:
            self.zoom_centre = eval(self.getProperty("zoomCentre"))
        except:
            if self.image_width is not None and self.image_height is not None:
                self.zoom_centre = {
                    'x': self.image_width / 2,
                    'y': self.image_height / 2
                }
                self.beam_position = [
                    self.image_width / 2, self.image_height / 2
                ]
                logging.getLogger("HWR").warning('MiniDiff: Zoom center is ' +\
                       'not defined continuing with the middle: %s' % self.zoom_centre)
            else:
                logging.getLogger("HWR").warning(
                    'MiniDiff: Neither zoom centre nor camera size is defined')

        try:
            self.omega_reference_par = eval(self.getProperty("omegaReference"))
            self.omega_reference_motor = self.getDeviceByRole(
                self.omega_reference_par["motor_name"])
            self.connect(self.omega_reference_motor, 'positionChanged',
                         self.omega_reference_motor_moved)
        except:
            logging.getLogger("HWR").warning(
                'MiniDiff: Omega axis is not defined')
Example #48
0
class DiffractometerMockup(Equipment):
    """
    Descript. :
    """
    MANUAL3CLICK_MODE = "Manual 3-click"
    C3D_MODE = "Computer automatic"
    MOVE_TO_BEAM_MODE = "Move to Beam"

    def __init__(self, *args):
        """
        Descript. :
        """
        Equipment.__init__(self, *args)

        qmo.CentredPosition.set_diffractometer_motor_names(
            "phi", "focus", "phiz", "phiy", "zoom", "sampx", "sampy", "kappa",
            "kappa_phi")

        self.phiMotor = None
        self.phizMotor = None
        self.phiyMotor = None
        self.lightMotor = None
        self.zoomMotor = None
        self.sampleXMotor = None
        self.sampleYMotor = None
        self.kappaMotor = None
        self.kappaPhiMotor = None
        self.camera = None
        self.beam_info_hwobj = None

        self.beam_position = None
        self.x_calib = None
        self.y_calib = None
        self.pixels_per_mm_x = None
        self.pixels_per_mm_y = None
        self.image_width = None
        self.image_height = None
        self.current_sample_info = None
        self.cancel_centring_methods = None
        self.current_centring_procedure = None
        self.current_centring_method = None
        self.current_positions_dict = None
        self.centring_methods = None
        self.centring_status = None
        self.centring_time = None
        self.user_confirms_centring = None
        self.user_clicked_event = None

        self.connect(self, 'equipmentReady', self.equipmentReady)
        self.connect(self, 'equipmentNotReady', self.equipmentNotReady)

        self.startCentringMethod = self.start_centring_method
        self.cancelCentringMethod = self.cancel_centring_method
        self.imageClicked = self.image_clicked
        self.acceptCentring = self.accept_centring
        self.rejectCentring = self.reject_centring
        self.getCentringStatus = self.get_centring_status
        self.takeSnapshots = self.take_snapshots

    def init(self):
        """
        Descript. :
        """
        self.x_calib = 0.000444
        self.y_calib = 0.000446

        self.pixels_per_mm_x = 1.0 / self.x_calib
        self.pixels_per_mm_y = 1.0 / self.y_calib
        self.beam_position = [200, 200]

        self.centring_methods = {
            DiffractometerMockup.MANUAL3CLICK_MODE: self.start_3Click_centring,
            DiffractometerMockup.C3D_MODE: self.start_automatic_centring
        }
        self.cancel_centring_methods = {}
        self.current_positions_dict = {
            'phiy': 0,
            'phiz': 0,
            'sampx': 0,
            'sampy': 0,
            'zoom': 0,
            'phi': 17.6,
            'focus': 0,
            'kappa': 0,
            'kappa_phi': 0,
            'beam_x': 0,
            'beam_y': 0
        }
        self.centring_status = {"valid": False}
        self.centring_time = 0
        self.user_confirms_centring = True
        self.user_clicked_event = AsyncResult()
        self.image_width = 400
        self.image_height = 400
        self.equipmentReady()

        self.kappaMotor = self.getDeviceByRole('kappa')
        self.kappaPhiMotor = self.getDeviceByRole('kappa_phi')

        if self.kappaMotor is not None:
            self.connect(self.kappaMotor, "positionChanged",
                         self.kappa_motor_moved)
        else:
            logging.getLogger("HWR").error(
                'MiniDiff: kappa motor is not defined')

        if self.kappaPhiMotor is not None:
            self.connect(self.kappaPhiMotor, 'positionChanged',
                         self.kappa_phi_motor_moved)
        else:
            logging.getLogger("HWR").error(
                'MiniDiff: kappa phi motor is not defined')

        self.beam_info_hwobj = HardwareRepository.HardwareRepository().\
                                getHardwareObject(self.getProperty("beam_info"))
        if self.beam_info_hwobj is not None:
            self.connect(self.beam_info_hwobj, 'beamPosChanged',
                         self.beam_position_changed)
        else:
            logging.getLogger("HWR").debug('Minidiff: Beaminfo is not defined')

        try:
            self.zoom_centre = eval(self.getProperty("zoomCentre"))
        except:
            if self.image_width is not None and self.image_height is not None:
                self.zoom_centre = {
                    'x': self.image_width / 2,
                    'y': self.image_height / 2
                }
                self.beam_position = [
                    self.image_width / 2, self.image_height / 2
                ]
                logging.getLogger("HWR").warning('MiniDiff: Zoom center is ' +\
                       'not defined continuing with the middle: %s' % self.zoom_centre)
            else:
                logging.getLogger("HWR").warning(
                    'MiniDiff: Neither zoom centre nor camera size is defined')

        try:
            self.omega_reference_par = eval(self.getProperty("omegaReference"))
            self.omega_reference_motor = self.getDeviceByRole(
                self.omega_reference_par["motor_name"])
            self.connect(self.omega_reference_motor, 'positionChanged',
                         self.omega_reference_motor_moved)
        except:
            logging.getLogger("HWR").warning(
                'MiniDiff: Omega axis is not defined')

    def getStatus(self):
        """
        Descript. :
        """
        return "ready"

    def manual_centring(self):
        """
        Descript. :
        """
        self.user_clicked_event = AsyncResult()
        x, y = self.user_clicked_event.get()
        # Assumed to be an instance attribute, as in the other
        # DiffractometerMockup example above.
        self.last_centred_position[0] = x
        self.last_centred_position[1] = y
        random_num = random.random()
        centred_pos_dir = {
            'phiy': random_num * 10,
            'phiz': random_num,
            'sampx': 0.0,
            'sampy': 9.3,
            'zoom': 8.53,
            'phi': 311.1,
            'focus': -0.42,
            'kappa': self.kappaMotor.getPosition(),
            'kappa_phi': self.kappaPhiMotor.getPosition()
        }
        return centred_pos_dir

    def set_sample_info(self, sample_info):
        """
        Descript. :
        """
        self.current_sample_info = sample_info

    def emit_diffractometer_moved(self, *args):
        """
        Descript. :
        """
        self.emit("diffractometerMoved", ())

    def isReady(self):
        """
        Descript. :
        """
        return True

    def isValid(self):
        """
        Descript. :
        """
        return True

    def equipmentReady(self):
        """
        Descript. :
        """
        self.emit('minidiffReady', ())

    def equipmentNotReady(self):
        """
        Descript. :
        """
        self.emit('minidiffNotReady', ())

    def invalidate_centring(self):
        """
        Descript. :
        """
        if self.current_centring_procedure is None and self.centring_status[
                "valid"]:
            self.centring_status = {"valid": False}
            self.emitProgressMessage("")
            self.emit('centringInvalid', ())

    def kappa_motor_moved(self, pos):
        """
        Descript. :
        """
        self.emit_diffractometer_moved()
        self.emit('kappaMoved', pos)

    def kappa_phi_motor_moved(self, pos):
        """
        Descript. :
        """
        self.emit_diffractometer_moved()
        self.emit('kappaPhiMoved', pos)

    def get_available_centring_methods(self):
        """
        Descript. :
        """
        return self.centring_methods.keys()

    def get_calibration_data(self, offset):
        """
        Descript. :
        """
        #return (1.0 / self.x_calib, 1.0 / self.y_calib)
        return (1.0 / self.x_calib, 1.0 / self.y_calib)

    def get_pixels_per_mm(self):
        """
        Descript. :
        """
        return (self.pixels_per_mm_x, self.pixels_per_mm_y)

    def refresh_omega_reference_position(self):
        """
        Descript. :
        """
        return

    def get_omega_axis_position(self):
        """
        Descript. :
        """
        return self.current_positions_dict.get("phi")

    def get_positions(self):
        """
        Descript. :
        """
        return self.current_positions_dict

    def get_current_positions_dict(self):
        """
        Descript. :
        """
        return self.current_positions_dict

    def beam_position_changed(self, value):
        """
        Descript. :
        """
        self.beam_position = value

    def start_centring_method(self, method, sample_info=None):
        """
        Descript. :
        """
        if self.current_centring_method is not None:
            logging.getLogger("HWR").error("already in centring method %s" %\
                    self.current_centring_method)
            return
        curr_time = time.strftime("%Y-%m-%d %H:%M:%S")
        self.centring_status = {"valid": False, "startTime": curr_time}
        self.emit_centring_started(method)
        try:
            fun = self.centring_methods[method]
        except KeyError, diag:
            logging.getLogger("HWR").error("unknown centring method (%s)" % \
                    str(diag))
            self.emit_centring_failed()
        else:
Example #49
0
 def _add_result(self):
     result = AsyncResult()
     ident = id(result)
     self._results[ident] = result
     return ident, result
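
_add_result above registers an AsyncResult under a unique identifier so a response arriving later can be routed to its waiter. A self-contained sketch of that correlation pattern (the module-level _results dict and the deliver helper are illustrative):

import gevent
from gevent.event import AsyncResult

_results = {}

def add_result():
    result = AsyncResult()
    ident = id(result)
    _results[ident] = result
    return ident, result

def deliver(ident, value):
    # Called when the matching response arrives.
    _results.pop(ident).set(value)

ident, result = add_result()
gevent.spawn_later(0.1, deliver, ident, 'response')
assert result.get() == 'response'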
Example #50
0
class ThreadManager(object):
    """
    Manage spawning greenlet threads and ensure they're alive.
    TODO: Add heartbeats with zeromq for monitoring and restarting.
    """
    def __init__(self, heartbeat_secs=10.0, failure_notify_callback=None):
        """
        Creates a ThreadManager.

        @param  heartbeat_secs              Seconds between heartbeats.
        @param  failure_notify_callback     Callback to execute when a child fails unexpectedly. Should be
                                            a callable taking two params: this process supervisor, and the
                                            thread that failed.
        """
        super(ThreadManager, self).__init__()

        # NOTE: Assumes that pids never overlap between the various process types
        self.children = []
        self.heartbeat_secs = heartbeat_secs
        self._shutting_down = False
        self._failure_notify_callback = failure_notify_callback
        self._shutdown_event = AsyncResult()

    def _create_thread(self, target=None, **kwargs):
        """
        Creates a "thread" of the proper type.
        """
        return PyonThread(target=target, **kwargs)

    def spawn(self, target=None, **kwargs):
        """
        Spawn a pyon thread

        """
        log.debug("ThreadManager.spawn, target=%s, kwargs=%s", target, kwargs)
        proc = self._create_thread(target=target, **kwargs)
        proc.supervisor = self

        proc.start()
        self.children.append(proc)

        # install failure monitor
        proc.proc.link_exception(self._child_failed)

        return proc

    def _child_failed(self, gproc):
        # extract any PyonThreadTracebacks - one should be last
        extra = ""
        if len(gproc.exception.args) and isinstance(gproc.exception.args[-1],
                                                    PyonThreadTraceback):
            extra = "\n" + str(gproc.exception.args[-1])

        log.error("Child failed with an exception: (%s) %s%s", gproc,
                  gproc.exception, extra)
        if self._failure_notify_callback:
            self._failure_notify_callback(gproc)

    def ensure_ready(self, proc, errmsg=None, timeout=20):
        """
        Waits until either the thread dies or reports it is ready, whichever comes first.

        If the thread dies or times out while waiting for it to be ready, a ContainerError is raised.
        You must be sure the thread implements get_ready_event properly, otherwise this method
        returns immediately as the base class behavior simply passes.

        @param  proc        The thread to wait on.
        @param  errmsg      A custom error message to put in the ContainerError's message. May be blank.
        @param  timeout     Amount of time (in seconds) to wait for the ready event; default 20 seconds.
        @throws ContainerError  If the thread dies or if we get a timeout before the process signals ready.
        """
        if not errmsg:
            errmsg = "ensure_ready failed"

        ev = Event()

        def cb(*args, **kwargs):
            ev.set()

        # link either a greenlet failure due to exception OR a success via ready event
        proc.proc.link_exception(cb)
        ready_evt = proc.get_ready_event()
        ready_evt.rawlink(cb)

        retval = ev.wait(timeout=timeout)

        # unlink the events: ready event is probably harmless but the exception one, we want to install our own later
        ready_evt.unlink(cb)

        # if the thread is stopped while we are waiting, proc.proc is set to None
        if proc.proc is not None:
            proc.proc.unlink(cb)

        # raise an exception if:
        # - we timed out
        # - we caught an exception
        if not retval:
            raise ContainerError("%s (timed out)" % errmsg)
        elif proc.proc is not None and proc.proc.dead and not proc.proc.successful():
            raise ContainerError("%s (failed): %s" % (errmsg, proc.proc.exception))

    def child_stopped(self, proc):
        if proc in self.children:
            # no longer need to listen for exceptions
            if proc.proc is not None:
                proc.proc.unlink(self._child_failed)

    def join_children(self, timeout=None):
        """ Give child threads "timeout" seconds to shutdown, then forcibly terminate. """

        time_start = time.time()
        child_count = len(self.children)

        for proc in self.children:

            # if a child thread has already exited, we don't need to wait on anything -
            # it's already good to go and can be considered joined. Otherwise we will likely
            # double call notify_stop which is a bad thing.
            if proc.proc.dead:
                continue

            time_elapsed = time.time() - time_start
            if timeout is not None:
                time_remaining = timeout - time_elapsed

                if time_remaining > 0:
                    # The nice way; let it do cleanup
                    try:
                        proc.notify_stop()
                        proc.join(time_remaining)
                    except Exception:
                        # not playing nice? just kill it.
                        proc.stop()

                else:
                    # Out of time. Cya, sucker
                    proc.stop()
            else:
                proc.join()

        time_elapsed = time.time() - time_start
        #log.debug("Took %.2fs to shutdown %d child threads", time_elapsed, child_count)

        return time_elapsed

    def wait_children(self, timeout=None):
        """
        Performs a join to allow children to complete, then a get() to fetch their results.

        This will raise an exception if any of the children raises an exception.
        """
        self.join_children(timeout=timeout)
        return [x.get() for x in self.children]

    def target(self):
        try:
            while not self._shutting_down:
                self.send_heartbeats()
                self._shutdown_event.wait(timeout=self.heartbeat_secs)
        except Exception:
            # narrowed from a bare except so GreenletExit/KeyboardInterrupt
            # are not swallowed and logged as a thread death
            log.error("thread died", exc_info=True)

    def send_heartbeats(self):
        """ TODO: implement heartbeat and monitors """
        #log.debug("lub-dub")
        pass

    def shutdown(self, timeout=30.0):
        """
        @brief Give child thread "timeout" seconds to shutdown, then forcibly terminate.
        """
        self._shutting_down = True
        self._shutdown_event.set(True)
        elapsed = self.join_children(timeout)

        return elapsed
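
A hedged usage sketch of the lifecycle above (assumes the pyon environment
this class lives in; the worker body is hypothetical):

manager = ThreadManager(heartbeat_secs=5.0)

def worker():
    do_some_work()  # hypothetical workload

proc = manager.spawn(target=worker)
# Block until the thread reports ready or dies; raises ContainerError on
# timeout or failure.
manager.ensure_ready(proc, errmsg="worker failed to start", timeout=10)
# Later: give children 30 seconds to stop cleanly, then force-kill stragglers.
manager.shutdown(timeout=30.0)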
Example #51
blueprint2 = AppBlueprint(blueprint=Blueprint('HelloWorldPage2', __name__),
                          rule='/<string:action>')


def load(*args, **kwargs):
    return {}


# These blueprints will be registered with the Flask app and can be used to make your own endpoints.
@blueprint.blueprint.route('/test_blueprint')
def test_basic_blueprint():
    # This can be called using the url /apps/HelloWorld/test_blueprint
    return 'successfully called basic blueprint'


__random_num_event = AsyncResult()
__sync = Event()


def random_number_receiver():
    while True:
        data = __random_num_event.get()
        yield 'data: %s\n\n' % data
        __sync.wait()


def random_number_pusher():
    while True:
        __random_num_event.set(random.random())
        gevent.sleep(2)
        __sync.set()
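
One caveat worth noting: gevent's AsyncResult keeps its last value and an
Event stays set until clear() is called, so after the first push this
receiver can re-yield the latest number until a new one arrives. A hedged
sketch of how the generator would typically be exposed as a Server-Sent
Events endpoint (the route path and the flask.Response import are
assumptions, not part of the example):

from flask import Response  # assumed import

@blueprint.blueprint.route('/random_stream')
def stream_random_numbers():
    # text/event-stream makes the browser treat each yielded
    # 'data: ...\n\n' chunk as one SSE message
    return Response(random_number_receiver(), mimetype='text/event-stream')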
Example #52
def create(self):
    assert self.async_result is None, 'Trying to create async_result but it is not None'
    self.async_result = AsyncResult()
Example #53
def __init__(self, injector, target):
    self.client = injector.coinone_client
    self.target = target
    self.async_result = AsyncResult()
Example #54
            running_context.db.session.commit()
            return json.dumps(case_subscription.subscriptions_as_json())
        else:
            return json.dumps({"status": "Error: malformed JSON"})
    else:
        return json.dumps({"status": "Error: no JSON in request"})


@cases_page.route('/subscriptions/', methods=['GET'])
@auth_token_required
@roles_accepted(*running_context.user_roles['/cases'])
def display_subscriptions():
    return json.dumps(case_subscription.subscriptions_as_json())


__case_event_json = AsyncResult()
__sync_signal = Event()


def __case_event_stream():
    while True:
        data = __case_event_json.get()
        yield 'data: %s\n\n' % data
        __sync_signal.wait()


def __push_to_case_stream(sender, **kwargs):
    out = {'name': sender.name, 'ancestry': sender.ancestry}
    if 'data' in kwargs:
        out['data'] = kwargs['data']
    __case_event_json.set(json.dumps(out))
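
A hedged sketch of the endpoint that usually accompanies this stream: it
serves __case_event_stream as Server-Sent Events, mirroring the
display_subscriptions route above (the path and the flask.Response import
are assumptions):

from flask import Response  # assumed import

@cases_page.route('/subscriptions/stream', methods=['GET'])
@auth_token_required
def stream_case_events():
    return Response(__case_event_stream(), mimetype='text/event-stream')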
Example #55
    def start_mediated_transfer(self, token_address, amount, identifier,
                                target):
        # pylint: disable=too-many-locals

        async_result = AsyncResult()
        graph = self.token_to_channelgraph[token_address]

        available_routes = get_best_routes(
            graph,
            self.protocol.nodeaddresses_networkstatuses,
            self.address,
            target,
            amount,
            None,
        )

        if not available_routes:
            async_result.set(False)
            return async_result

        self.protocol.start_health_check(target)

        if identifier is None:
            identifier = create_default_identifier()

        route_state = RoutesState(available_routes)
        our_address = self.address
        block_number = self.get_block_number()

        transfer_state = LockedTransferState(
            identifier=identifier,
            amount=amount,
            token=token_address,
            initiator=self.address,
            target=target,
            expiration=None,
            hashlock=None,
            secret=None,
        )

        # Issue #489
        #
        # Raiden may fail after a state change using the random generator is
        # handled but right before the snapshot is taken. If that happens,
        # then on the next initialization, while Raiden is recovering and
        # applying the pending state changes, a new secret will be generated
        # and the resulting events won't match. This breaks the architecture
        # model, since it's assumed the re-execution of a state change will
        # always produce the same events.
        #
        # TODO: Remove the secret generator from the InitiatorState and add
        # the secret into all state changes that require one; this way the
        # secret will be serialized with the state change and the recovery
        # will use the same /random/ secret.
        random_generator = RandomSecretGenerator()

        init_initiator = ActionInitInitiator(
            our_address=our_address,
            transfer=transfer_state,
            routes=route_state,
            random_generator=random_generator,
            block_number=block_number,
        )

        state_manager = StateManager(initiator.state_transition, None)
        self.state_machine_event_handler.log_and_dispatch(
            state_manager, init_initiator)

        # TODO: implement the network timeout raiden.config['msg_timeout'] and
        # cancel the current transfer if it happens (issue #374)
        self.identifier_to_statemanagers[identifier].append(state_manager)
        self.identifier_to_results[identifier].append(async_result)

        return async_result
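
A hedged sketch of how a caller typically consumes the returned AsyncResult
(the raiden instance, token, target, and amount are placeholders):

async_result = raiden.start_mediated_transfer(
    token_address,        # placeholder token
    amount=10,
    identifier=None,      # a default identifier will be created
    target=target_address,
)
# Blocks the calling greenlet until the transfer state machine resolves;
# False means no route was available (or the transfer failed).
transfer_ok = async_result.get(timeout=30)  # raises gevent.Timeout if unset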
Example #56
class AlarmTask(Runnable):
    """ Task to notify when a block is mined. """
    def __init__(self, chain):
        super().__init__()

        self.callbacks = list()
        self.chain = chain
        self.chain_id = None
        self.known_block_number = None
        self._stop_event = AsyncResult()

        # TODO: Start with a larger sleep_time and decrease it as the
        # probability of a new block increases.
        self.sleep_time = 0.5

    def start(self):
        log.debug('Alarm task started', node=pex(self.chain.node_address))
        super().start()

    def _run(self):  # pylint: disable=method-hidden
        try:
            self.loop_until_stop()
        finally:
            self.callbacks = list()

    def register_callback(self, callback):
        """ Register a new callback.

        Note:
            The callback will be executed in the AlarmTask context and for
            this reason it should not block, otherwise we can miss block
            changes.
        """
        if not callable(callback):
            raise ValueError('callback is not a callable')

        self.callbacks.append(callback)

    def remove_callback(self, callback):
        """Remove callback from the list of callbacks if it exists"""
        if callback in self.callbacks:
            self.callbacks.remove(callback)

    def loop_until_stop(self):
        # The AlarmTask must have completed its first_run() before starting
        # the background greenlet.
        #
        # This is required because the first run will synchronize the node with
        # the blockchain since the last run.
        assert self.chain_id, 'chain_id not set'
        assert self.known_block_number is not None, 'known_block_number not set'

        chain_id = self.chain_id

        sleep_time = self.sleep_time
        while self._stop_event.wait(sleep_time) is not True:
            latest_block = self.chain.get_block(block_identifier='latest')
            self._maybe_run_callbacks(latest_block)

            if chain_id != self.chain.network_id:
                raise RuntimeError(
                    'Changing the underlying blockchain while the Raiden '
                    'node is running is not supported.')

    def first_run(self, known_block_number):
        """ Blocking call to update the local state, if necessary. """
        assert self.callbacks, 'callbacks not set'

        chain_id = self.chain.network_id
        latest_block = self.chain.get_block(block_identifier='latest')

        log.debug(
            'Alarm task first run',
            known_block_number=known_block_number,
            latest_block_number=latest_block['number'],
            latest_gas_limit=latest_block['gasLimit'],
            latest_block_hash=to_hex(latest_block['hash']),
        )

        self.known_block_number = known_block_number
        self.chain_id = chain_id
        self._maybe_run_callbacks(latest_block)

    def _maybe_run_callbacks(self, latest_block):
        """ Run the callbacks if there is at least one new block.

        The callbacks are executed only if there is a new block, otherwise the
        filters may try to poll for a nonexistent block number and the
        Ethereum client can return a JSON-RPC error.
        """
        assert self.known_block_number is not None, 'known_block_number not set'

        latest_block_number = latest_block['number']
        missed_blocks = latest_block_number - self.known_block_number

        if missed_blocks < 0:
            log.critical(
                'Block number decreased',
                chain_id=self.chain_id,
                known_block_number=self.known_block_number,
                old_block_number=latest_block['number'],
                old_gas_limit=latest_block['gasLimit'],
                old_block_hash=to_hex(latest_block['hash']),
            )
        elif missed_blocks > 0:
            log_details = dict(
                known_block_number=self.known_block_number,
                latest_block_number=latest_block_number,
                latest_block_hash=to_hex(latest_block['hash']),
                latest_block_gas_limit=latest_block['gasLimit'],
            )
            if missed_blocks > 1:
                log_details['num_missed_blocks'] = missed_blocks - 1

            log.debug(
                'Received new block',
                **log_details,
            )

            remove = list()
            for callback in self.callbacks:
                result = callback(latest_block)
                if result is REMOVE_CALLBACK:
                    remove.append(callback)

            for callback in remove:
                self.callbacks.remove(callback)

            self.known_block_number = latest_block_number

    def stop(self):
        self._stop_event.set(True)
        log.debug('Alarm task stopped', node=pex(self.chain.node_address))
        return self.join()
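
The asserts in first_run and loop_until_stop imply a specific startup order:
register callbacks, call first_run() to synchronize, then start() the
greenlet. A hedged usage sketch (chain and the block numbers are
placeholders; REMOVE_CALLBACK is the module's own sentinel):

def on_new_block(latest_block):
    print('saw block', latest_block['number'])
    if latest_block['number'] >= 1000:  # hypothetical stop condition
        return REMOVE_CALLBACK          # ask AlarmTask to drop this callback

alarm = AlarmTask(chain)                # chain is a placeholder client
alarm.register_callback(on_new_block)
alarm.first_run(known_block_number=0)   # blocking catch-up before the loop
alarm.start()
# ... later
alarm.stop()                            # sets the AsyncResult and joins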
Example #57
from interfaces import dispatcher, AppBlueprint
from core.events import WalkoffEvent
from flask import Blueprint, jsonify, Response
from flask_jwt_extended import jwt_required
from gevent import sleep
from gevent.event import AsyncResult, Event
import json
from datetime import datetime
from server.security import jwt_required_in_query
from core.helpers import create_sse_event

blueprint = AppBlueprint(blueprint=Blueprint('HelloWorldPage__', __name__))

hello_world_action_count = {}

action_event_json = AsyncResult()
action_signal = Event()

action_event_id_counter = 0


@dispatcher.on_app_actions('HelloWorld',
                           events=WalkoffEvent.ActionStarted,
                           weak=False)
def handle_action_start(data):
    global hello_world_action_count
    action_name = data['action_name']

    if action_name not in hello_world_action_count:
        hello_world_action_count[action_name] = 1
    else:
Example #58
    def test_competing__routing_call(self):
        svc = self._make_service()
        p = IonProcessThread(name=sentinel.name, listeners=[], service=svc)
        p.start()
        p.get_ready_event().wait(timeout=5)

        sem = Semaphore()

        # define a callable method that tries to grab a shared semaphore
        def thecall(ar=None):

            semres = sem.acquire(blocking=False)
            if not semres:
                raise StandardError(
                    "Could not get semaphore, routing_call/control flow is broken!"
                )

            # make this take a sec
            time.sleep(1)

            # make sure we release
            sem.release()

            # set the ar
            ar.set(True)

        # schedule some calls (in whatever order)
        ar1 = AsyncResult()
        ar2 = AsyncResult()
        ar3 = AsyncResult()

        p._routing_call(thecall, None, ar=ar3)
        p._routing_call(thecall, None, ar=ar1)
        p._routing_call(thecall, None, ar=ar2)

        # wait on all the ARs to be set
        ar1.get(timeout=5)
        ar2.get(timeout=5)
        ar3.get(timeout=5)

        # just getting here without throwing an exception is the true test!

        p._notify_stop()
        p.stop()
Example #59
def handle_websocket():

    gevent.sleep(1)

    def _incoming(wsock, timeout):
        # Inner functions run in separately spawned greenlets; outer
        # variables must be passed in as arguments (or be global) to be
        # visible here.

        try:
            # Only the clip text and hash are needed here; receiving whole
            # rows costs too much bandwidth. TODO: should this check the
            # server to avoid race conditions?
            client_previous_clip = get_latest_row_and_clips()['latest_row'] or {}

            for second in range(timeout):
                # Bound the loop: greenlets are cheap, but if the client
                # disconnects an unbounded greenlet would otherwise run (and
                # hold its memory) forever.

                received = wsock.receive()

                if not received:
                    raise WebSocketError

                delivered = json.loads(received)

                if delivered['message'] == "Alive?":

                    send_im_still_alive.set(1)

                if delivered["message"] == "Salt?":

                    send_usr_crypt_salt.set(checked_login["found"]["salt"])

                if delivered['message'] == "Upload?":

                    container_name = delivered['data']

                    file_path = os.path.join(UPLOAD_DIR, container_name)
                    file_exists = os.path.isfile(file_path)
                    send_upload_command.set({container_name: file_exists})
                    print "\nFILE EXISTS:%s\n" % file_exists

                elif delivered['message'] == "Update?":

                    client_latest_clip = delivered['data']

                    if client_latest_clip.get('clip_hash_secure') != \
                            client_previous_clip.get('clip_hash_secure'):  # else just wait

                        client_latest_clip['timestamp_server'] = time.time()
                        new_clip_id = clips.insert_one(client_latest_clip)

                        print "INSERTED:%s " % new_clip_id

                        client_previous_clip = client_latest_clip  #reset prev

                    else:
                        print "hashes match, request rejected"
                        print "OLD: \n%s - %s\nNEW:%s - %s" % (
                            client_previous_clip.get('clip_hash_secure'),
                            client_previous_clip.get("clip_file_name"),
                            client_latest_clip.get('clip_hash_secure'),
                            client_latest_clip.get('clip_file_name'))

                print "incoming wait..."
                sleep(0.1)
        except ZeroDivisionError:
            #print "incoming error...%s"%str(sys.exc_info()[0]) #http://goo.gl/cmtlsL
            pass
        finally:
            wsock.close()  #OR IT WILL LEAVE THE CLIENT HANGING!

    def _outgoing(wsock, timeout):
        try:
            server_previous_row = {'_id': None}
            for second in range(timeout):
                if send_im_still_alive.get():
                    # send a blank message to tell the client the incoming
                    # greenlet is still alive
                    wsock.send(json.dumps(dict(message="Alive!")))
                    send_im_still_alive.set(0)
                if send_usr_crypt_salt.get():
                    wsock.send(
                        json.dumps(
                            dict(message="Salt!",
                                 data=send_usr_crypt_salt.get())))
                    send_usr_crypt_salt.set(None)
                elif send_upload_command.get():
                    wsock.send(
                        json.dumps(
                            dict(
                                message="Upload!",
                                data=send_upload_command.get(),
                            )))
                    send_upload_command.set({})
                else:
                    server_latest_row_and_clips = get_latest_row_and_clips()
                    server_latest_row = server_latest_row_and_clips['latest_row']
                    server_latest_clips = server_latest_row_and_clips['latest_clips']
                    if server_latest_row:
                        if server_latest_row['_id'] != server_previous_row['_id']:
                            wsock.send(json.dumps(dict(
                                message="Update!",
                                data=server_latest_clips,
                            )))
                            server_previous_row = server_latest_row  # reset prev

                #print "outgoing wait...")
                sleep(0.1)
        except ZeroDivisionError:
            #print "outgoing error...%s"%str(sys.exc_info()[0]) #http://goo.gl/cmtlsL
            pass
        finally:
            wsock.close()

    try:

        wsock = request.environ.get('wsgi.websocket')

        if not wsock:
            abort(400, 'Expected WebSocket request.')

        checked_login = login(request.query.email, request.query.password)

        if not checked_login['success']:

            wsock.send(
                json.dumps(
                    dict(
                        message="Error!",
                        data=checked_login["reason"],
                    )))

        else:
            timeout = 40000

            # Pass shared objects to the greenlets explicitly as arguments;
            # they are not implicitly visible inside the spawned functions.
            args = [wsock, timeout]

            send_im_still_alive = AsyncResult()
            send_usr_crypt_salt = AsyncResult()
            send_upload_command = AsyncResult()
            send_im_still_alive.set(0)
            send_usr_crypt_salt.set(None)
            send_upload_command.set({})

            greenlets = [
                gevent.spawn(_incoming, *args),
                gevent.spawn(_outgoing, *args),
            ]
            gevent.joinall(greenlets)

    except WebSocketError:
        abort(500, 'Websocket failure.')
    finally:
        if wsock:
            wsock.close()
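
Distilled to its core, the handler above uses AsyncResult objects as one-slot
mailboxes between the _incoming and _outgoing greenlets. A minimal,
self-contained sketch of that pattern, independent of the websocket code:

import gevent
from gevent.event import AsyncResult

mailbox = AsyncResult()

def producer():
    gevent.sleep(0.1)
    mailbox.set({'message': 'Alive!'})  # wakes any greenlet blocked in get()

def consumer():
    print(mailbox.get(timeout=5))       # blocks until the producer sets a value

gevent.joinall([gevent.spawn(producer), gevent.spawn(consumer)])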
Example #60
class _Socket(_original_Socket):
    """Green version of :class:`zmq.Socket`

    The following methods are overridden:

        * send
        * recv

    To ensure that the ``zmq.NOBLOCK`` flag is set and that sending or receiving
    is deferred to the hub if a ``zmq.EAGAIN`` (retry) error is raised.
    
    The `__state_changed` method is triggered when the zmq.FD for the socket is
    marked as readable and triggers the necessary read and write events (which
    are waited for in the recv and send methods).

    Some double underscore prefixes are used to minimize pollution of
    :class:`zmq.Socket`'s namespace.
    """
    __in_send_multipart = False
    __in_recv_multipart = False
    __writable = None
    __readable = None
    _state_event = None
    _gevent_bug_timeout = 11.6  # timeout for not trusting gevent
    _debug_gevent = False  # turn on if you think gevent is missing events
    _poller_class = _Poller

    def __init__(self, context, socket_type):
        _original_Socket.__init__(self, context, socket_type)
        self.__in_send_multipart = False
        self.__in_recv_multipart = False
        self.__setup_events()

    def __del__(self):
        self.close()

    def close(self, linger=None):
        super(_Socket, self).close(linger)
        self.__cleanup_events()

    def __cleanup_events(self):
        # close the _state_event event, keeps the number of active file descriptors down
        if getattr(self, '_state_event', None):
            _stop(self._state_event)
            self._state_event = None
        # if the socket has entered a close state resume any waiting greenlets
        self.__writable.set()
        self.__readable.set()

    def __setup_events(self):
        self.__readable = AsyncResult()
        self.__writable = AsyncResult()
        self.__readable.set()
        self.__writable.set()

        try:
            # read-state watcher on the socket's underlying file descriptor
            self._state_event = get_hub().loop.io(self.getsockopt(zmq.FD), 1)
            self._state_event.start(self.__state_changed)
        except AttributeError:
            # for gevent<1.0 compatibility
            from gevent.core import read_event
            self._state_event = read_event(self.getsockopt(zmq.FD),
                                           self.__state_changed,
                                           persist=True)

    def __state_changed(self, event=None, _evtype=None):
        if self.closed:
            self.__cleanup_events()
            return
        try:
            # avoid triggering __state_changed from inside __state_changed
            events = super(_Socket, self).getsockopt(zmq.EVENTS)
        except zmq.ZMQError as exc:
            self.__writable.set_exception(exc)
            self.__readable.set_exception(exc)
        else:
            if events & zmq.POLLOUT:
                self.__writable.set()
            if events & zmq.POLLIN:
                self.__readable.set()

    def _wait_write(self):
        assert self.__writable.ready(), "Only one greenlet can be waiting on this event"
        self.__writable = AsyncResult()
        # timeout is because libzmq cannot be trusted to properly signal a new
        # send event: this bounds the effective poll interval to
        # _gevent_bug_timeout seconds
        tic = time.time()
        dt = self._gevent_bug_timeout
        if dt:
            timeout = gevent.Timeout(seconds=dt)
        else:
            timeout = None
        try:
            if timeout:
                timeout.start()
            self.__writable.get(block=True)
        except gevent.Timeout as t:
            if t is not timeout:
                raise
            toc = time.time()
            # gevent bug: get can raise timeout even on clean return
            # don't display zmq bug warning for gevent bug (this is getting ridiculous)
            if self._debug_gevent and timeout and toc-tic > dt and \
                    self.getsockopt(zmq.EVENTS) & zmq.POLLOUT:
                print(
                    "BUG: gevent may have missed a libzmq send event on %i!" %
                    self.FD,
                    file=sys.stderr)
        finally:
            if timeout:
                timeout.cancel()
            self.__writable.set()

    def _wait_read(self):
        assert self.__readable.ready(), "Only one greenlet can be waiting on this event"
        self.__readable = AsyncResult()
        # timeout is because libzmq cannot always be trusted to play nice with
        # libevent. I can only confirm that this actually happens for send,
        # but lets be symmetrical with our dirty hacks.
        # this bounds the effective poll interval to _gevent_bug_timeout seconds
        tic = time.time()
        dt = self._gevent_bug_timeout
        if dt:
            timeout = gevent.Timeout(seconds=dt)
        else:
            timeout = None
        try:
            if timeout:
                timeout.start()
            self.__readable.get(block=True)
        except gevent.Timeout as t:
            if t is not timeout:
                raise
            toc = time.time()
            # gevent bug: get can raise timeout even on clean return
            # don't display zmq bug warning for gevent bug (this is getting ridiculous)
            if self._debug_gevent and timeout and toc-tic > dt and \
                    self.getsockopt(zmq.EVENTS) & zmq.POLLIN:
                print(
                    "BUG: gevent may have missed a libzmq recv event on %i!" %
                    self.FD,
                    file=sys.stderr)
        finally:
            if timeout:
                timeout.cancel()
            self.__readable.set()

    def send(self, data, flags=0, copy=True, track=False):
        """send, which will only block current greenlet
        
        state_changed always fires exactly once (success or fail) at the
        end of this method.
        """

        # if we're given the NOBLOCK flag act as normal and let the EAGAIN get raised
        if flags & zmq.NOBLOCK:
            try:
                msg = super(_Socket, self).send(data, flags, copy, track)
            finally:
                if not self.__in_send_multipart:
                    self.__state_changed()
            return msg
        # ensure the zmq.NOBLOCK flag is part of flags
        flags |= zmq.NOBLOCK
        while True:  # Attempt to complete this operation indefinitely, blocking the current greenlet
            try:
                # attempt the actual call
                msg = super(_Socket, self).send(data, flags, copy, track)
            except zmq.ZMQError as e:
                # if the raised ZMQError is not EAGAIN, reraise
                if e.errno != zmq.EAGAIN:
                    if not self.__in_send_multipart:
                        self.__state_changed()
                    raise
            else:
                if not self.__in_send_multipart:
                    self.__state_changed()
                return msg
            # defer to the event loop until we're notified the socket is writable
            self._wait_write()

    def recv(self, flags=0, copy=True, track=False):
        """recv, which will only block current greenlet
        
        state_changed always fires exactly once (success or fail) at the
        end of this method.
        """
        if flags & zmq.NOBLOCK:
            try:
                msg = super(_Socket, self).recv(flags, copy, track)
            finally:
                if not self.__in_recv_multipart:
                    self.__state_changed()
            return msg

        flags |= zmq.NOBLOCK
        while True:
            try:
                msg = super(_Socket, self).recv(flags, copy, track)
            except zmq.ZMQError as e:
                if e.errno != zmq.EAGAIN:
                    if not self.__in_recv_multipart:
                        self.__state_changed()
                    raise
            else:
                if not self.__in_recv_multipart:
                    self.__state_changed()
                return msg
            self._wait_read()

    def send_multipart(self, *args, **kwargs):
        """wrap send_multipart to prevent state_changed on each partial send"""
        self.__in_send_multipart = True
        try:
            msg = super(_Socket, self).send_multipart(*args, **kwargs)
        finally:
            self.__in_send_multipart = False
            self.__state_changed()
        return msg

    def recv_multipart(self, *args, **kwargs):
        """wrap recv_multipart to prevent state_changed on each partial recv"""
        self.__in_recv_multipart = True
        try:
            msg = super(_Socket, self).recv_multipart(*args, **kwargs)
        finally:
            self.__in_recv_multipart = False
            self.__state_changed()
        return msg

    def get(self, opt):
        """trigger state_changed on getsockopt(EVENTS)"""
        if opt in TIMEOS:
            warnings.warn("TIMEO socket options have no effect in zmq.green",
                          UserWarning)
        optval = super(_Socket, self).get(opt)
        if opt == zmq.EVENTS:
            self.__state_changed()
        return optval

    def set(self, opt, val):
        """set socket option"""
        if opt in TIMEOS:
            warnings.warn("TIMEO socket options have no effect in zmq.green",
                          UserWarning)
        return super(_Socket, self).set(opt, val)
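
For context, a hedged sketch of how this green Socket class is normally
reached from application code: importing zmq.green instead of zmq yields
sockets whose blocking send/recv cooperate with gevent's hub (the endpoint
below is a placeholder):

import gevent
import zmq.green as zmq  # green Context/Socket, drop-in for pyzmq

ctx = zmq.Context()

def server():
    rep = ctx.socket(zmq.REP)
    rep.bind('tcp://127.0.0.1:5555')  # placeholder endpoint
    rep.send(rep.recv())              # echo; blocks only this greenlet

req = ctx.socket(zmq.REQ)
req.connect('tcp://127.0.0.1:5555')
gevent.spawn(server)
req.send(b'ping')  # defers to the hub on EAGAIN instead of blocking the process
print(req.recv())  # other greenlets keep running while this one waits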