Example #1
    def _run_impl(self):
        sync = Greenlet(retry_with_logging,
                        self.sync,
                        account_id=self.account_id,
                        logger=self.log)
        sync.start()

        while not sync.ready():
            if self.shutdown.is_set():
                # Ctrl-c, basically!
                self.log.info('Stopping sync',
                              email=self.email_address,
                              account_id=self.account_id)
                # Make sure the parent can't start/stop any folder monitors
                # first
                sync.kill(block=True)

                return self._cleanup()
            else:
                sleep(self.heartbeat)

        if sync.successful():
            return self._cleanup()

        # We just want the name of the exception so don't bother with
        # sys.exc_info()
        self.log.error('mail sync should run forever',
                       provider=self.provider_name,
                       account_id=self.account_id,
                       exception=type(sync.exception).__name__)
        raise sync.exception
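Examples #1-#6 all follow the same supervision pattern: run the real work in a child Greenlet, poll it with ready() while watching for a shutdown signal, and kill(block=True) before cleaning up so the child has fully exited. A minimal, self-contained sketch of that pattern (the worker function and shutdown event are placeholders, not code from the projects above):

import gevent
from gevent import Greenlet
from gevent.event import Event

shutdown = Event()

def worker():
    # Stand-in for the long-running sync loop.
    while True:
        gevent.sleep(1)

def supervise(heartbeat=1):
    child = Greenlet(worker)
    child.start()
    while not child.ready():
        if shutdown.is_set():
            # Block until the child has actually exited before returning.
            child.kill(block=True)
            return
        gevent.sleep(heartbeat)
    if not child.successful():
        raise child.exception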
Example #2
File: base.py Project: wmv/inbox
    def _run_impl(self):
        sync = Greenlet(retry_and_report_killed,
                        self.sync,
                        account_id=self.account_id,
                        logger=self.log,
                        fail_classes=self.retry_fail_classes)
        sync.start()
        while not sync.ready():
            try:
                cmd = self.inbox.get_nowait()
                if not self.process_command(cmd):
                    # ctrl-c, basically!
                    self.log.info("Stopping sync", email=self.email_address)
                    # make sure the parent can't start/stop any folder monitors
                    # first
                    sync.kill(block=True)
                    self.folder_monitors.kill()
                    return
            except Empty:
                sleep(self.heartbeat)

        if sync.successful():
            self.folder_monitors.kill()
            return

        # We just want the name of the exception so don't bother with
        # sys.exc_info()
        self.log.error('mail sync should run forever',
                       provider=self.provider_name,
                       account_id=self.account_id,
                       exception=type(sync.exception).__name__)
        raise sync.exception
Example #3
    def _run_impl(self):
        sync = Greenlet(retry_and_report_killed, self.sync,
                        account_id=self.account_id, logger=self.log,
                        fail_classes=self.retry_fail_classes)
        sync.start()
        while not sync.ready():
            try:
                cmd = self.inbox.get_nowait()
                if not self.process_command(cmd):
                    # ctrl-c, basically!
                    self.log.info("Stopping sync", email=self.email_address)
                    # make sure the parent can't start/stop any folder monitors
                    # first
                    sync.kill(block=True)
                    self.folder_monitors.kill()
                    return
            except Empty:
                sleep(self.heartbeat)

        if sync.successful():
            self.folder_monitors.kill()
            return

        # We just want the name of the exception so don't bother with
        # sys.exc_info()
        self.log.error('mail sync should run forever',
                       provider=self.provider_name,
                       account_id=self.account_id,
                       exception=type(sync.exception).__name__)
        raise sync.exception
Example #4
    def _run_impl(self):
        sync = Greenlet(retry_and_report_killed,
                        self.sync,
                        account_id=self.account_id,
                        logger=self.log)
        sync.link_value(lambda _: report_stopped(account_id=self.account_id))
        sync.start()
        while not sync.ready():
            try:
                cmd = self.inbox.get_nowait()
                if not self.process_command(cmd):
                    # ctrl-c, basically!
                    self.log.info("Stopping sync", email=self.email_address)
                    # make sure the parent can't start/stop any folder monitors
                    # first
                    sync.kill(block=True)
                    self.folder_monitors.kill()
                    return
            except Empty:
                sleep(self.heartbeat)

        if sync.successful():
            self.folder_monitors.kill()
            return

        self.log.error("mail sync should run forever",
                       provider=self.provider_name,
                       account_id=self.account_id)
        raise sync.exception
Example #5
    def _run_impl(self):
        sync = Greenlet(retry_with_logging, self.sync,
                        account_id=self.account_id, logger=self.log)
        sync.start()

        while not sync.ready():
            if self.shutdown.is_set():
                # Ctrl-c, basically!
                self.log.info('Stopping sync', email=self.email_address,
                              account_id=self.account_id)
                # Make sure the parent can't start/stop any folder monitors
                # first
                sync.kill(block=True)

                return self._cleanup()
            else:
                sleep(self.heartbeat)

        if sync.successful():
            return self._cleanup()

        # We just want the name of the exception so don't bother with
        # sys.exc_info()
        self.log.error('mail sync should run forever',
                       provider=self.provider_name,
                       account_id=self.account_id,
                       exception=type(sync.exception).__name__)
        raise sync.exception
Example #6
    def _run_impl(self):
        sync = Greenlet(retry_and_report_killed, self.sync,
                        account_id=self.account_id, logger=self.log)
        sync.start()
        while not sync.ready():
            try:
                cmd = self.inbox.get_nowait()
                if not self.process_command(cmd):
                    # ctrl-c, basically!
                    self.log.info("Stopping sync", email=self.email_address)
                    # make sure the parent can't start/stop any folder monitors
                    # first
                    sync.kill(block=True)
                    self.folder_monitors.kill()
                    return
            except Empty:
                sleep(self.heartbeat)

        if sync.successful():
            self.folder_monitors.kill()
            return

        self.log.error("mail sync should run forever",
                       provider=self.provider_name,
                       account_id=self.account_id)
        raise sync.exception
Example #7
class StreamFactory(object):
    def __init__(self, auth, redis, chan, filters):
        self.auth = auth
        self.redis = redis
        self.chan = chan
        self.filters = filters
        self.g = None

    def start(self):
        if self.g is not None:
            raise "A greenlet is already there"
        self.g = Greenlet(self._start)
        self.g.start()
        return self.g

    def kill(self):
        if self.g is not None:
            self.g.kill()
            self.g = None

    def restart(self):
        self.kill()
        self.start()

    def _start(self):
        for tweet in TwitterStream(auth=self.auth).statuses.filter(**self.filters):
            # XXX: have to json-dump the tweet ourselves; redis does not know how to serialize it.
            self.redis.publish(self.chan, json.dumps(tweet))
Example #8
 def kill(self, *args, **kwargs):
     """ Override our default kill method and kill our child greenlets as
     well """
     self.logger.info(
         "Auxilury network monitor for {} shutting down...".format(
             self.config['name']))
     Greenlet.kill(self, *args, **kwargs)
Example #9
 def kill(self, *args, **kwargs):
     self.share_reporter.kill(*args, **kwargs)
     self._report_shares(flush=True)
     self.logger.info("Flushing the reporter task queue, {} items blocking "
                      "exit".format(self.queue.qsize()))
     while not self.queue.empty():
         self._queue_proc()
     self.logger.info("Shutting down CeleryReporter..")
     Greenlet.kill(self, *args, **kwargs)
Example #10
 def kill(self, *args, **kwargs):
     self.share_reporter.kill(*args, **kwargs)
     self._report_shares(flush=True)
     self.logger.info("Flushing the reporter task queue, {} items blocking "
                      "exit".format(self.queue.qsize()))
     while not self.queue.empty():
         self._queue_proc()
     self.logger.info("Shutting down CeleryReporter..")
     Greenlet.kill(self, *args, **kwargs)
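Examples #8-#10 (and #13, #14, #22, #26, #30 below) share one idiom: subclass Greenlet, perform the instance's own teardown inside kill(), then delegate to Greenlet.kill(self, *args, **kwargs). A minimal sketch of that idiom, with a hypothetical work queue standing in for the reporters' state:

import logging
from gevent import Greenlet
from gevent.queue import Queue

class QueueWorker(Greenlet):
    def __init__(self):
        Greenlet.__init__(self)
        self.queue = Queue()
        self.logger = logging.getLogger(__name__)

    def _run(self):
        while True:
            self.logger.info("processed %r", self.queue.get())

    def kill(self, *args, **kwargs):
        # Drain anything still pending before the greenlet itself dies.
        while not self.queue.empty():
            self.logger.info("flushed %r", self.queue.get_nowait())
        Greenlet.kill(self, *args, **kwargs)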
Example #11
 def kill(self, exception=GreenletExit, block=True, timeout=None):
     try:
         self._stop()
         Greenlet.kill(self, exception, block, timeout)
     except OSError as e:
         logging.exception('Failed to stop interference, errno: %d', e.errno)
         raise
     except Exception as e:
         logging.exception('Failed to stop interference: %s', str(e))
         raise
Example #12
class BaseTestCase(TestCase):
    def start_server(self, host, port, linked_servers=[]):
        result = ObjectoPlex((host, port),
                             middlewares=[
                                 PingPongMiddleware(),
                                 LegacySubscriptionMiddleware(),
                                 StatisticsMiddleware(),
                                 ChecksumMiddleware(),
                                 RoutingMiddleware(),
                             ],
                             linked_servers=linked_servers)
        gevent.signal(signal.SIGTERM, result.stop)
        gevent.signal(signal.SIGINT, result.stop)
        Greenlet.spawn(result.serve_forever)
        sleep(0.1)

        return result

    def start_client_registry(self, host, port):
        self.service = ClientRegistry(host, port)
        self.service_greenlet = Greenlet(self.service.start)
        gevent.signal(signal.SIGTERM, self.service_greenlet.kill)
        gevent.signal(signal.SIGINT, self.service_greenlet.kill)
        self.service_greenlet.start()
        logger.info('Started client registry, connecting to %s:%s', host, port)

    def stop_client_registry(self):
        self.service.cleanup()
        self.service_greenlet.kill()
        logger.info('Stopped client registry, connecting to %s:%s', _host,
                    _port)

    def assertCorrectClientListReply(self, obj, payload):
        self.assertIn('clients',
                      payload,
                      msg=u"attribute 'clients' not in payload")
        d = None
        for dct in payload['clients']:
            self.assertIn(
                'routing-id',
                dct,
                msg=u"attribute 'routing-id' not in list item in clients list")
            if dct['routing-id'] == self.routing_id:
                d = dct
                break

        self.assertIsNotNone(
            d, msg=u'Client not present in returned client listing')
        self.assertEquals(obj.metadata['client'],
                          d['client'],
                          msg=u"attribute 'client' not equal")
        self.assertEquals(obj.metadata['user'],
                          d['user'],
                          msg=u"attribute 'user' not equal")
Example #13
 def kill(self, *args, **kwargs):
     """ Override our default kill method and kill our child greenlets as
     well """
     self.logger.info("Network monitoring jobmanager shutting down...")
     self._node_monitor.kill(*args, **kwargs)
     if self._height_poller:
         self.logger.info("Killing height poller...")
         self._height_poller.kill(*args, **kwargs)
     # stop all greenlets
     for gl in self.auxmons.itervalues():
         gl.kill(timeout=kwargs.get('timeout'), block=False)
     Greenlet.kill(self, *args, **kwargs)
Example #14
 def kill(self, *args, **kwargs):
     """ Override our default kill method and kill our child greenlets as
     well """
     self.logger.info("Network monitoring jobmanager shutting down...")
     self._node_monitor.kill(*args, **kwargs)
     if self._height_poller:
         self.logger.info("Killing height poller...")
         self._height_poller.kill(*args, **kwargs)
     # stop all greenlets
     for gl in self.auxmons.itervalues():
         gl.kill(timeout=kwargs.get('timeout'), block=False)
     Greenlet.kill(self, *args, **kwargs)
Example #15
def check_stream(client, namespace, stream, start, end, limit, timeout,
                 latency):
  def run():
    for event in client.get(stream, start, end, limit=limit, timeout=latency):
      # Yeah, I'm useless.
      pass

  read_greenlet = Greenlet(run)
  read_greenlet.start()
  read_greenlet.join(timeout)
  if not read_greenlet.ready():
    read_greenlet.kill()
    success = False
  else:
    success = read_greenlet.successful()
  return success
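A hypothetical call to check_stream above (the client object, stream name, and time bounds are assumptions, not from the original project):

import time

ok = check_stream(client, namespace='default', stream='events',
                  start=0, end=time.time(), limit=1000,
                  timeout=5.0, latency=2.0)
if not ok:
    print('stream read did not complete within 5 seconds')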
Example #16
class Periodic(object):
    def __init__(self, interval, f, *args, **kwargs):
        self.interval = interval
        self.f = f
        self.args = args
        self.kwargs = kwargs
        self._greenlet = None

    def _run(self):
        while True:
            spawn_raw(self.f, *self.args, **self.kwargs)
            sleep(self.interval)

    def _discard_greenlet(self, val):
        self._greenlet = None

    @property
    def started(self):
        return bool(self._greenlet)

    def start(self, right_away=True):
        if self._greenlet:
            raise RuntimeError("Periodic already started.")

        self._greenlet = Greenlet(self._run)
        self._greenlet.link(self._discard_greenlet)

        if right_away:
            self._greenlet.start()
        else:
            self._greenlet.start_later(self.interval)

    def stop(self, block=True, timeout=None):
        if not self._greenlet:
            raise RuntimeError("Periodic is not started")

        self._greenlet.kill(block=block, timeout=timeout)
        self._greenlet = None

    def __repr__(self):
        return "<Periodic[%.2f seconds, %s] %r(*%r, **%r)>" % (
            self.interval,
            "running" if self.started else "stopped",
            self.f,
            self.args,
            self.kwargs,
        )
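A brief usage sketch for the Periodic helper above (the task function is hypothetical):

def report_heartbeat():
    print("still alive")

p = Periodic(5.0, report_heartbeat)
p.start()              # runs the task now, then every 5 seconds
# ... later ...
p.stop(block=True)     # kills the underlying greenlet and waits for it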
Example #17
class Periodic(object):
    def __init__(self, interval, f, *args, **kwargs):
        self.interval = interval
        self.f = f
        self.args = args
        self.kwargs = kwargs
        self._greenlet = None

    def _run(self):
        while True:
            spawn_raw(self.f, *self.args, **self.kwargs)
            sleep(self.interval)

    def _discard_greenlet(self, val):
        self._greenlet = None

    @property
    def started(self):
        return bool(self._greenlet)

    def start(self, right_away=True):
        if self._greenlet:
            raise RuntimeError("Periodic already started.")

        self._greenlet = Greenlet(self._run)
        self._greenlet.link(self._discard_greenlet)

        if right_away:
            self._greenlet.start()
        else:
            self._greenlet.start_later(self.interval)

    def stop(self, block=True, timeout=None):
        if not self._greenlet:
            raise RuntimeError("Periodic is not started")

        self._greenlet.kill(block=block, timeout=timeout)
        self._greenlet = None

    def __repr__(self):
        return "<Periodic[%.2f seconds, %s] %r(*%r, **%r)>" % (
            self.interval, 'running' if self.started else 'stopped', self.f,
            self.args, self.kwargs)
Example #18
class BaseTestCase(TestCase):
    def start_server(self, host, port, linked_servers=[]):
        result = ObjectoPlex((host, port),
                             middlewares=[
                                 PingPongMiddleware(),
                                 LegacySubscriptionMiddleware(),
                                 StatisticsMiddleware(),
                                 ChecksumMiddleware(),
                                 RoutingMiddleware(),
                                 ],
                             linked_servers=linked_servers)
        gevent.signal(signal.SIGTERM, result.stop)
        gevent.signal(signal.SIGINT, result.stop)
        Greenlet.spawn(result.serve_forever)
        sleep(0.1)

        return result

    def start_client_registry(self, host, port):
        self.service = ClientRegistry(host, port)
        self.service_greenlet = Greenlet(self.service.start)
        gevent.signal(signal.SIGTERM, self.service_greenlet.kill)
        gevent.signal(signal.SIGINT, self.service_greenlet.kill)
        self.service_greenlet.start()
        logger.info('Started client registry, connecting to %s:%s', host, port)

    def stop_client_registry(self):
        self.service.cleanup()
        self.service_greenlet.kill()
        logger.info('Stopped client registry, connecting to %s:%s', _host, _port)

    def assertCorrectClientListReply(self, obj, payload):
        self.assertIn('clients', payload, msg=u"attribute 'clients' not in payload")
        d = None
        for dct in payload['clients']:
            self.assertIn('routing-id', dct, msg=u"attribute 'routing-id' not in list item in clients list")
            if dct['routing-id'] == self.routing_id:
                d = dct
                break

        self.assertIsNotNone(d, msg=u'Client not present in returned client listing')
        self.assertEquals(obj.metadata['client'], d['client'], msg=u"attribute 'client' not equal")
        self.assertEquals(obj.metadata['user'], d['user'], msg=u"attribute 'user' not equal")
Example #19
def check_stream(client, namespace, stream, start, end, limit, timeout,
                 latency):
    def run():
        for event in client.get(stream,
                                start,
                                end,
                                limit=limit,
                                timeout=latency):
            # Yeah, I'm useless.
            pass

    read_greenlet = Greenlet(run)
    read_greenlet.start()
    read_greenlet.join(timeout)
    if not read_greenlet.ready():
        read_greenlet.kill()
        success = False
    else:
        success = read_greenlet.successful()
    return success
Example #20
 def _run_impl(self):
     sync = Greenlet(retry_and_report_killed, self.sync,
                     account_id=self.account_id, logger=self.log)
     sync.link_value(lambda _: report_stopped(account_id=self.account_id))
     sync.start()
     while not sync.ready():
         try:
             cmd = self.inbox.get_nowait()
             if not self.process_command(cmd):
                 # ctrl-c, basically!
                 self.log.info("Stopping sync for {0}".format(
                     self.email_address))
                 # make sure the parent can't start/stop any folder monitors
                 # first
                 sync.kill(block=True)
                 self.folder_monitors.kill()
                 return
         except Empty:
             sleep(self.heartbeat)
     assert not sync.successful(), \
         "mail sync for {} account {} should run forever!"\
         .format(self.provider, self.account_id)
     raise sync.exception
Example #21
class TransceiverESME(ESME):
    def __init__(self):
        super(TransceiverESME, self).__init__()
        self.pending_response = {} # sequence_number -> event
        self._greenlet = Greenlet(self._receive)

    def _receive(self):
        while True:
            pdu = self._recv()
            if pdu is not None:
                self.logger.debug('received %s', pdu)
                self._handleInPDU(pdu)
            else:
                self.disconnect()

    def _handleInPDU(self, pdu):
        if pdu.sequence_number in self.pending_response:
            self.logger.debug('received response for %d', pdu.sequence_number)
            event = self.pending_response.pop(pdu.sequence_number)
            event.set(pdu)
        else:
            handler = getattr(self, '_handle_%s' % pdu.command_id, None)
            if handler is not None:
                handler(pdu)
            else:
                self.logger.critical('No handler for pdu %s', pdu.command_id)

    def _handle_enquire_link(self, pdu, **kwargs):
        pdu = EnquireLinkResp(pdu.sequence_number, **dict(self.defaults, **kwargs))
        self.logger.debug('enquire_link_resp %s', pdu)
        self.conn.send(pdu.get_bin())

    def _handle_unbind(self, pdu, **kwargs):
        pdu = UnbindResp(pdu.sequence_number, **dict(self.defaults, **kwargs))
        self.logger.debug('unbind_resp %s', pdu)
        self.conn.send(pdu.get_bin())
        self.state = 'OPEN'
        self.disconnect()

    def _handle_deliver_sm(self, pdu, **kwargs):
        self.logger.debug('deliver_sm %s', pdu)
        try:
            self.on_receive_sm(pdu)
        except:
            self.logger.exception('problem during sm processing')
            self.logger.warning('not confirming sm processing')
        else:
            pdu = DeliverSMResp(pdu.sequence_number, **dict(self.defaults, **kwargs))
            self.logger.debug('deliver_sm_resp %s', pdu)

    def on_receive_sm(self, pdu):
        short_message = pdu['body']['mandatory_parameters']['short_message']
        self.logger.info('short message: %s', short_message)

    def _handle_data_sm(self, pdu):
        self.logger.critical('data_sm %s', pdu.obj)
        raise NotImplementedError()

    def _handle_alert_notification(self, pdu):
        pass

    def asyncRes(self, sequence_number):
        defered = AsyncResult()
        self.pending_response[sequence_number] = defered
        return defered.get()

    def connect(self):
        if self.state in ['CLOSED']:
            super(TransceiverESME, self).connect()
            self._greenlet.start()

    def disconnect(self):
        if self.state in ['BOUND_TX', 'BOUND_RX', 'BOUND_TRX']:
            super(TransceiverESME, self).disconnect()
        if self.state in ['OPEN']:
            super(TransceiverESME, self).disconnect()
            self._greenlet.kill()

    def _unbind(self):
        if self.state in ['BOUND_TX', 'BOUND_RX', 'BOUND_TRX']:
            sequence_number = self.sequence_number
            self.sequence_number +=1
            pdu = Unbind(sequence_number)
            self.conn.send(pdu.get_bin())
            if self._is_ok(self.asyncRes(sequence_number), 'unbind_resp'):
                self.state = 'OPEN'

    def submit_sm(self, **kwargs):
        if self.state in ['BOUND_TX', 'BOUND_TRX']:
            sequence_number = self.sequence_number
            self.sequence_number +=1
            pdu = SubmitSM(sequence_number, **dict(self.defaults, **kwargs))
            self.logger.debug('submit_sm %s', pdu)
            self.conn.send(pdu.get_bin())
            submit_sm_resp = self.asyncRes(sequence_number)
            return submit_sm_resp
        else:
            raise StateError('cannot submit sm in state %s', self.state)


    def submit_multi(self, dest_address=[], **kwargs):
        if self.state in ['BOUND_TX', 'BOUND_TRX']:
            sequence_number = self.sequence_number
            self.sequence_number +=1
            pdu = SubmitMulti(sequence_number, **dict(self.defaults, **kwargs))
            for item in dest_address:
                if isinstance(item, str): # assume strings are addresses not lists
                    pdu.addDestinationAddress(
                            item,
                            dest_addr_ton = self.defaults['dest_addr_ton'],
                            dest_addr_npi = self.defaults['dest_addr_npi'],
                            )
                elif isinstance(item, dict):
                    if item.get('dest_flag') == 1:
                        pdu.addDestinationAddress(
                                item.get('destination_addr', ''),
                                dest_addr_ton = item.get('dest_addr_ton',
                                    self.defaults['dest_addr_ton']),
                                dest_addr_npi = item.get('dest_addr_npi',
                                    self.defaults['dest_addr_npi']),
                                )
                    elif item.get('dest_flag') == 2:
                        pdu.addDistributionList(item.get('dl_name'))
            self.conn.send(pdu.get_bin())
            submit_multi_resp = self.asyncRes(sequence_number)
        else:
            raise StateError('cannot submit multi sm in state %s', self.state)

    def bind_transceiver(self):
        if self.state in ['CLOSED']:
            self.connect()
        if self.state in ['OPEN']:
            self.logger.info('bind transceiver')
            sequence_number = self.sequence_number
            self.sequence_number +=1
            pdu = BindTransceiver(sequence_number, **self.defaults)
            self.conn.send(pdu.get_bin())
            self.logger.debug('bind_transceiver: waiting for response')
            response = self.asyncRes(sequence_number)
            self.logger.debug('bind_transceiver: received response %s', response)
            if self._is_ok(response,
                            'bind_transceiver_resp'):
                self.state = 'BOUND_TRX'
            else:
                raise StateError('unexpected response')

    def enquire_link(self, **kwargs):
        if self.state in ('BOUND_TX', 'BOUND_TRX', 'BOUND_RX'):
            sequence_number = self.sequence_number
            self.sequence_number +=1
            pdu = EnquireLink(sequence_number, **dict(self.defaults, **kwargs))
            self.logger.debug('enquire_link_resp %s', pdu)
            self.conn.send(pdu.get_bin())
            if not self._is_ok(self.asyncRes(sequence_number), 'enquire_link_resp'):
                self.disconnect()
        else:
            raise StateError('unbound')
Example #22
 def kill(self, *args, **kwargs):
     self.logger.info("ZeroMQ job bridge shutting down...")
     self.heartbeat.kill()
     Greenlet.kill(self, *args, **kwargs)
Example #23
class FakeClient(object):
    """
    A fake client with persistent connection.

    Driven by a dedicated greenlet, it keeps working through the script's rules
    in order, round after round, and will die trying.
    """

    def __init__(self, swarm, server, script_):
        self._swarm = swarm
        self._server = server
        self._socket = None
        self._greenlet = Greenlet(self._run)

        self._status = INITIAL
        self._prev_status = None
        self._script = script_
        self._id = id(self)

    def _report(self, status):
        """
        Report to swarm immediately on status change
        """
        if status != self._status:
            self._swarm.commit(CommandSTATUS(self._id, status, self._status))
            self._status, self._prev_status = (status, self._status)

    def _reconnect_server(self):
        """
        Die trying
        """
        while True:
            try:
                # To scatter connect requests
                time.sleep(randint(1, 20))

                self._report(CONNECT)
                self._disconnect_server()
                self._socket = create_connection(self._server, 3)
                self._socket.setsockopt(SOL_SOCKET, SO_RCVBUF, 128)
                self._socket.setsockopt(SOL_SOCKET, SO_SNDBUF, 1024)

                break

            except socket.error as e:
                # A fact: `socket.timeout`, `socket.herror`, and
                # `socket.gaierror` are all subclasses of `socket.error`.
                self._report(e.args[0])
                continue

    def _disconnect_server(self):
        if self._socket:
            self._socket.close()
            self._socket = None

    def _run(self):
        try:
            self._report(STARTED)
            self._reconnect_server()

            while True:
                try:
                    self._report(ACTING)
                    script.execute(self, self._script)
                    self._report(STANDBY)

                except (socket.error, BenchException) as e:
                    self._report(e.args[0])
                    self._reconnect_server()

        except GreenletExit:
            self._report(KILLED)

        except:
            self._report(FATAL)
            # let gevent print this exception
            raise

        finally:
            self._disconnect_server()

    def start(self):
        self._greenlet.start()

    def stop(self):
        self._greenlet.kill()
        self._greenlet.join()

    def send_for_reply(self, data, reply_parser):
        """
        Called by object of Script.

        Exceptions raised here should be handled in `_run`.
        """
        self._socket.sendall(data)

        need = reply_parser(None)
        while need > 0:
            data = self._socket.recv(need)
            if not data:
                raise ServerClosed("server closed")

            need = reply_parser(data)

    def send_noreply(self, data):
        self._socket.sendall(data)

    def close_connection(self):
        raise CloseForcibly("client closed")
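A hypothetical driver for FakeClient, following its docstring (the swarm object, server address, and script are stand-ins):

import gevent

clients = [FakeClient(swarm, ('127.0.0.1', 9000), script_)
           for _ in range(100)]
for c in clients:
    c.start()

gevent.sleep(60)        # let the swarm run for a while
for c in clients:
    c.stop()            # kill() the client's greenlet, then join() it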
Example #24
 def stop(self):
     self.is_stopped = True
     Greenlet.kill(self)
Example #25
class BaseMailSyncMonitor(Greenlet):
    """
    The SYNC_MONITOR_CLS for all mail sync providers should subclass this.

    Parameters
    ----------
    account_id : int
        Which account to sync.
    email_address : str
        Email address for `account_id`.
    provider : str
        Provider for `account_id`.
    heartbeat : int
        How often to check for commands.
    """

    def __init__(self, account, heartbeat=1):
        bind_context(self, "mailsyncmonitor", account.id)
        self.shutdown = event.Event()
        # how often to check inbox, in seconds
        self.heartbeat = heartbeat
        self.log = log.new(component="mail sync", account_id=account.id)
        self.account_id = account.id
        self.namespace_id = account.namespace.id
        self.email_address = account.email_address
        self.provider_name = account.verbose_provider

        Greenlet.__init__(self)

    def _run(self):
        try:
            return retry_with_logging(
                self._run_impl,
                account_id=self.account_id,
                provider=self.provider_name,
                logger=self.log,
            )
        except GreenletExit:
            self._cleanup()
            raise

    def _run_impl(self):
        self.sync = Greenlet(
            retry_with_logging,
            self.sync,
            account_id=self.account_id,
            provider=self.provider_name,
            logger=self.log,
        )
        self.sync.start()
        self.sync.join()

        if self.sync.successful():
            return self._cleanup()

        self.log.error(
            "mail sync should run forever",
            provider=self.provider_name,
            account_id=self.account_id,
            exc=self.sync.exception,
        )
        raise self.sync.exception

    def sync(self):
        raise NotImplementedError

    def _cleanup(self):
        self.sync.kill()
        with session_scope(self.namespace_id) as mailsync_db_session:
            map(lambda x: x.set_stopped(mailsync_db_session), self.folder_monitors)
        self.folder_monitors.kill()
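Per the docstring, providers subclass BaseMailSyncMonitor and implement sync(); a hypothetical minimal subclass might look like this (poll_provider is an assumed helper, not part of the project):

class ExampleSyncMonitor(BaseMailSyncMonitor):
    def sync(self):
        # Runs inside the child greenlet started by _run_impl(); it is
        # expected to loop forever, so returning is treated as an error.
        while True:
            poll_provider(self.account_id)  # hypothetical helper
            sleep(self.heartbeat)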
Example #26
 def kill(self, *args, **kwargs):
     """ Override our default kill method and kill our child greenlets as
     well """
     self.context.term()
     Greenlet.kill(self, *args, **kwargs)
Example #27
class BaseMailSyncMonitor(Greenlet):
    """
    The SYNC_MONITOR_CLS for all mail sync providers should subclass this.

    Parameters
    ----------
    account_id : int
        Which account to sync.
    email_address : str
        Email address for `account_id`.
    provider : str
        Provider for `account_id`.
    heartbeat : int
        How often to check for commands.
    """

    def __init__(self, account, heartbeat=1):
        bind_context(self, 'mailsyncmonitor', account.id)
        self.shutdown = event.Event()
        # how often to check inbox, in seconds
        self.heartbeat = heartbeat
        self.log = log.new(component='mail sync', account_id=account.id)
        self.account_id = account.id
        self.namespace_id = account.namespace.id
        self.email_address = account.email_address
        self.provider_name = account.verbose_provider

        Greenlet.__init__(self)

    def _run(self):
        try:
            return retry_with_logging(self._run_impl,
                                      account_id=self.account_id,
                                      provider=self.provider_name,
                                      logger=self.log)
        except GreenletExit:
            self._cleanup()
            raise

    def _run_impl(self):
        self.sync = Greenlet(retry_with_logging, self.sync,
                             account_id=self.account_id,
                             provider=self.provider_name,
                             logger=self.log)
        self.sync.start()
        self.sync.join()

        if self.sync.successful():
            return self._cleanup()

        self.log.error('mail sync should run forever',
                       provider=self.provider_name,
                       account_id=self.account_id,
                       exc=self.sync.exception)
        raise self.sync.exception

    def sync(self):
        raise NotImplementedError

    def _cleanup(self):
        self.sync.kill()
        with session_scope(self.namespace_id) as mailsync_db_session:
            map(lambda x: x.set_stopped(mailsync_db_session),
                self.folder_monitors)
        self.folder_monitors.kill()
Example #28
class Runnable:
    """Greenlet-like class, __run() inside one, but can be stopped and restarted

    Allows subtasks to crash self, bubbling the exception up in the greenlet.
    In the future, when proper restart is implemented, this may be replaced by an actual greenlet.
    """

    greenlet: Greenlet
    args: Sequence = tuple()  # args for _run()
    kwargs: dict = dict()  # kwargs for _run()

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        self.args = args
        self.kwargs = kwargs

        self.greenlet = Greenlet(self._run, *self.args, **self.kwargs)
        self.greenlet.name = f"{self.__class__.__name__}|{self.greenlet.name}"
        self.greenlets: List[Greenlet] = list()

    def start(self) -> None:
        """ Synchronously start task

        Reimplement in children and call super().start() at the end to start _run()
        Start-time exceptions may be raised
        """
        if self.greenlet:
            raise RuntimeError(f"Greenlet {self.greenlet!r} already started")
        pristine = (not self.greenlet.dead
                    and tuple(self.greenlet.args) == tuple(self.args)
                    and self.greenlet.kwargs == self.kwargs)
        if not pristine:
            self.greenlet = Greenlet(self._run, *self.args, **self.kwargs)
            self.greenlet.name = f"{self.__class__.__name__}|{self.greenlet.name}"
        self.greenlet.start()

    def _run(self, *args: Any, **kwargs: Any) -> None:
        """ Reimplements in children to busy wait here

        This busy wait should be finished gracefully after stop(),
        or be killed and re-raise on subtasks exception """
        raise NotImplementedError

    def stop(self) -> None:
        """ Synchronous stop, gracefully tells _run() to exit

        Should wait for subtasks to finish.
        Stop-time exceptions may be raised, run exceptions should not (accessible via get())
        """
        raise NotImplementedError

    def on_error(self, subtask: Greenlet) -> None:
        """ Default callback for substasks link_exception

        Default callback re-raises the exception inside _run() """
        log.error(
            "Runnable subtask died!",
            this=self,
            running=bool(self),
            subtask=subtask,
            exc=subtask.exception,
        )
        if not self.greenlet:
            return

        exception = subtask.exception or GreenletExit()
        self.greenlet.kill(exception)

    def _schedule_new_greenlet(self,
                               func: Callable,
                               *args: Any,
                               in_seconds_from_now: int = None,
                               **kwargs: Any) -> Greenlet:
        """ Spawn a sub-task and ensures an error on it crashes self/main greenlet """
        def on_success(greenlet: Greenlet) -> None:
            if greenlet in self.greenlets:
                self.greenlets.remove(greenlet)

        greenlet = Greenlet(func, *args, **kwargs)
        greenlet.link_exception(self.on_error)
        greenlet.link_value(on_success)
        self.greenlets.append(greenlet)
        if in_seconds_from_now:
            greenlet.start_later(in_seconds_from_now)
        else:
            greenlet.start()
        return greenlet

    def __bool__(self) -> bool:
        return bool(self.greenlet)

    def rawlink(self, callback: Callable) -> None:
        if not self.greenlet:
            return
        self.greenlet.rawlink(callback)
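A hypothetical minimal Runnable subclass, following the docstrings above (the Event and the printed "tick" payload are assumptions):

import gevent
from gevent.event import Event

class Ticker(Runnable):
    """Ticks until stop() is called."""

    def __init__(self, interval: float) -> None:
        super().__init__(interval)        # args are forwarded to _run()
        self.stop_event = Event()

    def _run(self, interval: float) -> None:
        # The graceful busy wait the base class asks for.
        while not self.stop_event.is_set():
            self._schedule_new_greenlet(print, "tick")
            gevent.sleep(interval)

    def stop(self) -> None:
        self.stop_event.set()
        self.greenlet.join()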
Example #29
class SlackAdapter(object):
    """
        This adapter exposes a webhook that listens for slack messages.

        The web listener for this is independent of Jeev's WSGI server. They cannot run on the same port.

        This adapter works the same way as Slack's Hubot adapter. So, when integrating with Jeev, use the Hubot
        integration on Slack's side and point it to the adapter's listen host and port.
    """

    class SlackObject(object):
        def __init__(self, data):
            self.data = data
            self._in_name_sets = set()

        @property
        def id(self):
            return self.data['id']

        @property
        def name(self):
            return self.data['name']

        def _iter_name_sets(self):
            pass

        def _link(self, name):
            self._in_name_sets.add(name)

        def _unlink(self):
            self._in_name_sets.clear()

        def iter_names(self):
            return iter(self._in_name_sets)

        def _update(self, **kwargs):
            for k, v in kwargs.iteritems():
                if k == 'ok':
                    continue

                self.data[k] = v

        def __str__(self):
            return self.name

    class SlackUser(SlackObject):
        @property
        def presence(self):
            return self.data['presence']

        def __repr__(self):
            return '<SlackUser id=%r, name=%r, presence=%s>' % (self.id, self.name, self.presence)

    class _SlackChannelBase(SlackObject):
        is_direct_message = False

        def __init__(self, data, adapter):
            super(SlackAdapter._SlackChannelBase, self).__init__(data)
            self._adapter = adapter

        @property
        def topic(self):
            if 'topic' in self.data:
                return self.data['topic']['value']

        @topic.setter
        def topic(self, val):
            if val != self.data['topic']:
                self._adapter.api.channels.setTopic(channel=self, topic=val)

        @property
        def purpose(self):
            if 'purpose' in self.data:
                return self.data['purpose']['value']

        @purpose.setter
        def purpose(self, val):
            raise NotImplementedError("Bots cannot set channel purpose.")

    class SlackChannel(_SlackChannelBase):
        @property
        def members(self):
            members = []
            for m in self.data['members']:
                members.append(self._adapter._users[m])

            return members

        def _left(self, archive=False):
            keep_keys = 'created', 'creator', 'id', 'is_archived', 'is_channel', 'is_general'
            for k in self.data.keys():
                if k not in keep_keys:
                    del self.data[k]

            self.data.update(
                members=[],
                is_member=False
            )
            if archive:
                self.data['is_archived'] = True

        def __repr__(self):
            return "<SlackChannel id=%r, name=%r, members=%r>" % (
                self.id, self.name, self.members
            )

    class _SlackGroupBase(SlackObject):
        is_direct_message = False

        def __init__(self, data, adapter):
            super(SlackAdapter._SlackGroupBase, self).__init__(data)
            self._adapter = adapter

        @property
        def topic(self):
            if 'topic' in self.data:
                return self.data['topic']['value']

        @topic.setter
        def topic(self, val):
            if val != self.data['topic']:
                self._adapter.api.groups.setTopic(channel=self, topic=val)

        @property
        def purpose(self):
            if 'purpose' in self.data:
                return self.data['purpose']['value']

        @purpose.setter
        def purpose(self, val):
            raise NotImplementedError("Bots cannot set group purpose.")

    class SlackGroup(_SlackGroupBase):
        @property
        def members(self):
            members = []
            for m in self.data['members']:
                members.append(self._adapter._users[m])

            return members

        def _left(self, archive=False):
            keep_keys = 'created', 'creator', 'id', 'is_archived', 'is_group', 'is_general'
            for k in self.data.keys():
                if k not in keep_keys:
                    del self.data[k]

            self.data.update(
                members=[],
                is_member=False
            )
            if archive:
                self.data['is_archived'] = True

        def __repr__(self):
            return "<SlackGroup id=%r, name=%r, members=%r>" % (
                self.id, self.name, self.members
            )

    class SlackDirectMessage(_SlackChannelBase):
        is_direct_message = True

        @property
        def user(self):
            return self._adapter._users[self.data['user']]

        @property
        def members(self):
            return [self.user]

        def __repr__(self):
            return '<SlackDirectMessage id=%r, name=%r, user=%r>' % (
                self.id, self.name, self.user
            )

    class SlackObjectList(object):
        def __init__(self):
            self._obj_by_id = {}
            self._obj_by_name = defaultdict(set)

        def clear(self):
            self._obj_by_id.clear()
            self._obj_by_name.clear()

        def add(self, obj):
            if obj in self:
                self.remove(obj)

            self._obj_by_id[obj.id] = obj
            name = obj.name.lower()
            obj._link(name)
            self._obj_by_name[obj.name.lower()].add(obj)

        def remove(self, obj):
            self._obj_by_id.pop(obj.id)
            for name in obj.iter_names():
                self._obj_by_name[name].discard(obj)

            obj._unlink()

        def __contains__(self, item):
            if isinstance(item, SlackAdapter.SlackObject):
                return item.id in self._obj_by_id and self._obj_by_id[item.id] is item

            else:
                return item in self._obj_by_id

        def __getitem__(self, key):
            if key in self._obj_by_id:
                return self._obj_by_id[key]

            raise KeyError(key)

        def __delitem__(self, key):
            if key in self._obj_by_id:
                obj = self._obj_by_id[key]
                self.remove(obj)
            else:
                raise KeyError(key)

        def find(self, name_or_id):
            if name_or_id in self:
                return self[name_or_id]

            name_or_id = name_or_id.lower()
            if name_or_id in self._obj_by_name:
                return next(iter(self._obj_by_name[name_or_id]), None)

        def names(self):
            return [k for k, v in self._obj_by_name.iteritems() if v]

    class SlackApi(object):
        def __init__(self, adapter=None, parent=None, part=None):

            if parent:
                self._adapter = parent._adapter
                self._parts = parent._parts[:]
            else:
                self._parts = []
                self._adapter = adapter

            if part:
                self._parts.append(part)

        def __getattr__(self, item):
            return SlackAdapter.SlackApi(parent=self, part=item)

        def __call__(self, **kwargs):
            for k, v in kwargs.items():
                if isinstance(v, SlackAdapter.SlackObject):
                    kwargs[k] = v.id

            method = '.'.join(self._parts) or '?'
            logger.debug('Making API call %r with args %r', method, kwargs)
            result = json.loads(self._adapter._server.api_call(method, **kwargs))
            logger.debug('Got response %r', result)
            result = self._adapter._process_post_method_hooks(method, kwargs, result)
            return result

    class MutableOutgoingMessage(object):
        def __init__(self, adapter, channel, message):
            self.channel = channel
            self.adapter = adapter
            self.id = adapter._generate_message_id()
            self.message = message
            self.needs_update = False
            self.ts = None

        def _recv_reply(self, data):
            self.ts = data['ts']
            if self.needs_update:
                self._do_update()

        def _do_update(self):
            self.adapter.api.chat.update(
                ts=self.ts,
                channel=self.channel.id,
                text=self.message
            )
            self.needs_update = False

        def update(self, message):
            self.message = message
            if self.ts:
                self._do_update()
            else:
                self.needs_update = True

        def serialize(self):
            return {
                'text': self.message,
                'channel': self.channel.id,
                'type': 'message',
                'id': self.id
            }

        def __repr__(self):
            return "<MutableOutgoingMessage id=%r, channel=%r, message=%r>" % (
                self.id, self.channel, self.message
            )

    def __init__(self, jeev, opts):
        self._jeev = jeev
        self._opts = opts
        self._server = None
        self._greenlet = None
        self._channels = self.SlackObjectList()
        self._dms = self.SlackObjectList()
        self._groups = self.SlackObjectList()
        self._users = self.SlackObjectList()
        self._outgoing_messages = {}
        self._last_id = 1
        self.api = self.SlackApi(self)

    def start(self):
        if self._greenlet:
            raise RuntimeError("SlackAdapter Already Started.")
        self._greenlet = Greenlet(self._run)
        self._greenlet.start()

    def stop(self):
        self._greenlet.kill()

    def _run(self):
        while True:
            self._do_slack_connection()
            sleep(10)

    def _do_slack_connection(self):
        if self._server:
            self._server.websocket.abort()

        self._server = Server(self._opts['slack_token'], False)
        self._server.rtm_connect()
        self._parse_login_data(self._server.login_data)
        self._server.websocket.sock.setblocking(1)
        self.api.im.close(channel='D038BM8HQ')

        try:
            while True:
                frame = self._server.websocket.recv()
                self._handle_frame(frame)
        except Exception as e:
            logger.error(e, exc_info=True)

    def _handle_frame(self, frame):
        data = json.loads(frame)
        logger.debug("Got frame %r", frame)

        if 'reply_to' in data:
            message = self._outgoing_messages.pop(data['reply_to'], None)
            if message:
                logger.debug("Received reply for Message: %r", message)
                message._recv_reply(data)

        if 'type' not in data:
            return

        handler = getattr(self, '_handle_%s' % data['type'], None)
        if handler:
            return handler(data)

        else:
            logger.debug("No handler defined for message type %s", data['type'])

    def _handle_message(self, data):
        if 'subtype' not in data and 'reply_to' not in data:
            message = Message(data, self._get_channel_group_or_dm(data['channel']), self._users[data['user']],
                              data['text'])

            return self._jeev._handle_message(message)

    def _handle_user_change(self, data):
        user = self._get_user(data['user']['id'])
        if user is None:
            return
        user._update(**data['user'])
        self._users.add(user)
        self._broadcast_event(events.User.Changed, user=user)

    def _handle_presence_change(self, data):
        # For reasons that aren't clear, slack does a presence change notification before telling jeev about a new user
        user = self._get_user(data['user'])
        if user is None:
            return
        user._update(presence=data['presence'])
        self._broadcast_event(events.User.PresenceChanged, user=user)

    def _handle_channel_created(self, data):
        channel = data['channel'].copy()
        channel.update(members=[], is_general=False, is_member=False, is_archived=False)
        channel = self.SlackChannel(channel, self)
        self._channels.add(channel)
        self._broadcast_event(events.Channel.Created, channel=channel)

    def _handle_channel_left(self, data):
        channel = self._channels[data['channel']]
        channel._left()
        self._broadcast_event(events.Channel.Left, channel=channel)

    def _handle_channel_deleted(self, data):
        self._handle_channel_left(data)
        channel = self._channels[data['channel']]
        self._channels.remove(channel)
        self._broadcast_event(events.Channel.Deleted, channel=channel)

    def _handle_channel_rename(self, data):
        channel = self._channels[data['channel']['id']]
        channel._update(**data['channel'])
        self._channels.add(channel)
        self._broadcast_event(events.Channel.Renamed, channel=channel)

    def _handle_channel_archive(self, data):
        channel = self._channels[data['channel']]
        channel._left(archive=True)
        self._broadcast_event(events.Channel.Archived, channel=channel)

    def _handle_channel_unarchive(self, data):
        channel = self._channels[data['channel']]
        channel._update(is_archived=False)
        self._broadcast_event(events.Channel.UnArchived, channel=channel)

    def _handle_channel_joined(self, data):
        channel_id = data['channel']['id']
        if channel_id in self._channels:
            channel = self._channels[channel_id]
            channel._update(**data['channel'])
            self._channels.add(channel)
        else:
            channel = self.SlackChannel(data['channel'], self)
            self._channels.add(channel)
            self._broadcast_event(events.Channel.Created, channel=channel)

        self._broadcast_event(events.Channel.Joined, channel=channel)

    def _process_team_join(self, data):
        user = self.SlackUser(data['user'])
        self._users.add(user)
        self._broadcast_event(events.Team.Joined, user=user)

    def _parse_login_data(self, login_data):
        import pprint

        pprint.pprint(login_data)
        self._users.clear()
        self._channels.clear()
        self._dms.clear()
        self._outgoing_messages.clear()

        for user in login_data['users']:
            self._users.add(self.SlackUser(user))

        for group in login_data['groups']:
            self._groups.add(self.SlackGroup(group, self))

        for dm in login_data['ims']:
            self._dms.add(self.SlackDirectMessage(dm, self))

        for channel in login_data['channels']:
            self._channels.add(self.SlackChannel(channel, self))

    def _process_post_method_hooks(self, method, kwargs, data):
        if data['ok']:
            if method == 'channels.setTopic':
                channel = self._channels[kwargs['channel']]
                channel._update(**data)

        return data

    def _broadcast_event(self, event, **kwargs):
        pass

    def send_message(self, channel, message):
        if not isinstance(channel, SlackAdapter._SlackChannelBase) and \
                not isinstance(channel, SlackAdapter._SlackGroupBase):
            channel = self._channels.find(channel)

        if not channel:
            raise RuntimeError("Channel with name or ID of %s not found." % channel)

        message = SlackAdapter.MutableOutgoingMessage(self, channel, message)
        logging.debug("Sending message %r", message)
        self._server.send_to_websocket(message.serialize())
        self._outgoing_messages[message.id] = message
        return message

    def send_messages(self, channel, *messages):
        for message in messages:
            self.send_message(channel, message)

    def send_attachment(self, channel, *attachments):
        if not isinstance(channel, SlackAdapter._SlackChannelBase):
            channel = self._channels.find(channel)

        if not channel:
            raise RuntimeError("Channel with name or ID of %s not found." % channel)

        args = {
            'type': 'message',
            'channel': channel.id,
            'attachments': [a.serialize() for a in attachments]
        }

        for a in attachments:
            if not a.has_message_overrides:
                continue

            for k, v in a.message_overrides.items():
                args[k] = v

        self._server.send_to_websocket(args)

    def _generate_message_id(self):
        self._last_id += 1
        return self._last_id

    def _get_channel_group_or_dm(self, id):
        if id.startswith('D'):
            return self._get_dm(id)
        elif id.startswith('G'):
            return self._get_group(id)
        else:
            return self._get_channel(id)

    def _get_dm(self, id):
        if id not in self._dms:
            self._refresh_dms()
        return self._dms[id]

    def _refresh_dms(self):
        dms = self.api.im.list()
        for dm in dms['ims']:
            self._dms.add(self.SlackDirectMessage(dm, self))

    def _get_group(self, id):
        if id not in self._groups:
            self._refresh_groups()
        return self._groups[id]

    def _refresh_groups(self):
        groups = self.api.groups.list()
        for group in groups['groups']:
            self._groups.add(self.SlackGroup(group, self))

    def _get_channel(self, id):
        if id not in self._channels:
            self._refresh_channels()
        return self._channels[id]

    def _refresh_channels(self):
        channels = self.api.channels.list()
        for channel in channels['channels']:
            self._channels.add(self.SlackChannel(channel, self))

    def _get_user(self, id):
        if id not in self._users:
            user = self.api.user.info(user=id)
            if not user['ok']:
                return None
            self._users.add(self.SlackUser(user['user']))

        return self._users[id]
Example #30
 def kill(self, *args, **kwargs):
     """ Override our default kill method and kill our child greenlets as
     well """
     self.logger.info("Auxilury network monitor for {} shutting down..."
                      .format(self.config['name']))
     Greenlet.kill(self, *args, **kwargs)
Example #31
class ScheduledJob(j.baseclasses.object):
    def _init(self,
              name,
              method,
              args,
              kwargs,
              period=None,
              time_start=None,
              timeout=1200,
              nr=None,
              event=None,
              retry=None):
        self.name = name
        self.method = method
        self.args = args
        self.kwargs = kwargs
        if not time_start:
            time_start = j.data.time.epoch
        self.time_start = time_start
        self.time_started = 0
        self.time_stopped_last = 0
        self.period = period
        self.greenlet = None
        self.timeout = timeout
        self.error = None
        self.nr = nr
        self.event = event  # is the nr we need to wait for in our scheduling job
        self.result = None
        self.retry = retry
        self.done = False

    def check(self):
        """
        check if the job is running well, if there was timeout or error
        :return:
        """

        now = j.data.time.epoch
        if self.done:
            return
        if self.greenlet is not None:
            if self.timeout:
                if self.time_start + self.timeout < now:
                    self._log_debug("timeout:%s" % self.name)
                    self.raise_error("timeout")
                    return self._stop()

            if self.greenlet.exception:
                self._log_debug("exception:%s" % self.name)
                # error happened in the greenlet
                self.raise_error()
                return self._stop()
            if self.greenlet.successful():
                self._log_debug("ok:%s" % self.name)
                self.error = None
                self.result = self.greenlet.value
                return self._stop()
            if self.greenlet.dead:
                self._log_debug("dead:%s" % self.name)

            self._log_debug("running:%s" % self.name)
        else:
            if self.time_start < now:
                self._log_debug("start:%s" % self.name)
                self.run()

    def create(self):
        """
        create the greenlet
        :param self:
        :return:
        """
        if self.greenlet:
            return
        self.greenlet = Greenlet(self.method, *self.args, **self.kwargs)

    def run(self):
        if not self.greenlet:
            self.create()
        self.greenlet.start()
        self.time_started = j.data.time.epoch

    def _stop(self):
        self.time_stopped_last = j.data.time.epoch
        self.greenlet.kill()
        # remove greenlet
        self.greenlet = None
        if self.error and self.retry and self.retry > 0:
            self.retry -= 1
            self.time_start = j.data.time.epoch
        elif self.period:
            self.time_start = j.data.time.epoch + self.period
        else:
            self.time_start = None
            self.done = True

    def raise_error(self, msg=None):
        if not msg:
            # TODO:get error info from greenlet
            j.shell()
        self.error = msg
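
Setting the j.* helpers aside, the lifecycle ScheduledJob drives (create the greenlet lazily, start it, poll it from check(), collect the result or error, then stop) can be reproduced with plain gevent. A hedged sketch with illustrative names and timings.

# Plain-gevent sketch of the create/run/check lifecycle implemented above.
# The work() function, timings, and the 5 second timeout are illustrative.
import time
import gevent
from gevent import Greenlet

def work(x):
    gevent.sleep(0.2)
    return x * 2

job = Greenlet(work, 21)           # "create"
job.start()                        # "run"
started = time.time()

while True:                        # the polling "check" loop
    gevent.sleep(0.1)
    if job.exception is not None:  # error happened inside the greenlet
        print("error:", job.exception)
        break
    if job.successful():           # finished cleanly: collect the result
        print("result:", job.value)
        break
    if time.time() - started > 5:  # timeout guard
        job.kill()
        print("timeout")
        break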
Example #32
0
class MPMServer(RPCServer):
    """
    Main MPM RPC class which holds the periph_manager object and translates
    RPC calls to appropriate calls in the periph_manager and dboard_managers.
    """
    # This is a list of methods in this class which require a claim
    default_claimed_methods = ['init', 'update_component', 'reclaim', 'unclaim']

    ###########################################################################
    # RPC Server Initialization
    ###########################################################################
    def __init__(self, state, default_args):
        self.log = get_main_logger().getChild('RPCServer')
        self.log.trace("Launching RPC server with compat num %d.%d",
                       MPM_COMPAT_NUM[0], MPM_COMPAT_NUM[1])
        self._state = state
        self._timer = Greenlet()
        # Setting this to True will disable an unclaim on timeout. Use with
        # care, and make sure to set it to False again when finished.
        self._disable_timeouts = False
        self._timeout_interval = float(default_args.get(
            "rpc_timeout_interval",
            TIMEOUT_INTERVAL
        ))
        self.session_id = None
        # Create the periph_manager for this device
        # This call will be forwarded to the device specific implementation
        # e.g. in periph_manager/n3xx.py
        # Which implementation is called will be determined during
        # configuration with cmake (-DMPM_DEVICE).
        # mgr is thus derived from PeriphManagerBase
        # (see periph_manager/base.py)
        from usrp_mpm.periph_manager import periph_manager
        self._mgr_generator = lambda: periph_manager(default_args)
        self.periph_manager = self._mgr_generator()
        device_info = self.periph_manager.get_device_info()
        self._state.dev_type.value = \
                to_binary_str(device_info.get("type", "n/a"))
        self._state.dev_product.value = \
                to_binary_str(device_info.get("product", "n/a"))
        self._state.dev_serial.value = \
                to_binary_str(device_info.get("serial", "n/a"))
        self._db_methods = []
        self._mb_methods = []
        self.claimed_methods = copy.copy(self.default_claimed_methods)
        self._last_error = ""
        self._init_rpc_calls(self.periph_manager)
        # We call the server __init__ function here, and not earlier, because
        # first the commands need to be registered
        super(MPMServer, self).__init__(
            pack_params={'use_bin_type': True},
        )
        self._state.system_ready.value = True
        self.log.info("RPC server ready!")
        # Optionally spawn watchdog. Note: In order for us to be able to spawn
        # the task from this thread, the main process needs to hand control to
        # us using watchdog.transfer_control().
        if watchdog.has_watchdog():
            self.log.info("Spawning watchdog task...")
            watchdog.spawn_watchdog_task(self._state, self.log)

    def _init_rpc_calls(self, mgr):
        """
        Register all RPC calls for the motherboard and daughterboards.

        First clears out all previously registered RPC calls.
        """
        # Clear old calls:
        for meth_list in (self._db_methods, self._mb_methods):
            for method in meth_list:
                if hasattr(self, method):
                    delattr(self, method)
                else:
                    self.log.warning(
                        "Attempted to remove non-existant method: %s",
                        method
                    )
        self._db_methods = []
        self._mb_methods = []
        # Register new ones:
        self._update_component_commands(mgr, '', '_mb_methods')
        for db_slot, dboard in enumerate(mgr.dboards):
            cmd_prefix = 'db_' + str(db_slot) + '_'
            self._update_component_commands(dboard, cmd_prefix, '_db_methods')
        self.log.debug(
            "Registered %d motherboard methods, %d daughterboard methods.",
            len(self._mb_methods),
            len(self._db_methods),
        )

    def _update_component_commands(self, component, namespace, storage):
        """
        Detect available methods for an object and add them to the RPC server.

        We skip all private methods, and all methods that use the @no_rpc
        decorator.
        """
        for method_name in (
                m for m in dir(component)
                if not m.startswith('_') \
                    and callable(getattr(component, m)) \
                    and not hasattr(self, m) \
                    and not getattr(getattr(component, m), '_norpc', False)
            ):
            new_rpc_method = getattr(component, method_name)
            command_name = namespace + method_name
            if getattr(new_rpc_method, '_notok', False):
                self._add_safe_command(new_rpc_method, command_name)
            else:
                self._add_claimed_command(new_rpc_method, command_name)
                self.claimed_methods.append(command_name)
            getattr(self, storage).append(command_name)


    def _add_claimed_command(self, function, command):
        """
        Adds a method with the name command to the RPC server
        This command will require an acquired claim on the device, and a valid
        token needs to be passed in for it to not fail.

        If the method does not require a token, use _add_safe_command().
        """
        self.log.trace("adding command %s pointing to %s", command, function)
        def new_claimed_function(token, *args):
            " Define a function that requires a claim token check "
            if not self._check_token_valid(token):
                self.log.warning(
                    "Thwarted attempt to access function `{}' with invalid " \
                    "token `{}'.".format(command, token)
                )
                raise RuntimeError("Invalid token!")
            try:
                # Because we can only reach this point with a valid claim,
                # there's no harm in resetting the timer
                self._reset_timer()
                return function(*args)
            except Exception as ex:
                self.log.error(
                    "Uncaught exception in method %s :%s \n %s ",
                    command, str(ex), traceback.format_exc()
                )
                self._last_error = str(ex)
                raise
            finally:
                if not self._state.claim_status.value:
                    self.log.error("Lost claim during API call to `%s'!",
                                   command)
        new_claimed_function.__doc__ = function.__doc__
        setattr(self, command, new_claimed_function)

    def _add_safe_command(self, function, command):
        """
        Add a safe method which does not require a claim on the device.
        If the method should only be called by claimers, use
        _add_claimed_command().
        """
        self.log.trace("adding safe command %s pointing to %s", command, function)
        def new_unclaimed_function(*args):
            " Define a function that does not require a claim token check "
            try:
                return function(*args)
            except Exception as ex:
                self.log.error(
                    "Uncaught exception in method %s :%s\n %s ",
                    command, str(ex), traceback.format_exc()
                )
                self._last_error = str(ex)
                raise
        new_unclaimed_function.__doc__ = function.__doc__
        setattr(self, command, new_unclaimed_function)

    ###########################################################################
    # Diagnostics and introspection
    ###########################################################################
    def list_methods(self):
        """
        Returns a list of tuples: (method_name, docstring, is claim required)

        Every tuple represents one call that's available over RPC.
        """
        return [
            (
                method,
                getattr(self, method).__doc__,
                method in self.claimed_methods
            )
            for method in dir(self)
            if not method.startswith('_') \
                    and callable(getattr(self, method))
        ]

    def ping(self, data=None):
        """
        Take in data as argument and send it back
        This is a safe method which can be called without a claim on the device
        """
        self.log.debug("I was pinged from: %s:%s", self.client_host, self.client_port)
        return data

    ###########################################################################
    # Claiming logic
    ###########################################################################
    def _check_token_valid(self, token):
        """
        Returns True iff:
        - The device is currently claimed
        - The claim token matches the one passed in
        """
        token = to_binary_str(token)
        return self._state.claim_status.value and \
                len(token) == TOKEN_LEN and \
                self._state.claim_token.value == token

    def claim(self, session_id):
        """Claim device

        Tries to claim MPM device and provides a human readable session_id.
        The caller must remember this token, and call reclaim() on regular
        intervals in order not to lose the claim.

        Will return a token on success, or raise an Exception on failure.
        """
        self._state.lock.acquire()
        if self._state.claim_status.value:
            error_msg = \
                "Someone tried to claim this device again (From: {})".format(
                    self.client_host)
            self.log.warning(error_msg)
            self._last_error = error_msg
            self._state.lock.release()
            raise RuntimeError("Double-claim")
        self.log.debug(
            "Claiming from: %s, Session ID: %s",
            self.client_host,
            session_id
        )
        self._state.claim_token.value = bytes(''.join(
            choice(ascii_letters + digits) for _ in range(TOKEN_LEN)
        ), 'ascii')
        self._state.claim_status.value = True
        self.periph_manager.claimed = True
        self.periph_manager.claim()
        if self.periph_manager.clear_rpc_method_registry_on_unclaim:
            self._init_rpc_calls(self.periph_manager)
        self._state.lock.release()
        self.session_id = session_id + " ({})".format(self.client_host)
        self._reset_timer()
        self.log.debug(
            "giving token: %s to host: %s",
            self._state.claim_token.value,
            self.client_host
        )
        if self.client_host in net.get_local_ip_addrs():
            self.periph_manager.set_connection_type("local")
        else:
            self.periph_manager.set_connection_type("remote")
        return self._state.claim_token.value

    def reclaim(self, token):
        """
        Reclaim a MPM device with a token. This operation will fail if the
        device is claimed and the token doesn't match, or if the device is not
        claimed at all.
        """
        if self._state.claim_status.value:
            self._state.lock.acquire()
            if self._check_token_valid(token):
                self._state.lock.release()
                self.log.debug("reclaimed from: %s", self.client_host)
                self._reset_timer()
                return True
            self._state.lock.release()
            self.log.debug(
                "reclaim failed from: %s  Invalid token: %s",
                self.client_host, token[:TOKEN_LEN]
            )
            return False
        self.log.debug(
            "trying to reclaim unclaimed device from: %s",
            self.client_host
        )
        return False

    def _unclaim(self):
        """
        Unconditional unclaim - for internal use

        Resets and deinitializes the periph manager as well.
        """
        self.log.debug("Releasing claim on session `{}'".format(
            self.session_id
        ))
        self._state.claim_status.value = False
        self._state.claim_token.value = b''
        self.session_id = None
        if self.periph_manager.clear_rpc_method_registry_on_unclaim:
            self.clear_method_registry()
        try:
            self.periph_manager.claimed = False
            self.periph_manager.unclaim()
            self.periph_manager.set_connection_type(None)
            self.periph_manager.deinit()
        except Exception as ex:
            self._last_error = str(ex)
            self.log.error("deinit() failed: %s", str(ex))
            # Don't want to propagate this failure -- the session is over
        self._timer.kill()

    def unclaim(self, token):
        """
        unclaim `token` - unclaims the MPM device if it is claimed with this
        token
        """
        if self._check_token_valid(token):
            self._unclaim()
            return True
        self.log.warning("Attempt to unclaim session with invalid token!")
        return False

    def _timeout_event(self):
        " Callback for the claim timeout. "
        if self._disable_timeouts:
            self.log.debug("Timeouts are disabled: Snoozing")
            self._reset_timer()
        else:
            self.log.warning("A timeout event occured!")
            self._unclaim()

    def _reset_timer(self):
        """
        Reset unclaim timer. After calling this, call this function again
        within 'timeout' seconds to avoid a timeout event.
        """
        self._timer.kill()
        self._timer = spawn_later(self._timeout_interval, self._timeout_event)

    ###########################################################################
    # Status queries
    ###########################################################################
    def get_mpm_compat_num(self):
        """Get the MPM compatibility number"""
        return MPM_COMPAT_NUM

    def get_device_info(self):
        """
        get device information
        This is a safe method which can be called without a claim on the device
        """
        info = self.periph_manager.get_device_info()
        info["mpm_version"] = "{}.{}".format(*MPM_COMPAT_NUM)
        if self.client_host in net.get_local_ip_addrs():
            info["connection"] = "local"
        else:
            info["connection"] = "remote"
        return info

    def get_last_error(self):
        """
        Return the 'last error' string, which gets set when RPC calls fail.
        """
        return self._last_error

    def get_log_buf(self, token):
        """
        Return the contents of the log buffer as a list of str -> str
        dictionaries.
        """
        if not self._check_token_valid(token):
            self.log.warning(
                "Attempt to read logs without valid claim from {}".format(
                    self.client_host
                )
            )
            err_msg = "get_log_buf() called without valid claim."
            self._last_error = err_msg
            raise RuntimeError(err_msg)
        log_records = get_main_logger().get_log_buf()
        self.log.trace("Returning %d log records.", len(log_records))
        return [
            {k: str(v) for k, v in iteritems(record)}
            for record in log_records
        ]

    ###########################################################################
    # Session initialization
    ###########################################################################
    def init(self, token, args):
        """
        Initialize device. See PeriphManagerBase for details. This is forwarded
        from here to give extra control over the claim release timeout.
        """
        if not self._check_token_valid(token):
            self.log.warning(
                "Attempt to init without valid claim from {}".format(
                    self.client_host
                )
            )
            self._last_error = "init() called without valid claim."
            raise RuntimeError("init() called without valid claim.")
        result = None  # default so the finally-clause log below can't hit an unbound name
        try:
            result = self.periph_manager.init(args)
        except Exception as ex:
            self._last_error = str(ex)
            self.log.error("init() failed with error: %s", str(ex))
        finally:
            self.log.debug("init() result: {}".format(result))
        return result

    ###########################################################################
    # Update components
    ###########################################################################
    def clear_method_registry(self):
        """
        Clear all the methods in the RPC server method cache.
        """
        # RPCServer caches RPC methods, but that cache is not accessible here
        # (because Cython). Re-running `RPCServer.__init__` clears that cache,
        # and allows us to register new RPC methods.
        # A note on maintenance: This has been deemed safe through inspection of
        # the RPCServer source code. However, this is not typical Python, and
        # changes in future versions of RPCServer may cause issues.
        super(MPMServer, self).__init__(
            pack_params={'use_bin_type': True},
        )

    def reset_mgr(self):
        """
        Reset the Peripheral Manager for this RPC server.
        """
        self.log.info("Resetting peripheral manager.")
        self.periph_manager.tear_down()
        self.periph_manager = None
        self.periph_manager = self._mgr_generator()
        self._init_rpc_calls(self.periph_manager)
        # Clear the method cache in order to remove stale references to
        # methods from the old peripheral manager (the one before reset)
        self.clear_method_registry()

    def update_component(self, token, file_metadata_l, data_l):
        """"
        Updates the device component files specified by the metadata and data
        :param file_metadata_l: List of dictionary of strings containing metadata
        :param data_l: List of binary string with the file contents to be written
        """
        # Stop the timer, update_component can take some time:
        self._disable_timeouts = True
        # Check the claimed status
        if not self._check_token_valid(token):
            self._last_error =\
                "Attempt to update component without valid claim from {}".format(
                    self.client_host
                )
            self.log.error(self._last_error)
            raise RuntimeError("Attempt to update component without valid claim.")
        result = self.periph_manager.update_component(file_metadata_l, data_l)
        if not result:
            component_ids = [metadata['id'] for metadata in file_metadata_l]
            raise RuntimeError("Failed to update components: {}".format(component_ids))

        # Check if we need to reset the peripheral manager
        reset_now = False
        for metadata, data in zip(file_metadata_l, data_l):
            # Make sure the component is in the updateable_components
            component_id = metadata['id']
            if component_id in self.periph_manager.updateable_components:
                # Check if updating that component means the PM should be reset
                if self.periph_manager.updateable_components[component_id]['reset']:
                    reset_now = True
            else:
                self.log.debug("ID {} not in updateable components ({})".format(
                    component_id, self.periph_manager.updateable_components))

        try:
            self.log.trace("Reset after updating component? {}".format(reset_now))
            if reset_now:
                self.reset_mgr()
                self.log.debug("Reset the periph manager")
        except Exception as ex:
            self.log.error(
                "Error in update_component while resetting: {}".format(
                    ex
                ))
            self._last_error = str(ex)

        self.log.debug("End of update_component")
        self._reset_timer()
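
_add_claimed_command() and _add_safe_command() above boil down to wrapping a callable with a token check and attaching it to the server instance under a new name. A stripped-down sketch of that wrap-and-setattr registration with no RPC machinery; TinyServer and its token are illustrative.

# Stripped-down sketch of the wrap-and-setattr command registration used by
# _add_claimed_command()/_add_safe_command() above. TinyServer is illustrative.
import functools

class TinyServer(object):
    def __init__(self, token):
        self._token = token

    def add_claimed_command(self, function, command):
        @functools.wraps(function)
        def wrapper(token, *args):
            if token != self._token:       # reject calls without a valid token
                raise RuntimeError("Invalid token!")
            return function(*args)
        setattr(self, command, wrapper)    # expose it as server.<command>(...)

server = TinyServer("secret")
server.add_claimed_command(lambda x: x + 1, "increment")
assert server.increment("secret", 41) == 42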
Example #33
0
 def stop(self):
     self.is_stopped = True
     Greenlet.kill(self)
Example #34
0
class Controller(object):

    """Manages a program's main loop in a Greenlet."""

    def __init__(self, program_id=DEFAULT_PROGRAM):
        """Creates a controller to control the specified program. The program
        doesn't start executing until the start method is called."""
        self.messages = Queue()
        self.program_id = program_id
        self.program = PROGRAMS[program_id]()
        self.green = None
        self.can_reset = False

    def start(self):
        """Starts (or resumes) the execution of the program."""
        self.green = Greenlet(self.main_loop)
        self.green.start()
        self.program.start()
        self.can_reset = True

    def stop(self):
        """Stops the execution of the program."""
        self.program.stop()
        if self.green:
            self.green.kill()

    def reset(self):
        """Stops the program and resets it to its initial state."""
        self.stop()
        self.program.reset()
        self.can_reset = False

    def switch_program(self, program_id):
        """Stops execution and switches to a new program."""
        self.stop()
        self.program_id = program_id
        self.program = PROGRAMS[program_id]()
        self.can_reset = False

    def main_loop(self):
        """Runs the program's loop method continously, collecting any returned
        messages into the messages queue."""
        while True:
            msg = self.program.loop()
            if msg:
                self.messages.put(msg)
            sleep(LOOP_DELAY)

    def __call__(self, command):
        """Accepts a command and either performs the desired action or passes
        the message on to the program. Returns a status message."""
        if command == 'short:sync':
            pid = self.program_id
            running = bool(self.green)
            can_reset = self.can_reset
            return "{} {} {}".format(pid, running, can_reset)
        if command == 'long:status':
            return self.messages.get()
        if command.startswith(PROGRAM_PREFIX):
            prog = command[len(PROGRAM_PREFIX):]
            self.switch_program(prog)
            return "switched to {}".format(prog)
        if command == 'control:start':
            if self.green:
                return "already running"
            self.start()
            return "program resumed"
        if command == 'control:stop':
            if not self.green:
                return "not running"
            self.stop()
            return "program paused"
        if command == 'control:reset':
            self.reset()
            return "program reset"
        return self.program(command)
Example #35
0
 def kill(self, *args, **kwargs):
     self.logger.info("ZeroMQ job bridge shutting down...")
     self.heartbeat.kill()
     Greenlet.kill(self, *args, **kwargs)
Example #36
0
class FakeClient(object):
    """
    A fake client with persistent connection.

    Driven by a dedicated greenlet, it keeps trying to work through the
    script's steps in order, round and round, until it is killed.
    """
    def __init__(self, swarm, server, script_):
        self._swarm = swarm
        self._server = server
        self._socket = None
        self._greenlet = Greenlet(self._run)

        self._status = INITIAL
        self._prev_status = None
        self._script = script_
        self._id = id(self)

    def _report(self, status):
        """
        Report to swarm immediately on status change
        """
        if status != self._status:
            self._swarm.commit(CommandSTATUS(self._id, status, self._status))
            self._status, self._prev_status = (status, self._status)

    def _reconnect_server(self):
        """
        Die trying
        """
        while True:
            try:
                # To scatter connect requests
                time.sleep(randint(1, 20))

                self._report(CONNECT)
                self._disconnect_server()
                self._socket = create_connection(self._server, 3)
                self._socket.setsockopt(SOL_SOCKET, SO_RCVBUF, 128)
                self._socket.setsockopt(SOL_SOCKET, SO_SNDBUF, 1024)

                break

            except socket.error as e:
                # A fact: `socket.timeout`, `socket.herror`, and
                # `socket.gaierror` are all subclasses of `socket.error`.
                self._report(e.args[0])
                continue

    def _disconnect_server(self):
        if self._socket:
            self._socket.close()
            self._socket = None

    def _run(self):
        try:
            self._report(STARTED)
            self._reconnect_server()

            while True:
                try:
                    self._report(ACTING)
                    script.execute(self, self._script)
                    self._report(STANDBY)

                except (socket.error, BenchException) as e:
                    self._report(e.args[0])
                    self._reconnect_server()

        except GreenletExit:
            self._report(KILLED)

        except:
            self._report(FATAL)
            # let gevent print this exception
            raise

        finally:
            self._disconnect_server()

    def start(self):
        self._greenlet.start()

    def stop(self):
        self._greenlet.kill()
        self._greenlet.join()

    def send_for_reply(self, data, reply_parser):
        """
        Called by object of Script.

        Exceptions raised here should be handled in `_run`.
        """
        self._socket.sendall(data)

        need = reply_parser(None)
        while need > 0:
            data = self._socket.recv(need)
            if not data:
                raise ServerClosed("server closed")

            need = reply_parser(data)

    def send_noreply(self, data):
        self._socket.sendall(data)

    def close_connection(self):
        raise CloseForcibly("client closed")
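
send_for_reply() above implies a small contract for reply_parser: called with None it returns how many bytes are still needed, and each call with received data returns the remaining count, with 0 meaning the reply is complete. A hedged sketch of a parser that honors that contract, fixed-length purely for illustration.

# Illustrative reply_parser obeying the contract implied by send_for_reply():
# parser(None) -> bytes still needed; parser(data) -> bytes still needed.
def make_fixed_length_parser(length):
    state = {"remaining": length, "chunks": []}

    def parser(data):
        if data is not None:
            state["chunks"].append(data)
            state["remaining"] -= len(data)
        return state["remaining"]

    return parser

parser = make_fixed_length_parser(5)
assert parser(None) == 5          # nothing received yet
assert parser(b"he") == 3
assert parser(b"llo") == 0        # reply complete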
Example #37
0
class MPMServer(RPCServer):
    """
    Main MPM RPC class which holds the periph_manager object and translates
    RPC calls to appropriate calls in the periph_manager and dboard_managers.
    """
    # This is a list of methods in this class which require a claim
    default_claimed_methods = [
        'init', 'update_component', 'reclaim', 'unclaim'
    ]

    ###########################################################################
    # RPC Server Initialization
    ###########################################################################
    def __init__(self, state, default_args):
        self.log = get_main_logger().getChild('RPCServer')
        self.log.trace("Launching RPC server with compat num %d.%d",
                       MPM_COMPAT_NUM[0], MPM_COMPAT_NUM[1])
        self._state = state
        self._timer = Greenlet()
        # Setting this to True will disable an unclaim on timeout. Use with
        # care, and make sure to set it to False again when finished.
        self._disable_timeouts = False
        self._timeout_interval = float(
            default_args.get("rpc_timeout_interval", TIMEOUT_INTERVAL))
        self.session_id = None
        # Create the periph_manager for this device
        # This call will be forwarded to the device specific implementation
        # e.g. in periph_manager/n3xx.py
        # Which implementation is called will be determined during
        # configuration with cmake (-DMPM_DEVICE).
        # mgr is thus derived from PeriphManagerBase
        # (see periph_manager/base.py)
        from usrp_mpm.periph_manager import periph_manager
        self._mgr_generator = lambda: periph_manager(default_args)
        self.periph_manager = self._mgr_generator()
        device_info = self.periph_manager.get_device_info()
        self._state.dev_type.value = \
                to_binary_str(device_info.get("type", "n/a"))
        self._state.dev_product.value = \
                to_binary_str(device_info.get("product", "n/a"))
        self._state.dev_serial.value = \
                to_binary_str(device_info.get("serial", "n/a"))
        self._db_methods = []
        self._mb_methods = []
        self.claimed_methods = copy.copy(self.default_claimed_methods)
        self._last_error = ""
        self._init_rpc_calls(self.periph_manager)
        # We call the server __init__ function here, and not earlier, because
        # first the commands need to be registered
        super(MPMServer, self).__init__(pack_params={'use_bin_type': True}, )
        self._state.system_ready.value = True
        self.log.info("RPC server ready!")
        # Optionally spawn watchdog. Note: In order for us to be able to spawn
        # the task from this thread, the main process needs to hand control to
        # us using watchdog.transfer_control().
        if watchdog.has_watchdog():
            self.log.info("Spawning watchdog task...")
            watchdog.spawn_watchdog_task(self._state, self.log)

    def _init_rpc_calls(self, mgr):
        """
        Register all RPC calls for the motherboard and daughterboards.

        First clears out all previously registered RPC calls.
        """
        # Clear old calls:
        for meth_list in (self._db_methods, self._mb_methods):
            for method in meth_list:
                if hasattr(self, method):
                    delattr(self, method)
                else:
                    self.log.warning(
                        "Attempted to remove non-existant method: %s", method)
        self._db_methods = []
        self._mb_methods = []
        # Register new ones:
        self._update_component_commands(mgr, '', '_mb_methods')
        for db_slot, dboard in enumerate(mgr.dboards):
            cmd_prefix = 'db_' + str(db_slot) + '_'
            self._update_component_commands(dboard, cmd_prefix, '_db_methods')
        self.log.debug(
            "Registered %d motherboard methods, %d daughterboard methods.",
            len(self._mb_methods),
            len(self._db_methods),
        )

    def _update_component_commands(self, component, namespace, storage):
        """
        Detect available methods for an object and add them to the RPC server.

        We skip all private methods, and all methods that use the @no_rpc
        decorator.
        """
        for method_name in (
                m for m in dir(component)
                if not m.startswith('_') \
                    and callable(getattr(component, m)) \
                    and not hasattr(self, m) \
                    and not getattr(getattr(component, m), '_norpc', False)
            ):
            new_rpc_method = getattr(component, method_name)
            command_name = namespace + method_name
            if getattr(new_rpc_method, '_notok', False):
                self._add_safe_command(new_rpc_method, command_name)
            else:
                self._add_claimed_command(new_rpc_method, command_name)
                self.claimed_methods.append(command_name)
            getattr(self, storage).append(command_name)

    def _add_claimed_command(self, function, command):
        """
        Adds a method with the name command to the RPC server
        This command will require an acquired claim on the device, and a valid
        token needs to be passed in for it to not fail.

        If the method does not require a token, use _add_safe_command().
        """
        self.log.trace("adding command %s pointing to %s", command, function)

        def new_claimed_function(token, *args):
            " Define a function that requires a claim token check "
            if not self._check_token_valid(token):
                self.log.warning(
                    "Thwarted attempt to access function `{}' with invalid " \
                    "token `{}'.".format(command, token)
                )
                raise RuntimeError("Invalid token!")
            try:
                # Because we can only reach this point with a valid claim,
                # there's no harm in resetting the timer
                self._reset_timer()
                return function(*args)
            except Exception as ex:
                self.log.error("Uncaught exception in method %s :%s \n %s ",
                               command, str(ex), traceback.format_exc())
                self._last_error = str(ex)
                raise
            finally:
                if not self._state.claim_status.value:
                    self.log.error("Lost claim during API call to `%s'!",
                                   command)

        new_claimed_function.__doc__ = function.__doc__
        setattr(self, command, new_claimed_function)

    def _add_safe_command(self, function, command):
        """
        Add a safe method which does not require a claim on the device.
        If the method should only be called by claimers, use
        _add_claimed_command().
        """
        self.log.trace("adding safe command %s pointing to %s", command,
                       function)

        def new_unclaimed_function(*args):
            " Define a function that does not require a claim token check "
            try:
                return function(*args)
            except Exception as ex:
                self.log.error("Uncaught exception in method %s :%s\n %s ",
                               command, str(ex), traceback.format_exc())
                self._last_error = str(ex)
                raise

        new_unclaimed_function.__doc__ = function.__doc__
        setattr(self, command, new_unclaimed_function)

    ###########################################################################
    # Diagnostics and introspection
    ###########################################################################
    def list_methods(self):
        """
        Returns a list of tuples: (method_name, docstring, is claim required)

        Every tuple represents one call that's available over RPC.
        """
        return [
            (
                method,
                getattr(self, method).__doc__,
                method in self.claimed_methods
            )
            for method in dir(self)
            if not method.startswith('_') \
                    and callable(getattr(self, method))
        ]

    def ping(self, data=None):
        """
        Take in data as argument and send it back
        This is a safe method which can be called without a claim on the device
        """
        self.log.debug("I was pinged from: %s:%s", self.client_host,
                       self.client_port)
        return data

    ###########################################################################
    # Claiming logic
    ###########################################################################
    def _check_token_valid(self, token):
        """
        Returns True iff:
        - The device is currently claimed
        - The claim token matches the one passed in
        """
        token = to_binary_str(token)
        return self._state.claim_status.value and \
                len(token) == TOKEN_LEN and \
                self._state.claim_token.value == token

    def claim(self, session_id):
        """Claim device

        Tries to claim MPM device and provides a human readable session_id.
        The caller must remember this token, and call reclaim() on regular
        intervals in order not to lose the claim.

        Will return a token on success, or raise an Exception on failure.
        """
        self._state.lock.acquire()
        if self._state.claim_status.value:
            error_msg = \
                "Someone tried to claim this device again (From: {})".format(
                    self.client_host)
            self.log.warning(error_msg)
            self._last_error = error_msg
            self._state.lock.release()
            raise RuntimeError("Double-claim")
        self.log.debug("Claiming from: %s, Session ID: %s", self.client_host,
                       session_id)
        self._state.claim_token.value = bytes(
            ''.join(choice(ascii_letters + digits) for _ in range(TOKEN_LEN)),
            'ascii')
        self._state.claim_status.value = True
        self.periph_manager.claimed = True
        self.periph_manager.claim()
        if self.periph_manager.clear_rpc_registry_on_unclaim:
            self._init_rpc_calls(self.periph_manager)
        self._state.lock.release()
        self.session_id = session_id + " ({})".format(self.client_host)
        self._reset_timer()
        self.log.debug("giving token: %s to host: %s",
                       self._state.claim_token.value, self.client_host)
        if self.client_host in net.get_local_ip_addrs():
            self.periph_manager.set_connection_type("local")
        else:
            self.periph_manager.set_connection_type("remote")
        return self._state.claim_token.value

    def reclaim(self, token):
        """
        Reclaim a MPM device with a token. This operation will fail if the
        device is claimed and the token doesn't match, or if the device is not
        claimed at all.
        """
        if self._state.claim_status.value:
            self._state.lock.acquire()
            if self._check_token_valid(token):
                self._state.lock.release()
                self.log.debug("reclaimed from: %s", self.client_host)
                self._reset_timer()
                return True
            self._state.lock.release()
            self.log.debug("reclaim failed from: %s  Invalid token: %s",
                           self.client_host, token[:TOKEN_LEN])
            return False
        self.log.debug("trying to reclaim unclaimed device from: %s",
                       self.client_host)
        return False

    def _unclaim(self):
        """
        Unconditional unclaim - for internal use

        Resets and deinitializes the periph manager as well.
        """
        self.log.debug("Releasing claim on session `{}'".format(
            self.session_id))
        self._state.claim_status.value = False
        self._state.claim_token.value = b''
        self.session_id = None
        if self.periph_manager.clear_rpc_registry_on_unclaim:
            self.clear_method_registry()
        try:
            self.periph_manager.claimed = False
            self.periph_manager.unclaim()
            self.periph_manager.set_connection_type(None)
            self.periph_manager.deinit()
        except BaseException as ex:
            self._last_error = str(ex)
            self.log.error("deinit() failed: %s", str(ex))
            # Don't want to propagate this failure -- the session is over
        self._timer.kill()

    def unclaim(self, token):
        """
        unclaim `token` - unclaims the MPM device if it is claimed with this
        token
        """
        if self._check_token_valid(token):
            self._unclaim()
            return True
        self.log.warning("Attempt to unclaim session with invalid token!")
        return False

    def _timeout_event(self):
        " Callback for the claim timeout. "
        if self._disable_timeouts:
            self.log.debug("Timeouts are disabled: Snoozing")
            self._reset_timer()
        else:
            self.log.warning("A timeout event occured!")
            self._unclaim()

    def _reset_timer(self):
        """
        Reset unclaim timer. After calling this, call this function again
        within 'timeout' seconds to avoid a timeout event.
        """
        self._timer.kill()
        self._timer = spawn_later(self._timeout_interval, self._timeout_event)

    ###########################################################################
    # Status queries
    ###########################################################################
    def get_mpm_compat_num(self):
        """Get the MPM compatibility number"""
        return MPM_COMPAT_NUM

    def get_device_info(self):
        """
        get device information
        This is a safe method which can be called without a claim on the device
        """
        info = self.periph_manager.get_device_info()
        info["mpm_version"] = "{}.{}".format(*MPM_COMPAT_NUM)
        if self.client_host in net.get_local_ip_addrs():
            info["connection"] = "local"
        else:
            info["connection"] = "remote"
        return info

    def get_last_error(self):
        """
        Return the 'last error' string, which gets set when RPC calls fail.
        """
        return self._last_error

    def get_log_buf(self, token):
        """
        Return the contents of the log buffer as a list of str -> str
        dictionaries.
        """
        if not self._check_token_valid(token):
            self.log.warning(
                "Attempt to read logs without valid claim from {}".format(
                    self.client_host))
            err_msg = "get_log_buf() called without valid claim."
            self._last_error = err_msg
            raise RuntimeError(err_msg)
        log_records = get_main_logger().get_log_buf()
        self.log.trace("Returning %d log records.", len(log_records))
        return [{k: str(v)
                 for k, v in iteritems(record)} for record in log_records]

    ###########################################################################
    # Session initialization
    ###########################################################################
    def init(self, token, args):
        """
        Initialize device. See PeriphManagerBase for details. This is forwarded
        from here to give extra control over the claim release timeout.
        """
        if not self._check_token_valid(token):
            self.log.warning(
                "Attempt to init without valid claim from {}".format(
                    self.client_host))
            self._last_error = "init() called without valid claim."
            raise RuntimeError("init() called without valid claim.")
        result = None  # default so the finally-clause log below can't hit an unbound name
        try:
            result = self.periph_manager.init(args)
        except Exception as ex:
            self._last_error = str(ex)
            self.log.error("init() failed with error: %s", str(ex))
        finally:
            self.log.debug("init() result: {}".format(result))
        return result

    ###########################################################################
    # Update components
    ###########################################################################
    def clear_method_registry(self):
        """
        Clear all the methods in the RPC server method cache.
        """
        # RPCServer caches RPC methods, but that cache is not accessible here
        # (because Cython). Re-running `RPCServer.__init__` clears that cache,
        # and allows us to register new RPC methods.
        # A note on maintenance: This has been deemed safe through inspection of
        # the RPCServer source code. However, this is not typical Python, and
        # changes in future versions of RPCServer may cause issues.
        super(MPMServer, self).__init__(pack_params={'use_bin_type': True}, )

    def reset_mgr(self):
        """
        Reset the Peripheral Manager for this RPC server.
        """
        self.log.info("Resetting peripheral manager.")
        self.periph_manager.tear_down()
        self.periph_manager = None
        self.periph_manager = self._mgr_generator()
        self._init_rpc_calls(self.periph_manager)
        # Clear the method cache in order to remove stale references to
        # methods from the old peripheral manager (the one before reset)
        self.clear_method_registry()

    def update_component(self, token, file_metadata_l, data_l):
        """"
        Updates the device component files specified by the metadata and data
        :param file_metadata_l: List of dictionary of strings containing metadata
        :param data_l: List of binary string with the file contents to be written
        """
        # Stop the timer, update_component can take some time:
        self._disable_timeouts = True
        # Check the claimed status
        if not self._check_token_valid(token):
            self._last_error =\
                "Attempt to update component without valid claim from {}".format(
                    self.client_host
                )
            self.log.error(self._last_error)
            raise RuntimeError(
                "Attempt to update component without valid claim.")
        result = self.periph_manager.update_component(file_metadata_l, data_l)
        if not result:
            component_ids = [metadata['id'] for metadata in file_metadata_l]
            raise RuntimeError(
                "Failed to update components: {}".format(component_ids))

        # Check if we need to reset the peripheral manager
        reset_now = False
        for metadata, data in zip(file_metadata_l, data_l):
            # Make sure the component is in the updateable_components
            component_id = metadata['id']
            if component_id in self.periph_manager.updateable_components:
                # Check if updating that component means the PM should be reset
                if self.periph_manager.updateable_components[component_id][
                        'reset']:
                    reset_now = True
            else:
                self.log.debug(
                    "ID {} not in updateable components ({})".format(
                        component_id,
                        self.periph_manager.updateable_components))

        try:
            self.log.trace(
                "Reset after updating component? {}".format(reset_now))
            if reset_now:
                self.reset_mgr()
                self.log.debug("Reset the periph manager")
        except Exception as ex:
            self.log.error(
                "Error in update_component while resetting: {}".format(ex))
            self._last_error = str(ex)

        # Re-enable timeouts before we reset the timer, so the MPM session can
        # timeout if something goes wrong
        self._disable_timeouts = False
        self.log.debug("End of update_component")
        self._reset_timer()
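
The claim timeout above is a resettable timer built on spawn_later(): every reclaim kills the pending timer greenlet and arms a new one. A self-contained sketch of that kill-and-respawn pattern; the ResettableTimeout name and the short interval are illustrative.

# Self-contained sketch of the kill-and-respawn timer behind _reset_timer().
# The ResettableTimeout name and the 0.2s interval are illustrative only.
import gevent
from gevent import Greenlet, spawn_later

class ResettableTimeout(object):
    def __init__(self, interval, callback):
        self._interval = interval
        self._callback = callback
        self._timer = Greenlet()            # placeholder, never started

    def reset(self):
        self._timer.kill()                  # cancel any pending timeout
        self._timer = spawn_later(self._interval, self._callback)

timeout = ResettableTimeout(0.2, lambda: print("timed out"))
timeout.reset()
gevent.sleep(0.1)
timeout.reset()                             # keep-alive pushes the timeout back
gevent.sleep(0.3)                           # now the callback fires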
Example #38
0
class ScheduledRun(j.baseclasses.object):
    def _init(self, name, timeout=1200):
        self.name = name
        self.jobs = {}
        self.mainloop = Greenlet(self._mainloop)
        self.mainloop.start()
        self.timeout = timeout
        self.error = ""
        self.sleep_time = 0.1
        self.nr_scheduled = 0  # last nr as used for scheduling
        self.nr_done = 0  # nr of last job successfully executed
        self.events = {}

    def _mainloop(self):
        while True:
            # needs to be done carefully so we never kill the greenlet
            self.check()
            gevent.sleep(self.sleep_time)

    # def greenlet_add(self, name, method, *args, **kwargs):
    #     if name in self.greenlets:
    #         raise j.exceptions.BASE("cannot add greenlet: %s already exists" % name)
    #     g = Greenlet(method, *args, **kwargs)
    #     self.greenlets[name] = g
    #     self.greenlets[name].start()

    def schedule(self,
                 name,
                 method,
                 period=0,
                 time_start=0,
                 timeout=0,
                 event=None,
                 retry=None,
                 **kwargs):
        """

        :param self:
        :param name:
        :param method: method to execute
        :param event: nr of scheduled jobs that must be done before this one runs (waitgroup-like, as in Go)
        :param period: recurring period in seconds, 0 means only run once
        :param time_start: start time as epoch; if <100000, it is added to the current epoch
        :param timeout: in seconds after start
        :param args:
        :param kwargs:
        :return:
        """
        if time_start < 100000 and time_start != 0:
            time_start = j.data.time.epoch + time_start

        nr = 1
        name_ = name
        while name_ in self.jobs:
            nr += 1
            name_ = name + str(nr)

        name = name_
        self.nr_scheduled += 1
        sj = ScheduledJob(
            name=name,
            method=method,
            args=[],
            kwargs=kwargs,
            period=period,
            time_start=time_start,
            timeout=timeout,
            nr=self.nr_scheduled,
            event=event,
            retry=retry,
        )
        self.jobs[name] = sj

    def check(self):
        now = j.data.time.epoch
        keys = [key for key in self.jobs.keys()]
        for key in keys:
            if key in self.jobs:
                gs = self.jobs[key]
                if gs.event:
                    if self.nr_done < gs.event:
                        # means we cannot start yet
                        continue
                gs.check()
                if gs.done:
                    self.nr_done += 1

    def event_get(self, name):
        self.events[name] = self.nr_scheduled
        return self.events[name]

    def stop(self):
        self.mainloop.kill()
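
The mainloop above is the usual "poll forever inside a Greenlet, kill it to stop" arrangement. A self-contained sketch of just that skeleton, without the j.* scheduling details; the Poller class and its timings are illustrative.

# Minimal "mainloop in a Greenlet" skeleton, as used by ScheduledRun above.
# The Poller class, tick counter, and sleep intervals are illustrative only.
import gevent
from gevent import Greenlet

class Poller(object):
    def __init__(self):
        self.ticks = 0
        self.mainloop = Greenlet(self._mainloop)
        self.mainloop.start()

    def _mainloop(self):
        while True:
            self.ticks += 1        # stand-in for self.check()
            gevent.sleep(0.05)

    def stop(self):
        self.mainloop.kill()

poller = Poller()
gevent.sleep(0.2)
poller.stop()
print("ticks:", poller.ticks)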
Example #39
0
class Runnable:
    """Greenlet-like class, __run() inside one, but can be stopped and restarted

    Allows subtasks to crash self, and bubble up the exception in the greenlet
    In the future, when proper restart is implemented, it may be replaced by an actual greenlet
    """
    greenlet: Greenlet = None
    args: Sequence = tuple()  # args for _run()
    kwargs: dict = dict()  # kwargs for _run()

    def __init__(self, run=None, *args, **kwargs):
        if run is not None:
            self._run = run
        self.args = args
        self.kwargs = kwargs

        self.greenlet = Greenlet(self._run, *self.args, **self.kwargs)
        self.greenlet.name = f'{self.__class__.__name__}|{self.greenlet.name}'

    def start(self):
        """ Synchronously start task

        Reimplement in children and call super().start() at the end to start _run()
        Start-time exceptions may be raised
        """
        if self.greenlet:
            raise RuntimeError(f'Greenlet {self.greenlet!r} already started')
        pristine = (
            not self.greenlet.dead and
            tuple(self.greenlet.args) == tuple(self.args) and
            self.greenlet.kwargs == self.kwargs
        )
        if not pristine:
            self.greenlet = Greenlet(self._run, *self.args, **self.kwargs)
            self.greenlet.name = f'{self.__class__.__name__}|{self.greenlet.name}'
        self.greenlet.start()

    def _run(self, *args, **kwargs):
        """ Reimplements in children to busy wait here

        This busy wait should be finished gracefully after stop(),
        or be killed and re-raise on subtask exceptions """
        raise NotImplementedError

    def stop(self):
        """ Synchronous stop, gracefully tells _run() to exit

        Should wait for subtasks to finish.
        Stop-time exceptions may be raised, run exceptions should not (accessible via get())
        """
        raise NotImplementedError

    def on_error(self, subtask: Greenlet):
        """ Default callback for substasks link_exception

        Default callback re-raises the exception inside _run() """
        log.error(
            'Runnable subtask died!',
            this=self,
            running=bool(self),
            subtask=subtask,
            exc=subtask.exception,
        )
        if not self.greenlet:
            return
        self.greenlet.kill(subtask.exception)

    # redirect missing members to underlying greenlet for compatibility
    # but better use greenlet directly for now, to make use of the c extension optimizations
    def __getattribute__(self, name):
        try:
            return super().__getattribute__(name)
        except AttributeError as ex:
            try:
                return getattr(self.greenlet, name)
            except AttributeError:
                raise ex from None

    def __bool__(self):
        return bool(self.greenlet)
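
The wrapper above keeps the target callable and its arguments so a dead greenlet can be replaced by a fresh one on restart. A much smaller, self-contained sketch of that restart idea; the Restartable name and the sleep target are illustrative and unrelated to the class above.

# Self-contained sketch of the restart-with-a-fresh-greenlet idea behind
# Runnable above. The Restartable name and the sleep target are illustrative.
import gevent
from gevent import Greenlet

class Restartable(object):
    def __init__(self, func, *args, **kwargs):
        self._func, self._args, self._kwargs = func, args, kwargs
        self.greenlet = Greenlet(func, *args, **kwargs)

    def start(self):
        if self.greenlet.dead:             # rebuild after a previous run
            self.greenlet = Greenlet(self._func, *self._args, **self._kwargs)
        self.greenlet.start()

    def stop(self):
        self.greenlet.kill()

r = Restartable(gevent.sleep, 10)
r.start()
gevent.sleep(0.05)
r.stop()          # kills the running greenlet
r.start()         # builds and starts a fresh one
r.stop()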
Example #40
0
class ConsoleAdapter(object):
    """
        This adapter will run Jeev in console mode, listening to stdin for messages,
        and writing outgoing messages to stdout.
    """
    def __init__(self, jeev, opts):
        self._jeev = jeev
        self._opts = opts
        self._stdin = None
        self._stdout = None
        self._reader = None
        self._channel = opts.get('console_channel', 'console')
        self._user = opts.get('console_user', 'user')

    def _read_stdin(self):
        self._stdout.write(">>> Jeev Console Adapater\n")
        self._stdout.write(">>> Switch channel using \c channel_name\n")
        self._stdout.write(">>> Switch user using \u user_name\n")
        self._stdout.flush()
        
        while True:
            self._stdout.write('[%s@%s] > ' % (self._user, self._channel))
            self._stdout.flush()

            line = self._stdin.readline()
            if not line:
                break

            if line.startswith('\c'):
                self._channel = line[2:].strip().lstrip('#')
                self._stdout.write("Switched channel to #%s\n" % self._channel)
                self._stdout.flush()

            elif line.startswith('\u'):
                self._user = line[2:].strip()
                self._stdout.write("Switched user %s\n" % self._user)
                self._stdout.flush()

            else:
                message = Message({}, self._channel, self._user, line.strip())
                self._jeev._handle_message(message)

    def start(self):
        self._reader = Greenlet(self._read_stdin)
        self._stdin = FileObject(sys.stdin)
        self._stdout = FileObject(sys.stdout)
        self._reader.start()

    def stop(self):
        self._reader.kill()
        self._reader = None

    def join(self):
        self._reader.join()

    def send_message(self, channel, message):
        self._stdout.write('\r< [#%s] %s\n' % (channel, message))
        self._stdout.write('[%s@%s] > ' % (self._user, self._channel))
        self._stdout.flush()

    def send_messages(self, channel, *messages):
        for message in messages:
            self.send_message(channel, message)
Example #41
0
 def stop(self):
     self.state = States.Stopped
     self.running = False
     Greenlet.kill(self)
Example #42
0
class Controller(object):
    """Manages a program's main loop in a Greenlet."""
    def __init__(self, program_id=DEFAULT_PROGRAM):
        """Creates a controller to control the specified program. The program
        doesn't start executing until the start method is called."""
        self.messages = Queue()
        self.program_id = program_id
        self.program = PROGRAMS[program_id]()
        self.green = None
        self.can_reset = False

    def start(self):
        """Starts (or resumes) the execution of the program."""
        self.green = Greenlet(self.main_loop)
        self.green.start_later(START_DELAY)
        self.program.start()
        self.can_reset = True

    def stop(self):
        """Stops the execution of the program."""
        self.program.stop()
        if self.green:
            self.green.kill()

    def reset(self):
        """Stops the program and resets it to its initial state."""
        self.stop()
        self.program.reset()
        self.can_reset = False

    def switch_program(self, program_id):
        """Stops execution and switches to a new program."""
        self.stop()
        self.program_id = program_id
        self.program = PROGRAMS[program_id]()
        self.can_reset = False

    def main_loop(self):
        """Runs the program's loop method continously, collecting any returned
        messages into the messages queue."""
        while True:
            msg = self.program.loop()
            if msg:
                self.messages.put(msg)
            sleep(LOOP_DELAY)

    def __call__(self, command):
        """Accepts a command and either performs the desired action or passes
        the message on to the program. Returns a status message."""
        if command == 'short:sync':
            pid = self.program_id
            running = bool(self.green)
            can_reset = self.can_reset
            return "{} {} {}".format(pid, running, can_reset)
        if command == 'short:param-help':
            return json.dumps(self.program.codes)
        if command == 'long:status':
            try:
                msg = self.messages.get(timeout=STATUS_POLL_TIMEOUT)
            except Empty:
                return None
            return msg
        if command.startswith(PROGRAM_PREFIX):
            prog = command[len(PROGRAM_PREFIX):]
            self.switch_program(prog)
            return "switched to {}".format(prog)
        if command == 'control:start':
            reason = self.program.no_start()
            if reason:
                return reason
            if self.green:
                return "already running"
            self.start()
            return "program resumed"
        if command == 'control:stop':
            if not self.green:
                return "not running"
            self.stop()
            return "program paused"
        if command == 'control:reset':
            self.reset()
            return "program reset"
        return self.program(command)
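
Unlike the earlier Controller, this variant schedules its main loop with start_later(), so the greenlet exists immediately but only begins running after a delay. A tiny sketch of that behavior; the printed message and the delays are illustrative.

# Tiny sketch of Greenlet.start_later(): created now, scheduled to run later.
# The printed message and the delays are illustrative only.
import gevent
from gevent import Greenlet

g = Greenlet(print, "running")
g.start_later(0.2)            # queued, not yet running
print("scheduled")
gevent.sleep(0.3)             # "running" is printed during this sleep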