Example #1
    def __stop(self):
        """ try to stop all of this Role's services """

        # send commands
        poller = Poller()
        for (pipe, svc) in self.__services.items():
            pipe.send_string('STOP')
            self.logger.debug('sent STOP command to %s service' % svc)
            poller.register(pipe, POLLIN)

        # give services a few seconds to cleanup and exit before checking responses
        sleep(1)

        max_attempts = len(self.__services)
        attempts = 0

        while self.__some_alive() and attempts < max_attempts:
            attempts += 1

            # poll for any replies
            items = dict(poller.poll(60000))  # wait for messages

            # mark responding services as stopped
            alive = dict(self.__services)  # make copy
            for (pipe, svc) in alive.items():
                if pipe in items:
                    reply = pipe.recv_string()
                    if 'STOPPED' == reply:
                        self.logger.debug('received STOPPED control reply from %s service' % svc)
                        svc.join(timeout=5)  # STOPPED response should be sent right before svc exit
                        if svc.is_alive():
                            self.logger.error('%s service is still alive; not waiting' % svc)
                        else:
                            self.logger.debug('%s service thread stopped' % svc)
                        poller.unregister(pipe)
                        pipe.close()
                        del self.__services[pipe]
                    else:
                        self.logger.debug('unknown control reply: %s' % reply)

            # log some useful info
            if len(self.__services) > 0:
                msg = '%s services still alive after %d cycles; ' % (
                    [str(s) for s in self.__services.values()], attempts)
                if attempts < max_attempts:
                    msg += 'waiting'
                else:
                    msg += 'giving up'
                self.logger.debug(msg)
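The method above assumes that self.__services maps a zmq control "pipe" to the service thread it controls. A minimal sketch of how such a pair might be built, assuming one inproc PAIR socket per service and the STOP/STOPPED protocol shown above (the helper name and address scheme are illustrative, not from the original code):

import threading
import zmq

def make_service(ctx, name):
    # One end of an inproc PAIR socket stays with the Role as the control
    # "pipe"; the other end lives inside the service thread.
    addr = 'inproc://%s-control' % name
    pipe = ctx.socket(zmq.PAIR)
    pipe.bind(addr)

    def service_loop():
        peer = ctx.socket(zmq.PAIR)
        peer.connect(addr)
        while True:
            if peer.recv_string() == 'STOP':
                peer.send_string('STOPPED')  # reply right before exiting
                break
        peer.close()

    svc = threading.Thread(target=service_loop, name=name)
    svc.start()
    return pipe, svc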
Example #2
class ZMQPoller(object):
    """A poller that can be used in the tornado IOLoop.
    
    This simply wraps a regular zmq.Poller, scaling the timeout
    by 1000, so that it is in seconds rather than milliseconds.
    """
    
    def __init__(self):
        self._poller = Poller()
    
    @staticmethod
    def _map_events(events):
        """translate IOLoop.READ/WRITE/ERROR event masks into zmq.POLLIN/OUT/ERR"""
        z_events = 0
        if events & IOLoop.READ:
            z_events |= POLLIN
        if events & IOLoop.WRITE:
            z_events |= POLLOUT
        if events & IOLoop.ERROR:
            z_events |= POLLERR
        return z_events
    
    @staticmethod
    def _remap_events(z_events):
        """translate zmq.POLLIN/OUT/ERR event masks into IOLoop.READ/WRITE/ERROR"""
        events = 0
        if z_events & POLLIN:
            events |= IOLoop.READ
        if z_events & POLLOUT:
            events |= IOLoop.WRITE
        if z_events & POLLERR:
            events |= IOLoop.ERROR
        return events
    
    def register(self, fd, events):
        return self._poller.register(fd, self._map_events(events))
    
    def modify(self, fd, events):
        return self._poller.modify(fd, self._map_events(events))
    
    def unregister(self, fd):
        return self._poller.unregister(fd)
    
    def poll(self, timeout):
        """poll in seconds rather than milliseconds.
        
        Event masks will be IOLoop.READ/WRITE/ERROR
        """
        z_events = self._poller.poll(1000 * timeout)
        return [(fd, self._remap_events(evt)) for (fd, evt) in z_events]
    
    def close(self):
        pass
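A hedged usage sketch of the wrapper: the caller registers sockets with IOLoop event masks and passes timeouts in seconds; the socket type and address below are examples only.

import zmq
from tornado.ioloop import IOLoop

ctx = zmq.Context.instance()
sock = ctx.socket(zmq.PULL)
sock.bind('tcp://127.0.0.1:5555')

poller = ZMQPoller()
poller.register(sock, IOLoop.READ)
for sock_, events in poller.poll(1.0):  # 1 second, not 1000 milliseconds
    if events & IOLoop.READ:
        print(sock_.recv())
poller.unregister(sock)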
Example #4
class PushRequester(object):
    """Base requester class.
    """

    request_retries = 3

    def __init__(self, host, port):
        self._socket = None
        self._reqaddress = "tcp://" + host + ":" + str(port)
        self._poller = Poller()
        self._lock = Lock()
        self.failures = 0
        self.jammed = False
        self.running = True

        self.connect()

    def connect(self):
        """Connect to the server
        """
        self._socket = context.socket(REQ)
        self._socket.connect(self._reqaddress)
        self._poller.register(self._socket, POLLIN)

    def stop(self):
        """Close the connection to the server
        """
        self.running = False
        self._socket.setsockopt(LINGER, 0)
        self._socket.close()
        self._poller.unregister(self._socket)

    def reset_connection(self):
        """Reset the socket
        """
        self.stop()
        self.connect()

    def __del__(self, *args, **kwargs):
        self.stop()

    def send_and_recv(self, msg, timeout=DEFAULT_REQ_TIMEOUT):
        """Send *msg* and wait for the reply, retrying on timeout."""
        with self._lock:
            retries_left = self.request_retries
            request = str(msg)
            self._socket.send(request)
            rep = None
            small_timeout = 0.1  # note: Poller.poll() takes milliseconds, so this is a near-instant check
            while retries_left and self.running:
                now = time.time()
                while time.time() < now + timeout:
                    if not self.running:
                        return rep
                    socks = dict(self._poller.poll(small_timeout))
                    if socks.get(self._socket) == POLLIN:
                        reply = self._socket.recv()
                        if not reply:
                            LOGGER.error("Empty reply!")
                            break
                        try:
                            rep = Message(rawstr=reply)
                        except MessageError as err:
                            LOGGER.error('Message error: %s', str(err))
                            break
                        LOGGER.debug("Receiving (REQ) %s", str(rep))
                        self.failures = 0
                        self.jammed = False
                        return rep
                    # During big file transfers, give some time to a friend.
                    time.sleep(0.1)

                LOGGER.warning("Timeout from " + str(self._reqaddress) +
                               ", retrying...")
                # Socket is confused. Close and remove it.
                self.stop()
                retries_left -= 1
                if retries_left <= 0:
                    LOGGER.error("Server doesn't answer, abandoning... " +
                                 str(self._reqaddress))
                    self.connect()
                    self.failures += 1
                    if self.failures == 5:
                        LOGGER.critical("Server jammed ? %s", self._reqaddress)
                        self.jammed = True
                    break
                LOGGER.info("Reconnecting and resending " + str(msg))
                # Create new connection
                self.connect()
                self._socket.send(request)

        return rep
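A small usage sketch for the requester above, assuming the surrounding module provides context, Message, LOGGER and DEFAULT_REQ_TIMEOUT. The host, port and string payload are illustrative; in practice the module passes posttroll Message objects.

requester = PushRequester('localhost', 9010)
reply = requester.send_and_recv('ping', timeout=5)
if reply is None:
    print('no reply after %d retries' % PushRequester.request_retries)
else:
    print('got reply: %s' % str(reply))
requester.stop()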
Example #5
class ZmqSelector(BaseSelector):
    """A selector that can be used with asyncio's selector base event loops."""
    def __init__(self):
        # this maps file descriptors to keys
        self._fd_to_key = {}
        # read-only mapping returned by get_map()
        self._map = _SelectorMapping(self)
        self._poller = ZMQPoller()

    def _fileobj_lookup(self, fileobj):
        """Return a file descriptor from a file object.

        This wraps _fileobj_to_fd() to do an exhaustive search in case
        the object is invalid but we still have it in our map.  This
        is used by unregister() so we can unregister an object that
        was previously registered even if it is closed.  It is also
        used by _SelectorMapping.
        """
        try:
            return _fileobj_to_fd(fileobj)
        except ValueError:
            # Do an exhaustive search.
            for key in self._fd_to_key.values():
                if key.fileobj is fileobj:
                    return key.fd
            # Raise ValueError after all.
            raise

    def register(self, fileobj, events, data=None):
        if (not events) or (events & ~(EVENT_READ | EVENT_WRITE)):
            raise ValueError("Invalid events: {!r}".format(events))

        key = SelectorKey(fileobj, self._fileobj_lookup(fileobj), events, data)

        if key.fd in self._fd_to_key:
            raise KeyError("{!r} (FD {}) is already registered".format(
                fileobj, key.fd))

        z_events = 0
        if events & EVENT_READ:
            z_events |= POLLIN
        if events & EVENT_WRITE:
            z_events |= POLLOUT
        try:
            self._poller.register(key.fd, z_events)
        except ZMQError as exc:
            raise OSError(exc.errno, exc.strerror) from exc

        self._fd_to_key[key.fd] = key
        return key

    def unregister(self, fileobj):
        try:
            key = self._fd_to_key.pop(self._fileobj_lookup(fileobj))
        except KeyError:
            raise KeyError("{!r} is not registered".format(fileobj)) from None
        try:
            self._poller.unregister(key.fd)
        except ZMQError as exc:
            self._fd_to_key[key.fd] = key
            raise OSError(exc.errno, exc.strerror) from exc
        return key

    def modify(self, fileobj, events, data=None):
        try:
            fd = self._fileobj_lookup(fileobj)
            key = self._fd_to_key[fd]
        except KeyError:
            raise KeyError("{!r} is not registered".format(fileobj)) from None
        if data == key.data and events == key.events:
            return key
        if events != key.events:
            z_events = 0
            if events & EVENT_READ:
                z_events |= POLLIN
            if events & EVENT_WRITE:
                z_events |= POLLOUT
            try:
                self._poller.modify(fd, z_events)
            except ZMQError as exc:
                raise OSError(exc.errno, exc.strerror) from exc

        key = key._replace(data=data, events=events)
        self._fd_to_key[key.fd] = key
        return key

    def close(self):
        self._fd_to_key.clear()
        self._poller = None

    def get_map(self):
        return self._map

    def _key_from_fd(self, fd):
        """Return the key associated to a given file descriptor.

        Parameters:
        fd -- file descriptor

        Returns:
        corresponding key, or None if not found
        """
        try:
            return self._fd_to_key[fd]
        except KeyError:
            return None

    def select(self, timeout=None):
        if timeout is None:
            timeout = None
        elif timeout <= 0:
            timeout = 0
        else:
            # poll() has a resolution of 1 millisecond, round away from
            # zero to wait *at least* timeout seconds.
            timeout = math.ceil(timeout * 1e3)

        ready = []
        try:
            z_events = self._poller.poll(timeout)
        except ZMQError as exc:
            if exc.errno == EINTR:
                return ready
            else:
                raise OSError(exc.errno, exc.strerror) from exc

        for fd, evt in z_events:
            events = 0
            if evt & POLLIN:
                events |= EVENT_READ
            if evt & POLLOUT:
                events |= EVENT_WRITE
            if evt & POLLERR:
                events = EVENT_READ | EVENT_WRITE

            key = self._key_from_fd(fd)
            if key:
                ready.append((key, events & key.events))

        return ready
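One possible way to use the selector, sketched under the assumption that a selector-based asyncio event loop is wanted (this is not necessarily how the original project wires it up):

import asyncio

selector = ZmqSelector()
loop = asyncio.SelectorEventLoop(selector)
asyncio.set_event_loop(loop)
try:
    # any coroutine scheduled on this loop now polls through ZmqSelector
    loop.run_until_complete(asyncio.sleep(0.1))
finally:
    loop.close()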
Example #6
class Subscriber(object):

    """Subscriber

    Subscribes to *addresses* for *topics*, and performs address translation if
    *translate* is true. The function *message_filter* can be used to
    discriminate some messages on the subscriber side. *topics* on the other
    hand performs filtering on the publishing side (from zeromq 3).

    Example::

        from posttroll.subscriber import Subscriber, get_pub_address

        addr = get_pub_address(service, timeout=2)
        sub = Subscriber([addr], 'my_topic')
        try:
            for msg in sub(timeout=2):
                print("Consumer got", msg)

        except KeyboardInterrupt:
            print("terminating consumer...")
            sub.close()

    """

    def __init__(self, addresses, topics='', message_filter=None,
                 translate=False):
        self._topics = self._magickfy_topics(topics)
        self._filter = message_filter
        self._translate = translate

        self.sub_addr = {}
        self.addr_sub = {}
        self.poller = None

        self._hooks = []
        self._hooks_cb = {}

        self.poller = Poller()
        self._lock = Lock()

        self.update(addresses)

        self._loop = True

    def add(self, address, topics=None):
        """Add *address* to the subscribing list for *topics*.

        If *topics* is None, we will subscribe to the already specified topics.
        """
        with self._lock:
            if address in self.addresses:
                return False

            topics = self._magickfy_topics(topics) or self._topics
            LOGGER.info("Subscriber adding address %s with topics %s",
                        str(address), str(topics))
            subscriber = get_context().socket(SUB)
            for t__ in topics:
                subscriber.setsockopt_string(SUBSCRIBE, six.text_type(t__))
            subscriber.connect(address)
            self.sub_addr[subscriber] = address
            self.addr_sub[address] = subscriber
            if self.poller:
                self.poller.register(subscriber, POLLIN)
            return True

    def remove(self, address):
        """Remove *address* from the subscribing list for *topics*.
        """
        with self._lock:
            try:
                subscriber = self.addr_sub[address]
            except KeyError:
                return False
            LOGGER.info("Subscriber removing address %s", str(address))
            if self.poller:
                self.poller.unregister(subscriber)
            del self.addr_sub[address]
            del self.sub_addr[subscriber]
            subscriber.close()
            return True

    def update(self, addresses):
        """Updating with a set of addresses.
        """
        if isinstance(addresses, six.string_types):
            addresses = [addresses, ]
        s0_, s1_ = set(self.addresses), set(addresses)
        sr_, sa_ = s0_.difference(s1_), s1_.difference(s0_)
        for a__ in sr_:
            self.remove(a__)
        for a__ in sa_:
            self.add(a__)
        return bool(sr_ or sa_)

    def add_hook_sub(self, address, topics, callback):
        """Specify a *callback* in the same stream (thread) as the main receive
        loop. The callback will be called with the received messages from the
        specified subscription.

        Good for operations that are required to be done in the same thread as
        the main receive loop (e.g. operations on the underlying sockets).
        """
        LOGGER.info("Subscriber adding SUB hook %s for topics %s",
                    str(address), str(topics))
        socket = get_context().socket(SUB)
        for t__ in self._magickfy_topics(topics):
            socket.setsockopt_string(SUBSCRIBE, six.text_type(t__))
        socket.connect(address)
        self._add_hook(socket, callback)

    def add_hook_pull(self, address, callback):
        """Same as above, but with a PULL socket.
        (e.g. good for pushed 'inproc' messages from another thread).
        """
        LOGGER.info("Subscriber adding PULL hook %s", str(address))
        socket = get_context().socket(PULL)
        socket.connect(address)
        self._add_hook(socket, callback)

    def _add_hook(self, socket, callback):
        """Generic hook. The passed socket has to be "receive only".
        """
        self._hooks.append(socket)
        self._hooks_cb[socket] = callback
        if self.poller:
            self.poller.register(socket, POLLIN)

    @property
    def addresses(self):
        """Get the addresses
        """
        return self.sub_addr.values()

    @property
    def subscribers(self):
        """Get the subscribers
        """
        return self.sub_addr.keys()

    def recv(self, timeout=None):
        """Receive, optionally with *timeout* in seconds.
        """
        if timeout:
            timeout *= 1000.

        for sub in list(self.subscribers) + self._hooks:
            self.poller.register(sub, POLLIN)
        self._loop = True
        try:
            while self._loop:
                sleep(0)
                try:
                    socks = dict(self.poller.poll(timeout=timeout))
                    if socks:
                        for sub in self.subscribers:
                            if sub in socks and socks[sub] == POLLIN:
                                m__ = Message.decode(sub.recv_string(NOBLOCK))
                                if not self._filter or self._filter(m__):
                                    if self._translate:
                                        url = urlsplit(self.sub_addr[sub])
                                        host = url[1].split(":")[0]
                                        m__.sender = (m__.sender.split("@")[0]
                                                      + "@" + host)
                                    yield m__

                        for sub in self._hooks:
                            if sub in socks and socks[sub] == POLLIN:
                                m__ = Message.decode(sub.recv_string(NOBLOCK))
                                self._hooks_cb[sub](m__)
                    else:
                        # timeout
                        yield None
                except ZMQError as err:
                    LOGGER.exception("Receive failed: %s", str(err))
        finally:
            for sub in list(self.subscribers) + self._hooks:
                self.poller.unregister(sub)

    def __call__(self, **kwargs):
        return self.recv(**kwargs)

    def stop(self):
        """Stop the subscriber.
        """
        self._loop = False

    def close(self):
        """Close the subscriber: stop it and close the local subscribers.
        """
        self.stop()
        for sub in list(self.subscribers) + self._hooks:
            try:
                sub.setsockopt(LINGER, 1)
                sub.close()
            except ZMQError:
                pass

    @staticmethod
    def _magickfy_topics(topics):
        """Add the magick to the topics if missing.
        """
        # If topic does not start with messages._MAGICK (pytroll:/), it will be
        # prepended.
        if topics is None:
            return None
        if isinstance(topics, six.string_types):
            topics = [topics, ]
        ts_ = []
        for t__ in topics:
            if not t__.startswith(_MAGICK):
                if t__ and t__[0] == '/':
                    t__ = _MAGICK + t__
                else:
                    t__ = _MAGICK + '/' + t__
            ts_.append(t__)
        return ts_

    def __del__(self):
        for sub in list(self.subscribers) + self._hooks:
            try:
                sub.close()
            except:
                pass
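The hook mechanism described in add_hook_pull() can be sketched as follows; the 'inproc' address, the topic and the callback are illustrative, and the bound PUSH side is assumed to exist elsewhere in the process.

def on_control(msg):
    LOGGER.info("control message: %s", str(msg))

sub = Subscriber(['tcp://localhost:16543'], 'my_topic')
sub.add_hook_pull('inproc://control', on_control)
try:
    for msg in sub.recv(timeout=2):
        if msg is None:
            continue  # poll timed out, keep looping
        print("data message: %s" % str(msg))
except KeyboardInterrupt:
    sub.close()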
Example #7
class Cornerstone(Scaffold):
    """ Cornerstone can be used to create a 0mq poll loop.

    Upon creation of a Cornerstone instance, the initial state of the instance's
    internal zmq poll loop is passive. To start the loop call Cornerstone.run().
    To stop the Cornerstone instance call Cornerstone.kill().

    Cornerstone only allows for one zmq input socket and one zmq output socket,
    registered through the Cornerstone.register_input_sock() and
    Cornerstone.register_output_sock() methods respectively.

    Cornerstone implements an internal signal handler that detects interrupt
    signals and shuts down connection resources.

    Example Usage:
    >>> import threading
    >>> import time
    >>> from zmq import Context, SUB, SUBSCRIBE

    >>> # create, configure, and run a Cornerstone instance
    >>> foo = Cornerstone()
    >>> property_bag = foo.__create_property_bag__()
    >>> property_bag.heartbeat = 1
    >>> foo.configure(args=property_bag)
    >>> t = threading.Thread(target=foo.run)
    >>> t.start()
    >>> time.sleep(1)
    >>> assert t.is_alive()
    >>> foo.kill()
    >>> t.join(1)
    >>> assert not t.is_alive()

    >>> # register an input socket
    >>> ctx = foo.zmq_ctx
    >>> sock = ctx.socket(SUB)
    >>> sock.connect('tcp://localhost:6670')
    >>> sock.setsockopt(SUBSCRIBE, "")
    >>> foo.register_input_sock(sock)
    >>> t = threading.Thread(target=foo.run)
    >>> t.start()
    >>> time.sleep(1)
    >>> foo.kill()
    >>> t.join(1)
    >>> assert not t.is_alive()
    """


    def __init__(self, **kwargs):
        self._input_sock = None
        self._output_sock = None
        self._control_sock = None

        # determine if outgoing messages should enable NOBLOCK on send
        # the default behaviour is to block on a send call until a receiver is present
        self.no_block_send = False

        # configure the interrupt handling
        self._stop = True
        signal.signal(signal.SIGINT, self._signal_interrupt_handler)

        # a regular heartbeat interval must be set to the default.
        self.heartbeat = 3 # seconds

        # create the zmq context
        self.zmq_ctx = Context()

        # set the default input receive handler, if none has been assigned
        if not hasattr(self, 'input_recv_handler'):
            self.input_recv_handler = self._default_recv_handler

        # set the default handler, if none has been assigned.
        if not hasattr(self, '_command_handler'):
            self._command_handler = self._default_command_handler

        # construct the poller
        self._poll = Poller()

        # monitoring of message stream is off by default
        self.monitor_stream = False

        Scaffold.__init__(self, **kwargs)


    def configuration_options(self, arg_parser=None):
        """
        The configuration_options method utilizes the arg_parser parameter to
        add arguments that should be handled during configuration.

        Keyword Arguments:
        arg_parser - argparse.ArgumentParser object.

        Sample invocation:
        >>> import argparse
        >>> parser = argparse.ArgumentParser(prog='app.py')
        >>> foo = Cornerstone()
        >>> foo.configuration_options(arg_parser=parser)
        >>> args = parser.print_usage() # doctest: +NORMALIZE_WHITESPACE
        usage: app.py [-h] [--heartbeat HEARTBEAT] [--monitor_stream]
                  [--no_block_send]
        """
        assert arg_parser

        arg_parser.add_argument('--heartbeat',
                                type=int,
                                default=3,
                                help="Set the heartbeat rate in seconds of "
                                     "the core 0mq poller timeout.")
        arg_parser.add_argument('--monitor_stream',
                                action='store_true',
                                help='Enable the sampling of message flow.')
        arg_parser.add_argument('--no_block_send',
                                action='store_true',
                                help='Enable NOBLOCK on the sending of messages.'
                                     ' This will cause a message to be dropped '
                                     'if no receiver is present.')


    def configure(self, args=None):
        """
        The configure method configures a Cornerstone instance
        prior to the invocation of start.

        Keyword Arguments:
        args - an object with attributes set to the argument values.
        """
        assert args


    def register_input_sock(self, sock):
        """
        Register a given input socket as the ingest point for a Cornerstone
        instance.

        Keyword Arguments:
        sock - the input socket that is to be registered.

        Return: None

        Cornerstone does not support multiple input sockets, so any currently
        registered input socket will be discarded. This is a per-instance
        limitation; the primary concern is IP address collision.

        Example Usage:
        >>> from zmq import SUB, SUBSCRIBE
        >>> foo = Cornerstone()
        >>> ctx = foo.zmq_ctx
        >>> sock1 = ctx.socket(SUB)
        >>> sock1.connect('tcp://localhost:2880')
        >>> sock1.setsockopt(SUBSCRIBE, "")
        >>> assert foo._poll.sockets == {}
        >>> foo.register_input_sock(sock1)
        >>> assert foo._poll.sockets.has_key(sock1)
        >>> sock2 = ctx.socket(SUB)
        >>> sock2.connect('tcp://localhost:2881')
        >>> sock2.setsockopt(SUBSCRIBE, "")
        >>> foo.register_input_sock(sock2)
        >>> assert not foo._poll.sockets.has_key(sock1)
        >>> assert foo._poll.sockets.has_key(sock2)
        """
        # if there is an existing input socket, then it will be removed.
        if self._input_sock is not None:
            self._poll.unregister(self._input_sock)
            self._input_sock.close()
            self._input_sock = None

        self._input_sock = sock
        if self._input_sock is not None:
            self._poll.register(self._input_sock, POLLIN)


    def register_output_sock(self, sock):
        """
        Register a given output socket as the egress point for a Cornerstone
        instance.

        Keyword Arguments:
        sock - the output socket that is to be registered.

        Return: none

        Cornerstone does not support multiple output sockets, so any currently
        registered output socket will be discarded. This is a per-instance
        limitation; the primary concern is IP address collision.

        Example Usage:
        >>> from zmq import PUB
        >>> foo = Cornerstone()
        >>> ctx = foo.zmq_ctx
        >>> sock1 = ctx.socket(PUB)
        >>> sock1.bind('tcp://*:2880')
        >>> assert foo._output_sock == None
        >>> foo.register_output_sock(sock1)
        >>> assert foo._output_sock == sock1
        >>> sock2 = ctx.socket(PUB)
        >>> sock2.bind('tcp://*:28881')
        >>> foo.register_output_sock(sock2)
        >>> assert foo._output_sock == sock2
        """
        # if there is an existing output socket, then it will be removed.
        if self._output_sock is not None:
            self._output_sock.close()
            self._output_sock = None

        self._output_sock = sock


    def send(self, msg):
        assert msg
        if self.monitor_stream:
            self.log.info('o: %s', msg)

        if not self.no_block_send:
            self._output_sock.send(msg)
        else:
            try:
                self._output_sock.send(msg, NOBLOCK)
            except:
                self.log.error("Unexpected error:", sys.exc_info()[0])


    def setRun(self):
        self._stop = False


    def isStopped(self):
        return self._stop


    def run(self):
        """
        Comment: -- AAA --
        What needs to occur here is to see if there is a 0mq connection
        configured. If so, then we will simply push to that connector. This
        will be the default behavior, at least for now. There should be a
        mechanism for transmitting the data out to a registered handler.
        """
        self._stop = False

        self.log.info('Beginning run() with configuration: %s', self._args)

        #todo: raul - move this section to command configuration layer
        # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        # of course this is when a command configuration layer gets added
        controller = self.zmq_ctx.socket(SUB)
        controller.connect('tcp://localhost:7885')
        controller.setsockopt(SUBSCRIBE, "")
        self._control_sock = controller
        self._poll.register(self._control_sock)
        # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

        loop_count = 0
        input_count = 0
        while True:
            try:
                socks = dict(self._poll.poll(timeout=self.heartbeat))
                loop_count += 1
                if self.monitor_stream and (loop_count % 1000) == 0:
                    sys.stdout.write('loop(%s)' % loop_count)
                    sys.stdout.flush()

                if self._input_sock and socks.get(self._input_sock) == POLLIN:
                    #todo: raul - this whole section needs to be redone,
                    # see additional comment AAA above.
                    msg = self.input_recv_handler(self._input_sock)
                    input_count += 1
                    if self.monitor_stream: # and (input_count % 10) == 0:
                        self.log.info('i:%s- %s', input_count, msg)

                if (self._control_sock and
                    socks.get(self._control_sock) == POLLIN):
                    msg = self._control_sock.recv()
                    if self._command_handler is not None:
                        self._command_handler(msg)

                if self._stop:
                    self.log.info('Stop flag triggered ... shutting down.')
                    break

            except ZMQError as ze:
                if ze.errno == 4: # Known exception due to keyboard ctrl+c
                    self.log.info('System interrupt call detected.')
                else: # exit hard on unhandled exceptions
                    self.log.error('Unhandled exception in run execution:%d - %s'
                                   % (ze.errno, ze.strerror))
                    exit(-1)

        # close the sockets held by the poller
        self._control_sock.close()
        self.register_input_sock(sock=None)
        self.register_output_sock(sock=None)

        self.log.info('Run terminated for %s', self.name)
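A sketch of one way to plug in a custom command handler: because Cornerstone.__init__ only installs the default handler when none is set, a subclass can assign its own beforehand. The 'KILL' command string below is an assumption for illustration, not part of the original API, and self.log is assumed to be provided by Scaffold.

class EchoNode(Cornerstone):
    def __init__(self, **kwargs):
        # assign before Cornerstone.__init__ so the default handler is not installed
        self._command_handler = self._on_command
        Cornerstone.__init__(self, **kwargs)

    def _on_command(self, msg):
        self.log.info('control message received: %s', msg)
        if msg == 'KILL':  # hypothetical shutdown command
            self.kill()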
Example #8
class Subscriber(object):

    def __init__(self, addresses, translate=False):
        self._addresses = addresses
        self._translate = translate
        self.subscribers = []
        self._poller = Poller()
        for addr in self._addresses:
            subscriber = context.socket(SUB)
            subscriber.setsockopt(SUBSCRIBE, "pytroll")
            subscriber.connect(addr)
            self.subscribers.append(subscriber)
            self._poller.register(subscriber)
        self._lock = Lock()
        self._loop = True

    @property
    def sub_addr(self):
        return dict(zip(self.subscribers, self._addresses))

    @property
    def addr_sub(self):
        return dict(zip(self._addresses, self.subscribers))

    def reset(self, addr):
        with self._lock:
            idx = self._addresses.index(addr)
            self._poller.unregister(self.subscribers[idx])
            self.subscribers[idx].setsockopt(LINGER, 0)
            self.subscribers[idx].close()
            self.subscribers[idx] = context.socket(SUB)
            self.subscribers[idx].setsockopt(SUBSCRIBE, "pytroll")
            self.subscribers[idx].connect(addr)
            self._poller.register(self.subscribers[idx], POLLIN)

    def recv(self, timeout=None):
        """Receive a message, timeout in seconds.
        """

        if timeout:
            timeout *= 1000

        while self._loop:
            with self._lock:
                if not self._loop:
                    break
                subs = dict(self._poller.poll(timeout=timeout))
                if subs:
                    for sub in self.subscribers:
                        if sub in subs and subs[sub] == POLLIN:
                            msg = Message.decode(sub.recv())
                            if self._translate:
                                url = urlsplit(self.sub_addr[sub])
                                host = url[1].split(":")[0]
                                msg.sender = (msg.sender.split("@")[0]
                                              + "@" + host)
                            yield msg
                else:
                    yield None

    def stop(self):
        """Stop the subscriber
        """
        with self._lock:
            self._loop = False
            for sub in self.subscribers:
                self._poller.unregister(sub)
                sub.setsockopt(LINGER, 0)
                sub.close()
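A hedged usage sketch for the generator-style recv() above; the address is an example and the module-level zmq context is assumed to be set up as in the rest of the file. Note that the generator holds the internal lock while suspended at a yield, so it is closed before stop() is called.

sub = Subscriber(["tcp://localhost:9000"])
messages = sub.recv(timeout=2)
for _ in range(10):                  # poll at most ten times in this sketch
    msg = next(messages)
    if msg is None:
        continue                     # poll timed out, nothing arrived
    print("got %s" % str(msg))
messages.close()                     # releases the lock held at the yield point
sub.stop()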
Example #9
class SimpleRequester(object):

    """Base requester class.
    """

    request_retries = 3

    def __init__(self, host, port):
        self._socket = None
        self._reqaddress = "tcp://" + host + ":" + str(port)
        self._poller = Poller()
        self._lock = Lock()
        self.failures = 0
        self.jammed = False

        self.connect()

    def connect(self):
        """Connect to the server
        """
        self._socket = context.socket(REQ)
        self._socket.connect(self._reqaddress)
        self._poller.register(self._socket, POLLIN)

    def stop(self):
        """Close the connection to the server
        """
        self._socket.setsockopt(LINGER, 0)
        self._socket.close()
        self._poller.unregister(self._socket)

    def reset_connection(self):
        """Reset the socket
        """
        self.stop()
        self.connect()

    def __del__(self, *args, **kwargs):
        self.stop()

    def send_and_recv(self, msg, timeout=REQ_TIMEOUT):

        logger.debug("Locking and requesting: " + str(msg))
        with self._lock:
            retries_left = self.request_retries
            request = str(msg)
            self._socket.send(request)
            rep = None
            while retries_left:
                socks = dict(self._poller.poll(timeout))
                if socks.get(self._socket) == POLLIN:
                    reply = self._socket.recv()
                    if not reply:
                        logger.error("Empty reply!")
                        break
                    rep = Message(rawstr=reply)
                    if rep.binary:
                        logger.debug("Got reply: "
                                     + " ".join(str(rep).split()[:6]))
                    else:
                        logger.debug("Got reply: " + str(rep))
                    self.failures = 0
                    self.jammed = False
                    break
                else:
                    logger.warning("Timeout from " + str(self._reqaddress)
                                   + ", retrying...")
                    # Socket is confused. Close and remove it.
                    self.stop()
                    retries_left -= 1
                    if retries_left <= 0:
                        logger.error("Server doesn't answer, abandoning... " +
                                     str(self._reqaddress))
                        self.connect()
                        self.failures += 1
                        if self.failures == 5:
                            logger.critical("Server jammed ? %s",
                                            self._reqaddress)
                            self.jammed = True
                        break
                    logger.info("Reconnecting and resending " + str(msg))
                    # Create new connection
                    self.connect()
                    self._socket.send(request)
        logger.debug("Release request lock")
        return rep
Example #10
class Subscriber(object):
    def __init__(self, addresses, translate=False):
        self._addresses = addresses
        self._translate = translate
        self.subscribers = []
        self._poller = Poller()
        for addr in self._addresses:
            subscriber = context.socket(SUB)
            subscriber.setsockopt(SUBSCRIBE, "pytroll")
            subscriber.connect(addr)
            self.subscribers.append(subscriber)
            self._poller.register(subscriber)
        self._lock = Lock()
        self._loop = True

    @property
    def sub_addr(self):
        return dict(zip(self.subscribers, self._addresses))

    @property
    def addr_sub(self):
        return dict(zip(self._addresses, self.subscribers))

    def reset(self, addr):
        with self._lock:
            idx = self._addresses.index(addr)
            self._poller.unregister(self.subscribers[idx])
            self.subscribers[idx].setsockopt(LINGER, 0)
            self.subscribers[idx].close()
            self.subscribers[idx] = context.socket(SUB)
            self.subscribers[idx].setsockopt(SUBSCRIBE, "pytroll")
            self.subscribers[idx].connect(addr)
            self._poller.register(self.subscribers[idx], POLLIN)

    def recv(self, timeout=None):
        """Receive a message, timeout in seconds.
        """

        if timeout:
            timeout *= 1000

        while self._loop:
            with self._lock:
                if not self._loop:
                    break
                subs = dict(self._poller.poll(timeout=timeout))
                if subs:
                    for sub in self.subscribers:
                        if sub in subs and subs[sub] == POLLIN:
                            msg = Message.decode(sub.recv())
                            if self._translate:
                                url = urlsplit(self.sub_addr[sub])
                                host = url[1].split(":")[0]
                                msg.sender = msg.sender.split("@")[0] + "@" + host
                            yield msg
                else:
                    yield None

    def stop(self):
        """Stop the subscriber
        """
        with self._lock:
            self._loop = False
            for sub in self.subscribers:
                self._poller.unregister(sub)
                sub.setsockopt(LINGER, 0)
                sub.close()
Example #11
class PushRequester(object):

    """Base requester class.
    """

    request_retries = 3

    def __init__(self, host, port):
        self._socket = None
        self._reqaddress = "tcp://" + host + ":" + str(port)
        self._poller = Poller()
        self._lock = Lock()
        self.failures = 0
        self.jammed = False

        self.connect()

    def connect(self):
        """Connect to the server
        """
        self._socket = context.socket(REQ)
        self._socket.connect(self._reqaddress)
        self._poller.register(self._socket, POLLIN)

    def stop(self):
        """Close the connection to the server
        """
        self._socket.setsockopt(LINGER, 0)
        self._socket.close()
        self._poller.unregister(self._socket)

    def reset_connection(self):
        """Reset the socket
        """
        self.stop()
        self.connect()

    def __del__(self, *args, **kwargs):
        self.stop()

    def send_and_recv(self, msg, timeout=REQ_TIMEOUT):

        with self._lock:
            retries_left = self.request_retries
            request = str(msg)
            self._socket.send(request)
            rep = None
            while retries_left:
                socks = dict(self._poller.poll(timeout))
                if socks.get(self._socket) == POLLIN:
                    reply = self._socket.recv()
                    if not reply:
                        LOGGER.error("Empty reply!")
                        break
                    rep = Message(rawstr=reply)
                    self.failures = 0
                    self.jammed = False
                    break
                else:
                    LOGGER.warning("Timeout from " + str(self._reqaddress)
                                   + ", retrying...")
                    # Socket is confused. Close and remove it.
                    self.stop()
                    retries_left -= 1
                    if retries_left <= 0:
                        LOGGER.error("Server doesn't answer, abandoning... " +
                                     str(self._reqaddress))
                        self.connect()
                        self.failures += 1
                        if self.failures == 5:
                            LOGGER.critical("Server jammed ? %s",
                                            self._reqaddress)
                            self.jammed = True
                        break
                    LOGGER.info("Reconnecting and resending " + str(msg))
                    # Create new connection
                    self.connect()
                    self._socket.send(request)

        return rep
Example #12
class Subscriber(object):
    """Subscriber

    Subscribes to *addresses* for *topics*, and performs address translation if
    *translate* is true. The function *message_filter* can be used to
    discriminate some messages on the subscriber side. *topics* on the other
    hand performs filtering on the publishing side (from zeromq 3).

    Example::

        from posttroll.subscriber import Subscriber, get_pub_address

        addr = get_pub_address(service, timeout=2)
        sub = Subscriber([addr], 'my_topic')
        try:
            for msg in sub(timeout=2):
                print("Consumer got", msg)

        except KeyboardInterrupt:
            print("terminating consumer...")
            sub.close()

    """
    def __init__(self,
                 addresses,
                 topics='',
                 message_filter=None,
                 translate=False):
        self._topics = self._magickfy_topics(topics)
        self._filter = message_filter
        self._translate = translate

        self.sub_addr = {}
        self.addr_sub = {}
        self.poller = None

        self._hooks = []
        self._hooks_cb = {}

        self.poller = Poller()
        self._lock = Lock()

        self.update(addresses)

        self._loop = True

    def add(self, address, topics=None):
        """Add *address* to the subscribing list for *topics*.

        If *topics* is None, we will subscribe to the already specified topics.
        """
        with self._lock:
            if address in self.addresses:
                return False

            topics = self._magickfy_topics(topics) or self._topics
            LOGGER.info("Subscriber adding address %s with topics %s",
                        str(address), str(topics))
            subscriber = get_context().socket(SUB)
            for t__ in topics:
                subscriber.setsockopt_string(SUBSCRIBE, six.text_type(t__))
            subscriber.connect(address)
            self.sub_addr[subscriber] = address
            self.addr_sub[address] = subscriber
            if self.poller:
                self.poller.register(subscriber, POLLIN)
            return True

    def remove(self, address):
        """Remove *address* from the subscribing list for *topics*.
        """
        with self._lock:
            try:
                subscriber = self.addr_sub[address]
            except KeyError:
                return False
            LOGGER.info("Subscriber removing address %s", str(address))
            if self.poller:
                self.poller.unregister(subscriber)
            del self.addr_sub[address]
            del self.sub_addr[subscriber]
            subscriber.close()
            return True

    def update(self, addresses):
        """Updating with a set of addresses.
        """
        if isinstance(addresses, six.string_types):
            addresses = [
                addresses,
            ]
        s0_, s1_ = set(self.addresses), set(addresses)
        sr_, sa_ = s0_.difference(s1_), s1_.difference(s0_)
        for a__ in sr_:
            self.remove(a__)
        for a__ in sa_:
            self.add(a__)
        return bool(sr_ or sa_)

    def add_hook_sub(self, address, topics, callback):
        """Specify a *callback* in the same stream (thread) as the main receive
        loop. The callback will be called with the received messages from the
        specified subscription.

        Good for operations that are required to be done in the same thread as
        the main receive loop (e.g. operations on the underlying sockets).
        """
        LOGGER.info("Subscriber adding SUB hook %s for topics %s",
                    str(address), str(topics))
        socket = get_context().socket(SUB)
        for t__ in self._magickfy_topics(topics):
            socket.setsockopt_string(SUBSCRIBE, six.text_type(t__))
        socket.connect(address)
        self._add_hook(socket, callback)

    def add_hook_pull(self, address, callback):
        """Same as above, but with a PULL socket.
        (e.g. good for pushed 'inproc' messages from another thread).
        """
        LOGGER.info("Subscriber adding PULL hook %s", str(address))
        socket = get_context().socket(PULL)
        socket.connect(address)
        self._add_hook(socket, callback)

    def _add_hook(self, socket, callback):
        """Generic hook. The passed socket has to be "receive only".
        """
        self._hooks.append(socket)
        self._hooks_cb[socket] = callback
        if self.poller:
            self.poller.register(socket, POLLIN)

    @property
    def addresses(self):
        """Get the addresses
        """
        return self.sub_addr.values()

    @property
    def subscribers(self):
        """Get the subscribers
        """
        return self.sub_addr.keys()

    def recv(self, timeout=None):
        """Receive, optionally with *timeout* in seconds.
        """
        if timeout:
            timeout *= 1000.

        for sub in list(self.subscribers) + self._hooks:
            self.poller.register(sub, POLLIN)
        self._loop = True
        try:
            while self._loop:
                sleep(0)
                try:
                    socks = dict(self.poller.poll(timeout=timeout))
                    if socks:
                        for sub in self.subscribers:
                            if sub in socks and socks[sub] == POLLIN:
                                m__ = Message.decode(sub.recv_string(NOBLOCK))
                                if not self._filter or self._filter(m__):
                                    if self._translate:
                                        url = urlsplit(self.sub_addr[sub])
                                        host = url[1].split(":")[0]
                                        m__.sender = (
                                            m__.sender.split("@")[0] + "@" +
                                            host)
                                    yield m__

                        for sub in self._hooks:
                            if sub in socks and socks[sub] == POLLIN:
                                m__ = Message.decode(sub.recv_string(NOBLOCK))
                                self._hooks_cb[sub](m__)
                    else:
                        # timeout
                        yield None
                except ZMQError as err:
                    LOGGER.exception("Receive failed: %s", str(err))
        finally:
            for sub in list(self.subscribers) + self._hooks:
                self.poller.unregister(sub)

    def __call__(self, **kwargs):
        return self.recv(**kwargs)

    def stop(self):
        """Stop the subscriber.
        """
        self._loop = False

    def close(self):
        """Close the subscriber: stop it and close the local subscribers.
        """
        self.stop()
        for sub in list(self.subscribers) + self._hooks:
            try:
                sub.setsockopt(LINGER, 1)
                sub.close()
            except ZMQError:
                pass

    @staticmethod
    def _magickfy_topics(topics):
        """Add the magick to the topics if missing.
        """
        # If topic does not start with messages._MAGICK (pytroll:/), it will be
        # prepended.
        if topics is None:
            return None
        if isinstance(topics, six.string_types):
            topics = [
                topics,
            ]
        ts_ = []
        for t__ in topics:
            if not t__.startswith(_MAGICK):
                if t__ and t__[0] == '/':
                    t__ = _MAGICK + t__
                else:
                    t__ = _MAGICK + '/' + t__
            ts_.append(t__)
        return ts_

    def __del__(self):
        for sub in list(self.subscribers) + self._hooks:
            try:
                sub.close()
            except:
                pass
Example #13
class Distributor(Thread):
    def __init__(self, query):
        Thread.__init__(self)
        self._pool = ThreadPool(processes=4)
        self._workers = HashRing(WORKER_SERVERS)
        self._identity = bytes(uuid.uuid4())
        self._query = query
        self._init_sock()

    def _log(self, text):
        if LOG_DISTRIBUTOR:
            log_debug(self, text)

    def _init_sock(self):
        self._context = Context(1)
        self._poller = Poller()
        self._set_sock()

    def _set_sock(self):
        self._socket = self._context.socket(DEALER)
        self._socket.setsockopt(IDENTITY, self._identity)
        self._poller.register(self._socket, POLLIN)
        self._socket.connect(zmqaddr(self._get_broker(), BROKER_PORT))
        self._socket.send(PPP_READY)

    def _reset_sock(self):
        self._poller.unregister(self._socket)
        self._socket.setsockopt(LINGER, 0)
        self._socket.close()
        self._set_sock()

    def _get_broker(self):
        length = len(BROKER_SERVERS)
        return BROKER_SERVERS[randint(0, length - 1)]

    def _get_worker(self, uid):
        return self._workers.get_node(uid)

    def _get_user(self, buf):
        if len(buf) < USERNAME_SIZE:
            log_err(self, 'failed to get user, invalid length')
            raise Exception(log_get(self, 'failed to get user'))
        name = filter(lambda x:x != '*', buf[:USERNAME_SIZE])
        if not name:
            log_err(self, 'failed to get user')
            raise Exception(log_get(self, 'failed to get user'))
        return name

    def _get_token(self, buf):
        if len(buf) < UID_SIZE:
            log_err(self, 'failed to get token, invalid length')
            raise Exception(log_get(self, 'failed to get token'))
        uid = None
        token = None
        if buf[UID_SIZE - 1] == '*':
            user = self._get_user(buf)
            uid, token = self._query.user.get(user, 'uid', 'password')
        else:
            uid = buf[0:UID_SIZE]
            token = self._query.token.get(uid)
        if uid and token:
            return (uid, token)
        else:
            log_err(self, 'failed to get token')
            raise Exception(log_get(self, 'failed to get token'))

    def _reply(self, identity, seq, buf):
        msg = [identity, '', seq, buf]
        self._socket.send_multipart(msg)

    def _request(self, addr, **args):
        sock = io.connect(addr, WORKER_PORT)
        try:
            buf = bson.dumps(args)
            io.send_pkt(sock, buf)
            res = io.recv_pkt(sock)
            return unicode2str(bson.loads(res)['result'])
        finally:
            io.close(sock)

    def _proc(self, identity, seq, buf):
        uid, token = self._get_token(buf)
        if not uid or not token:
            log_err(self, 'failed to process, cannot get token')
            return
        addr = self._get_worker(uid)
        ret = self._request(addr, uid=uid, token=token, buf=buf)
        self._reply(identity, seq, ret)

    def _proc_safe(self, identity, seq, buf):
        try:
            self._proc(identity, seq, buf)
        except:
            log_err(self, 'failed to process')

    def _handler(self, identity, seq, buf):
        if DEBUG and not SAFE:
            self._proc(identity, seq, buf)
        else:
            self._proc_safe(identity, seq, buf)

    def run(self):
        self._log('start ...')
        liveness = PPP_HEARTBEAT_LIVENESS
        timeout = time.time() + PPP_HEARTBEAT_INTERVAL
        while True:
            sockets = dict(self._poller.poll(PPP_HEARTBEAT_INTERVAL * 1000))
            if sockets.get(self._socket) == POLLIN:
                frames = self._socket.recv_multipart()
                if not frames:
                    log_err(self, 'invalid request')
                    break
                if len(frames) == PPP_NR_FRAMES:
                    self._pool.apply_async(self._proc, args=(frames[PPP_FRAME_ID], frames[PPP_FRAME_SEQ], frames[PPP_FRAME_BUF]))
                elif len(frames) == 1 and frames[0] == PPP_HEARTBEAT:
                    liveness = PPP_HEARTBEAT_LIVENESS
                else:
                    log_err(self, "invalid request")
            else:
                liveness -= 1
                if liveness == 0:
                    time.sleep(random.randint(SLEEP_TIME / 2, SLEEP_TIME))
                    self._reset_sock()
                    liveness = PPP_HEARTBEAT_LIVENESS

            if time.time() > timeout:
                timeout = time.time() + PPP_HEARTBEAT_INTERVAL
                self._socket.send(PPP_HEARTBEAT)
Example #14
class Server():
    """A node server.

    Each node has a request URL where request are accepted and a service URL
    where the node periodically publishes its list of known nodes and the time
    of the last contact (last publication from that node on the service URL).

    The request commands are:
    -new: register a new node
    -get: get a key
    -set: set a key

    Each node owns a Nodes object which tells to which node a key belongs and a
    Cache object to store key/value pairs.

    When keys need to be moved (node list did change) then the old and new
    owners are compared for each key and the key gets sent or deleted
    accordingly.
    """

    PUB_INTERVALL = timedelta(seconds=5)

    IO_TIMEOUT = 5 * 1000  # milliseconds

    CMD_NEW = "new"

    CMD_GET = "get"

    CMD_SET = "set"

    def __init__(self, context, req_address, pub_address):
        self._context = context
        self._poller = Poller()
        self._nodes_sockets = {}  # requests and service sockets
        # FIFO request queue for nodes: [in_progress, data, callback(response)]
        self._nodes_requests = defaultdict(list)  # requests queues for nodes
        self._cache = Cache()  # the cache
        node_id = uuid1().hex  # unique node id, based on current time
        _LOG.info("node id: %s", node_id)
        self._nodes = Nodes(node_id, req_address, pub_address)  # other nodes

    def _open_req_socket(self, addr):
        req_socket = self._context.socket(REQ)
        req_socket.setsockopt(RCVTIMEO, self.IO_TIMEOUT)
        req_socket.connect(addr)
        self._poller.register(req_socket, POLLIN)
        return req_socket

    def _open_sub_socket(self, addr):
        sub_socket = self._context.socket(SUB)
        sub_socket.setsockopt_string(SUBSCRIBE, "")
        sub_socket.connect(addr)
        self._poller.register(sub_socket, POLLIN)
        return sub_socket

    def _add_request(self, node_id, data, callback):
        """Adds a new request for a particular node.

        Each request in the queue consists of:
        - in progress flag
        - data to be sent
        - function (with one argument, the response) to be called when the
            response is ready
        """
        self._nodes_requests[node_id].append([False, data, callback])

    def _hanlde_subscriptions(self, poll_sockets):
        """Read the nodes list from each other node and merge the information
        into the local node list.

        :return: true if nodes were added
        """
        added = []
        for node_id, (_, pub_socket) in self._nodes_sockets.items():
            if pub_socket in poll_sockets:
                node_id, nodes = pub_socket.recv_json()
                # Convert string to datetime.
                nodes = {
                    i: (req_addr, pub_addr, _str_to_dt(last))
                    for i, (req_addr, pub_addr, last) in nodes.items()}
                # Merge nodes list.
                new_nodes = self._nodes.update_nodes(nodes)
                # Create sockets for new nodes.
                for node_id, req_addr, pub_addr in new_nodes:
                    _LOG.debug("adding node: %s", node_id)
                    req_socket = self._open_req_socket(req_addr)
                    sub_socket = self._open_sub_socket(pub_addr)
                    added.append((node_id, (req_socket, sub_socket)))
        self._nodes_sockets.update(dict(added))  # save sockets
        return len(added) > 0  # nodes list got changed?

    def _add_node(self, node_id, req_addr, pub_addr, req_socket=None):
        """Add one node to the list and create the sockets for the request
        and service URLs (subscribe to the node).
        """
        _LOG.debug("adding node: %s", node_id)
        # Add node to the nodes list.
        self._nodes.add_node(node_id, req_addr, pub_addr)
        if req_socket is None:  # does one not exist yet?
            req_socket = self._open_req_socket(req_addr)
        sub_socket = self._open_sub_socket(pub_addr)
        # Remember sockets.
        self._nodes_sockets[node_id] = (req_socket, sub_socket)

    def _remove_nodes(self, removed_nodes):
        """Unregister and remove a node, remove all pending request, too.
        """
        for node_id in removed_nodes:
            _LOG.debug("removing node: %s", node_id)
            # Unregister sockets from poller.
            req_socket, pub_socket = self._nodes_sockets.get(
                node_id, (None, None))
            if req_socket is not None:
                self._poller.unregister(req_socket)
            if pub_socket is not None:
                self._poller.unregister(pub_socket)
            # Forget the sockets and erase request queue.
            self._nodes_sockets.pop(node_id, None)
            self._nodes_requests.pop(node_id, None)
        return len(removed_nodes) > 0  # nodes list got changed?

    def _handle_responses(self, poll_sockets):
        """When a request from the queue gets an answer call the function for
        that request, if possible.
        """
        for node_id, (req_socket, _) in self._nodes_sockets.items():
            # Did some node reply?
            if req_socket in poll_sockets:
                # Get the answer and call the callback function with it, if
                # there was one.
                response = req_socket.recv_json()
                requests = self._nodes_requests[node_id]
                if requests:
                    in_progress, _, callback = requests[0]
                    if in_progress:
                        callback(response)
                        # Request done, remove it from the queue.
                        self._nodes_requests[node_id] = requests[1:]

    def _handle_request(self, node_changes, action, data):
        # Handle the requested action.
        _LOG.debug("Request %s: %s", action, str(data))
        if action == self.CMD_NEW:  # new node, register it
            node_id, req_addr, pub_addr = data
            self._add_node(node_id, req_addr, pub_addr)
            node_changes[0] = True  # a node was added!
            return (self._nodes.id, self._nodes.pub_address)
        if action == self.CMD_SET:  # set a key and value
            key, timestamp, value = data
            timestamp = _str_to_dt(timestamp)
            return self._cache.set(
                key, value, timestamp,
                self._nodes.get_nodes_for_index(key_index(key)))
        if action == self.CMD_GET:  # return value for a key
            in_cache, timestamp, value = self._cache.get(data)
            return (in_cache, _dt_to_str(timestamp), value)

    def _rebalance(self):
        """ Check if key should be on some other nodes now, move/delete as
        necessary.
        """
        send_entries = defaultdict(list)
        remove_entries = []
        # Loop through all keys and compare on which nodes they should be.
        for key, ts, nodes, value, index in self._cache.key_indices:
            new_nodes = self._nodes.get_nodes_for_index(index)
            if new_nodes != nodes:  # node list for that key changed?
                if self._nodes.id not in new_nodes:
                    remove_entries.append(key)  # no longer local
                for new_node in new_nodes - nodes:  # where should they be?
                    send_entries[new_node].append((key, ts, value))
                self._cache.set(key, value, ts, new_nodes)  # save new nodelist
        # Queue requests to set the keys on the new nodes.
        moves = 0
        for new_node, entries in send_entries.items():
            if new_node != self._nodes.id:
                for key, ts, value in entries:
                    self._add_request(
                        new_node, (self.CMD_SET, (key, _dt_to_str(ts), value)),
                        lambda response: None)
                    moves += 1
        # Remove dead nodes from cache.
        for key in remove_entries:
            self._cache.delete_key(key)
        _LOG.debug(
            "adjusted distribution, %d moves, %d deletes", moves,
            len(remove_entries))

    def _handle_api_get(self, api_pub_socket, req_id, key):
        """Handle a API get request from the local API.

        If the key is not local, send a request to one of the responsible
        nodes. Upon arrival of the response publish the response for the API,
        using the request id, which only the right listens for.
        """
        req_id = req_id.encode("utf-8")

        # Publish the result.
        def send_response(resp, req_id=req_id, api_pub_socket=api_pub_socket):
            found, _, value = resp
            found = b"1" if found else b"0"
            value = value.encode("utf-8") if value else b""
            api_pub_socket.send_multipart([req_id, found, value])

        # Where is the key stored?
        nodes = self._nodes.get_nodes_for_index(key_index(key))
        if nodes:
            if self._nodes.id in nodes:  # local answer directly
                in_cache, _, value = self._cache.get(key)
                send_response((in_cache, None, value))
            else:  # remote, make request
                node_id = choice(list(nodes))
                self._add_request(node_id, (self.CMD_GET, key), send_response)
        else:  # can not be stored in cache, for whatever strange reason
            send_response((False, None, None))

    def _handle_api_set(self, api_pub_socket, req_id, key, value):
        """Handle a API set request from the local API.

        Check where the key should be stored and make the appropriate requests.
        The publish all results to the API, alltough the API only cares about
        the fastest response.
        """
        req_id = req_id.encode("utf-8")

        def send_response(resp, req_id=req_id, api_pub_socket=api_pub_socket):
            resp = str(resp).encode("utf-8")  # "0", "-1", ...
            api_pub_socket.send_multipart([req_id, resp])

        # Where should the key go?
        nodes = self._nodes.get_nodes_for_index(key_index(key))
        if nodes:
            timestamp = datetime.now()
            if self._nodes.id in nodes:  # save locally and publish response
                resp = self._cache.set(key, value, timestamp, nodes)
                send_response(resp)
                nodes.remove(self._nodes.id)  # done with local node!
            for node_id in nodes:  # make requests to all remote nodes
                self._add_request(
                    node_id,
                    (self.CMD_SET, (key, _dt_to_str(timestamp), value)),
                    send_response)
        else:
            send_response(-2)  # no nodes

    def loop(self, api_port, req_addr):
        """Event loop, listen to all sockets and handle all messages.

        If the req_addr of an existing node is given, this node will register
        itself there before entering the loop.

        The requests on the api_port are handled by a Python WSGI instance
        running the Flask API app.
        """
        # Register to existing node?
        if req_addr is not None:
            _LOG.debug("Contacting %s", req_addr)
            req_socket = self._open_req_socket(req_addr)
            req_socket.send_json(
                (self.CMD_NEW,
                    (self._nodes.id, self._nodes.req_address,
                        self._nodes.pub_address)))
            node_id, pub_addr = req_socket.recv_json()
            _LOG.debug("Received: %s %s", str(node_id), pub_addr)
            self._add_node(node_id, req_addr, pub_addr, req_socket)
            self._rebalance()

        # Create request and service sockets.
        nodes_publisher = self._context.socket(PUB)
        nodes_publisher.setsockopt(RCVTIMEO, self.IO_TIMEOUT)
        nodes_publisher.setsockopt(SNDTIMEO, self.IO_TIMEOUT)
        _LOG.debug("Publishing on %s", self._nodes.pub_address)
        nodes_publisher.bind(self._nodes.pub_address)
        req_socket = self._context.socket(REP)
        req_socket.setsockopt(RCVTIMEO, self.IO_TIMEOUT)
        req_socket.setsockopt(SNDTIMEO, self.IO_TIMEOUT)
        _LOG.debug("waiting for requests on %s", self._nodes.req_address)
        req_socket.bind(self._nodes.req_address)
        self._poller.register(req_socket, POLLIN)

        # Create in-process sockets to the API app.
        api_pull_socket = self._context.socket(PULL)
        api_pull_socket.bind(PUSH_ENDPOINT)
        self._poller.register(api_pull_socket, POLLIN)
        api_pub_socket = self._context.socket(PUB)
        api_pub_socket.bind(SUB_ENDPOINT)

        _LOG.info("Entering server loop")
        # Start web server.
        set_config(self._context)
        httpd = simple_server.make_server(
            '0.0.0.0', int(api_port), app)
        Thread(target=httpd.serve_forever).start()
        # Enter ZMQ loop.
        stop = False
        last_published = None
        try:
            while not stop:
                try:
                    # Wait for messages, but not too long!
                    sockets = dict(
                        self._poller.poll(self.PUB_INTERVALL.seconds * 1000))
                except KeyboardInterrupt:
                    stop = True
                else:
                    changes = [False]
                    # Handle incoming node updates (subscriptions).
                    if self._handle_subscriptions(sockets):
                        changes[0] = True
                    # Handle incoming responses.
                    self._handle_responses(sockets)
                    # Incoming requests?
                    if req_socket in sockets:
                        req_socket.send_json(
                            self._handle_request(
                                changes, *req_socket.recv_json()))
                    # Remove dead nodes?
                    if self._remove_nodes(self._nodes.remove_dead_nodes()):
                        changes[0] = True
                    # Did nodes change?
                    if changes[0]:
                        self._rebalance()
                    # Request something?
                    for node_id, requests in self._nodes_requests.items():
                        if requests and not requests[0][0]:
                            request = requests[0]
                            request[0] = True  # in progress
                            self._nodes_sockets[node_id][0].send_json(
                                request[1])
                    # Publish something?
                    now = datetime.now()
                    if (last_published is None or
                            now - last_published > self.PUB_INTERVALL):
                        # Get nodes, convert last datetime to string
                        nodes = {
                            i: (req_addr, pub_addr, _dt_to_str(last))
                            for i, (req_addr, pub_addr, last)
                            in self._nodes.nodes.items()}
                        _LOG.debug(
                            "publishing:\n%s", "\n".join(
                                "{}: {}".format(i, str(n))
                                for i, n in nodes.items()))
                        nodes_publisher.send_json((self._nodes.id, nodes))
                        last_published = now
                    # Handle API get requests
                    if api_pull_socket in sockets:
                        req_id, action, key, value = (
                            api_pull_socket.recv_json())
                        if action == "get":
                            self._handle_api_get(api_pub_socket, req_id, key)
                        if action == "set":
                            self._handle_api_set(
                                api_pub_socket, req_id, key, value)
        finally:
            httpd.shutdown()
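For context, _handle_api_get and _handle_api_set expect the local API process to push a JSON tuple (req_id, action, key, value) over PUSH_ENDPOINT and to subscribe on SUB_ENDPOINT for a reply prefixed with the request id. A rough sketch of that client side, assuming the endpoint constants are the same in-process zmq addresses used above and ignoring the usual PUB/SUB slow-joiner caveat:

# Hypothetical API-side helper; the tuple layout and reply framing are
# inferred from the server loop above, not taken from the original API app.
from uuid import uuid1
from zmq import Context, PUSH, SUB, SUBSCRIBE

def api_get(context, key, timeout_ms=5000):
    req_id = uuid1().hex
    push = context.socket(PUSH)
    push.connect(PUSH_ENDPOINT)
    sub = context.socket(SUB)
    sub.connect(SUB_ENDPOINT)
    sub.setsockopt_string(SUBSCRIBE, req_id)    # only listen for our request id
    push.send_json((req_id, "get", key, None))  # (req_id, action, key, value)
    if sub.poll(timeout_ms):                    # wait for the published reply
        _, found, value = sub.recv_multipart()
        return found == b"1", value.decode("utf-8")
    return False, None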
Example #15
0
class Cornerstone(Scaffold):
    """ Cornerstone can be used to create a 0mq poll loop.

    Upon creation of a Cornerstone instance, the initial state of the
    instance's internal zmq poll loop is passive. To start the loop call
    Cornerstone.run(). To stop the Cornerstone instance call Cornerstone.kill().

    Cornerstone only allows for one zmq input socket and one zmq output socket,
    registered through the Cornerstone.register_input_sock() and
    Cornerstone.register_output_sock() methods respectively.

    Cornerstone implements an internal signal handler for detection of
    interrupt signals to handle shutdown of connection resources.

    Example Usage:
    >>> import threading
    >>> import time
    >>> from zmq import Context, SUB, SUBSCRIBE

    >>> # create, configure, and run a Cornerstone instance
    >>> foo = Cornerstone()
    >>> property_bag = foo.__create_property_bag__()
    >>> property_bag.heartbeat = 1
    >>> foo.configure(args=property_bag)
    >>> t = threading.Thread(target=foo.run)
    >>> t.start()
    >>> time.sleep(1)
    >>> assert t.is_alive()
    >>> foo.kill()
    >>> t.join(1)
    >>> assert not t.is_alive()

    >>> # register an input socket
    >>> ctx = foo.zmq_ctx
    >>> sock = ctx.socket(SUB)
    >>> sock.connect('tcp://localhost:6670')
    >>> sock.setsockopt(SUBSCRIBE, "")
    >>> foo.register_input_sock(sock)
    >>> t = threading.Thread(target=foo.run)
    >>> t.start()
    >>> time.sleep(1)
    >>> foo.kill()
    >>> t.join(1)
    >>> assert not t.is_alive()
    """
    def __init__(self, **kwargs):
        self._input_sock = None
        self._output_sock = None
        self._control_sock = None

        # determine if outgoing messages should enable NOBLOCK on send
        # default behaviour is to block on a send call till receiver is present
        self.no_block_send = False

        # configure the interrupt handling
        self._stop = True
        signal.signal(signal.SIGINT, self._signal_interrupt_handler)

        # a regular heartbeat interval must be set to the default.
        self.heartbeat = 3  # seconds

        # create the zmq context
        self.zmq_ctx = Context()

        # set the default input receive handler, if none has been assigned
        if not hasattr(self, 'input_recv_handler'):
            self.input_recv_handler = self._default_recv_handler

        # set the default handler, if none has been assigned.
        if not hasattr(self, '_command_handler'):
            self._command_handler = self._default_command_handler

        # construct the poller
        self._poll = Poller()

        # monitoring of message stream is off by default
        self.monitor_stream = False

        Scaffold.__init__(self, **kwargs)

    def configuration_options(self, arg_parser=None):
        """
        The configuration_options method utilizes the arg_parser parameter to
        add arguments that should be handled during configuration.

        Keyword Arguments:
        arg_parser - argparse.ArgumentParser object.

        Sample invocation:
        >>> import argparse
        >>> parser = argparse.ArgumentParser(prog='app.py')
        >>> foo = Cornerstone()
        >>> foo.configuration_options(arg_parser=parser)
        >>> args = parser.print_usage() # doctest: +NORMALIZE_WHITESPACE
        usage: app.py [-h] [--heartbeat HEARTBEAT] [--monitor_stream]
                  [--no_block_send]
        """
        assert arg_parser

        arg_parser.add_argument('--heartbeat',
                                type=int,
                                default=3,
                                help="Set the heartbeat rate in seconds of "
                                "the core 0mq poller timeout.")
        arg_parser.add_argument('--monitor_stream',
                                action='store_true',
                                help='Enable the sampling of message flow.')
        arg_parser.add_argument(
            '--no_block_send',
            action='store_true',
            help='Enable NOBLOCK on the sending of messages.'
            ' This will cause a message to be dropped '
            'if no receiver is present.')

    def configure(self, args=None):
        """
        The configure method configures a Cornerstone instance
        prior to the invocation of start.

        Keyword Arguments:
        args - an object with attributes set to the argument values.
        """
        assert args

    def register_input_sock(self, sock):
        """
        Register a given input socket as the ingest point for a Cornerstone
        instance.

        Keyword Arguments:
        sock - the input socket that is to be registered.

        Return: None

        Cornerstone does not support multiple input sockets, so any currently
        registered input socket will be discarded. This is a per-instance
        limitation; the primary concern is IP address collision.

        Example Usage:
        >>> from zmq import SUB, SUBSCRIBE
        >>> foo = Cornerstone()
        >>> ctx = foo.zmq_ctx
        >>> sock1 = ctx.socket(SUB)
        >>> sock1.connect('tcp://localhost:2880')
        >>> sock1.setsockopt(SUBSCRIBE, "")
        >>> assert foo._poll.sockets == {}
        >>> foo.register_input_sock(sock1)
        >>> assert foo._poll.sockets.has_key(sock1)
        >>> sock2 = ctx.socket(SUB)
        >>> sock2.connect('tcp://localhost:2881')
        >>> sock2.setsockopt(SUBSCRIBE, "")
        >>> foo.register_input_sock(sock2)
        >>> assert not foo._poll.sockets.has_key(sock1)
        >>> assert foo._poll.sockets.has_key(sock2)
        """
        # if there is an existing input socket, then it will be removed.
        if self._input_sock is not None:
            self._poll.unregister(self._input_sock)
            self._input_sock.close()
            self._input_sock = None

        self._input_sock = sock
        if self._input_sock is not None:
            self._poll.register(self._input_sock, POLLIN)

    def register_output_sock(self, sock):
        """
        Register a given output socket as the egress point for a Cornerstone
        instance.

        Keyword Arguments:
        sock - the output socket that is to be registered.

        Return: none

        Cornerstone does not support multiple output sockets,
        so any currently registered output socket will be discarded. This is
        a per-instance limitation; the primary concern is IP address
        collision.

        Example Usage:
        >>> from zmq import PUB
        >>> foo = Cornerstone()
        >>> ctx = foo.zmq_ctx
        >>> sock1 = ctx.socket(PUB)
        >>> sock1.bind('tcp://*:2880')
        >>> assert foo._output_sock == None
        >>> foo.register_output_sock(sock1)
        >>> assert foo._output_sock == sock1
        >>> sock2 = ctx.socket(PUB)
        >>> sock2.bind('tcp://*:28881')
        >>> foo.register_output_sock(sock2)
        >>> assert foo._output_sock == sock2
        """
        # if there is an existing output socket, then it will be removed.
        if self._output_sock is not None:
            self._output_sock.close()
            self._output_sock = None

        self._output_sock = sock

    def send(self, msg):
        assert msg
        if self.monitor_stream:
            self.log.info('o: %s', msg)

        if not self.no_block_send:
            self._output_sock.send(msg)
        else:
            try:
                self._output_sock.send(msg, NOBLOCK)
            except:
                self.log.error("Unexpected error:", sys.exc_info()[0])

    def setRun(self):
        self._stop = False

    def isStopped(self):
        return self._stop

    def run(self):
        """
        Comment: -- AAA --
        What needs to occur here is to see if there is a 0mq connection
        configured. If so, then we will simply push to that connector. This
        will be the default behavior, at least for now. There should be a
        mechanism for transmitting the data out to a registered handler.
        """
        self._stop = False

        self.log.info('Beginning run() with configuration: %s', self._args)

        #todo: raul - move this section to command configuration layer
        # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        # of course this is when a command configuration layer gets added
        controller = self.zmq_ctx.socket(SUB)
        controller.connect('tcp://localhost:7885')
        controller.setsockopt(SUBSCRIBE, "")
        self._control_sock = controller
        self._poll.register(self._control_sock)
        # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

        loop_count = 0
        input_count = 0
        while True:
            try:
                socks = dict(self._poll.poll(timeout=self.heartbeat))
                loop_count += 1
                if self.monitor_stream and (loop_count % 1000) == 0:
                    sys.stdout.write('loop(%s)' % loop_count)
                    sys.stdout.flush()

                if self._input_sock and socks.get(self._input_sock) == POLLIN:
                    #todo: raul - this whole section needs to be redone,
                    # see additional comment AAA above.
                    msg = self.input_recv_handler(self._input_sock)
                    input_count += 1
                    if self.monitor_stream:  # and (input_count % 10) == 0:
                        self.log.info('i:%s- %s', input_count, msg)

                if (self._control_sock
                        and socks.get(self._control_sock) == POLLIN):
                    msg = self._control_sock.recv()
                    if self._command_handler is not None:
                        self._command_handler(msg)

                if self._stop:
                    self.log.info('Stop flag triggered ... shutting down.')
                    break

            except ZMQError, ze:
                if ze.errno == 4:  # Known exception due to keyboard ctrl+c
                    self.log.info('System interrupt call detected.')
                else:  # exit hard on unhandled exceptions
                    self.log.error(
                        'Unhandled exception in run execution:%d - %s' %
                        (ze.errno, ze.strerror))
                    exit(-1)

        # close the sockets held by the poller
        self._control_sock.close()
        self.register_input_sock(sock=None)
        self.register_output_sock(sock=None)

        self.log.info('Run terminated for %s', self.name)
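run() above subscribes a control socket to tcp://localhost:7885 and hands every message it receives to the command handler. A minimal controller sketch, assuming commands are injected by a PUB socket bound on that port (the command vocabulary expected by _default_command_handler is not shown here, so the payload is purely illustrative):

# Hypothetical controller process for the control socket used in run().
from time import sleep
from zmq import Context, PUB

ctx = Context()
control = ctx.socket(PUB)
control.bind('tcp://*:7885')
sleep(1)               # give the SUB side time to connect (slow joiner)
control.send(b'KILL')  # delivered to Cornerstone._command_handler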
Example #16
0
class ZmqSelector(BaseSelector):
    """A selector that can be used with asyncio's selector base event loops."""

    def __init__(self):
        # this maps file descriptors to keys
        self._fd_to_key = {}
        # read-only mapping returned by get_map()
        self._map = _SelectorMapping(self)
        self._poller = ZMQPoller()

    def _fileobj_lookup(self, fileobj):
        """Return a file descriptor from a file object.

        This wraps _fileobj_to_fd() to do an exhaustive search in case
        the object is invalid but we still have it in our map.  This
        is used by unregister() so we can unregister an object that
        was previously registered even if it is closed.  It is also
        used by _SelectorMapping.
        """
        try:
            return _fileobj_to_fd(fileobj)
        except ValueError:
            # Do an exhaustive search.
            for key in self._fd_to_key.values():
                if key.fileobj is fileobj:
                    return key.fd
            # Raise ValueError after all.
            raise

    def register(self, fileobj, events, data=None):
        if (not events) or (events & ~(EVENT_READ | EVENT_WRITE)):
            raise ValueError("Invalid events: {!r}".format(events))

        key = SelectorKey(fileobj, self._fileobj_lookup(fileobj), events, data)

        if key.fd in self._fd_to_key:
            raise KeyError("{!r} (FD {}) is already registered"
                           .format(fileobj, key.fd))

        z_events = 0
        if events & EVENT_READ:
            z_events |= POLLIN
        if events & EVENT_WRITE:
            z_events |= POLLOUT
        try:
            self._poller.register(key.fd, z_events)
        except ZMQError as exc:
            raise OSError(exc.errno, exc.strerror) from exc

        self._fd_to_key[key.fd] = key
        return key

    def unregister(self, fileobj):
        try:
            key = self._fd_to_key.pop(self._fileobj_lookup(fileobj))
        except KeyError:
            raise KeyError("{!r} is not registered".format(fileobj)) from None
        try:
            self._poller.unregister(key.fd)
        except ZMQError as exc:
            self._fd_to_key[key.fd] = key
            raise OSError(exc.errno, exc.strerror) from exc
        return key

    def modify(self, fileobj, events, data=None):
        try:
            fd = self._fileobj_lookup(fileobj)
            key = self._fd_to_key[fd]
        except KeyError:
            raise KeyError("{!r} is not registered".format(fileobj)) from None
        if data == key.data and events == key.events:
            return key
        if events != key.events:
            z_events = 0
            if events & EVENT_READ:
                z_events |= POLLIN
            if events & EVENT_WRITE:
                z_events |= POLLOUT
            try:
                self._poller.modify(fd, z_events)
            except ZMQError as exc:
                raise OSError(exc.errno, exc.strerror) from exc

        key = key._replace(data=data, events=events)
        self._fd_to_key[key.fd] = key
        return key

    def close(self):
        self._fd_to_key.clear()
        self._poller = None

    def get_map(self):
        return self._map

    def _key_from_fd(self, fd):
        """Return the key associated to a given file descriptor.

        Parameters:
        fd -- file descriptor

        Returns:
        corresponding key, or None if not found
        """
        try:
            return self._fd_to_key[fd]
        except KeyError:
            return None

    def select(self, timeout=None):
        if timeout is None:
            timeout = None
        elif timeout <= 0:
            timeout = 0
        else:
            # poll() has a resolution of 1 millisecond, round away from
            # zero to wait *at least* timeout seconds.
            timeout = math.ceil(timeout * 1e3)

        ready = []
        try:
            z_events = self._poller.poll(timeout)
        except ZMQError as exc:
            if exc.errno == EINTR:
                return ready
            else:
                raise OSError(exc.errno, exc.strerror) from exc

        for fd, evt in z_events:
            events = 0
            if evt & POLLIN:
                events |= EVENT_READ
            if evt & POLLOUT:
                events |= EVENT_WRITE
            if evt & POLLERR:
                events = EVENT_READ | EVENT_WRITE

            key = self._key_from_fd(fd)
            if key:
                ready.append((key, events & key.events))

        return ready
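ZmqSelector is meant to be handed to asyncio's selector-based event loop; a minimal sketch of wiring it in (standard-library calls only, nothing assumed beyond the docstring above):

# Use the zmq-aware selector as the backing selector of an asyncio loop.
import asyncio

loop = asyncio.SelectorEventLoop(ZmqSelector())
asyncio.set_event_loop(loop)
# Regular sockets and zmq sockets (via their file descriptors) can now both
# be watched by this loop.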
Example #17
0
class JobContext(object):
    def __init__(self, job_id):
        self.job_id = job_id
        self.connections_by_name = {}
        self.connections_by_socket = {}
        self.poller = Poller()
        self.num_pending_reqs = 0

    def send_req(self, worker_node, msg, callback):
        """Send a message and register callback for response. Message
        should be unserialized json
        """
        node_name = worker_node["name"]
        if self.connections_by_name.has_key(node_name):
            conn = self.connections_by_name[node_name]
            assert conn.port==worker_node["worker_port"]
            assert conn.contact_address==worker_node["contact_address"]
        else:
            conn = Connection(node_name, worker_node["contact_address"],
                              worker_node["worker_port"])
            conn.connect()
            self.connections_by_name[node_name] = conn
            self.connections_by_socket[conn.socket] = conn
            self.poller.register(conn.socket, flags=POLLIN)
        self.num_pending_reqs += 1
        conn.send(json.dumps(msg), callback)
        if msg.has_key("mtype"): mtype = msg["mtype"]
        else: mtype = "unknown"
        logger.debug("Sent message of type %s to %s" %
                     (mtype, worker_node["name"]))

    def poll(self, timeout_in_ms):
        events = self.poller.poll(timeout_in_ms)
        if len(events)==0:
            return 0
        for (socket, num) in events:
            if not self.connections_by_socket.has_key(socket):
                raise ZmqServerError("Poll: Could not find registered socket for socket %s" % socket)
            conn = self.connections_by_socket[socket]
            if not conn.callback:
                raise ZmqServerError("Received event for connection %s when no request pending" % conn.node_name)
            msg = socket.recv()
            logger.debug("message received from worker %s" % conn.node_name)
            callback = conn.callback
            # We clear the callback field of the connection just before we call
            # the callback. This lets us disconnect within the callback,
            # if we want to (e.g. after receiving a stop ack).
            conn.callback = None
            callback(msg)
            self.num_pending_reqs = self.num_pending_reqs - 1
        return len(events)

    def disconnect(self, node_name):
        if not self.connections_by_name.has_key(node_name):
            raise ZmqServerError("Attempting to disconnect from %s but no conection found" % node_name)
        conn = self.connections_by_name[node_name]
        assert conn.callback==None
        del self.connections_by_name[node_name]
        del self.connections_by_socket[conn.socket]
        self.poller.unregister(conn.socket)
        conn.socket.close()

    def stop_job(self):
        node_names = [n for n in self.connections_by_name.keys()]
        for node_name in node_names:
            self.disconnect(node_name)
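A rough usage sketch for JobContext, assuming a worker-node descriptor with the keys send_req reads ('name', 'contact_address', 'worker_port') and that the Connection class referenced above handles the actual socket work; the addresses and message body are made up for illustration:

job = JobContext("job-42")
worker = {"name": "worker-1",
          "contact_address": "10.0.0.5",
          "worker_port": 5555}

replies = []
job.send_req(worker, {"mtype": "start_task", "task": "t1"}, replies.append)

# Pump responses until the pending request has been answered.
while job.num_pending_reqs > 0:
    job.poll(1000)  # timeout in milliseconds

job.stop_job()      # disconnect from all workers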