# Example 1
def debug_to_file(filename, mode='wb', lvl=None, formatter=None):
    """Add debug logging to a file.

    :param filename:  name of file to log to
    :param mode:      file open mode
    :param lvl:       minimum debug level to log (no filtering if None)
    :type  lvl:       int
    :param formatter: formatter for debug log entries
    :type  formatter: :class:`versile.common.log.VLogEntryFormatter`

    Will only add file logging if :func:`debug_to_console`\ ,
    :func:`debug_to_file` or :func:`debug_to_watcher` has not already
    been called.

    """
    global debug_log
    global __debug_lock
    with __debug_lock:
        if not debug_log:
            # Lazy import keeps module import cost down when debug
            # logging is never enabled
            from versile.common.log import VLogger, VFileLog
            from versile.common.log import VLogEntryFilter
            from versile.common.log import VLogEntryFormatter
            debug_log = VLogger()
            if formatter is None:
                formatter = VLogEntryFormatter()
            watcher = VFileLog(open(filename, mode), formatter=formatter)
            if lvl is not None:
                # Drop entries below the requested debug level
                class Filter(VLogEntryFilter):
                    def keep_entry(self, log_entry):
                        return (log_entry.lvl >= lvl)

                watcher.add_watch_filter(Filter())
            debug_log.add_watcher(watcher)
# Example 2
def debug_to_watcher(watcher):
    """Add debug logging to a log watcher.

    :param watcher:  log watcher to log to
    :type  watcher:  :class:`versile.common.log.VLogWatcher`

    Logging is only set up if none of :func:`debug_to_console`\ ,
    :func:`debug_to_file` or :func:`debug_to_watcher` has been called
    earlier.

    """
    global debug_log
    global __debug_lock
    with __debug_lock:
        if debug_log:
            # Debug logging was already configured; leave it untouched
            return
        from versile.common.log import VLogger
        debug_log = VLogger()
        debug_log.add_watcher(watcher)
# Example 3
class VSocketBase(object):
    """Base class for reactor-driven sockets.

    :param reactor:     reactor handling socket events
    :param sock:        native socket
    :type  sock:        :class:`socket.socket`
    :param hc_pol:      half-close policy
    :type  hc_pol:      :class:`versile.reactor.io.VHalfClosePolicy`
    :param close_cback: callback when closed (or None)
    :type  close_cback: callable

    The socket is set to a non-blocking mode. If *sock* is None then a
    socket is created with :meth:`create_native`\ .

    *hc_pol* determines whether the socket object allows closing only
    the socket input or output. If *hc_pol* is None an
    :class:`versile.reactor.io.VHalfClose` instance is used.

    This class is abstract and not intended to be directly
    instantiated. Instead, :class:`VSocket` or derived classes should
    be used.

    """
    def __init__(self, reactor, sock=None, hc_pol=None, close_cback=None):
        self.__reactor = reactor
        if not sock:
            sock = self.create_native()
        # Non-blocking mode is required for reactor-driven event handling
        sock.setblocking(False)
        self.__sock = sock
        if hc_pol is None:
            hc_pol = VHalfClose()
        self.__hc_pol = hc_pol
        self._sock_close_cback = close_cback
        # Tracks whether close_cback has already been delivered, so the
        # finalizer does not invoke it twice
        self._sock_sent_close_cback = False

        # Set up a socket logger for convenience
        self.__logger = VLogger(prefix='Sock')
        self.__logger.add_watcher(self.reactor.log)

    def __del__(self):
        # Best-effort: if a close callback was registered but never sent,
        # deliver it during finalization; failures are logged and silenced
        if self._sock_close_cback and not self._sock_sent_close_cback:
            try:
                self._sock_close_cback()
            except Exception as e:
                self.log.debug('Close callback failed')
                _v_silent(e)

    @abstract
    def close_io(self, reason):
        """See :meth:`versile.reactor.io.IVByteHandle.close_io`\ ."""
        # NOTE(review): decorated @abstract yet carries a default
        # implementation - presumably intended as base behavior for
        # derived classes to extend; confirm against project conventions
        self._sock_shutdown_rw()
        self.log.debug('close()')
        try:
            self.sock.close()
        except socket.error as e:
            _v_silent(e)
        return True

    def fileno(self):
        """See :meth:`versile.reactor.io.IVSelectable.fileno`\ ."""
        if not self.__sock:
            raise VIOError('No socket')
        try:
            fd = self.__sock.fileno()
        except socket.error:
            # Descriptor unavailable (e.g. closed socket); -1 signals an
            # invalid descriptor to select()-style callers
            return -1
        else:
            return fd

    @classmethod
    def create_native(cls):
        """Creates a native socket.

        :returns: native socket

        The created socket is set up as non-blocking.

        """
        sock = socket.socket()
        sock.setblocking(0)
        return sock

    @classmethod
    def create_native_pair(cls):
        """Returns two native paired (connected) sockets.

        :returns: two paired native sockets
        :rtype:   :class:`socket.socket`\ , :class:`socket.socket`
        :raises:  :exc:`versile.reactor.io.VIOError`

        """
        # Use native socket.socketpair() if available
        if hasattr(socket, 'socketpair'):
            try:
                pair = socket.socketpair()
            # NOTE(review): bare except also masks SystemExit and
            # KeyboardInterrupt; narrowing to socket.error would be safer
            except:
                raise VIOError('Could not create socket pair')
            else:
                return pair
        # Use local TCP socket implementation as a workaround
        from versile.reactor.io.tcp import VTCPSocket
        return VTCPSocket.create_native_pair()

    @property
    def reactor(self):
        """See :attr:`versile.reactor.IVReactorObject.reactor`\ ."""
        return self.__reactor

    @property
    def sock(self):
        """The :class:`VSocketBase`\ \'s associated native socket."""
        return self.__sock

    @property
    def log(self):
        """Logger for the socket (:class:`versile.common.log.VLogger`\ )."""
        return self.__logger

    def _set_logger(self, logger=None, safe=False):
        """Replace current logger with a different logger.

        If logger is None, a non-connected logger is put in place.

        """
        if not safe:
            # Re-dispatch onto the reactor thread so the logger swap
            # cannot race with reactor-driven logging
            self.reactor.schedule(0.0, self._set_logger, logger, True)
            return
        if logger is None:
            logger = VLogger()
        self.__logger = logger

    def _get_hc_pol(self):
        # Getter backing the half_close_policy property
        return self.__hc_pol

    def _set_hc_pol(self, policy):
        # Setter backing the half_close_policy property
        self.__hc_pol = policy

    __doc = 'See :meth:`versile.reactor.io.IVByteIO.half_close_policy`'
    half_close_policy = property(_get_hc_pol, _set_hc_pol, doc=__doc)
    del (__doc)

    def _sock_shutdown_read(self):
        """Internal call to shut down the socket for reading."""
        try:
            self.stop_reading()
            self.sock.shutdown(socket.SHUT_RD)
        except socket.error as e:
            _v_silent(e)

    def _sock_shutdown_write(self):
        """Internal call to shut down the socket for writing."""
        try:
            self.stop_writing()
            self.sock.shutdown(socket.SHUT_WR)
        except socket.error as e:
            _v_silent(e)

    def _sock_shutdown_rw(self):
        """Internal call to shut down the socket for reading and writing."""
        # NOTE(review): _sock_in_closed/_sock_out_closed and
        # stop_reading/stop_writing are provided by derived classes -
        # confirm they exist on all concrete subclasses
        try:
            if not self._sock_in_closed:
                self.stop_reading()
            if not self._sock_out_closed:
                self.stop_writing()
            self.sock.shutdown(socket.SHUT_RDWR)
        except socket.error as e:
            _v_silent(e)

    def _sock_replace(self, sock):
        """Internal call to replace the native socket with *sock*\ ."""
        self.__sock = sock

    def _connect_transform(self, sock):
        """Returns a transformed native socket.

        :param sock: socket to transform
        :type  sock: :class:`socket.socket`
        :returns:    transformed socket
        :rtype:      :class:`socket.socket`

        Called internally when the socket is connected. The native
        socket is then replaced with the return value of this
        method. Default returns the same socket, derived classes can
        override.

        """
        return sock
# Example 4
class VLinkAgent(VLink, VEntityWAgent):
    """Implements a :class:`versile.orb.link.VLink` as a reactor agent.

    :param reactor: a (running) reactor for the link
    :param conf:    additional configuration (default if None)
    :type  conf:    :class:`VLinkAgentConfig`

    Other arguments are similar to :class:`versile.orb.link.VLink`\ .

    The link is derived from a
    :class:`versile.reactor.io.vec.VEntityAgent`\ . It should be
    connected to a peer entity agent in order to establish
    communication with a link peer.

    If *reactor* is None and *conf.lazy_reactor* is True then a link
    reactor is lazy-created with a call to :meth:`_create_reactor`. If
    the reactor was lazy-created, then the link takes ownership of the
    reactor and shuts down the reactor when the link is shut down.

    .. warning::

        Similar to :class:`versile.orb.link.VLink` lazy-construction
        of processors, lazy-construction of reactors is primarily a
        convenience method for programs which operate a single
        link. As each reactor consumes a thread, using
        lazy-construction for running multiple links can cause reduced
        performance or exhaust the processor's available threads. When
        running multiple links, the program should create reactor(s)
        to be shared between links.

    .. automethod:: _create_reactor

    """

    def __init__(self, gateway=None, reactor=None, processor=None,
                 init_callback=None, context=None, auth=None, conf=None):
        # This statement must go before VLink.__init__ so overloaded
        # methods can safely assume self.reactor value is set
        if conf is None:
            conf = VLinkAgentConfig()
        if reactor is None:
            if conf.lazy_reactor:
                # Lazy-created reactor is owned by the link and stopped
                # again in _finalize_shutdown
                self.__lazy_reactor = True
                reactor = self._create_reactor()
            else:
                raise VLinkError('reactor or lazy_reactor must be provided')
        else:
            self.__lazy_reactor = False
        VEntityWAgent.__init__(self, reactor)
        VLink.__init__(self, gateway=gateway, processor=processor,
                       init_callback=init_callback, context=context,
                       auth=auth, conf=conf)
        if conf.str_encoding:
            self.str_encoding = conf.str_encoding

        self.__closing_input = self.__closing_output = False
        self._link_got_connect = False

        # Lock serializes message-ID generation with message dispatch so
        # messages cannot be sent out of ID order
        self._msg_id_provider = VLinearIDProvider()
        self.__send_msg_lock = threading.Lock()

        # Convenience logger interface which sets a prefix
        self.__logger = VLogger(prefix='Link')
        self.__logger.add_watcher(self.reactor.log)

    @classmethod
    def create_pair(cls, gw1=None, gw2=None, init_cback1=None,
                    init_cback2=None, reactor=None, processor=None,
                    internal=False, buf_size=None):
        """Creates and returns two locally connected link objects.

        :param gw1:         local gateway object for link1
        :type  gw1:         :class:`versile.orb.entity.VEntity`
        :param gw2:         local gateway object for link2
        :type  gw2:         :class:`versile.orb.entity.VEntity`
        :param init_cback1: handshake completion callback for link1
        :type  init_cback1: callable
        :param init_cback2: handshake completion callback for link2
        :type  init_cback2: callable
        :param internal:    if True set buffersizes for internal socket
        :type  internal:    bool
        :param buf_size:    if not None, override default buffersizes
        :type  buf_size:    int
        :returns:           two connecting links (link1, link2)
        :rtype:             (:class:`VLinkAgent`\ , :class:`VLinkAgent`\ )

        Remaining arguments are similar to :class:`VLinkAgent`\
        construction.

        If *internal* is True then buffer sizes in the link's
        consumer/producer chain are set to
        :attr:`DEFAULT_INT_LINK_BUFSIZE`\ , otherwise the socket and
        entity channel defaults are used. If *buf_size* is set then it
        is used as buffer size, regardless of the value of *internal*.

        """
        if gw1 is None:
            gw1 = VObject()
        if gw2 is None:
            gw2 = VObject()

        # Each link gets one end of a connected native socket pair
        s1, s2 = VClientSocketAgent.create_native_pair()
        links = []
        for s, gw, cback in (s1, gw1, init_cback1), (s2, gw2, init_cback2):
            _link = cls.from_socket(sock=s, gw=gw, init_cback=cback,
                                    reactor=reactor, processor=processor,
                                    internal=internal, buf_size=buf_size)
            links.append(_link)
        links = tuple(links)
        return links

    @classmethod
    def from_socket(cls, sock, gw=None, init_cback=None, reactor=None,
                    processor=None, context=None, auth=None, internal=False,
                    buf_size=None, conf=None):
        """Creates a link node which interacts via a client socket.

        :param sock:     client socket to set up on
        :type  sock:     :class:`socket.socket`
        :param internal: if True set buffersizes for internal socket
        :type  internal: bool
        :param buf_size: if not None, override default buffersizes
        :type  buf_size: int
        :returns:        link
        :rtype:          :class:`VLinkAgent`

        Other arguments are similar to :class:`VLinkAgent`\ .

        *sock* should be an already connected client socket. The link
        is set up as an unencrypted :term:`VEC` serialized :term:`VOL`
        connection.

        If *internal* is True then buffer sizes in the link's
        consumer/producer chain are set to
        :attr:`DEFAULT_INT_LINK_BUFSIZE`\ , otherwise the socket and
        entity channel defaults are used. If *buf_size* is set then it
        is used as buffer size, regardless of the value of *internal*.

        This method is primarily intended for setting up links locally
        between local threads or processes.

        """
        link = cls(gateway=gw, reactor=reactor, processor=processor,
                   init_callback=init_cback, context=context, auth=auth,
                   conf=conf)
        bsize = buf_size
        if internal and bsize is None:
            bsize = cls.DEFAULT_INT_LINK_BUFSIZE
        SCls = VClientSocketAgent
        if bsize is None:
            csock = SCls(reactor=link.reactor, sock=sock, connected=True)
        else:
            csock = SCls(reactor=link.reactor, sock=sock, connected=True,
                         max_read=bsize, max_write=bsize)
        # Attach the socket's byte interface to the link's serialized
        # byte agent interface
        link_io = link.create_byte_agent(internal=internal,
                                            buf_size=buf_size)
        csock.byte_io.attach(link_io)
        return link

    def create_byte_agent(self, internal=False, buf_size=None, conf=None):
        """Creates a byte agent interface to the link.

        :param internal: if True set buffersizes for internal socket
        :type  internal: bool
        :param buf_size: if not None, override default buffersizes
        :type  buf_size: int
        :param conf:    serializer configuration (default if None)
        :type  conf:    :class:`versile.reactor.io.vec.VEntitySerializerConfig`
        :returns:        link byte consumer/producer pair
        :rtype:          :class:`versile.reactor.io.VByteIOPair`
        :raises:         :exc:`versile.orb.error.VLinkError`

        This is a convenience method for creating a byte
        producer/consumer pair for serialized link communication. It
        creates a
        :class:`versile.reactor.io.vec.VEntitySerializer`, attaches
        its entity interface to the link, and returns the serializer's
        byte interfaces.

        .. warning::

            The method should only be called once, and the link cannot
            be connected to other entity producer/consumer interfaces
            when this method is called.

        If *internal* is True then buffer sizes in the link's entity
        channel is set to :attr:`DEFAULT_INT_LINK_BUFSIZE`\ ,
        otherwise the entity channel default is used. If *buf_size* is
        set then it is used as buffer size, regardless of the value of
        *internal*. When *internal* or *buf_size* are set, they
        override buffer sizes set on a *conf* object.

        If a *conf* configuration object is not provided, a default
        configuration is set up with *conf.weakctx* set to True.

        """
        if self.entity_consume.producer or self.entity_produce.consumer:
            raise VLinkError('Consumer or producer already connected')
        if internal and buf_size is None:
            buf_size = self.DEFAULT_INT_LINK_BUFSIZE
        if conf is None:
            conf = VEntitySerializerConfig(weakctx=True)
        SerCls = VEntitySerializer
        if buf_size is not None:
            # Explicit buffer sizes override any sizes set on conf
            conf.rbuf_len = buf_size
            conf.max_write = buf_size
        ser = SerCls(reactor=self.reactor, ctx=self, conf=conf)
        ser.entity_io.attach(self.entity_io)
        return ser.byte_io

    def create_vop_client(self, key=None, identity=None, certificates=None,
                          p_auth=None, vts=True, tls=False, insecure=False,
                          crypto=None, internal=False, buf_size=None,
                          vec_conf=None, vts_conf=None):
        """Create a client VOP I/O channel interface to the link.

        :param key:          key for secure VOP
        :type  key:          :class:`versile.crypto.VAsymmetricKey`
        :param identity:     identity (or None)
        :type  identity:     :class:`versile.crypto.x509.cert.VX509Name`
        :param certificates: chain
        :type  certificates: :class:`versile.crypto.x509.cert.VX509Certificate`
                             ,
        :param p_auth:       connection authorizer
        :type  p_auth:       :class:`versile.crypto.auth.VAuth`
        :param vts:          if True allow VTS secure connections
        :type  vts:          bool
        :param tls:          if True allow TLS secure connections
        :type  tls:          bool
        :param insecure:     if True allow insecure connections
        :type  insecure:     bool
        :param crypto:       crypto provider (default if None)
        :type  crypto:       :class:`versile.crypto.VCrypto`
        :param internal:     if True set buffersizes for internal socket
        :type  internal:     bool
        :param buf_size:     if not None, override default buffersizes
        :type  buf_size:     int
        :returns:            byte consumer/producer pair
        :rtype:              :class:`versile.reactor.io.VByteIOPair`

        Creates a byte I/O interface to Versile Object Protocol for
        the role of :term:`VOP` client.

        Creating the byte I/O interface will also connect the
        resulting producer/consumer chain to the link. The link cannot be
        connected to any other I/O chain before or after this call is made.

        """
        f = self.__create_vop_agent
        return f(True, key=key, identity=identity, certificates=certificates,
                 p_auth=p_auth, vts=vts, tls=tls, insecure=insecure,
                 crypto=crypto, internal=internal, buf_size=buf_size,
                 vec_conf=vec_conf, vts_conf=vts_conf)

    def create_vop_server(self, key=None, identity=None, certificates=None,
                          p_auth=None, vts=True, tls=False, insecure=False,
                          crypto=None, internal=False, buf_size=None,
                          vec_conf=None, vts_conf=None):
        """Create a server VOP I/O channel interface to the link.

        See :meth:`create_vop_client`\ . This method is similar,
        except it creates a byte I/O interface to Versile Object
        Protocol for the role of :term:`VOP` server.

        """
        f = self.__create_vop_agent
        return f(False, key=key, identity=identity, certificates=certificates,
                 p_auth=p_auth, vts=vts, tls=tls, insecure=insecure,
                 crypto=crypto, internal=internal, buf_size=buf_size,
                 vec_conf=vec_conf, vts_conf=vts_conf)

    def shutdown(self, force=False, timeout=None, purge=None):
        """Shut down the link.

        :param force:   if True perform a forced (immediate) shutdown
        :type  force:   bool
        :param timeout: seconds before escalating to forced shutdown
                        (link default if None)
        :type  timeout: float
        :param purge:   if True purge queued calls (link default if None)
        :type  purge:   bool

        """
        if timeout is None:
            timeout = self._force_timeout
        if purge is None:
            purge = self._purge_calls

        with self._status_cond:
            if not self._active:
                return
            elif not self._handshake_done:
                # Handshake never completed; there is nothing worth
                # flushing, so escalate to forced shutdown with purge
                force=True
                purge=True
            was_closing, self._closing = self._closing, True
            if not was_closing:
                # First shutdown call; notify status listeners
                self._status_cond.notify_all()
                self._status_bus.push(self.status)
            if not was_closing:
                self.__shutdown_input()
            if not was_closing or force:
                self.__shutdown_output(force, purge)
            if self._active and not force and timeout is not None:
                # Register force-shutdown after <timeout> seconds
                self.reactor.schedule(timeout, self.shutdown, force=True,
                                      purge=purge)

    def set_handshake_timeout(self, timeout):
        """Sets a timeout for completion of a link handshake.

        :param timeout: the timeout in seconds
        :type  timeout: float
        :returns:       reference to the timeout call
        :rtype:         :class:`versile.reactor.VScheduledCall`

        """
        # Weak reference so the scheduled timeout check does not keep
        # the link alive
        w_link = weakref.ref(self)
        def timeout_check():
            link = w_link()
            if link:
                if link._active and not link._handshake_done:
                    link.shutdown(force=True, purge=True)
        return self.reactor.schedule(timeout, timeout_check)

    @property
    def log(self):
        """Logger for the link (:class:`versile.common.log.VLogger`\ )."""
        return self.__logger

    def _create_reactor(self):
        """Creates a default reactor for the link type.

        :returns: reactor

        Creates and starts a reactor before returning. Derived classes
        can override to have other reactors created.

        """
        reactor = VReactor()
        reactor.start()
        return reactor

    def __create_vop_agent(self, is_client, key, identity, certificates,
                           p_auth, vts, tls, insecure, crypto, internal,
                           buf_size, vec_conf, vts_conf):
        # Common implementation behind create_vop_client/create_vop_server;
        # is_client selects the protocol role
        if p_auth is None:
            p_auth = VAuth()

        if not key:
            if identity is not None or certificates is not None:
                raise VUrlException('VOP credentials requires a key')
            elif not is_client and (vts or tls):
                raise VUrlException('server mode with VTS/TLS requires key')

        # Set up default crypto
        crypto = VCrypto.lazy(crypto)
        rand = VUrandom()

        bsize = buf_size
        if internal and bsize is None:
            bsize = VLinkAgent.DEFAULT_INT_LINK_BUFSIZE

        # Get VEC byte interface to link
        vec_io = self.create_byte_agent(internal=internal, buf_size=buf_size,
                                        conf=vec_conf)

        # Set up VOP multiplexer
        vts_factory = tls_factory = None
        if (vts):
            def _factory(reactor):
                if is_client:
                    Cls = VSecureClient
                else:
                    Cls = VSecureServer
                _vts = Cls(reactor=reactor, crypto=crypto,
                          rand=rand, keypair=key, identity=identity,
                          certificates=certificates, p_auth=p_auth,
                          conf=vts_conf)
                ext_c = _vts.cipher_consume
                ext_p = _vts.cipher_produce
                int_c = _vts.plain_consume
                int_p = _vts.plain_produce
                return (ext_c, ext_p, int_c, int_p)
            vts_factory = _factory
        if (tls):
            def _factory(reactor):
                if is_client:
                    Cls = VTLSClient
                else:
                    Cls = VTLSServer
                _tls = Cls(reactor=reactor, key=key, identity=identity,
                           certificates=certificates, p_auth=p_auth)
                ext_c = _tls.cipher_consume
                ext_p = _tls.cipher_produce
                int_c = _tls.plain_consume
                int_p = _tls.plain_produce
                return (ext_c, ext_p, int_c, int_p)
            tls_factory = _factory
        if is_client:
            Cls = VOPClientBridge
        else:
            Cls = VOPServerBridge
        vop = Cls(reactor=self.reactor, vec=vec_io, vts=vts_factory,
                  tls=tls_factory, insecure=insecure)

        # Return transport end-points
        return VByteIOPair(vop.external_consume, vop.external_produce)

    def __shutdown_input(self):
        # Stops link input: aborts the attached producer and fails all
        # pending local calls with VCallError
        with self._status_cond:
            self.__closing_input = True

            # Abort consumer interface input
            producer = self.entity_consume.producer
            if producer:
                self.reactor.schedule(0.0, producer.abort)

            # Pass exception to all local calls waiting for a result
            self._ref_calls_lock.acquire()
            try:
                calls = self._ref_calls.values()
                self._ref_calls.clear()
            finally:
                self._ref_calls_lock.release()
            for w_call in calls:
                # Entries are weak references; skip already-collected calls
                call = w_call()
                if call:
                    try:
                        call.push_exception(VCallError())
                    except VResultException as e:
                        _v_silent(e)

    def __shutdown_output(self, force, purge):
        with self._status_cond:
            self.__closing_output = True

            if not force:
                # If purging, clear all queued processor calls for this link
                if purge:
                    self.processor.remove_group_calls(self)

                # If there are queued or running calls, just return - shutdown
                # will continue as a callback when queue has cleared
                if self.processor.has_group_calls(self):
                    return
                self._ongoing_calls_lock.acquire()
                try:
                    if self._ongoing_calls:
                        return
                finally:
                    self._ongoing_calls_lock.release()

                # No queued or running calls - proceed with closing output
                self.__shutdown_writer(force=False)

            else:
                self.__shutdown_writer(force=True)

    def __shutdown_writer(self, force):
        with self._status_cond:
            if not self._active:
                return
        if not force:
            # NOTE(review): the end_write return value is captured but
            # never used - verify whether it should gate further shutdown
            output_closed = self.end_write(True)
        else:
            self.abort_writer()
            self._finalize_shutdown()

    def _data_ended(self, clean):
        # Peer ended data stream; initiate a non-forced shutdown
        self.reactor.schedule(0.0, self.shutdown, force=False,
                              timeout=self._force_timeout,
                              purge=self._purge_calls)

    def _consumer_aborted(self):
        # Consumer side aborted; initiate a non-forced shutdown
        self.reactor.schedule(0.0, self.shutdown, force=False,
                              timeout=self._force_timeout,
                              purge=self._purge_calls)

    def _producer_aborted(self):
        # Producer side aborted; no more output possible, force shutdown
        self.reactor.schedule(0.0, self.shutdown, force=True,
                              timeout=self._force_timeout,
                              purge=self._purge_calls)

    def _consumer_control(self):
        # I/O control object which reacts to chain state messages on
        # behalf of the link
        class _Control(VIOControl):
            def __init__(self, link):
                self.__link = link
            def connected(self, peer):
                # Process 'connected' state message (process only once)
                if not self.__link._link_got_connect:
                    self.__link._link_got_connect = True
                    self.__link.log.debug('Connected to %s' % peer)
                    _recv_lim = self.__link._config.vec_recv_lim
                    self.__link._set_receive_limit(_recv_lim)
                    self.__link.context._v_set_network_peer(peer)
                    self.__link._initiate_handshake()
            def authorize(self, key, certs, identity, protocol):
                # Log peer's claimed credentials on the link's call context
                _ctx = self.__link.context
                _ctx._v_set_credentials(key, certs)
                _ctx._v_set_claimed_identity(identity)
                _ctx._v_set_sec_protocol(protocol)
                # Perform link authorization and return result
                return self.__link._authorize()
            def notify_producer_attached(self, producer):
                # Request producer chain 'state'
                def request():
                    try:
                        _cons = self.__link.entity_consume
                        if not _cons.producer:
                            _v_silent(Exception('Notif. w/o producer'))
                            return
                        _cons.producer.control.req_producer_state(_cons)
                    except VIOMissingControl:
                        pass
                self.__link.reactor.schedule(0.0, request)
        return _Control(self)

    def _producer_attached(self):
        # Overriding means recv lim not set here, instead set when 'connected'
        try:
            _cons = self.entity_consume
            _cons.producer.control.req_producer_state(_cons)
        except VIOMissingControl:
            pass

    def _send_handshake_msg(self, msg):
        # Handshake messages bypass the message-ID framing of _send_msg
        self.write((msg,))

    def _send_msg(self, msg_code, payload):
        """Sends a VLink protocol-level message to peer

        :param msg_code: message code for the message type
        :type  msg_code: int, long
        :param payload:  message data for this message type
        :type  payload:  :class:`versile.orb.entity.VEntity`
        :returns:        message ID sent to peer
        :raises:         :exc:`versile.orb.error.VLinkError`

        A message ID is generated from the link's ID provider.

        """
        self.__send_msg_lock.acquire()
        try:
            msg_id = self._msg_id_provider.get_id()
            try:
                send_data = VTuple(VInteger(msg_id), VInteger(msg_code),
                                   payload)
                self.write((send_data,))
            # NOTE(review): original exception 'e' is discarded; chaining
            # it onto VLinkError would aid debugging
            except Exception as e:
                raise VLinkError('Could not send message')
            if self._keep_alive_send:
                # Record send time for keep-alive scheduling
                self._keep_alive_s_t = time.time()
            return msg_id
        finally:
            self.__send_msg_lock.release()

    def _send_call_msg(self, msg_code, payload, checks=None):
        """Dispatch a message to the link peer.

        :param msg_code: message code (as specified by protocol)
        :type  msg_code: int, long
        :param payload:  message payload (as specified by protocol)
        :type  payload:  :class:`versile.orb.entity.VEntity`
        :param checks:   call result validation checks (or None)
        :returns:        associated registered reference call
        :raises:         :exc:`versile.orb.error.VLinkError`

        Should hold a lock on message sending while generating a
        message ID and dispatching the associated message, in order to
        prevent protocol violations for messages being sent out of
        order due to concurrent sending.

        For internal use by link infrastructure, and should normally
        not be invoked directly by an application.

        """
        self.__send_msg_lock.acquire()
        try:
            msg_id = self._msg_id_provider.get_id()
            call = self._create_ref_call(msg_id, checks=checks)
            try:
                send_data = VTuple(VInteger(msg_id), VInteger(msg_code),
                                   payload)
                self.write((send_data,))
            except Exception as e:
                raise VLinkError('Could not send message')
            return call
        finally:
            self.__send_msg_lock.release()

    def _data_received(self, data):
        if not self._active:
            # Link no longer active - handle silently by performing
            # (another) shutdown of the input
            self.__shutdown_input()
            return
        for obj in data:
            # During handshake entities are handshake messages; afterwards
            # they are regular protocol messages
            if self._protocol_handshake:
                try:
                    self._recv_handshake(obj)
                except VLinkError as e:
                    raise VIOError('VLink handshake error', e.args)
            else:
                try:
                    self._recv_msg(obj)
                except VLinkError as e:
                    raise VIOError('VLink protocol error', e.args)

    def _finalize_shutdown(self):
        super(VLinkAgent, self)._finalize_shutdown()

        # If reactor was lazy-created, schedule reactor to stop itself
        if self.__lazy_reactor:
            self.log.debug('Lazy-stopping link reactor')
            self.reactor.schedule(0.0, self.reactor.stop)

    def _shutdown_calls_completed(self):
        """Called when pending shutdown and final call completed."""
        # ISSUE - the call to __shutdown_writer could potentially
        # cause a deadlock as it locks status_cond
        self.__shutdown_writer(force=False)

    def _schedule_keep_alive_send(self, delay):
        # delay is in milliseconds; reactor schedule takes seconds
        if self._active and not self._closing:
            self.reactor.schedule(delay/1000., self._handle_keep_alive_send)

    def _schedule_keep_alive_recv(self, delay):
        # delay is in milliseconds; reactor schedule takes seconds
        if self._active and not self._closing:
            self.reactor.schedule(delay/1000., self._handle_keep_alive_recv)
# Example 5
class VByteChannel(object):
    """Producer/consumer end-point for byte data.

    :param reactor:  reactor driving the socket's event handling
    :param buf_len:  buffer length for read operations
    :type  buf_len:  int

    This class is primarily intended for debugging byte producer/consumer
    I/O chains. The :meth:`recv`\ /\ :meth:`send` methods provide a
    blocking byte-stream API on one side, while :attr:`byte_consume` and
    :attr:`byte_produce` expose the reactor producer/consumer interfaces
    on the other side.

    """
    def __init__(self, reactor, buf_len=4096):
        self.__reactor = reactor
        self.__buf_len = buf_len

        # Byte-consumer (input) side state
        self.__bc_consumed = 0             # bytes consumed so far
        self.__bc_consume_lim = 0          # flow-control limit (-1: none)
        self.__bc_producer = None          # attached producer, if any
        self.__bc_eod = False              # end-of-data received
        self.__bc_eod_clean = None         # whether end-of-data was clean
        self.__bc_rbuf = VByteBuffer()     # buffer of received data
        self.__bc_rbuf_len = buf_len
        self.__bc_reader = None
        self.__bc_aborted = False
        self.__bc_cond = Condition()       # guards consumer-side state
        self.__bc_scheduled_lim_update = False

        # Byte-producer (output) side state
        self.__bp_produced = 0             # bytes produced so far
        self.__bp_produce_lim = 0          # flow-control limit (-1: none)
        self.__bp_consumer = None          # attached consumer, if any
        self.__bp_eod = False              # output was closed
        self.__bp_eod_clean = None         # whether output close was clean
        self.__bp_wbuf = VByteBuffer()     # buffer of outbound data
        self.__bp_wbuf_len = buf_len
        self.__bp_writer = None
        self.__bp_sent_eod = False         # end-of-data sent to consumer
        self.__bp_aborted = False
        self.__bp_cond = Condition()       # guards producer-side state
        self.__bp_scheduled_produce = False

        # Weak references to externalized consumer/producer interfaces
        self.__bc_iface = self.__bp_iface = None

        # Set up a local logger for convenience
        self.__logger = VLogger(prefix='ByteChannel')
        self.__logger.add_watcher(self.reactor.log)

    def __del__(self):
        self.__logger.debug('Dereferenced')

    def recv(self, max_read, timeout=None):
        """Receive input data from byte channel.

        :param max_read: max bytes to read (unlimited if None)
        :type  max_read: int
        :param timeout:  timeout in seconds (blocking if None)
        :type  timeout:  float
        :returns:        data read (empty if input was closed)
        :rtype:          bytes
        :raises:         :exc:`versile.reactor.io.VIOTimeout`\ ,
                         :exc:`versile.reactor.io.VIOError`

        """
        if timeout:
            start_time = time.time()
        with self.__bc_cond:
            while True:
                if self.__bc_rbuf:
                    if max_read is None:
                        result = self.__bc_rbuf.pop()
                    elif max_read > 0:
                        result = self.__bc_rbuf.pop(max_read)
                    else:
                        result = b''

                    # Trigger updating can_produce in reactor thread
                    if not self.__bc_scheduled_lim_update:
                        self.__bc_scheduled_lim_update = True
                        self.reactor.schedule(0.0, self.__bc_lim_update)

                    return result
                elif self.__bc_aborted:
                    raise VIOError('Byte input was aborted')
                elif self.__bc_eod:
                    if self.__bc_eod_clean:
                        return b''
                    else:
                        raise VIOError('Byte input was closed but not cleanly')

                # No data yet; wait for notification or handle timeout
                if timeout == 0.0:
                    raise VIOTimeout()
                elif timeout is not None and timeout > 0.0:
                    current_time = time.time()
                    if current_time > start_time + timeout:
                        raise VIOTimeout()
                    wait_time = start_time + timeout - current_time
                    self.__bc_cond.wait(wait_time)
                else:
                    self.__bc_cond.wait()

    def send(self, data, timeout=None):
        """Send output data to the byte channel.

        :param data:     data to write
        :type  data:     bytes
        :param timeout:  timeout in seconds (blocking if None)
        :type  timeout:  float
        :returns:        number bytes written
        :rtype:          int
        :raises:         :exc:`versile.reactor.io.VIOTimeout`\ ,
                         :exc:`versile.reactor.io.VIOError`

        """
        if timeout:
            start_time = time.time()
        with self.__bp_cond:
            while True:
                if self.__bp_aborted:
                    raise VIOError('Byte output was aborted')
                elif self.__bp_eod:
                    raise VIOError('Byte output was closed')
                if not data:
                    return 0
                max_write = self.__bp_wbuf_len - len(self.__bp_wbuf)
                if max_write > 0:
                    write_data = data[:max_write]
                    self.__bp_wbuf.append(write_data)

                    # Trigger reactor production
                    if not self.__bp_scheduled_produce:
                        self.__bp_scheduled_produce = True
                        self.reactor.schedule(0.0, self.__bp_do_produce)

                    return len(write_data)

                # Buffer full; wait for notification or handle timeout
                if timeout == 0.0:
                    raise VIOTimeout()
                elif timeout is not None and timeout > 0.0:
                    current_time = time.time()
                    if current_time > start_time + timeout:
                        raise VIOTimeout()
                    wait_time = start_time + timeout - current_time
                    # BUGFIX: must wait on the producer-side condition
                    # held by this 'with' block; waiting on __bc_cond
                    # (not held here) raises RuntimeError
                    self.__bp_cond.wait(wait_time)
                else:
                    # BUGFIX: see above - wait on the held condition
                    self.__bp_cond.wait()

    def close(self):
        """Closes the connection."""
        def _close():
            # Performed in reactor thread: close output (flushing any
            # buffered data) and mark input as cleanly ended
            if not self.__bp_aborted and not self.__bp_eod:
                self.__bp_eod = True
                self.__bp_eod_clean = True
                self.__bp_do_produce()
            if not self.__bc_aborted and not self.__bc_eod:
                self.__bc_eod = True
                self.__bc_eod_clean = True

        self.reactor.schedule(0.0, _close)

    def abort(self):
        """Aborts the connection."""
        def _abort():
            # Performed in reactor thread: abort both directions
            self._bc_abort()
            self._bp_abort()

        self.reactor.schedule(0.0, _abort)

    @property
    def byte_consume(self):
        """Holds the Byte Consumer interface to the serializer."""
        # Lazily (re-)create the interface; only a weak reference is
        # held so the external interface can be garbage-collected
        cons = None
        if self.__bc_iface:
            cons = self.__bc_iface()
        if not cons:
            cons = _VByteConsumer(self)
            self.__bc_iface = weakref.ref(cons)
        return cons

    @property
    def byte_produce(self):
        """Holds the Byte Producer interface to the serializer."""
        # Lazily (re-)create the interface; only a weak reference is held
        prod = None
        if self.__bp_iface:
            prod = self.__bp_iface()
        if not prod:
            prod = _VByteProducer(self)
            self.__bp_iface = weakref.ref(prod)
        return prod

    @property
    def byte_io(self):
        """Byte interface (\ :class:`versile.reactor.io.VByteIOPair`\ )."""
        return VByteIOPair(self.byte_consume, self.byte_produce)

    @property
    def reactor(self):
        """Holds the object's reactor."""
        return self.__reactor

    @peer
    def _bc_consume(self, data, clim):
        """Consume bytes from the attached producer; returns new limit."""
        if self.__bc_eod:
            raise VIOError('Consumer already received end-of-data')
        elif not self._bc_producer:
            raise VIOError('No connected producer')
        elif not data:
            raise VIOError('No data to consume')
        max_cons = self.__lim(self.__bc_consumed, self.__bc_consume_lim)
        if max_cons == 0:
            raise VIOError('Consume limit exceeded')
        if clim is not None and clim > 0:
            max_cons = min(max_cons, clim)

        with self.__bc_cond:
            buf_len = len(self.__bc_rbuf)
            self.__bc_rbuf.append_list(data.pop_list(max_cons))
            self.__bc_consumed += len(self.__bc_rbuf) - buf_len

            # Update consume limit
            max_add = self.__lim(len(self.__bc_rbuf), self.__bc_rbuf_len)
            if max_add >= 0:
                self.__bc_consume_lim = self.__bc_consumed + max_add
            else:
                self.__bc_consume_lim = -1

            # Notify data is available
            self.__bc_cond.notify_all()

        return self.__bc_consume_lim

    @peer
    def _bc_end_consume(self, clean):
        """Handle end-of-data from the attached producer."""
        if self.__bc_eod:
            return
        self.__bc_eod = True
        self.__bc_eod_clean = clean

        with self.__bc_cond:
            # Wake any recv() blocked waiting for data
            self.__bc_cond.notify_all()

    def _bc_abort(self):
        """Abort the consumer side, detaching any producer."""
        if not self.__bc_aborted:
            with self.__bc_cond:
                self.__bc_aborted = True
                self.__bc_eod = True
                if self.__bc_producer:
                    self.__bc_producer.abort()
                    self._bc_detach()
                self.__bc_cond.notify_all()

    def _bc_attach(self, producer, rthread=False):
        """Attach *producer* to the consumer side (reactor thread only)."""
        # Ensure 'attach' is performed in reactor thread
        if not rthread:
            self.reactor.execute(self._bc_attach, producer, rthread=True)
            return

        if self.__bc_producer is producer:
            return
        if self.__bc_eod:
            raise VIOError('Consumer already received end-of-data')
        elif self.__bc_producer:
            raise VIOError('Producer already connected')
        self.__bc_producer = producer
        self.__bc_consumed = 0
        self.__bc_consume_lim = self.__lim(len(self.__bc_rbuf),
                                           self.__bc_rbuf_len)
        producer.attach(self.byte_consume)
        producer.can_produce(self.__bc_consume_lim)

        # Notify attached chain
        try:
            producer.control.notify_consumer_attached(self.byte_consume)
        except VIOMissingControl:
            pass

    def _bc_detach(self, rthread=False):
        """Detach any attached producer (reactor thread only)."""
        # Ensure 'detach' is performed in reactor thread
        if not rthread:
            self.reactor.execute(self._bc_detach, rthread=True)
            return

        if self.__bc_producer:
            prod, self.__bc_producer = self.__bc_producer, None
            self.__bc_consumed = self.__bc_consume_lim = 0
            prod.detach()

    @peer
    def _bp_can_produce(self, limit):
        """Update produce limit set by the attached consumer."""
        if not self.__bp_consumer:
            raise VIOError('No attached consumer')
        if limit is None or limit < 0:
            # New limit is 'unlimited'; only an update if the current
            # limit was a finite non-negative value
            if (self.__bp_produce_lim is not None
                    and not self.__bp_produce_lim < 0):
                self.__bp_produce_lim = limit
                if not self.__bp_scheduled_produce:
                    self.__bp_scheduled_produce = True
                    self.reactor.schedule(0.0, self.__bp_do_produce)
        else:
            # New finite limit; only an update if it increases the limit
            if (self.__bp_produce_lim is not None
                    and 0 <= self.__bp_produce_lim < limit):
                self.__bp_produce_lim = limit
                if not self.__bp_scheduled_produce:
                    self.__bp_scheduled_produce = True
                    self.reactor.schedule(0.0, self.__bp_do_produce)

    def _bp_abort(self):
        """Abort the producer side, detaching any consumer."""
        if not self.__bp_aborted:
            with self.__bp_cond:
                self.__bp_aborted = True
                self.__bp_wbuf.remove()
                if self.__bp_consumer:
                    self.__bp_consumer.abort()
                    self._bp_detach()
                self.__bp_cond.notify_all()

    def _bp_attach(self, consumer, rthread=False):
        """Attach *consumer* to the producer side (reactor thread only)."""
        # Ensure 'attach' is performed in reactor thread
        if not rthread:
            self.reactor.execute(self._bp_attach, consumer, rthread=True)
            return

        if self.__bp_consumer is consumer:
            return
        if self.__bp_consumer:
            raise VIOError('Consumer already attached')
        elif self.__bp_eod:
            raise VIOError('Producer already reached end-of-data')
        self.__bp_consumer = consumer
        self.__bp_produced = self.__bp_produce_lim = 0
        consumer.attach(self.byte_produce)

        # Notify attached chain
        try:
            consumer.control.notify_producer_attached(self.byte_produce)
        except VIOMissingControl:
            pass

    def _bp_detach(self, rthread=False):
        """Detach any attached consumer (reactor thread only)."""
        # Ensure 'detach' is performed in reactor thread
        if not rthread:
            self.reactor.execute(self._bp_detach, rthread=True)
            return

        if self.__bp_consumer:
            cons, self.__bp_consumer = self.__bp_consumer, None
            cons.detach()
            self.__bp_produced = self.__bp_produce_lim = 0

    @property
    def _bc_control(self):
        return VIOControl()

    @property
    def _bc_producer(self):
        return self.__bc_producer

    @property
    def _bc_flows(self):
        # BUGFIX: was 'self.entity_produce' which is not defined on this
        # class (copied from an entity channel); the reverse byte flow is
        # the byte producer interface
        return (self.byte_produce, )

    @property
    def _bc_twoway(self):
        return True

    @property
    def _bc_reverse(self):
        return self.byte_produce

    @property
    def _bp_control(self):
        return VIOControl()

    @property
    def _bp_consumer(self):
        return self.__bp_consumer

    @property
    def _bp_flows(self):
        # BUGFIX: was 'self.entity_consume' which is not defined on this
        # class (copied from an entity channel); the reverse byte flow is
        # the byte consumer interface
        return (self.byte_consume, )

    @property
    def _bp_twoway(self):
        return True

    @property
    def _bp_reverse(self):
        return self.byte_consume

    def __bc_lim_update(self):
        """Push an updated consume limit to the producer (reactor thread)."""
        self.__bc_scheduled_lim_update = False

        if not self.__bc_producer or self.__bc_aborted or self.__bc_eod:
            return

        old_lim = self.__bc_consume_lim
        self.__bc_consume_lim = self.__lim(len(self.__bc_rbuf),
                                           self.__bc_rbuf_len)
        if old_lim != self.__bc_consume_lim:
            self.__bc_producer.can_produce(self.__bc_consume_lim)

    def __bp_do_produce(self):
        """Deliver buffered output to the consumer (reactor thread)."""
        self.__bp_scheduled_produce = False

        if not self.__bp_consumer:
            return

        with self.__bp_cond:
            if self.__bp_eod:
                # If end-of-data was reached notify consumer
                if self.__bp_consumer and not self.__bp_sent_eod:
                    self.__bp_consumer.end_consume(self.__bp_eod_clean)
                    self.__bp_sent_eod = True
                return

            if (self.__bp_produce_lim is not None
                    and 0 <= self.__bp_produce_lim <= self.__bp_produced):
                return

            old_lim = self.__bp_produce_lim
            max_write = self.__lim(self.__bp_produced, self.__bp_produce_lim)
            if max_write != 0 and self.__bp_wbuf:
                old_len = len(self.__bp_wbuf)
                new_lim = self.__bp_consumer.consume(self.__bp_wbuf)
                # BUGFIX: bytes produced is the amount drained from the
                # buffer (old_len - remaining), not the buffer capacity
                # __bp_wbuf_len, which over-counts for non-full buffers
                self.__bp_produced += old_len - len(self.__bp_wbuf)
                self.__bp_produce_lim = new_lim
                if old_len != len(self.__bp_wbuf):
                    self.__bp_cond.notify_all()

            # Schedule another if produce limit was updated and buffer has data
            if self.__bp_wbuf and self.__bp_produce_lim != old_lim:
                if not self.__bp_scheduled_produce:
                    self.__bp_scheduled_produce = True
                    self.reactor.schedule(0.0, self.__bp_do_produce)

    @classmethod
    def __lim(cls, base, *lims):
        """Return smallest (lim-base) limit, or -1 if all limits are <0"""
        result = -1
        for lim in lims:
            if lim is not None and lim >= 0:
                lim = max(lim - base, 0)
                if result < 0:
                    result = lim
                result = min(result, lim)
        return result
# ---- Ejemplo n.º 6 ----
# 0
    def __init__(self,
                 reactor,
                 key,
                 identity=None,
                 certificates=None,
                 p_auth=None,
                 buf_size=4096):
        """Set up a TLS byte-I/O bridge over an internal socket pair.

        :param reactor:      reactor handling socket events
        :param key:          key for the TLS session
        :param identity:     TLS identity (or None)
        :param certificates: TLS certificates (or None)
        :param p_auth:       peer authorizer (defaults to a new VAuth)
        :param buf_size:     socket agent buffer size
        :type  buf_size:     int

        Creates a ciphertext-side and a plaintext-side socket agent
        joined by a native socket pair, and cross-wires their control
        interfaces so control requests proxy through the TLS bridge.

        """
        if p_auth is None:
            p_auth = VAuth()

        self.__reactor = reactor

        # Set up an internal TCP socket connection for performing
        # TLS conversion via the python ssl module's TLS support
        ext_sock, int_sock = VTCPSocket.create_native_pair()

        # Set up agent for ciphertext communication
        cipher_sock = VClientSocketAgent(reactor,
                                         sock=ext_sock,
                                         connected=True,
                                         wbuf_len=buf_size)
        cipher_sock._set_logger()
        self.__c_consumer = cipher_sock.byte_consume
        self.__c_producer = cipher_sock.byte_produce

        # Set up plaintext agent which handles TLS protocol
        plain_sock = self._create_tls_socket(reactor, int_sock, key, identity,
                                             certificates, p_auth, buf_size)
        tls_logger = VLogger(prefix='TLS')
        tls_logger.add_watcher(reactor.log)
        plain_sock._set_logger(tls_logger)

        self.__p_consumer = plain_sock.byte_consume
        self.__p_producer = plain_sock.byte_produce

        # Replace ciphertext agent consumer control handling with a proxy
        # to the consumer attached to the TLS socket (if any)
        _p_prod = self.__p_producer

        def _cipher_c_control():
            if _p_prod.consumer:
                return _p_prod.consumer.control
            else:
                return VIOControl()

        cipher_sock._c_get_control = _cipher_c_control

        # Replace ciphertext agent producer control handling with a proxy
        # to the producer attached to the TLS socket (if any)
        _p_cons = self.__p_consumer

        def _cipher_p_control():
            if _p_cons.producer:
                return _p_cons.producer.control
            else:
                return VIOControl()

        cipher_sock._p_get_control = _cipher_p_control

        # Replace ciphertext agent consumer control handling with a proxy
        # to the ciphertext's connected producer
        _c_cons = self.__c_consumer

        def _plain_p_control():
            if _c_cons.producer:
                return _c_cons.producer.control
            else:
                return VIOControl()

        plain_sock._p_get_control = _plain_p_control

        # Replace plaintext agent consumer control handling with a proxy
        # to the ciphertext's connected consumer, while also
        # handling req_producer_state
        _c_prod = self.__c_producer

        def _plain_c_control():
            if _c_prod.consumer:
                proxy_control = _c_prod.consumer.control
            else:
                proxy_control = VIOControl()

            class _Control(VIOControl):
                def __getattr__(self, attr):
                    # BUGFIX: delegate the *named* attribute dynamically;
                    # 'proxy_control.attr' looked up an attribute
                    # literally called 'attr'
                    return getattr(proxy_control, attr)

                def req_producer_state(self, consumer):
                    # Perform request pass-through
                    try:
                        proxy_control.req_producer_state(consumer)
                    except VIOMissingControl:
                        pass
                    # Pass TLS authorization information if available
                    if plain_sock._tls_handshake_done:

                        def notify():
                            _auth = plain_sock._tls_authorize_peer()
                            if not _auth:
                                # Not authorized, abort
                                plain_sock._c_abort()
                                plain_sock._p_abort()
                                cipher_sock._c_abort()
                                cipher_sock._p_abort()

                        reactor.schedule(0.0, notify)

            return _Control()

        plain_sock._c_get_control = _plain_c_control
# ---- Ejemplo n.º 7 ----
# 0
class VFDWaitReactor(threading.Thread):
    """Base class for reactor which waits on multiple file descriptors.

    This class is abstract and should not be directly instantiated,
    instead a derived class should be used.

    The reactor provides descriptor event handling services for
    descriptors, and it provides timer services.

    :param daemon: if True run reactor thread as a daemonic thread
    :param daemon: bool

    If *daemon* is set, then the reactor thread object is set up
    as a daemon thread, by setting self.daemon=True. When the
    reactor is executed as a thread and the daemonic property is
    set, the thread will not block program exit and will be
    terminated if only daemon threads are running.

    The reactor is not started when it is created. Its event loop can
    either be run by calling :meth:`run` or by starting it as a
    thread.

    """

    # Internal message codes for message-driven actions, sent over the
    # reactor's control channel to be processed by the reactor thread
    (__ADD_READER, __REMOVE_READER, __ADD_WRITER,
     __REMOVE_WRITER, __STOP, __ADD_CALL, __REMOVE_CALL) = range(7)

    # Internal codes for internal I/O wait function (_fd_wait events)
    _FD_READ = 1          # descriptor ready for reading
    _FD_WRITE = 2         # descriptor ready for writing
    _FD_ERROR = 0         # descriptor error (both directions)
    _FD_READ_ERROR = -1   # error on the read side only
    _FD_WRITE_ERROR = -2  # error on the write side only

    def __init__(self, daemon=False):
        """Initialize reactor thread state and control structures.

        :param daemon: if True run reactor thread as a daemonic thread
        :type  daemon: bool

        Sets up descriptor registries, the internal control channel used
        to wake the reactor thread, scheduled-call bookkeeping and the
        reactor loggers. The reactor is not started here.

        """
        super(VFDWaitReactor, self).__init__()
        if daemon:
            self.daemon = True
        # NOTE(review): thread name prefix is hard-coded 'VSelectReactor-'
        # even though this base class is VFDWaitReactor - confirm intended
        self.name = 'VSelectReactor-' + str(self.name.split('-')[1])

        # Registered reader/writer descriptors handled by the event loop
        self.__readers, self.__writers = set(), set()

        self.__finished = False
        self.__thread = None

        # Control channel for waking the reactor: a plain os.pipe() where
        # supported, otherwise (IronPython/win32) a native socket pair
        if _vplatform == 'ironpython' or sys.platform == _b2s(b'win32'):
            self.__ctrl_is_pipe = False
            from versile.reactor.io.tcp import VTCPSocket
            self.__ctrl_r, self.__ctrl_w = VTCPSocket.create_native_pair()
        else:
            self.__ctrl_is_pipe = True
            self.__ctrl_r, self.__ctrl_w = os.pipe()
        self.__ctrl_queue = deque()
        self.__ctrl_msg_flag = False
        self.__ctrl_stop = False
        # Locks __ctrl_msg_flag, __ctrl_queue, writing to __ctrl_w
        self.__ctrl_lock = threading.Lock()

        self.__scheduled_calls = []          # heapq-ordered list
        self.__grouped_calls = {}
        self.__calls_lock = threading.Lock() # Locks scheduled/grouped calls
        self.__t_next_call = None            # Timestamp next call (or None)

        self.__core_log = VLogger()
        self.__logger = VReactorLogger(self)
        self.__logger.add_watcher(self.__core_log)
        # Reactor-only proxy logger which adds a prefix
        self.__rlog = self.__core_log.create_proxy_logger(prefix='Reactor')

    # Class default log watcher (None until a default watcher is set;
    # consumed by started() when the reactor begins running)
    __default_log_watcher = None

    def run(self):
        """See :meth:`versile.reactor.IVCoreReactor.run`\ .

        Runs the event loop in the calling thread; may only be invoked
        once. All reactor resources (control channel, descriptor sets,
        scheduled calls) are released when the loop terminates.

        """
        if self.__finished or self.__thread:
            raise RuntimeError('Can only start reactor once')
        self.__thread = threading.current_thread()
        self.started()

        try:
            self._run()
        finally:
            # Clean up and free resources
            self.__finished = True
            if self.__ctrl_is_pipe:
                # Control channel is a raw os.pipe() descriptor pair
                for pipe_fd in self.__ctrl_r, self.__ctrl_w:
                    try:
                        os.close(pipe_fd)
                    except Exception as e:
                        _v_silent(e)
            else:
                # Control channel is a pair of native socket objects
                for pipe_f in self.__ctrl_r, self.__ctrl_w:
                    try:
                        pipe_f.close()
                    except Exception as e:
                        _v_silent(e)
            self.__readers.clear()
            self.__writers.clear()
            self.__ctrl_queue.clear()
            self.__scheduled_calls = []
            self.__grouped_calls.clear()
            self._fd_done()

    @final
    def _run(self):
        """Runs the reactor's event handling loop.

        Repeatedly waits on descriptors via :meth:`_fd_wait` (with a
        timeout derived from the next scheduled call), dispatches
        read/write/error events, processes internal control messages
        received on the control channel, and executes timed-out
        scheduled calls. Returns when a stop message is processed.

        """
        # Monitor control message pipe
        self._add_read_fd(self.__ctrl_r)

        s_calls = deque()
        while True:
            # Derive I/O wait timeout from the next scheduled call, if any
            t_next_call = self.__t_next_call
            if t_next_call is not None:
                timeout = max(t_next_call - time.time(), 0)
            else:
                timeout = None

            for event, fd in self._fd_wait(timeout):
                if event == self._FD_ERROR:
                    # Error on both directions: detach and close the fd
                    if fd in (self.__ctrl_r, self.__ctrl_w):
                        self.__rlog.critical('Reactor message pipe failed')
                        raise VReactorException('Reactor message pipe error')
                    self.remove_reader(fd, internal=True)
                    self.remove_writer(fd, internal=True)
                    if not isinstance(fd, int):
                        fd.close_io(VFIOLost())
                    elif fd >= 0:
                        # WORKAROUND - poll.poll() on OSX some times produces
                        # descriptors that do not close cleanly; we ignore this
                        try:
                            os.close(fd)
                        except Exception as e:
                            _v_silent(e)
                elif event == self._FD_READ_ERROR:
                    # Error on the read side only
                    if fd in (self.__ctrl_r, self.__ctrl_w):
                        self.__rlog.critical('Reactor message pipe failed')
                        raise VReactorException('Reactor message pipe error')
                    self.remove_reader(fd, internal=True)
                    if not isinstance(fd, int):
                        fd.close_input(VFIOLost())
                    elif fd >= 0:
                        # WORKAROUND - poll.poll() on OSX some times produces
                        # descriptors that do not close cleanly; we ignore this
                        try:
                            os.close(fd)
                        except Exception as e:
                            _v_silent(e)
                elif event == self._FD_WRITE_ERROR:
                    # Error on the write side only
                    if fd in (self.__ctrl_r, self.__ctrl_w):
                        self.__rlog.critical('Reactor message pipe failed')
                        raise VReactorException('Reactor message pipe error')
                    self.remove_writer(fd, internal=True)
                    if not isinstance(fd, int):
                        fd.close_output(VFIOLost())
                    elif fd >= 0:
                        # WORKAROUND - poll.poll() on OSX some times produces
                        # descriptors that do not close cleanly; we ignore this
                        try:
                            os.close(fd)
                        except Exception as e:
                            _v_silent(e)
                elif event == self._FD_READ:
                    # Control channel compares by value for pipes and by
                    # identity for socket objects
                    if ((self.__ctrl_is_pipe and fd == self.__ctrl_r)
                        or not self.__ctrl_is_pipe and fd is self.__ctrl_r):
                        messages = self.__msg_pop_all()
                        for code, data in messages:
                            self.__msg_process(code, data)
                            # Execute stop instruction immediately
                            if self.__ctrl_stop:
                                return
                        messages = code = data = None
                    elif fd in self.__readers:
                        try:
                            fd.do_read()
                        except:
                            # Should never happen if compliant do_read
                            self.__rlog.log_trace(lvl=self.log.ERROR) #DBG
                            self.__rlog.info('do_read() exception, aborting')
                            self.remove_reader(fd, internal=True)
                            fd.close_input(VFIOLost())
                elif event == self._FD_WRITE:
                    if fd in self.__writers:
                        try:
                            fd.do_write()
                        except:
                            # Should never happen if compliant do_write
                            self.__rlog.log_trace(lvl=self.log.ERROR) #DBG
                            self.__rlog.info('do_write() exception, aborting')
                            self.remove_writer(fd, internal=True)
                            fd.close_output(VFIOLost())
            if self.__ctrl_stop:
                return

            # Execute all timed-out scheduled calls
            if self.__scheduled_calls:
                self.__calls_lock.acquire()
                try:
                    loop_time = time.time()
                    _sc = self.__scheduled_calls
                    while _sc:
                        call = _sc[0]
                        if call.scheduled_time <= loop_time:
                            call = heapq.heappop(_sc)
                            s_calls.append(call)
                        else:
                            break
                    _sc = None
                    if s_calls:
                        # Drop popped calls from their call groups, and
                        # drop emptied groups entirely
                        for call in s_calls:
                            if call.callgroup:
                                callgroup = call.callgroup
                                group = self.__grouped_calls.get(callgroup,
                                                                 None)
                                if group:
                                    group.discard(call)
                                    if not group:
                                        self.__grouped_calls.pop(callgroup)
                        callgroup = None
                        if self.__scheduled_calls:
                            _t = self.__scheduled_calls[0].scheduled_time
                            self.__t_next_call = _t
                        else:
                            self.__t_next_call = None
                finally:
                    self.__calls_lock.release()
                # Calls are executed outside __calls_lock so callbacks may
                # themselves schedule new calls
                for call in s_calls:
                    try:
                        call.execute()
                    except Exception as e:
                        self.__rlog.error('Scheduled call failed')
                        self.__rlog.log_trace(lvl=self.log.ERROR)
                s_calls.clear()

                # Lose any loop variable references
                call = fd = None

    def started(self):
        """See :meth:`versile.reactor.IVCoreReactor.started`\ .

        If a default log watcher has been set with
        :meth:`set_default_log_watcher`\ , then the default watcher is
        added to this reactor's logger.

        """
        # EAFP equivalent of a hasattr() check for a default watcher
        try:
            watcher = self._default_log_watcher
        except AttributeError:
            return
        self.__core_log.add_watcher(watcher)

    @final
    def stop(self):
        """See :meth:`versile.reactor.IVCoreReactor.stop`\ ."""
        # A stop message is only meaningful for a reactor that has been
        # started and has not yet finished
        if not self.__finished and self.__thread is not None:
            self.__msg_stop()

    @final
    def add_reader(self, reader, internal=False):
        """See :meth:`versile.reactor.IVDescriptorReactor.add_reader`\ ."""
        if self.__finished:
            raise VReactorStopped('Reactor was stopped.')
        if not (internal or self.__is_reactor_thread()):
            # Not in reactor thread: hand over via the control channel
            self.__msg_add_reader(reader)
            return
        # TROUBLESHOOT - 'internal' is a promise that the caller runs in
        # the reactor thread; if set True from another thread there can
        # be all kinds of thread conflicts. In case of odd errors that
        # indicate thread conflicts, a debug check for
        # 'internal and not self.__is_reactor_thread()' can go here
        try:
            self._add_read_fd(reader)
        except IOError as e:
            _v_silent(e) # Ignoring for now
        else:
            self.__readers.add(reader)

    @final
    def add_writer(self, writer, internal=False):
        """See :meth:`versile.reactor.IVDescriptorReactor.add_writer`\ ."""
        if self.__finished:
            raise VReactorStopped('Reactor was stopped.')
        if not (internal or self.__is_reactor_thread()):
            # Not in reactor thread: hand over via the control channel
            self.__msg_add_writer(writer)
            return
        # TROUBLESHOOT - see add_reader comments regarding 'internal'
        try:
            self._add_write_fd(writer)
        except IOError as e:
            _v_silent(e) # Ignoring for now
        else:
            self.__writers.add(writer)

    @final
    def remove_reader(self, reader, internal=False):
        """See :meth:`versile.reactor.IVDescriptorReactor.remove_reader`\ ."""
        if not (internal or self.__is_reactor_thread()):
            # Not in reactor thread: hand over via the control channel
            self.__msg_remove_reader(reader)
            return
        # TROUBLESHOOT - see add_reader comments regarding 'internal'
        if reader in self.__readers:
            self.__readers.discard(reader)
            self._remove_read_fd(reader)

    @final
    def remove_writer(self, writer, internal=False):
        """See :meth:`versile.reactor.IVDescriptorReactor.remove_writer`\ ."""
        if not (internal or self.__is_reactor_thread()):
            # Not on the reactor thread; queue as a control message
            self.__msg_remove_writer(writer)
            return
        # NOTE: internal=True is a promise the caller is the reactor thread
        if writer in self.__writers:
            self.__writers.discard(writer)
            self._remove_write_fd(writer)

    @final
    def remove_all(self):
        """See :meth:`versile.reactor.IVDescriptorReactor.remove_all`\ .

        Should only be called by the reactor thread.

        """
        # 'readers' and 'writers' are properties returning snapshot copies;
        # they must not be called. Calling the returned set raised TypeError
        # in the previous code. Iterating the snapshots also makes removal
        # during iteration safe.
        for r in self.readers:
            self.remove_reader(r)
        for w in self.writers:
            self.remove_writer(w)

    @final
    @property
    def readers(self):
        """See :attr:`versile.reactor.IVDescriptorReactor.readers`\ .

        Should only be called by the reactor thread.

        """
        # Hand out a snapshot so callers cannot mutate internal state
        return set(self.__readers)

    @final
    @property
    def writers(self):
        """See :attr:`versile.reactor.IVDescriptorReactor.writers`\ .

        Should only be called by the reactor thread.

        """
        # Hand out a snapshot so callers cannot mutate internal state
        return set(self.__writers)

    @final
    @property
    def log(self):
        """See :attr:`versile.reactor.IVCoreReactor.log`\ .

        Holds the reactor's logger object.

        """
        return self.__logger

    def time(self):
        """See :meth:`versile.reactor.IVTimeReactor.time`

        :returns: current system time in seconds since the epoch
        :rtype:   float

        """
        return time.time()

    @final
    def execute(self, callback, *args, **kargs):
        """See :meth:`versile.reactor.IVTimeReactor.execute`\ ."""
        # Off the reactor thread the call must be scheduled; on the
        # reactor thread it can run immediately
        if not self.__is_reactor_thread():
            return self.schedule(0.0, callback, *args, **kargs)
        return callback(*args, **kargs)

    @final
    def schedule(self, delay_time, callback, *args, **kargs):
        """See :meth:`versile.reactor.IVTimeReactor.schedule`

        :param delay_time: delay in seconds before executing *callback*
        :type  delay_time: float
        :param callback:   the callable to schedule
        :returns:          the scheduled call object

        """
        return VScheduledCall(self, delay_time, None, callback,
                              True, *args, **kargs)

    @final
    def cg_schedule(self, delay_time, callgroup, callback, *args, **kargs):
        """See :meth:`versile.reactor.IVTimeReactor.cg_schedule`

        :param delay_time: delay in seconds before executing *callback*
        :type  delay_time: float
        :param callgroup:  call group for batched removal of calls
        :param callback:   the callable to schedule
        :returns:          the scheduled call object

        """
        return VScheduledCall(self, delay_time, callgroup, callback,
                              True, *args, **kargs)

    @final
    def call_when_running(self, callback, *args, **kargs):
        """See :meth:`versile.reactor.IVCoreReactor.call_when_running`

        Implemented as an immediate (zero-delay) scheduled call.

        """
        return self.schedule(0.0, callback, *args, **kargs)

    @final
    def add_call(self, call, internal=False):
        """See :meth:`versile.reactor.IVTimeReactor.add_call`\ .

        :param call:     the scheduled call to register
        :param internal: if True, caller promises it is the reactor thread

        """
        if not call.active:
            return
        if internal or self.__is_reactor_thread():
            # NOTE: internal=True from a non-reactor thread can cause
            # thread conflicts; the reactor relies on it as a promise
            with self.__calls_lock:
                heapq.heappush(self.__scheduled_calls, call)

                if call.callgroup:
                    # Track the call under its callgroup for group removal
                    group = self.__grouped_calls.setdefault(call.callgroup,
                                                            set())
                    group.add(call)

                # The heap is non-empty after the push; its root holds the
                # time of the next scheduled call
                self.__t_next_call = self.__scheduled_calls[0].scheduled_time
        else:
            self.__msg_add_call(call)

    @final
    def remove_call(self, call, internal=False):
        """See :meth:`versile.reactor.IVTimeReactor.remove_call`\ .

        :param call:     the scheduled call to remove
        :param internal: if True, caller promises it is the reactor thread

        """
        if internal or self.__is_reactor_thread():
            # NOTE: internal=True from a non-reactor thread can cause
            # thread conflicts; the reactor relies on it as a promise
            with self.__calls_lock:
                self.__remove_call(call)
        else:
            self.__msg_remove_call(call)

    def cg_remove_calls(self, callgroup):
        """See :meth:`versile.reactor.IVTimeReactor.cg_remove_calls`\ .

        :param callgroup: the call group whose calls are all removed

        """
        with self.__calls_lock:
            # Iterate a copy: __remove_call mutates the group set
            calls = copy.copy(self.__grouped_calls.get(callgroup, None))
            if calls:
                for call in calls:
                    self.__remove_call(call)

    @classmethod
    def set_default_log_watcher(cls, lvl=None, watcher=None):
        """Set a class default log watcher for reactor logging.

        :param lvl:     log level (or None)
        :type  lvl:     int
        :param watcher: log watcher (or None)
        :type  watcher: :class:`versile.common.log.VLogWatcher`

        Not thread-safe. Intended mainly to be called in the beginning
        of a program in order to set a watcher for general reactor log
        output. If no watcher is specified sets up default logging to
        the console. If lvl is not None then adds a filter to the
        watcher for the given debug level.

        """
        if watcher is None:
            # Default to console logging
            watcher = VConsoleLog(VLogEntryFormatter())
        if lvl is not None:
            from versile.common.log import VLogEntryFilter

            class _LevelFilter(VLogEntryFilter):
                def keep_entry(self, log_entry):
                    return log_entry.lvl >= lvl

            # Wrap the target watcher in a filtering logger
            filtered = VLogger()
            filtered.add_watch_filter(_LevelFilter())
            filtered.add_watcher(watcher)
            watcher = filtered
        VFDWaitReactor._default_log_watcher = watcher

    def _fd_wait(self, timeout):
        """Wait on I/O on registered file descriptors.

        :param timeout: max time to wait for events
          (exact semantics defined by the subclass implementation)

        Generator which yields tuples of (event, fd). *fd* is a file
        descriptor [either an integer or an object with fileno()]
        which was registered for reading or writing.

        *event* is one of :attr:`_FD_READ`\ , :attr:`_FD_WRITE` or
        :attr:`_FD_ERROR`\ .

        Abstract; must be implemented by the I/O wait subsystem subclass.

        """
        raise NotImplementedError()

    def _add_read_fd(self, fd):
        """Called internally to add a reader.

        :param fd: file descriptor or object with fileno() method

        Should only be called by the reactor thread.

        Abstract; must be implemented by the I/O wait subsystem subclass.

        """
        raise NotImplementedError()

    def _add_write_fd(self, fd):
        """Called internally to add a writer.

        :param fd: file descriptor or object with fileno() method

        Should only be called by the reactor thread.

        Abstract; must be implemented by the I/O wait subsystem subclass.

        """
        raise NotImplementedError()

    def _remove_read_fd(self, fd):
        """Removes both internally and externally registered readers.

        :param fd: file descriptor or object with fileno() method

        Should only be called by the reactor thread.

        Abstract; must be implemented by the I/O wait subsystem subclass.

        """
        raise NotImplementedError()

    def _remove_write_fd(self, fd):
        """Removes both internally and externally registered writers.

        :param fd: file descriptor or object with fileno() method

        Should only be called by the reactor thread.

        Abstract; must be implemented by the I/O wait subsystem subclass.

        """
        raise NotImplementedError()

    def _fd_done(self):
        """Internal call to notify fd wait subsystem reactor loop has ended.

        Subsystem should use this to free any held resources.

        Abstract; must be implemented by the I/O wait subsystem subclass.

        """
        raise NotImplementedError()


    def __is_reactor_thread(self):
        """Checks if running thread is the reactor thread.

        :returns: True if same, or if reactor not running as a thread

        This method is thread safe once the reactor loop has started
        as the reactor thread of a running reactor never changes.

        """
        _thread = self.__thread
        return _thread is None or _thread is threading.current_thread()

    def __remove_call(self, call):
        """Removes a scheduled call.

        :param call: the call to remove (no-op if not scheduled)

        The method assumes the caller holds a lock on self.__calls_lock

        """
        # This is an expensive operation as it requires a linear scan plus
        # full heap normalization; however __remove_call is typically used
        # infrequently, so the heap stays optimized for adding and popping.
        _sc = self.__scheduled_calls
        for pos in xrange(len(_sc)):
            if _sc[pos] is call:
                break
        else:
            # Call is not scheduled; nothing to do
            return

        # Remove the entry and restore the heap invariant. The previous
        # code rebound the popped *call* as the heap, called a misspelled
        # 'heapy.heapify' on it, and popped the index twice - corrupting
        # the schedule.
        _sc.pop(pos)
        heapq.heapify(_sc)

        # Drop the call from its callgroup (if any), pruning empty groups
        callgroup = call.callgroup
        if callgroup and callgroup in self.__grouped_calls:
            group = self.__grouped_calls[callgroup]
            group.discard(call)
            if not group:
                self.__grouped_calls.pop(callgroup)

        # Update time of next call
        if self.__scheduled_calls:
            self.__t_next_call = self.__scheduled_calls[0].scheduled_time
        else:
            self.__t_next_call = None

    def __msg_push(self, code, data):
        """Push internal message onto msg queue and interrupts event loop.

        :param code: internal message code
        :param data: message payload (or None)

        """
        if self.__finished:
            # Not accepting messages if reactor has finished
            return

        with self.__ctrl_lock:
            self.__ctrl_queue.append((code, data))
            if not self.__ctrl_msg_flag:
                # Wake the event loop by writing a single byte to the
                # control pipe/socket; branches deduplicated so only the
                # py2 string conversion differs
                wakeup = _b2s(b'x') if _pyver == 2 else b'x'
                if self.__ctrl_is_pipe:
                    os.write(self.__ctrl_w, wakeup)
                else:
                    self.__ctrl_w.send(wakeup)
                self.__ctrl_msg_flag = True

    def __msg_pop_all(self):
        """Pop all messages off the control queue.

        :returns: all queued (code, data) control messages
        :rtype:   :class:`collections.deque`

        Can only be called when the I/O wait subsystem has confirmed
        the control pipe has data for reading, otherwise this call
        will block (possibly forever).

        """
        with self.__ctrl_lock:
            # Clear internal messaging pipe (it holds max 1 byte)
            if self.__ctrl_is_pipe:
                os.read(self.__ctrl_r, 128)
            else:
                self.__ctrl_r.recv(128)
            queue, self.__ctrl_queue = self.__ctrl_queue, deque()
            self.__ctrl_msg_flag = False
            return queue

    def __msg_add_reader(self, reader):
        # Queue an add-reader request for the reactor thread
        self.__msg_push(self.__ADD_READER, reader)

    def __msg_remove_reader(self, reader):
        # Queue a remove-reader request for the reactor thread
        self.__msg_push(self.__REMOVE_READER, reader)

    def __msg_add_writer(self, writer):
        # Queue an add-writer request for the reactor thread
        self.__msg_push(self.__ADD_WRITER, writer)

    def __msg_remove_writer(self, writer):
        # Queue a remove-writer request for the reactor thread
        self.__msg_push(self.__REMOVE_WRITER, writer)

    def __msg_stop(self):
        # Queue a stop request for the reactor thread (no payload)
        self.__msg_push(self.__STOP, None)

    def __msg_add_call(self, call):
        # Queue an add-call request for the reactor thread
        self.__msg_push(self.__ADD_CALL, call)

    def __msg_remove_call(self, call):
        # Queue a remove-call request for the reactor thread
        self.__msg_push(self.__REMOVE_CALL, call)

    def __msg_process(self, code, data):
        # Dispatch an internal control message to its handler. Stop has no
        # payload and is handled separately; all other handlers take the
        # payload plus internal=True (we are on the reactor thread).
        if code == self.__STOP:
            self.__msg_process_stop()
            return
        dispatch = {
            self.__ADD_READER: self.add_reader,
            self.__ADD_WRITER: self.add_writer,
            self.__REMOVE_READER: self.remove_reader,
            self.__REMOVE_WRITER: self.remove_writer,
            self.__ADD_CALL: self.add_call,
            self.__REMOVE_CALL: self.remove_call,
            }
        handler = dispatch.get(code, None)
        if handler is None:
            raise RuntimeError('Unknown internal message code')
        handler(data, True)

    def __msg_process_stop(self):
        # Flag control-loop shutdown, mark the reactor finished and clear
        # the reactor thread reference (assignments are independent)
        self.__ctrl_stop = True
        self.__finished = True
        self.__thread = None
Ejemplo n.º 8
0
class VOPBridge(object):
    """A reactor interface for the :term:`VOP` protocol.

    Handles :term:`VOP` handshake and setup of a :term:`VOL` link,
    negotiating a byte transport for the connection.

    .. note::

        The :term:`VOP` specification states that each side of a
        :term:`VOP` connection takes either the role of \"client\" or
        \"server\"\ . The classes :class:`VOPClientBridge` and
        :class:`VOPServerBridge` provide interfaces for the respective
        roles. The :class:`VOPBridge` class is abstract and should not
        be directly instantiated.

    :param reactor:  channel reactor
    :param vec:      :term:`VEC` channel for link
    :type  vec:      :class:`versile.reactor.io.VByteIOPair`
    :param vts:      factory for :term:`VTS` transport (or None)
    :type  vts:      callable
    :param tls:      factory for :term:`TLS` transport (or None)
    :type  tls:      callable
    :param insecure: if True allow unencrypted connections
    :type  insecure: boolean
    :raises:         :exc:`versile.reactor.io.VIOException`

    *vec* should be a byte I/O pair which will be connected to the
    internal (plaintext) side of the protocol's negotiated byte
    transport mechanism.

    The *vts* and *tls* arguments are functions which produce byte
    transports for the corresponding protocols. If not None then that
    transport is enabled for the :term:`VOP` handshake.

    *vts* and *tls* should take a reactor as an argument and return a
    4-tuple (transport_consumer, transport_producer, vec_consumer,
    vec_producer) where each consumer is a
    :class:`versile.reactor.io.VByteConsumer` and each producer is a
    :class:`versile.reactor.io.VByteProducer`\ . The first two
    elements are the external transport connecters and the last two
    elements are the internal connecters for serialized :term:`VEC`
    data.

    """
    def __init__(self, reactor, vec, vts=None, tls=None, insecure=False):
        self.__reactor = reactor

        self._vec_consumer = vec.consumer
        self._vec_producer = vec.producer

        if not (vts or tls or insecure):
            raise VIOException('No transports enabled')

        self._vts_factory = vts
        self._tls_factory = tls
        self._allow_insecure = insecure

        self._handshaking = True
        self._handshake_error = False
        self._handshake_consumed = 0
        self._handshake_produced = 0

        self.__tc_producer = None
        self._tc_cons_lim = 0

        self.__tp_consumer = None
        self._tp_prod_lim = 0

        self.__ec_producer = None
        self._ec_cons_lim = 0

        self.__ep_consumer = None
        self._ep_prod_lim = 0

        # Weak references to lazily-created consumer/producer interfaces
        self.__tc_iface = self.__tp_iface = None
        self.__ec_iface = self.__ep_iface = None

        # Set up a local logger for convenience
        self._logger = VLogger(prefix='VOP')
        self._logger.add_watcher(self.reactor.log)

    def __del__(self):
        #self._logger.debug('Dereferenced')
        pass

    @property
    def external_consume(self):
        """Holds an external interface to a :term:`VOP` protocol consumer."""
        cons = None
        if self.__ec_iface:
            cons = self.__ec_iface()
        if not cons:
            cons = _VExternalConsumer(self)
            self.__ec_iface = weakref.ref(cons)
        return cons

    @property
    def external_produce(self):
        """Holds an external interface to a :term:`VOP` protocol producer."""
        prod = None
        if self.__ep_iface:
            prod = self.__ep_iface()
        if not prod:
            prod = _VExternalProducer(self)
            self.__ep_iface = weakref.ref(prod)
        return prod

    @property
    def external_io(self):
        """External I/O (\ :class:`versile.reactor.io.VByteIOPair`\ )."""
        return VByteIOPair(self.external_consume, self.external_produce)

    @property
    def reactor(self):
        """The object's reactor.

        See :class:`versile.reactor.IVReactorObject`

        """
        return self.__reactor

    @property
    def _transport_consume(self):
        """Holds an internal interface to the external consumer."""
        cons = None
        if self.__tc_iface:
            cons = self.__tc_iface()
        if not cons:
            cons = _VTransportConsumer(self)
            self.__tc_iface = weakref.ref(cons)
        return cons

    @property
    def _transport_produce(self):
        """Holds an internal interface to the external producer."""
        prod = None
        if self.__tp_iface:
            prod = self.__tp_iface()
        if not prod:
            prod = _VTransportProducer(self)
            self.__tp_iface = weakref.ref(prod)
        return prod

    @property
    def _transport_io(self):
        """Transport I/O (\ :class:`versile.reactor.io.VByteIOPair`\ )."""
        return VByteIOPair(self._transport_consume, self._transport_produce)

    @peer
    def _tc_consume(self, data, clim):
        if not self._tc_producer:
            raise VIOError('No connected producer')
        elif not data:
            raise VIOError('No data to consume')
        elif self._handshake_error:
            raise VIOError('Error during handshaking')

        if self._handshaking:
            raise VIOError('Handshaking not completed')

        if self._ep_consumer:
            _lim = self._ep_consumer.consume(data, clim)
            self._ep_prod_lim = _lim
            if _lim >= 0:
                # Adjust for bytes produced during the handshake phase
                _lim = max(_lim - self._handshake_produced, 0)
            self._tc_cons_lim = _lim

        return self._tc_cons_lim

    @peer
    def _tc_end_consume(self, clean):
        if self._handshake_error:
            return

        if self._handshaking:
            self._handshake_abort()
        else:
            if self._ep_consumer:
                return self._ep_consumer.end_consume(clean)
            else:
                self._tc_abort()

    def _tc_abort(self):
        if self._handshaking and not self._handshake_error:
            self._handshake_abort()
        else:
            if self._ep_consumer:
                self._ep_consumer.abort()
                self._ep_detach()
            if self._tc_producer:
                self._tc_producer.abort()
                self._tc_detach()

    def _tc_attach(self, producer, rthread=False):
        # Ensure 'attach' is performed in reactor thread
        if not rthread:
            self.reactor.execute(self._tc_attach, producer, rthread=True)
            return

        if self._handshake_error:
            raise VIOError('Earlier error during handshaking')
        elif self._tc_producer is producer:
            return
        elif self._tc_producer:
            raise VIOError('Producer already connected')

        self.__tc_producer = producer
        self._tc_cons_lim = 0
        producer.attach(self._transport_consume)

        if not self._handshaking:
            _lim = self._ec_cons_lim
            if _lim >= 0:
                _lim -= self._handshake_consumed
            producer.can_produce(_lim)

        try:
            producer.control.notify_consumer_attached(self._transport_consume)
        except VIOMissingControl:
            pass

    def _tc_detach(self, rthread=False):
        # Ensure 'detach' is performed in reactor thread
        if not rthread:
            self.reactor.execute(self._tc_detach, rthread=True)
            return

        if self.__tc_producer:
            prod, self.__tc_producer = self.__tc_producer, None
            self._tc_cons_lim = 0
            prod.detach()

    @peer
    def _tp_can_produce(self, limit):
        if self._handshake_error:
            raise VIOError('Earlier error during handshaking')
        elif not self._tp_consumer:
            raise VIOError('No attached consumer')

        self._tp_prod_lim = limit

        if not self._handshaking and self._ec_producer:
            if limit >= 0:
                limit += self._handshake_consumed
            self._ec_producer.can_produce(limit)

    def _tp_abort(self):
        self._ec_abort()

    def _tp_attach(self, consumer, rthread=False):
        # Ensure 'attach' is performed in reactor thread
        if not rthread:
            self.reactor.execute(self._tp_attach, consumer, rthread=True)
            return

        if self._handshake_error:
            raise VIOError('Earlier error during handshaking')
        elif self._tp_consumer is consumer:
            return
        elif self._tp_consumer:
            raise VIOError('Consumer already attached')

        self.__tp_consumer = consumer
        self._tp_prod_lim = 0
        consumer.attach(self._transport_produce)

        try:
            consumer.control.notify_producer_attached(self._transport_produce)
        except VIOMissingControl:
            pass

    def _tp_detach(self, rthread=False):
        # Ensure 'detach' is performed in reactor thread
        if not rthread:
            self.reactor.execute(self._tp_detach, rthread=True)
            return

        if self.__tp_consumer:
            cons, self.__tp_consumer = self.__tp_consumer, None
            cons.detach()
            self._tp_prod_lim = 0

    @peer
    def _ec_consume(self, data, clim):
        if not self._ec_producer:
            raise VIOError('No connected external producer')
        elif not data:
            raise VIOError('No data to consume')
        elif self._handshake_error:
            raise VIOError('Earlier handshake error')

        # Handle handshake
        if self._handshaking:
            _len = len(data)
            self._handshake_consume(data, clim)
            if clim is not None:
                clim -= _len - len(data)

        # Handle post-handshake pass-through to transport
        if (not self._handshaking and self._tp_consumer and data
                and (clim is None or clim > 0)):
            _lim = self._tp_consumer.consume(data, clim)
            if _lim >= 0:
                _lim += self._handshake_consumed
            self._ec_cons_lim = _lim

        return self._ec_cons_lim

    @peer
    def _ec_end_consume(self, clean):
        if self._handshake_error:
            return

        if self._handshaking:
            self._handshake_abort()
        else:
            if self._tp_consumer:
                self._tp_consumer.end_consume(clean)

    def _ec_abort(self):
        if self._handshaking and not self._handshake_error:
            self._handshake_abort()
        else:
            if self._tp_consumer:
                self._tp_consumer.abort()
                self._tp_detach()
            if self._ec_producer:
                self._ec_producer.abort()
                self._ec_detach()

    def _ec_attach(self, producer, rthread=False):
        # Ensure 'attach' is performed from reactor thread
        if not rthread:
            self.reactor.execute(self._ec_attach, producer, rthread=True)
            return

        if self._handshake_error:
            raise VIOError('Earlier error during handshaking')
        elif self._ec_producer is producer:
            return
        elif self._ec_producer:
            raise VIOError('Producer already connected')

        self.__ec_producer = producer
        self._ec_cons_lim = 0
        producer.attach(self.external_consume)

        try:
            producer.control.notify_consumer_attached(self.external_consume)
        except VIOMissingControl:
            pass

        # Trigger any handshake actions
        self._handshake_producer_attached()

    def _ec_detach(self, rthread=False):
        # Ensure 'detach' is performed in reactor thread
        if not rthread:
            self.reactor.execute(self._ec_detach, rthread=True)
            return

        if self.__ec_producer:
            prod, self.__ec_producer = self.__ec_producer, None
            self._ec_cons_lim = 0
            prod.detach()

    @peer
    def _ep_can_produce(self, limit):
        if self._handshake_error:
            raise VIOError('Earlier error during handshaking')
        elif not self._ep_consumer:
            raise VIOError('No attached consumer')

        self._ep_prod_lim = limit

        if self._handshaking:
            self._handshake_can_produce()
        else:
            if self._tc_producer:
                if limit >= 0:
                    limit = max(limit - self._handshake_produced, 0)
                self._tc_cons_lim = limit
                self._tc_producer.can_produce(limit)

    def _ep_abort(self):
        self._tc_abort()

    def _ep_attach(self, consumer, rthread=False):
        # Ensure 'attach' is performed in reactor thread
        if not rthread:
            self.reactor.execute(self._ep_attach, consumer, rthread=True)
            return

        if self._handshake_error:
            raise VIOError('Earlier error during handshaking')
        elif self._ep_consumer is consumer:
            return
        elif self._ep_consumer:
            raise VIOError('Consumer already attached')

        self.__ep_consumer = consumer
        self._ep_prod_lim = 0
        consumer.attach(self.external_produce)

        try:
            consumer.control.notify_producer_attached(self.external_produce)
        except VIOMissingControl:
            pass

    def _ep_detach(self, rthread=False):
        # Ensure 'detach' is performed in reactor thread
        if not rthread:
            self.reactor.execute(self._ep_detach, rthread=True)
            return

        if self.__ep_consumer:
            cons, self.__ep_consumer = self.__ep_consumer, None
            cons.detach()
            self._ep_prod_lim = 0

    def _handshake_abort(self):
        if not self._handshake_error:
            self._logger.debug('Aborting')
            self._handshaking = False
            self._handshake_error = True
            self._ec_abort()
            self._ep_abort()

            # Abort VEC chain
            if self._vec_consumer:
                self._vec_consumer.abort()
            if self._vec_producer:
                self._vec_producer.abort()

            # Free up any held resources
            self._vec_consumer = None
            self._vec_producer = None
            self._vts_factory = None
            self._tls_factory = None

    @abstract
    def _handshake_producer_attached(self):
        """Notification handshake producer was attached."""
        raise NotImplementedError()

    @abstract
    def _handshake_consume(self, data, clim):
        """Consume handshake data."""
        raise NotImplementedError()

    @abstract
    def _handshake_can_produce(self):
        """Process can_produce during handshake."""
        raise NotImplementedError()

    def _handshake_complete(self, factory):
        """Finalizes handshake after sending/receiving hello messages."""
        self._handshaking = False

        # Initiate transport communication
        if (factory is None):
            # Plaintext transport
            self._tc_attach(self._vec_producer, True)
            self._tp_attach(self._vec_consumer, True)
        else:
            # Secure transport
            ext_cons, ext_prod, int_cons, int_prod = factory(self.reactor)
            self._tc_attach(ext_prod, True)
            self._tp_attach(ext_cons, True)
            int_cons.attach(self._vec_producer)
            int_prod.attach(self._vec_consumer)

        # Dereference any resources held for the handshake
        self._vec_consumer = None
        self._vec_producer = None
        self._vts_factory = None
        self._tls_factory = None

        self._logger.debug('Completed handshake')

    @property
    def _tc_control(self):
        if self._ep_consumer:
            return self._ep_consumer.control
        else:
            return VIOControl()

    @property
    def _tc_producer(self):
        return self.__tc_producer

    @property
    def _tc_flows(self):
        return (self.external_produce, )

    @property
    def _tp_control(self):
        if self._ec_producer:
            return self._ec_producer.control
        else:
            return VIOControl()

    @property
    def _tc_twoway(self):
        return True

    @property
    def _tc_reverse(self):
        # Fixed: previously referenced nonexistent 'transport_produce'
        return self._transport_produce

    @property
    def _tp_consumer(self):
        return self.__tp_consumer

    @property
    def _tp_flows(self):
        return (self.external_consume, )

    @property
    def _tp_twoway(self):
        return True

    @property
    def _tp_reverse(self):
        # Fixed: previously referenced nonexistent 'transport_consume'
        return self._transport_consume

    @property
    def _ec_control(self):
        if self._tp_consumer:
            return self._tp_consumer.control
        else:
            return VIOControl()

    @property
    def _ec_producer(self):
        return self.__ec_producer

    @property
    def _ec_flows(self):
        # Fixed: previously referenced nonexistent 'transport_produce'
        return (self._transport_produce, )

    @property
    def _ec_twoway(self):
        return True

    @property
    def _ec_reverse(self):
        return self.external_produce

    @property
    def _ep_control(self):
        if self._tc_producer:
            return self._tc_producer.control
        else:
            return VIOControl()

    @property
    def _ep_consumer(self):
        return self.__ep_consumer

    @property
    def _ep_flows(self):
        # Fixed: previously referenced nonexistent 'transport_consume'
        return (self._transport_consume, )

    @property
    def _ep_twoway(self):
        return True

    @property
    def _ep_reverse(self):
        return self.external_consume
Ejemplo n.º 9
0
class VPipeBase(object):
    """Base class for reactor-driven OS pipe I/O.

    :param reactor:     reactor handling pipe I/O events
    :param fd:          pipe file descriptor
    :type  fd:          int
    :param hc_pol:      half-close policy
    :type  hc_pol:      :class:`versile.reactor.io.VHalfClosePolicy`
    :param close_cback: callback when closed (or None)
    :type  close_cback: callable

    The pipe is set to a non-blocking mode.

    *hc_pol* determines whether the pipe allows closing only one
    direction if the pipe has a peer. If *hc_pol* is None an
    :class:`versile.reactor.io.VHalfClose` instance is used.

    The file descriptor is closed when this object is deleted.

    This class is abstract and should not be directly instantiated.

    """
    def __init__(self, reactor, fd, hc_pol=None, close_cback=None):
        """Set up pipe I/O handling (see class docstring for parameters)."""
        self.__reactor = reactor
        # Merge O_NONBLOCK with the descriptor's current status flags.
        # Writing a bare os.O_NONBLOCK (the previous behavior) would
        # silently clear any flags (e.g. O_APPEND) already set on fd.
        flags = fcntl.fcntl(fd, fcntl.F_GETFL)
        fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)
        self._fd = fd
        self._peer = None  # weak reference to a peer pipe, or None
        if hc_pol is None:
            hc_pol = VHalfClose()
        self.__hc_pol = hc_pol
        self._close_cback = close_cback
        self._sent_close_cback = False

        # Set up a pipe logger for convenience
        self.__logger = VLogger(prefix='Pipe')
        self.__logger.add_watcher(self.reactor.log)

    def __del__(self):
        """Close the file descriptor and fire any pending close callback.

        ``__del__`` may run even if ``__init__`` aborted before all
        attributes were assigned (e.g. if the fcntl call raised), so
        attributes are read defensively with getattr defaults instead of
        letting an AttributeError escape the finalizer.

        """
        fd = getattr(self, '_fd', -1)
        if fd >= 0:
            try:
                os.close(fd)
            except OSError as e:
                _v_silent(e)
        close_cback = getattr(self, '_close_cback', None)
        if close_cback and not getattr(self, '_sent_close_cback', False):
            try:
                close_cback()
            except Exception as e:
                self.log.debug('Close callback failed')
                _v_silent(e)

    @abstract
    def set_pipe_peer(self, peer):
        """Sets a peer pipe object for a reverse pipe direction.

        :param peer: peer pipe
        :type  peer: :class:`VPipeBase`
        :raises:     :exc:`NotImplementedError` (abstract; derived
                     classes must override)

        If registered a peer pipe is used with :meth:`close_io` and
        for resolving half-close policies.

        """
        raise NotImplementedError()

    @abstract
    def close_io(self, reason):
        """See :meth:`versile.reactor.io.IVByteHandle.close_io`\ .

        Abstract; derived classes must override (the base implementation
        raises :exc:`NotImplementedError`).

        """
        raise NotImplementedError()

    def fileno(self):
        """See :meth:`versile.reactor.io.IVSelectable.fileno`\ .

        :returns: the pipe's file descriptor
        :rtype:   int

        """
        return self._fd

    @property
    def peer(self):
        """A peer pipe registered with :meth:`set_pipe_peer`\ .

        Dereferences the internally held weak reference; None when no
        peer has been registered.

        """
        ref = self._peer
        return ref() if ref else None

    @property
    def reactor(self):
        """See :attr:`versile.reactor.IVReactorObject.reactor`\ .

        The reactor object that was passed to the constructor.

        """
        return self.__reactor

    @property
    def log(self):
        """Logger for the pipe (:class:`versile.common.log.VLogger`\ )."""
        return self.__logger

    def _get_hc_pol(self):
        # Getter backing the half_close_policy property.
        return self.__hc_pol

    def _set_hc_pol(self, policy):
        # Setter backing the half_close_policy property; no validation
        # is performed on the assigned value.
        self.__hc_pol = policy

    # Legacy-style property construction: the shared docstring is bound
    # to a temporary name which is removed from the class namespace below.
    __doc = 'See :meth:`versile.reactor.io.IVByteIO.half_close_policy`'
    half_close_policy = property(_get_hc_pol, _set_hc_pol, doc=__doc)
    del (__doc)