Example #1
    def __init__(self,
                 addr,
                 storages,
                 read_only=0,
                 invalidation_queue_size=100,
                 transaction_timeout=None,
                 monitor_address=None,
                 auth_protocol=None,
                 auth_database=None,
                 auth_realm=None):
        """StorageServer constructor.

        This is typically invoked from the start.py script.

        Arguments (the first two are required and positional):

        addr -- the address at which the server should listen.  This
            can be a tuple (host, port) to signify a TCP/IP connection
            or a pathname string to signify a Unix domain socket
            connection.  A hostname may be a DNS name or a dotted IP
            address.

        storages -- a dictionary giving the storage(s) to handle.  The
            keys are the storage names, the values are the storage
            instances, typically FileStorage or Berkeley storage
            instances.  By convention, storage names are typically
            strings representing small integers starting at '1'.

        read_only -- an optional flag saying whether the server should
            operate in read-only mode.  Defaults to false.  Note that
            even if the server is operating in writable mode,
            individual storages may still be read-only.  But if the
            server is in read-only mode, no write operations are
            allowed, even if the storages are writable.  Note that
            pack() is considered a read-only operation.

        invalidation_queue_size -- The storage server keeps a queue
            of the objects modified by the last N transactions, where
            N == invalidation_queue_size.  This queue is used to
            speed client cache verification when a client disconnects
            for a short period of time.

        transaction_timeout -- The maximum amount of time to wait for
            a transaction to commit after acquiring the storage lock.
            If the transaction takes too long, the client connection
            will be closed and the transaction aborted.

        monitor_address -- The address at which the monitor server
            should listen.  If specified, a monitor server is started.
            The monitor server provides server statistics in a simple
            text format.

        auth_protocol -- The name of the authentication protocol to use.
            Examples are "digest" and "srp".

        auth_database -- The name of the password database file.
            It should be in a format compatible with the authentication
            protocol used; for instance, "sha" and "srp" require different
            formats.

            Note that to implement an authentication protocol, a server
            and client authentication mechanism must be implemented in an
            auth_* module, which should be stored inside the "auth"
            subdirectory. This module may also define a DatabaseClass
            variable that should indicate what database should be used
            by the authenticator.
        """

        self.addr = addr
        self.storages = storages
        set_label()
        msg = ", ".join([
            "%s:%s:%s" %
            (name, storage.isReadOnly() and "RO" or "RW", storage.getName())
            for name, storage in storages.items()
        ])
        log("%s created %s with storages: %s" %
            (self.__class__.__name__, read_only and "RO" or "RW", msg))
        for s in storages.values():
            s._waiting = []
        self.read_only = read_only
        self.auth_protocol = auth_protocol
        self.auth_database = auth_database
        self.auth_realm = auth_realm
        self.database = None
        if auth_protocol:
            self._setup_auth(auth_protocol)
        # self.invq maps each storage name to a list of at most
        # invalidation_queue_size invalidations.  Each list is kept in
        # sorted order with the most recent invalidation at the front
        # and never has more than self.invq_bound elements.
        self.invq_bound = invalidation_queue_size
        self.invq = {}
        for name, storage in storages.items():
            self._setup_invq(name, storage)
            storage.registerDB(StorageServerDB(self, name))

        self.connections = {}
        self.dispatcher = self.DispatcherClass(addr,
                                               factory=self.new_connection)
        self.stats = {}
        self.timeouts = {}
        for name in self.storages.keys():
            self.stats[name] = StorageStats()
            if transaction_timeout is None:
                # An object with no-op methods
                timeout = StubTimeoutThread()
            else:
                timeout = TimeoutThread(transaction_timeout)
                timeout.start()
            self.timeouts[name] = timeout
        if monitor_address:
            self.monitor = StatsServer(monitor_address, self.stats)
        else:
            self.monitor = None
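The docstring above describes the start.py-style invocation. Below is a minimal usage sketch, assuming ZODB's FileStorage as the backend and a TCP (host, port) address; the import paths and the data-file name are illustrative assumptions, not taken from these examples.

from ZEO.StorageServer import StorageServer
from ZODB.FileStorage import FileStorage

# Assumed setup: one FileStorage served under the conventional name '1'.
storage = FileStorage('/tmp/example.fs')      # hypothetical data file
storages = {'1': storage}
server = StorageServer(('', 8100), storages,  # a (host, port) tuple, or a Unix socket path
                       read_only=0,
                       invalidation_queue_size=100,
                       transaction_timeout=30)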
Example #2
    def __init__(self, addr, storages, read_only=0,
                 invalidation_queue_size=100,
                 transaction_timeout=None,
                 monitor_address=None,
                 auth_protocol=None,
                 auth_database=None,
                 auth_realm=None):
        """StorageServer constructor.

        This is typically invoked from the start.py script.

        Arguments (the first two are required and positional):

        addr -- the address at which the server should listen.  This
            can be a tuple (host, port) to signify a TCP/IP connection
            or a pathname string to signify a Unix domain socket
            connection.  A hostname may be a DNS name or a dotted IP
            address.

        storages -- a dictionary giving the storage(s) to handle.  The
            keys are the storage names, the values are the storage
            instances, typically FileStorage or Berkeley storage
            instances.  By convention, storage names are typically
            strings representing small integers starting at '1'.

        read_only -- an optional flag saying whether the server should
            operate in read-only mode.  Defaults to false.  Note that
            even if the server is operating in writable mode,
            individual storages may still be read-only.  But if the
            server is in read-only mode, no write operations are
            allowed, even if the storages are writable.  Note that
            pack() is considered a read-only operation.

        invalidation_queue_size -- The storage server keeps a queue
            of the objects modified by the last N transactions, where
            N == invalidation_queue_size.  This queue is used to
            speed client cache verification when a client disconnects
            for a short period of time.

        transaction_timeout -- The maximum amount of time to wait for
            a transaction to commit after acquiring the storage lock.
            If the transaction takes too long, the client connection
            will be closed and the transaction aborted.

        monitor_address -- The address at which the monitor server
            should listen.  If specified, a monitor server is started.
            The monitor server provides server statistics in a simple
            text format.

        auth_protocol -- The name of the authentication protocol to use.
            Examples are "digest" and "srp".

        auth_database -- The name of the password database file.
            It should be in a format compatible with the authentication
            protocol used; for instance, "sha" and "srp" require different
            formats.

            Note that to implement an authentication protocol, a server
            and client authentication mechanism must be implemented in an
            auth_* module, which should be stored inside the "auth"
            subdirectory. This module may also define a DatabaseClass
            variable that should indicate what database should be used
            by the authenticator.
        """

        self.addr = addr
        self.storages = storages
        set_label()
        msg = ", ".join(
            ["%s:%s:%s" % (name, storage.isReadOnly() and "RO" or "RW",
                           storage.getName())
             for name, storage in storages.items()])
        log("%s created %s with storages: %s" %
            (self.__class__.__name__, read_only and "RO" or "RW", msg))
        for s in storages.values():
            s._waiting = []
        self.read_only = read_only
        self.auth_protocol = auth_protocol
        self.auth_database = auth_database
        self.auth_realm = auth_realm
        self.database = None
        if auth_protocol:
            self._setup_auth(auth_protocol)
        # A list of at most invalidation_queue_size invalidations.
        # The list is kept in sorted order with the most recent
        # invalidation at the front.  The list never has more than
        # self.invq_bound elements.
        self.invq = []
        self.invq_bound = invalidation_queue_size
        self.connections = {}
        self.dispatcher = self.DispatcherClass(addr,
                                               factory=self.new_connection)
        self.stats = {}
        self.timeouts = {}
        for name in self.storages.keys():
            self.stats[name] = StorageStats()
            if transaction_timeout is None:
                # An object with no-op methods
                timeout = StubTimeoutThread()
            else:
                timeout = TimeoutThread(transaction_timeout)
                timeout.start()
            self.timeouts[name] = timeout
        if monitor_address:
            self.monitor = StatsServer(monitor_address, self.stats)
        else:
            self.monitor = None
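The constructor comment above pins down the invalidation-queue invariant: the most recent entry sits at the front and the list never exceeds invq_bound elements. A standalone sketch of that bounding behavior, using made-up tids and oid lists:

# Bounded queue sketch; tids and oid lists are illustration values only.
invq, invq_bound = [], 3
for tid, invalidated in [(1, ['a']), (2, ['b']), (3, ['c']), (4, ['d'])]:
    if len(invq) >= invq_bound:
        invq.pop()                     # drop the oldest entry from the tail
    invq.insert(0, (tid, invalidated))
print(invq)                            # [(4, ['d']), (3, ['c']), (2, ['b'])]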
Example #3
class StorageServer:
    """The server side implementation of ZEO.

    The StorageServer is the 'manager' for incoming connections.  Each
    connection is associated with its own ZEOStorage instance (defined
    below).  The StorageServer may handle multiple storages; each
    ZEOStorage instance only handles a single storage.
    """

    # Classes we instantiate.  A subclass might override.

    DispatcherClass = Dispatcher
    ZEOStorageClass = ZEOStorage
    ManagedServerConnectionClass = ManagedServerConnection

    def __init__(self,
                 addr,
                 storages,
                 read_only=0,
                 invalidation_queue_size=100,
                 transaction_timeout=None,
                 monitor_address=None,
                 auth_protocol=None,
                 auth_database=None,
                 auth_realm=None):
        """StorageServer constructor.

        This is typically invoked from the start.py script.

        Arguments (the first two are required and positional):

        addr -- the address at which the server should listen.  This
            can be a tuple (host, port) to signify a TCP/IP connection
            or a pathname string to signify a Unix domain socket
            connection.  A hostname may be a DNS name or a dotted IP
            address.

        storages -- a dictionary giving the storage(s) to handle.  The
            keys are the storage names, the values are the storage
            instances, typically FileStorage or Berkeley storage
            instances.  By convention, storage names are typically
            strings representing small integers starting at '1'.

        read_only -- an optional flag saying whether the server should
            operate in read-only mode.  Defaults to false.  Note that
            even if the server is operating in writable mode,
            individual storages may still be read-only.  But if the
            server is in read-only mode, no write operations are
            allowed, even if the storages are writable.  Note that
            pack() is considered a read-only operation.

        invalidation_queue_size -- The storage server keeps a queue
            of the objects modified by the last N transactions, where
            N == invalidation_queue_size.  This queue is used to
            speed client cache verification when a client disconnects
            for a short period of time.

        transaction_timeout -- The maximum amount of time to wait for
            a transaction to commit after acquiring the storage lock.
            If the transaction takes too long, the client connection
            will be closed and the transaction aborted.

        monitor_address -- The address at which the monitor server
            should listen.  If specified, a monitor server is started.
            The monitor server provides server statistics in a simple
            text format.

        auth_protocol -- The name of the authentication protocol to use.
            Examples are "digest" and "srp".

        auth_database -- The name of the password database file.
            It should be in a format compatible with the authentication
            protocol used; for instance, "sha" and "srp" require different
            formats.

            Note that to implement an authentication protocol, a server
            and client authentication mechanism must be implemented in an
            auth_* module, which should be stored inside the "auth"
            subdirectory. This module may also define a DatabaseClass
            variable that should indicate what database should be used
            by the authenticator.
        """

        self.addr = addr
        self.storages = storages
        set_label()
        msg = ", ".join([
            "%s:%s:%s" %
            (name, storage.isReadOnly() and "RO" or "RW", storage.getName())
            for name, storage in storages.items()
        ])
        log("%s created %s with storages: %s" %
            (self.__class__.__name__, read_only and "RO" or "RW", msg))
        for s in storages.values():
            s._waiting = []
        self.read_only = read_only
        self.auth_protocol = auth_protocol
        self.auth_database = auth_database
        self.auth_realm = auth_realm
        self.database = None
        if auth_protocol:
            self._setup_auth(auth_protocol)
        # self.invq maps each storage name to a list of at most
        # invalidation_queue_size invalidations.  Each list is kept in
        # sorted order with the most recent invalidation at the front
        # and never has more than self.invq_bound elements.
        self.invq_bound = invalidation_queue_size
        self.invq = {}
        for name, storage in storages.items():
            self._setup_invq(name, storage)
            storage.registerDB(StorageServerDB(self, name))

        self.connections = {}
        self.dispatcher = self.DispatcherClass(addr,
                                               factory=self.new_connection)
        self.stats = {}
        self.timeouts = {}
        for name in self.storages.keys():
            self.stats[name] = StorageStats()
            if transaction_timeout is None:
                # An object with no-op methods
                timeout = StubTimeoutThread()
            else:
                timeout = TimeoutThread(transaction_timeout)
                timeout.start()
            self.timeouts[name] = timeout
        if monitor_address:
            self.monitor = StatsServer(monitor_address, self.stats)
        else:
            self.monitor = None

    def _setup_invq(self, name, storage):
        lastInvalidations = getattr(storage, 'lastInvalidations', None)
        if lastInvalidations is None:
            self.invq[name] = [(storage.lastTransaction(), None)]
        else:
            self.invq[name] = list(lastInvalidations(self.invq_bound))
            self.invq[name].reverse()

    def _setup_auth(self, protocol):
        # Can't be done in global scope, because of cyclic references
        from ZEO.auth import get_module

        name = self.__class__.__name__

        module = get_module(protocol)
        if not module:
            log("%s: no such an auth protocol: %s" % (name, protocol))
            return

        storage_class, client, db_class = module

        if not storage_class or not issubclass(storage_class, ZEOStorage):
            log(("%s: %s isn't a valid protocol, must have a StorageClass" %
                 (name, protocol)))
            self.auth_protocol = None
            return
        self.ZEOStorageClass = storage_class

        log("%s: using auth protocol: %s" % (name, protocol))

        # We create a Database instance here for use with the authenticator
        # modules. Having one instance allows it to be shared between multiple
        # storages, avoiding the need to bloat each with a new authenticator
        # Database that would contain the same info, and also avoiding any
        # possible synchronization issues between them.
        self.database = db_class(self.auth_database)
        if self.database.realm != self.auth_realm:
            raise ValueError("password database realm %r "
                             "does not match storage realm %r" %
                             (self.database.realm, self.auth_realm))

    def new_connection(self, sock, addr):
        """Internal: factory to create a new connection.

        This is called by the Dispatcher class in ZEO.zrpc.server
        whenever accept() returns a socket for a new incoming
        connection.
        """
        if self.auth_protocol and self.database:
            zstorage = self.ZEOStorageClass(self,
                                            self.read_only,
                                            auth_realm=self.auth_realm)
            zstorage.set_database(self.database)
        else:
            zstorage = self.ZEOStorageClass(self, self.read_only)

        c = self.ManagedServerConnectionClass(sock, addr, zstorage, self)
        log("new connection %s: %s" % (addr, repr(c)))
        return c

    def register_connection(self, storage_id, conn):
        """Internal: register a connection with a particular storage.

        This is called by ZEOStorage.register().

        The dictionary self.connections maps each storage name to a
        list of current connections for that storage; this information
        is needed to handle invalidation.  This function updates this
        dictionary.

        Returns the timeout and stats objects for the appropriate storage.
        """
        l = self.connections.get(storage_id)
        if l is None:
            l = self.connections[storage_id] = []
        l.append(conn)
        stats = self.stats[storage_id]
        stats.clients += 1
        return self.timeouts[storage_id], stats

    def _invalidateCache(self, storage_id):
        """We need to invalidate any caches we have.

        This basically means telling our clients to
        invalidate/revalidate their caches.  We do this by closing
        their connections and making them reconnect.
        """

        # This method can be called from foreign threads.  We have to
        # worry about interaction with the main thread.

        # 1. We modify self.invq which is read by get_invalidations
        #    below. This is why get_invalidations makes a copy of
        #    self.invq.

        # 2. We access connections.  There are two dangers:
        #
        # a. We miss a new connection.  This is not a problem because
        #    if a client connects after we get the list of connections,
        #    then it will have to read the invalidation queue, which
        #    has already been reset.
        #
        # b. A connection is closed while we are iterating.  This
        #    doesn't matter, because we can call should_close on a closed
        #    connection.

        # Rebuild invq
        self._setup_invq(storage_id, self.storages[storage_id])

        connections = self.connections.get(storage_id, ())

        # Make a copy since we are going to be mutating the
        # connections indirectly by closing them.  We don't care about
        # later transactions since they will have to validate their
        # caches anyway.
        connections = connections[:]
        for p in connections:
            try:
                p.connection.should_close()
                p.connection.trigger.pull_trigger()
            except ZEO.zrpc.error.DisconnectedError:
                pass

    def invalidate(self, conn, storage_id, tid, invalidated=(), info=None):
        """Internal: broadcast info and invalidations to clients.

        This is called from several ZEOStorage methods.

        invalidated is a sequence of oid, version pairs.

        This can do three different things:

        - If the invalidated argument is non-empty, it broadcasts
          invalidateTransaction() messages to all clients of the given
          storage except the current client (the conn argument).

        - If the invalidated argument is empty and the info argument
          is a non-empty dictionary, it broadcasts info() messages to
          all clients of the given storage, including the current
          client.

        - If both the invalidated argument and the info argument are
          non-empty, it broadcasts invalidateTransaction() messages to all
          clients except the current, and sends an info() message to
          the current client.

        """

        # This method can be called from foreign threads.  We have to
        # worry about interaction with the main thread.

        # 1. We modify self.invq which is read by get_invalidations
        #    below. This is why get_invalidations makes a copy of
        #    self.invq.

        # 2. We access connections.  There are two dangers:
        #
        # a. We miss a new connection.  This is not a problem because
        #    we are called while the storage lock is held.  A new
        #    connection that tries to read data won't read committed
        #    data without first receiving an invalidation.  Also, if a
        #    client connects after getting the list of connections,
        #    then it will have to read the invalidation queue, which
        #    has been updated to reflect the invalidations.
        #
        # b. A connection is closed while we are iterating.  We'll need
        #    to catch and ignore Disconnected errors.

        if invalidated:
            invq = self.invq[storage_id]
            if len(invq) >= self.invq_bound:
                invq.pop()
            invq.insert(0, (tid, invalidated))

        for p in self.connections.get(storage_id, ()):
            if invalidated and p is not conn:
                try:
                    p.client.invalidateTransaction(tid, invalidated)
                except ZEO.zrpc.error.DisconnectedError:
                    pass

            elif info is not None:
                p.client.info(info)

    def get_invalidations(self, storage_id, tid):
        """Return a tid and list of all objects invalidation since tid.

        The tid is the most recent transaction id seen by the client.

        Returns (None, []) if it is unable to provide a complete list
        of invalidations for tid.  In this case, the client should
        do full cache verification.
        """

        invq = self.invq[storage_id]

        # We make a copy of invq because it might be modified by a
        # foreign (other than main thread) calling invalidate above.
        invq = invq[:]

        if not invq:
            log("invq empty")
            return None, []

        earliest_tid = invq[-1][0]
        if earliest_tid > tid:
            log("tid to old for invq %s < %s" % (u64(tid), u64(earliest_tid)))
            return None, []

        oids = {}
        for _tid, L in invq:
            if _tid <= tid:
                break
            for key in L:
                oids[key] = 1
        latest_tid = invq[0][0]
        return latest_tid, oids.keys()

    def close_server(self):
        """Close the dispatcher so that there are no new connections.

        This is only called from the test suite, AFAICT.
        """
        self.dispatcher.close()
        if self.monitor is not None:
            self.monitor.close()
        for storage in self.storages.values():
            storage.close()
        # Force the asyncore mainloop to exit by hackery, i.e. close
        # every socket in the map.  loop() will return when the map is
        # empty.
        for s in asyncore.socket_map.values():
            try:
                s.close()
            except:
                pass

    def close_conn(self, conn):
        """Internal: remove the given connection from self.connections.

        This is the inverse of register_connection().
        """
        for cl in self.connections.values():
            if conn.obj in cl:
                cl.remove(conn.obj)
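get_invalidations() above copies the queue and then walks it from newest to oldest, collecting oids from every entry newer than the client's tid. A standalone sketch of that scan, with made-up tids and oids:

# Scan sketch mirroring get_invalidations(); values are illustrative only.
invq = [(4, ['b', 'c']), (3, ['a']), (2, ['b'])]   # newest first
client_tid = 2
oids = {}
for _tid, L in invq:
    if _tid <= client_tid:
        break                          # older entries are already known to the client
    for key in L:
        oids[key] = 1
print(invq[0][0], sorted(oids.keys()))             # 4 ['a', 'b', 'c']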
Example #4
class StorageServer:

    """The server side implementation of ZEO.

    The StorageServer is the 'manager' for incoming connections.  Each
    connection is associated with its own ZEOStorage instance (defined
    below).  The StorageServer may handle multiple storages; each
    ZEOStorage instance only handles a single storage.
    """

    # Classes we instantiate.  A subclass might override.

    DispatcherClass = Dispatcher
    ZEOStorageClass = ZEOStorage
    ManagedServerConnectionClass = ManagedServerConnection

    def __init__(self, addr, storages, read_only=0,
                 invalidation_queue_size=100,
                 transaction_timeout=None,
                 monitor_address=None,
                 auth_protocol=None,
                 auth_database=None,
                 auth_realm=None):
        """StorageServer constructor.

        This is typically invoked from the start.py script.

        Arguments (the first two are required and positional):

        addr -- the address at which the server should listen.  This
            can be a tuple (host, port) to signify a TCP/IP connection
            or a pathname string to signify a Unix domain socket
            connection.  A hostname may be a DNS name or a dotted IP
            address.

        storages -- a dictionary giving the storage(s) to handle.  The
            keys are the storage names, the values are the storage
            instances, typically FileStorage or Berkeley storage
            instances.  By convention, storage names are typically
            strings representing small integers starting at '1'.

        read_only -- an optional flag saying whether the server should
            operate in read-only mode.  Defaults to false.  Note that
            even if the server is operating in writable mode,
            individual storages may still be read-only.  But if the
            server is in read-only mode, no write operations are
            allowed, even if the storages are writable.  Note that
            pack() is considered a read-only operation.

        invalidation_queue_size -- The storage server keeps a queue
            of the objects modified by the last N transactions, where
            N == invalidation_queue_size.  This queue is used to
            speed client cache verification when a client disconnects
            for a short period of time.

        transaction_timeout -- The maximum amount of time to wait for
            a transaction to commit after acquiring the storage lock.
            If the transaction takes too long, the client connection
            will be closed and the transaction aborted.

        monitor_address -- The address at which the monitor server
            should listen.  If specified, a monitor server is started.
            The monitor server provides server statistics in a simple
            text format.

        auth_protocol -- The name of the authentication protocol to use.
            Examples are "digest" and "srp".

        auth_database -- The name of the password database file.
            It should be in a format compatible with the authentication
            protocol used; for instance, "sha" and "srp" require different
            formats.

            Note that to implement an authentication protocol, a server
            and client authentication mechanism must be implemented in an
            auth_* module, which should be stored inside the "auth"
            subdirectory. This module may also define a DatabaseClass
            variable that should indicate what database should be used
            by the authenticator.
        """

        self.addr = addr
        self.storages = storages
        set_label()
        msg = ", ".join(
            ["%s:%s:%s" % (name, storage.isReadOnly() and "RO" or "RW",
                           storage.getName())
             for name, storage in storages.items()])
        log("%s created %s with storages: %s" %
            (self.__class__.__name__, read_only and "RO" or "RW", msg))
        for s in storages.values():
            s._waiting = []
        self.read_only = read_only
        self.auth_protocol = auth_protocol
        self.auth_database = auth_database
        self.auth_realm = auth_realm
        self.database = None
        if auth_protocol:
            self._setup_auth(auth_protocol)
        # A list of at most invalidation_queue_size invalidations.
        # The list is kept in sorted order with the most recent
        # invalidation at the front.  The list never has more than
        # self.invq_bound elements.
        self.invq = []
        self.invq_bound = invalidation_queue_size
        self.connections = {}
        self.dispatcher = self.DispatcherClass(addr,
                                               factory=self.new_connection)
        self.stats = {}
        self.timeouts = {}
        for name in self.storages.keys():
            self.stats[name] = StorageStats()
            if transaction_timeout is None:
                # An object with no-op methods
                timeout = StubTimeoutThread()
            else:
                timeout = TimeoutThread(transaction_timeout)
                timeout.start()
            self.timeouts[name] = timeout
        if monitor_address:
            self.monitor = StatsServer(monitor_address, self.stats)
        else:
            self.monitor = None

    def _setup_auth(self, protocol):
        # Can't be done in global scope, because of cyclic references
        from ZEO.auth import get_module

        name = self.__class__.__name__

        module = get_module(protocol)
        if not module:
            log("%s: no such an auth protocol: %s" % (name, protocol))
            return

        storage_class, client, db_class = module

        if not storage_class or not issubclass(storage_class, ZEOStorage):
            log(("%s: %s isn't a valid protocol, must have a StorageClass" %
                 (name, protocol)))
            self.auth_protocol = None
            return
        self.ZEOStorageClass = storage_class

        log("%s: using auth protocol: %s" % (name, protocol))

        # We create a Database instance here for use with the authenticator
        # modules. Having one instance allows it to be shared between multiple
        # storages, avoiding the need to bloat each with a new authenticator
        # Database that would contain the same info, and also avoiding any
        # possible synchronization issues between them.
        self.database = db_class(self.auth_database)
        if self.database.realm != self.auth_realm:
            raise ValueError("password database realm %r "
                             "does not match storage realm %r"
                             % (self.database.realm, self.auth_realm))


    def new_connection(self, sock, addr):
        """Internal: factory to create a new connection.

        This is called by the Dispatcher class in ZEO.zrpc.server
        whenever accept() returns a socket for a new incoming
        connection.
        """
        if self.auth_protocol and self.database:
            zstorage = self.ZEOStorageClass(self, self.read_only,
                                            auth_realm=self.auth_realm)
            zstorage.set_database(self.database)
        else:
            zstorage = self.ZEOStorageClass(self, self.read_only)

        c = self.ManagedServerConnectionClass(sock, addr, zstorage, self)
        log("new connection %s: %s" % (addr, repr(c)))
        return c

    def register_connection(self, storage_id, conn):
        """Internal: register a connection with a particular storage.

        This is called by ZEOStorage.register().

        The dictionary self.connections maps each storage name to a
        list of current connections for that storage; this information
        is needed to handle invalidation.  This function updates this
        dictionary.

        Returns the timeout and stats objects for the appropriate storage.
        """
        l = self.connections.get(storage_id)
        if l is None:
            l = self.connections[storage_id] = []
        l.append(conn)
        stats = self.stats[storage_id]
        stats.clients += 1
        return self.timeouts[storage_id], stats

    def invalidate(self, conn, storage_id, tid, invalidated=(), info=None):
        """Internal: broadcast info and invalidations to clients.

        This is called from several ZEOStorage methods.

        This can do three different things:

        - If the invalidated argument is non-empty, it broadcasts
          invalidateTransaction() messages to all clients of the given
          storage except the current client (the conn argument).

        - If the invalidated argument is empty and the info argument
          is a non-empty dictionary, it broadcasts info() messages to
          all clients of the given storage, including the current
          client.

        - If both the invalidated argument and the info argument are
          non-empty, it broadcasts invalidateTransaction() messages to all
          clients except the current, and sends an info() message to
          the current client.

        """
        if invalidated:
            if len(self.invq) >= self.invq_bound:
                self.invq.pop()
            self.invq.insert(0, (tid, invalidated))
        for p in self.connections.get(storage_id, ()):
            if invalidated and p is not conn:
                p.client.invalidateTransaction(tid, invalidated)
            elif info is not None:
                p.client.info(info)

    def get_invalidations(self, tid):
        """Return a tid and list of all objects invalidation since tid.

        The tid is the most recent transaction id seen by the client.

        Returns (None, []) if it is unable to provide a complete list
        of invalidations for tid.  In this case, the client should
        do full cache verification.
        """

        if not self.invq:
            log("invq empty")
            return None, []

        earliest_tid = self.invq[-1][0]
        if earliest_tid > tid:
            log("tid to old for invq %s < %s" % (u64(tid), u64(earliest_tid)))
            return None, []

        oids = {}
        for _tid, L in self.invq:
            if _tid <= tid:
                break
            for key in L:
                oids[key] = 1
        latest_tid = self.invq[0][0]
        return latest_tid, oids.keys()

    def close_server(self):
        """Close the dispatcher so that there are no new connections.

        This is only called from the test suite, AFAICT.
        """
        self.dispatcher.close()
        if self.monitor is not None:
            self.monitor.close()
        for storage in self.storages.values():
            storage.close()
        # Force the asyncore mainloop to exit by hackery, i.e. close
        # every socket in the map.  loop() will return when the map is
        # empty.
        for s in asyncore.socket_map.values():
            try:
                s.close()
            except:
                pass

    def close_conn(self, conn):
        """Internal: remove the given connection from self.connections.

        This is the inverse of register_connection().
        """
        for cl in self.connections.values():
            if conn.obj in cl:
                cl.remove(conn.obj)
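The class attributes near the top of the class body ("Classes we instantiate.  A subclass might override.") are the intended extension points: the constructor and new_connection() look them up on the class, so a subclass only needs to rebind them. A hedged sketch, where QuietDispatcher is a hypothetical subclass used purely for illustration:

# Sketch of the override hook noted in the class body; QuietDispatcher is hypothetical.
class QuietDispatcher(Dispatcher):
    pass                                  # e.g. a dispatcher variant with quieter logging

class MyStorageServer(StorageServer):
    DispatcherClass = QuietDispatcher     # used when __init__ builds self.dispatcher
    # ZEOStorageClass and ManagedServerConnectionClass can be rebound the same way.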