Example #1
    def loadEx(self, oid, version):
        self._lock.acquire()  # for atomic processing of invalidations
        try:
            t = self._cache.load(oid, version)
            if t:
                return t
        finally:
            self._lock.release()

        if self._server is None:
            raise ClientDisconnected()

        self._load_lock.acquire()
        try:
            self._lock.acquire()
            try:
                self._load_oid = oid
                self._load_status = 1
            finally:
                self._lock.release()

            data, tid, ver = self._server.loadEx(oid, version)

            self._lock.acquire()  # for atomic processing of invalidations
            try:
                if self._load_status:
                    self._cache.store(oid, ver, tid, None, data)
                self._load_oid = None
            finally:
                self._lock.release()
        finally:
            self._load_lock.release()

        return data, tid, ver
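
loadEx() raises ClientDisconnected when no server connection is available. A caller can treat that as a transient failure; the following is a minimal, hypothetical retry wrapper (the storage argument, MAX_RETRIES and RETRY_DELAY names are assumptions for illustration, not part of ZEO):

import time

from ZEO.Exceptions import ClientDisconnected

MAX_RETRIES = 3      # hypothetical policy, not part of ZEO
RETRY_DELAY = 0.5    # seconds to wait before trying again

def load_with_retry(storage, oid, version=''):
    """Retry a load a few times while the client is disconnected."""
    for attempt in range(MAX_RETRIES):
        try:
            return storage.loadEx(oid, version)
        except ClientDisconnected:
            if attempt == MAX_RETRIES - 1:
                raise
            time.sleep(RETRY_DELAY)  # give the client a chance to reconnect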
Example #2
    def tpc_finish(self, txn, f=None):
        """Storage API: finish a transaction."""
        if txn is not self._transaction:
            return
        self._load_lock.acquire()
        try:
            if self._midtxn_disconnect:
                raise ClientDisconnected(
                    'Calling tpc_finish() on a disconnected transaction')

            # The calls to tpc_finish() and _update_cache() should
            # never run concurrently with another thread, because the
            # tpc_cond condition variable prevents more than one
            # thread from calling tpc_finish() at a time.
            tid = self._server.tpc_finish(id(txn))
            self._lock.acquire()  # for atomic processing of invalidations
            try:
                self._update_cache(tid)
                if f is not None:
                    f(tid)
            finally:
                self._lock.release()

            r = self._check_serials()
            assert r is None or len(r) == 0, "unhandled serialnos: %s" % r
        finally:
            self._load_lock.release()
            self.end_transaction()
Example #3
    def close(self):
        if not self.closed:
            self.closed = True
            self._connecting.cancel()
            if self.transport is not None:
                self.transport.close()
            for future in self.pop_futures():
                future.set_exception(ClientDisconnected("Closed"))
Example #4
    def wait_for_result(self, future, timeout):
        try:
            return future.result(timeout)
        except concurrent.futures.TimeoutError:
            if not self.client.ready:
                raise ClientDisconnected("timed out waiting for connection")
            else:
                raise
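
wait_for_result() distinguishes two failure modes: if the timeout expires while the client is not ready it raises ClientDisconnected, otherwise it re-raises the original TimeoutError. A hypothetical caller (the wait_with_logging name and the client object are assumptions for illustration) might handle them separately:

import concurrent.futures
import logging

from ZEO.Exceptions import ClientDisconnected

logger = logging.getLogger(__name__)

def wait_with_logging(client, future, timeout=30):
    try:
        return client.wait_for_result(future, timeout)
    except ClientDisconnected:
        # Timed out while the client was not ready: no usable connection.
        logger.warning("storage unavailable, giving up after %ss", timeout)
        raise
    except concurrent.futures.TimeoutError:
        # The connection is up; the server is just slow for this call.
        logger.warning("server slow to respond, but still connected")
        raise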
Example #5
    def call_threadsafe(self, future, wait_ready, method, args):
        if self.ready:
            self.protocol.call(future, method, args)
        elif wait_ready:
            self._when_ready(self.call_threadsafe, future, wait_ready, method,
                             args)
        else:
            future.set_exception(ClientDisconnected())
Example #6
    def prefetch(self, future, wait_ready, oids, tid):
        if self.ready:
            for oid in oids:
                if self.cache.loadBefore(oid, tid) is None:
                    self._prefetch(oid, tid)

            future.set_result(None)
        else:
            future.set_exception(ClientDisconnected())
Example #7
    def loadBulk(self, oids):
        """
        Storage API to return multiple objects
        We load a unique set of them, just in case

        :param list oids: Iterable oids to load at once
        :return: Loaded oid objects
        :rtype: list
        """
        # First, try to get whatever possible from cache
        self._load_lock.acquire()
        try:
            self._lock.acquire()  # for atomic processing of invalidations
            try:
                result = []
                for oid in oids:
                    out = self._cache.load(oid)
                    if not out:
                        self._load_oids[oid] = 1
                    else:
                        result.append(out)
            finally:
                self._lock.release()
            if len(self._load_oids) == 0:
                return result
            # If we ever get here, we need to load some more stuff
            # self._load_oids dictionary is protected by self._load_lock

            if self._server is None:
                raise ClientDisconnected()

            load_oids = list(self._load_oids.keys())

            # [(data, tid), (data, tid), ...]
            bulk_data = self._server.rpc.call("loadBulk", load_oids)

            data_size = 0
            for oid, (data, tid) in izip(load_oids, bulk_data):
                data_size += len(data)
                self._lock.acquire()  # for atomic processing of invalidations
                try:
                    # Update cache only when there was no invalidation
                    if self._load_oids[oid]:
                        self._cache.store(oid, tid, None, data)
                    del self._load_oids[oid]
                    # XXX shouldn't we provide a recent value from cache then?
                    result.append((data, tid))
                finally:
                    self._lock.release()
            logging.debug("Bulk-loaded {0} objects of size {1}".format(
                len(load_oids), data_size))
        finally:
            self._load_lock.release()

        return result
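
Note that izip in the snippet above is Python 2's itertools.izip, presumably imported at module level in the original source. On Python 3 the built-in zip already returns an iterator, so a common compatibility shim looks like this:

try:
    from itertools import izip  # Python 2
except ImportError:
    izip = zip  # Python 3: built-in zip is already lazy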
Example #8
    def connection_lost(self, exc):
        logger.debug('connection_lost %r', exc)
        self.heartbeat_handle.cancel()
        if self.closed:
            for f in self.pop_futures():
                f.cancel()
        else:
            # We have to be careful processing the futures, because
            # exception callbacks might modify them.
            for f in self.pop_futures():
                f.set_exception(ClientDisconnected(exc or 'connection lost'))
            self.closed = True
            self.client.disconnected(self)
Example #9
    def __next__(self):
        if self._ended:
            raise StopIteration()

        if self._iid < 0:
            raise ClientDisconnected("Disconnected iterator")

        tx_data = self._storage._call('iterator_next', self._iid)
        if tx_data is None:
            # The iterator is exhausted, and the server has already
            # disposed of it.
            self._ended = True
            self._storage._forget_iterator(self._iid)
            raise StopIteration()

        return ClientStorageTransactionInformation(self._storage, self,
                                                   *tx_data)
Example #10
    def _when_ready(self, func, result_future, *args):

        if self.ready is None:
            # We started without waiting for a connection. (prob tests :( )
            result_future.set_exception(ClientDisconnected("never connected"))
        else:

            @self.connected.add_done_callback
            def done(future):
                e = future.exception()
                if e is not None:
                    # Propagate the connection failure to the caller's future.
                    result_future.set_exception(e)
                else:
                    if self.ready:
                        func(result_future, *args)
                    else:
                        self._when_ready(func, result_future, *args)
Example #11
    def _check_trans(self, trans, meth):
        """Internal helper to check a transaction argument for sanity."""
        if self._is_read_only:
            raise POSException.ReadOnlyError()

        try:
            buf = trans.data(self)
        except KeyError:
            buf = None

        if buf is None:
            raise POSException.StorageTransactionError(
                "Transaction not committing", meth, trans)

        if buf.connection_generation != self._connection_generation:
            # We were disconnected, so this one is poisoned
            raise ClientDisconnected(meth, 'on a disconnected transaction')

        return buf
Example #12
    def load_before_threadsafe(self, future, wait_ready, oid, tid):
        data = self.cache.loadBefore(oid, tid)
        if data is not None:
            future.set_result(data)
        elif self.ready:
            try:
                data = yield self.protocol.load_before(oid, tid)
            except Exception as exc:
                future.set_exception(exc)
            else:
                future.set_result(data)
                if data:
                    data, start, end = data
                    self.cache.store(oid, start, end, data)
        elif wait_ready:
            self._when_ready(self.load_before_threadsafe, future, wait_ready,
                             oid, tid)
        else:
            future.set_exception(ClientDisconnected())
Example #13
    def tpc_finish_threadsafe(self, future, wait_ready, tid, updates, f):
        if self.ready:
            try:
                tid = yield self.protocol.fut('tpc_finish', tid)
                cache = self.cache
                for oid, data, resolved in updates:
                    cache.invalidate(oid, tid)
                    if data and not resolved:
                        cache.store(oid, tid, None, data)
                cache.setLastTid(tid)
            except Exception as exc:
                future.set_exception(exc)

                # At this point, our cache is in an inconsistent
                # state.  We need to reconnect in hopes of
                # recovering to a consistent state.
                self.protocol.close()
                self.disconnected(self.protocol)
            else:
                f(tid)
                future.set_result(tid)
        else:
            future.set_exception(ClientDisconnected())
Example #14
    def __getattr__(self, attr):
        raise ClientDisconnected()
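
This one-method example is the heart of a "disconnected stub" pattern: an object that stands in for the real server proxy while the connection is down, so that any attempted remote call raises ClientDisconnected instead of AttributeError. A self-contained sketch of the pattern (the class name is an assumption, not ZEO's):

from ZEO.Exceptions import ClientDisconnected

class DisconnectedServerStub(object):
    """Stand-in for a live server proxy while the client is disconnected."""

    def __getattr__(self, attr):
        # Any attribute access (and therefore any RPC attempt) fails fast.
        raise ClientDisconnected()

server = DisconnectedServerStub()
# server.loadEx(oid, '')  -> raises ClientDisconnected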
Example #15
    def call_async_threadsafe(self, future, wait_ready, method, args):
        if self.ready:
            self.protocol.call_async(method, args)
            future.set_result(None)
        else:
            future.set_exception(ClientDisconnected())
Example #16
    def call_closed(*a, **k):
        raise ClientDisconnected('closed')
Example #17
    def call_async_iter_threadsafe(self, future, wait_ready, it):
        if self.ready:
            self.protocol.call_async_iter(it)
            future.set_result(None)
        else:
            future.set_exception(ClientDisconnected())