Example No. 1
def fast_encode():
    # Only use in cases where you *know* the data contains only basic
    # Python objects
    pickler = Pickler(1)
    pickler.fast = 1
    dump = pickler.dump
    def fast_encode(*args):
        return dump(args, 1)
    return fast_encode
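For comparison, here is a rough Python 3 analogue (my sketch, not part of the excerpt): pickle.Pickler needs an explicit buffer because the bare-protocol constructor is cPickle-specific, and the sketch builds a pickler per call rather than reusing one as the factory above does. Setting fast = 1 disables memoization in both versions.

# Sketch only: a Python 3 analogue of fast_encode().
import pickle
from io import BytesIO

def fast_encode3():
    def encode(*args):
        f = BytesIO()
        pickler = pickle.Pickler(f, 1)  # protocol 1, as in the excerpt
        pickler.fast = 1                # no memo: only safe for flat builtin data
        pickler.dump(args)
        return f.getvalue()
    return encode

enc = fast_encode3()
print(pickle.loads(enc(42, "msg", None)))  # -> (42, 'msg', None)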
Example No. 2
def encode(*args): # args: (msgid, flags, name, args)
    # (We used to have a global pickler, but that's not thread-safe. :-( )

    # It's not thread-safe if, in the course of pickling, we call the
    # Python interpreter, which releases the GIL.

    # Note that args may contain very large binary pickles already; for
    # this reason, it's important to use proto 1 (or higher) pickles here
    # too.  For a long time, this used proto 0 pickles, and that can
    # bloat our pickle to 4x the size (due to high-bit and control bytes
    # being represented by \xij escapes in proto 0).
    # Undocumented:  cPickle.Pickler accepts a lone protocol argument;
    # pickle.py does not.
    if PY3:
        # XXX: Py3: Needs optimization.
        f = BytesIO()
        pickler = Pickler(f, 3)
        pickler.fast = 1
        pickler.dump(args)
        res = f.getvalue()
        return res
    else:
        pickler = Pickler(1)
        pickler.fast = 1
        return pickler.dump(args, 1)
Example No. 3
def encode(*args): # args: (msgid, flags, name, args)
    # (We used to have a global pickler, but that's not thread-safe. :-( )

    # It's not thread-safe if, in the course of pickling, we call the
    # Python interpreter, which releases the GIL.

    # Note that args may contain very large binary pickles already; for
    # this reason, it's important to use proto 1 (or higher) pickles here
    # too.  For a long time, this used proto 0 pickles, and that can
    # bloat our pickle to 4x the size (due to high-bit and control bytes
    # being represented by \xij escapes in proto 0).
    # Undocumented:  cPickle.Pickler accepts a lone protocol argument;
    # pickle.py does not.
    if PY3:
        # XXX: Py3: Needs optimization.
        f = BytesIO()
        pickler = Pickler(f, 3)
        pickler.fast = 1
        pickler.dump(args)
        res = f.getvalue()
        return res
    else:
        pickler = Pickler(1)
        pickler.fast = 1
        # Only CPython's cPickle supports dumping
        # and returning in one operation:
        #   return pickler.dump(args, 1)
        # For PyPy we must return the value; fortunately this
        # works the same on CPython and is no more expensive
        pickler.dump(args)
        return pickler.getvalue()
Example No. 4
def __init__(self):
    self.file = tempfile.TemporaryFile(suffix=".tbuf")
    self.lock = Lock()
    self.closed = 0
    self.count = 0
    self.size = 0
    self.blobs = []
    # It's safe to use a fast pickler because the only objects
    # stored are builtin types -- strings or None.
    self.pickler = Pickler(self.file, 1)
    self.pickler.fast = 1
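The caveat in the comment is real: with fast = 1 the pickler keeps no memo, so shared or self-referential objects are not deduplicated, and CPython's C pickler rejects cycles outright. A quick demonstration (my sketch, not from the source):

# Sketch: why "fast" is only safe for flat builtin data.
import pickle
from io import BytesIO

p = pickle.Pickler(BytesIO(), 1)
p.fast = 1
p.dump(("oid", None))      # flat builtin data: fine

cycle = []
cycle.append(cycle)
try:
    p.dump(cycle)          # no memo, so the self-reference cannot be encoded
except ValueError as err:
    print("fast pickler rejected cyclic data:", err)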
Example No. 5
def __init__(self, connection_generation):
    self.connection_generation = connection_generation
    self.file = tempfile.TemporaryFile(suffix=".tbuf")
    self.count = 0
    self.size = 0
    self.blobs = []
    # It's safe to use a fast pickler because the only objects
    # stored are builtin types -- strings or None.
    self.pickler = Pickler(self.file, 1)
    self.pickler.fast = 1
    self.server_resolved = set()  # {oid}
    self.client_resolved = {}  # {oid -> buffer_record_number}
    self.exception = None
Example No. 6
def encode(*args): # args: (msgid, flags, name, args)
    # (We used to have a global pickler, but that's not thread-safe. :-( )

    # It's not thread-safe if, in the course of pickling, we call the
    # Python interpreter, which releases the GIL.

    # Note that args may contain very large binary pickles already; for
    # this reason, it's important to use proto 1 (or higher) pickles here
    # too.  For a long time, this used proto 0 pickles, and that can
    # bloat our pickle to 4x the size (due to high-bit and control bytes
    # being represented by \xij escapes in proto 0).
    # Undocumented:  cPickle.Pickler accepts a lone protocol argument;
    # pickle.py does not.
    # XXX: Py3: Needs optimization.
    f = BytesIO()
    pickler = Pickler(f, 3)
    pickler.fast = 1
    pickler.dump(args)
    res = f.getvalue()
    return res
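Assuming the module-level imports this excerpt depends on (BytesIO from io, Pickler from pickle), a round trip is straightforward; the 4-tuple message shape comes from the signature comment. A usage sketch:

# Round-trip sketch for encode() above; encode() is assumed in scope.
from pickle import loads

msg = encode(1, 0, 'store', (b'oid', b'data'))
assert loads(msg) == (1, 0, 'store', (b'oid', b'data'))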
Example No. 7
class CommitLog(object):
    def __init__(self):
        self.file = tempfile.TemporaryFile(suffix=".commit-log")
        self.pickler = Pickler(self.file, 1)
        self.pickler.fast = 1
        self.stores = 0

    def size(self):
        return self.file.tell()

    def delete(self, oid, serial):
        self.pickler.dump(('_delete', (oid, serial)))
        self.stores += 1

    def checkread(self, oid, serial):
        self.pickler.dump(('_checkread', (oid, serial)))
        self.stores += 1

    def store(self, oid, serial, data):
        self.pickler.dump(('_store', (oid, serial, data)))
        self.stores += 1

    def restore(self, oid, serial, data, prev_txn):
        self.pickler.dump(('_restore', (oid, serial, data, prev_txn)))
        self.stores += 1

    def undo(self, transaction_id):
        self.pickler.dump(('_undo', (transaction_id, )))
        self.stores += 1

    def __iter__(self):
        self.file.seek(0)
        unpickler = Unpickler(self.file)
        for i in range(self.stores):
            yield unpickler.load()

    def close(self):
        if self.file:
            self.file.close()
            self.file = None
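A minimal usage sketch (mine; tempfile, Pickler, and Unpickler are assumed imported as in the module this excerpt comes from). Each mutator appends a (method-name, args) tuple to the temporary file, and iterating replays them in order:

# Usage sketch for CommitLog; oids/serials are 8-byte strings in ZODB.
log = CommitLog()
log.store(b'\x00' * 8, b'\x00' * 8, b'data')
log.delete(b'\x00' * 8, b'\x00' * 8)
for name, args in log:   # replays ('_store', ...), then ('_delete', ...)
    print(name, args)
log.close()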
Example No. 8
def __init__(self):
    self.file = tempfile.TemporaryFile(suffix=".commit-log")
    self.pickler = Pickler(self.file, 1)
    self.pickler.fast = 1
    self.stores = 0
Example No. 9
class TransactionBuffer:

    # Valid call sequences:
    #
    #     ((store | invalidate)* begin_iterate next* clear)* close
    #
    # get_size can be called any time

    # The TransactionBuffer is used by client storage to hold update
    # data until the tpc_finish().  It is normally used by a single
    # thread, because only one thread can be in the two-phase commit
    # at one time.

    # It is possible, however, for one thread to close the storage
    # while another thread is in the two-phase commit.  We must use
    # a lock to guard against this race, because unpredictable things
    # can happen in Python if one thread closes a file that another
    # thread is reading.  In a debug build, an assert() can fail.

    # Caution:  If an operation is performed on a closed TransactionBuffer,
    # it has no effect and does not raise an exception.  The only time
    # this should occur is when a ClientStorage is closed in one
    # thread while another thread is in its tpc_finish().  It's not
    # clear what should happen in this case.  If the tpc_finish()
    # completes without error, the Connection using it could have
    # inconsistent data.  This should have minimal effect, though,
    # because the Connection is connected to a closed storage.

    def __init__(self):
        self.file = tempfile.TemporaryFile(suffix=".tbuf")
        self.lock = Lock()
        self.closed = 0
        self.count = 0
        self.size = 0
        self.blobs = []
        # It's safe to use a fast pickler because the only objects
        # stored are builtin types -- strings or None.
        self.pickler = Pickler(self.file, 1)
        self.pickler.fast = 1

    def close(self):
        self.clear()
        self.lock.acquire()
        try:
            self.closed = 1
            try:
                self.file.close()
            except OSError:
                pass
        finally:
            self.lock.release()

    def store(self, oid, data):
        """Store oid, version, data for later retrieval"""
        self.lock.acquire()
        try:
            if self.closed:
                return
            self.pickler.dump((oid, data))
            self.count += 1
            # Estimate per-record cache size
            self.size = self.size + (data and len(data) or 0) + 31
        finally:
            self.lock.release()

    def storeBlob(self, oid, blobfilename):
        self.blobs.append((oid, blobfilename))

    def invalidate(self, oid):
        self.lock.acquire()
        try:
            if self.closed:
                return
            self.pickler.dump((oid, None))
            self.count += 1
        finally:
            self.lock.release()

    def clear(self):
        """Mark the buffer as empty"""
        self.lock.acquire()
        try:
            if self.closed:
                return
            self.file.seek(0)
            self.count = 0
            self.size = 0
            while self.blobs:
                oid, blobfilename = self.blobs.pop()
                if os.path.exists(blobfilename):
                    ZODB.blob.remove_committed(blobfilename)
        finally:
            self.lock.release()

    def __iter__(self):
        self.lock.acquire()
        try:
            if self.closed:
                return
            self.file.flush()
            self.file.seek(0)
            return TBIterator(self.file, self.count)
        finally:
            self.lock.release()
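Typical use (a sketch; TBIterator is defined elsewhere in the same module and is assumed here to yield the pickled (oid, data) pairs): stores and invalidations accumulate in the temp file, and iteration replays them at tpc_finish() time.

# Usage sketch for the locking TransactionBuffer above.
buf = TransactionBuffer()
buf.store(b'\x00' * 8, b'a pickle')    # update record
buf.invalidate(b'\x00' * 7 + b'\x01')  # recorded as (oid, None)
for oid, data in buf:                  # assumes TBIterator yields (oid, data)
    print(oid, data is None)
buf.close()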
Example No. 10
class TransactionBuffer(object):

    # The TransactionBuffer is used by client storage to hold update
    # data until the tpc_finish().  It is only used by a single
    # thread, because only one thread can be in the two-phase commit
    # at one time.

    def __init__(self, connection_generation):
        self.connection_generation = connection_generation
        self.file = tempfile.TemporaryFile(suffix=".tbuf")
        self.count = 0
        self.size = 0
        self.blobs = []
        # It's safe to use a fast pickler because the only objects
        # stored are builtin types -- strings or None.
        self.pickler = Pickler(self.file, 1)
        self.pickler.fast = 1
        self.server_resolved = set()  # {oid}
        self.client_resolved = {}  # {oid -> buffer_record_number}
        self.exception = None

    def close(self):
        self.file.close()

    def store(self, oid, data):
        """Store oid, version, data for later retrieval"""
        self.pickler.dump((oid, data))
        self.count += 1
        # Estimate per-record cache size
        self.size = self.size + (data and len(data) or 0) + 31

    def resolve(self, oid, data):
        """Record client-resolved data
        """
        self.store(oid, data)
        self.client_resolved[oid] = self.count - 1

    def server_resolve(self, oid):
        self.server_resolved.add(oid)

    def storeBlob(self, oid, blobfilename):
        self.blobs.append((oid, blobfilename))

    def __iter__(self):
        self.file.seek(0)
        unpickler = Unpickler(self.file)
        server_resolved = self.server_resolved
        client_resolved = self.client_resolved

        # Gaaaa, this is awkward. There can be entries in serials that
        # aren't in the buffer, because of undo.  Entries can be repeated
        # in the buffer, because of ZODB. (Maybe this is a bug now, but
        # it may be a feature later.)

        seen = set()
        for i in range(self.count):
            oid, data = unpickler.load()
            if client_resolved.get(oid, i) == i:
                seen.add(oid)
                yield oid, data, oid in server_resolved

        # We may have leftover oids because undo
        for oid in server_resolved:
            if oid not in seen:
                yield oid, None, True

    # Support ZEO4:

    def serialnos(self, args):
        for oid in args:
            if isinstance(oid, bytes):
                self.server_resolved.add(oid)
            else:
                oid, serial = oid
                if isinstance(serial, Exception):
                    self.exception = serial
                elif serial == b'rs':
                    self.server_resolved.add(oid)
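In this newer variant, iteration yields (oid, data, resolved) triples: client-resolved records are deduplicated by buffer position, server-resolved oids are flagged, and server-resolved oids that never hit the buffer (e.g. after undo) are emitted with data=None. A usage sketch (mine):

# Usage sketch for this TransactionBuffer.
buf = TransactionBuffer(connection_generation=1)
buf.store(b'\x00' * 8, b'first pickle')
buf.resolve(b'\x00' * 8, b'resolved pickle')  # supersedes the first record
buf.server_resolve(b'\x00' * 7 + b'\x01')     # resolved on the server only

for oid, data, resolved in buf:
    print(oid, data, resolved)
# -> the resolved pickle for the first oid (resolved False), then
#    (other_oid, None, True) for the server-resolved leftover.
buf.close()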