Example #1
    def testDeepCopyCanInvalidate(self):
        """
        Tests regression for invalidation problems related to missing
        readers and writers values in cloned objects (see
        http://mail.zope.org/pipermail/zodb-dev/2008-August/012054.html)
        """
        import ZODB.MappingStorage
        database = DB(ZODB.blob.BlobStorage(
            'blobs', ZODB.MappingStorage.MappingStorage()))
        connection = database.open()
        root = connection.root()
        transaction.begin()
        root['blob'] = Blob()
        transaction.commit()

        stream = BytesIO()
        p = Pickler(stream, _protocol)
        p.dump(root['blob'])
        stream.seek(0)
        u = Unpickler(stream)
        clone = u.load()
        clone._p_invalidate()

        # it should also be possible to open the cloned blob
        # (even though it won't contain the original data)
        clone.open().close()

        # tearDown
        database.close()
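
The heart of this test is the dump/seek/load round trip through an in-memory buffer. A minimal standalone sketch of that pattern, assuming the standard-library pickle module stands in for ZODB's _compat wrappers and that _protocol is an ordinary pickle protocol number:

from io import BytesIO
from pickle import Pickler, Unpickler

_protocol = 3  # assumed stand-in for the _protocol constant used above

stream = BytesIO()
Pickler(stream, _protocol).dump({'answer': 42})
stream.seek(0)  # rewind before reading the pickle back
clone = Unpickler(stream).load()
assert clone == {'answer': 42}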
Example #2
def cloneByPickle(obj, ignore_list=()):
    """Makes a copy of a ZODB object, loading ghosts as needed.

    Ignores specified objects along the way, replacing them with None
    in the copy.
    """
    ignore_dict = {}
    for o in ignore_list:
        ignore_dict[id(o)] = o

    def persistent_id(ob, ignore_dict=ignore_dict):
        if id(ob) in ignore_dict:
            return 'ignored'
        # Setting _p_changed un-ghosts the object so its state gets pickled.
        if getattr(ob, '_p_changed', 0) is None:
            ob._p_changed = 0
        return None

    def persistent_load(ref):
        assert ref == 'ignored'
        # Return a placeholder object that will be replaced by
        # removeNonVersionedData().
        placeholder = SimpleItem()
        placeholder.id = "ignored_subobject"
        return placeholder

    stream = BytesIO()
    p = Pickler(stream, 1)
    p.persistent_id = persistent_id
    p.dump(obj)
    stream.seek(0)
    u = Unpickler(stream)
    u.persistent_load = persistent_load
    return u.load()
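
The two hooks above are the whole mechanism: persistent_id marks objects to skip on the way out, and persistent_load substitutes placeholders on the way back in. A minimal sketch of the same mechanism, assuming the standard-library pickle (which exposes both hooks as overridable methods on subclasses) and a plain None placeholder instead of SimpleItem:

import pickle
from io import BytesIO

secret = object()  # stands in for an object on the ignore_list

class IgnoringPickler(pickle.Pickler):
    def persistent_id(self, ob):
        # Emit a persistent reference instead of pickling the object.
        return 'ignored' if ob is secret else None

class PlaceholderUnpickler(pickle.Unpickler):
    def persistent_load(self, ref):
        assert ref == 'ignored'
        return None  # placeholder; cloneByPickle() uses a SimpleItem here

stream = BytesIO()
IgnoringPickler(stream, 2).dump({'keep': 1, 'drop': secret})
stream.seek(0)
copy = PlaceholderUnpickler(stream).load()
assert copy == {'keep': 1, 'drop': None}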
Example #3
    def save(self, pos, fname):
        with open(fname, 'wb') as f:
            pickler = Pickler(f, _protocol)
            pickler.fast = True
            pickler.dump(pos)
            for k, v in six.iteritems(self._data):
                pickler.dump((k, v.toString()))
            pickler.dump(None)
Example #4
    def __init__(self, obj=None):
        self._file = BytesIO()
        self._p = Pickler(self._file, _protocol)
        if sys.version_info[0] < 3:
            self._p.inst_persistent_id = self.persistent_id
        else:
            self._p.persistent_id = self.persistent_id
        self._stack = []
        if obj is not None:
            self._stack.append(obj)
            jar = obj._p_jar
            assert myhasattr(jar, "new_oid")
            self._jar = jar
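
Example #3's save() writes the position, then a stream of (key, value) pairs, then a None sentinel. A matching reader is a sketch like the following (hypothetical helper; the real counterpart lives alongside save() in its own class):

from pickle import Unpickler

def load_pairs(fname):
    with open(fname, 'rb') as f:
        unpickler = Unpickler(f)
        pos = unpickler.load()  # the position dumped first
        data = {}
        while True:
            item = unpickler.load()
            if item is None:  # the sentinel written by save()
                break
            k, v = item
            data[k] = v
        return pos, data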
Example #5
    def _initroot(self):
        try:
            self._storage.load(ZERO, '')
        except KeyError:
            from transaction import Transaction
            file = BytesIO()
            p = Pickler(file, _protocol)
            p.dump((PersistentMapping, None))
            p.dump({'_container': {}})
            t = Transaction()
            t.description = 'initial database creation'
            self._storage.tpc_begin(t)
            self._storage.store(ZERO, None, file.getvalue(), '', t)
            self._storage.tpc_vote(t)
            self._storage.tpc_finish(t)
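
The record written here is the standard two-pickle ZODB layout: class metadata first, instance state second. A sketch of reading such a record back with two consecutive load() calls (standard-library pickle assumed, and the persistent package must be importable):

from io import BytesIO
from pickle import Pickler, Unpickler
from persistent.mapping import PersistentMapping

record = BytesIO()
writer = Pickler(record, 3)  # protocol assumed; the code above uses _protocol
writer.dump((PersistentMapping, None))
writer.dump({'_container': {}})

record.seek(0)
reader = Unpickler(record)
klass_meta = reader.load()  # (PersistentMapping, None)
state = reader.load()       # {'_container': {}}
assert klass_meta[0] is PersistentMapping and state == {'_container': {}}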
Example #6
def dumps(obj):
    def getpersid(obj):
        if hasattr(obj, 'getoid'):
            return obj.getoid()
        return None

    s = BytesIO()
    p = Pickler(s, _protocol)
    if sys.version_info[0] < 3:
        p.inst_persistent_id = getpersid
    else:
        p.persistent_id = getpersid
    p.dump(obj)
    p.dump(None)
    return s.getvalue()
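
A matching loads() would resolve the persistent ids emitted by getpersid() back into objects. A sketch with a caller-supplied lookup table (hypothetical helper, not part of the original module):

import pickle
from io import BytesIO

def loads(data, resolver):
    """resolver maps an oid back to the live object it stands for."""
    class _Unpickler(pickle.Unpickler):
        def persistent_load(self, oid):
            return resolver[oid]
    # Only the first pickle is read; the trailing None sentinel is left alone.
    return _Unpickler(BytesIO(data)).load()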
Example #7
    def __init__(self, obj=None):
        self._file = BytesIO()
        self._p = Pickler(self._file, _protocol)
        if sys.version_info[0] < 3:
            self._p.inst_persistent_id = self.persistent_id
            # PyPy uses a Python implementation of cPickle on both Python 2
            # and Python 3. We can't really detect inst_persistent_id, as
            # it's a magic attribute that's not readable, but it doesn't
            # hurt to simply always assign to persistent_id as well.
            self._p.persistent_id = self.persistent_id
        else:
            self._p.persistent_id = self.persistent_id
        self._stack = []
        if obj is not None:
            self._stack.append(obj)
            jar = obj._p_jar
            assert myhasattr(jar, "new_oid")
            self._jar = jar
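
This version dance recurs throughout these examples, so it can be factored into a small helper (hypothetical, not part of ZODB), following the snippet's own rule of always assigning persistent_id as well:

import sys

def set_persistent_id(pickler, func):
    if sys.version_info[0] < 3:
        pickler.inst_persistent_id = func  # cPickle on CPython 2
    pickler.persistent_id = func  # everywhere else, per the comment above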
Example #8
def zodb_pickle(obj):
    """Create a pickle in the format expected by ZODB."""
    f = BytesIO()
    p = Pickler(f, _protocol)
    if sys.version_info[0] < 3:
        p.inst_persistent_id = _persistent_id
    else:
        p.persistent_id = _persistent_id
    klass = obj.__class__
    assert not hasattr(obj, '__getinitargs__'), "not ready for constructors"
    args = None

    mod = getattr(klass, '__module__', None)
    if mod is not None:
        klass = mod, klass.__name__

    state = obj.__getstate__()

    p.dump((klass, args))
    p.dump(state)
    return f.getvalue()
Example #9
def make_pickle(ob):
    sio = BytesIO()
    p = Pickler(sio, _protocol)
    p.dump(ob)
    return sio.getvalue()
Example #10
def pdumps(obj):
    s = BytesIO()
    p = Pickler(s, _protocol)
    p.dump(obj)
    p.dump(None)
    return s.getvalue()
Example #11
def tryToResolveConflict(self, oid, committedSerial, oldSerial, newpickle,
                         committedData=b''):
    # class_tuple, old, committed, newstate = ('',''), 0, 0, 0
    try:
        prfactory = PersistentReferenceFactory()
        newpickle = self._crs_untransform_record_data(newpickle)
        file = BytesIO(newpickle)
        unpickler = Unpickler(file)
        unpickler.find_global = find_global
        unpickler.persistent_load = prfactory.persistent_load
        meta = unpickler.load()
        if isinstance(meta, tuple):
            klass = meta[0]
            newargs = meta[1] or ()
            if isinstance(klass, tuple):
                klass = find_global(*klass)
        else:
            klass = meta
            newargs = ()

        if klass in _unresolvable:
            raise ConflictError

        inst = klass.__new__(klass, *newargs)

        try:
            resolve = inst._p_resolveConflict
        except AttributeError:
            _unresolvable[klass] = 1
            raise ConflictError

        oldData = self.loadSerial(oid, oldSerial)
        if not committedData:
            committedData = self.loadSerial(oid, committedSerial)

        if newpickle == oldData:
            # old -> new diff is empty, so merge is trivial
            return committedData
        if committedData == oldData:
            # old -> committed diff is empty, so merge is trivial
            return newpickle

        newstate = unpickler.load()
        old = state(self, oid, oldSerial, prfactory, oldData)
        committed = state(self, oid, committedSerial, prfactory, committedData)

        resolved = resolve(old, committed, newstate)

        file = BytesIO()
        pickler = Pickler(file, _protocol)
        if sys.version_info[0] < 3:
            pickler.inst_persistent_id = persistent_id
        else:
            pickler.persistent_id = persistent_id
        pickler.dump(meta)
        pickler.dump(resolved)
        return self._crs_transform_record_data(file.getvalue())
    except (ConflictError, BadClassName):
        pass
    except:
        # If anything else went wrong, catch it here and avoid passing an
        # arbitrary exception back to the client.  The error here will mask
        # the original ConflictError.  A client can recover from a
        # ConflictError, but not necessarily from other errors.  But log
        # the error so that any problems can be fixed.
        logger.error("Unexpected error", exc_info=True)

    raise ConflictError(oid=oid, serials=(committedSerial, oldSerial),
                        data=newpickle)
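
The resolve hook that tryToResolveConflict() calls receives three state snapshots: the common ancestor, the committed state, and the state being stored. A minimal sketch of a class supplying that hook (an assumed shape; BTrees.Length applies the same merge-the-deltas idea to its integer state):

import persistent

class Counter(persistent.Persistent):
    def __init__(self):
        self.value = 0

    def increment(self, amount=1):
        self.value += amount

    def _p_resolveConflict(self, old, committed, new):
        # old/committed/new are __getstate__() dicts; apply both
        # concurrent deltas relative to the common ancestor.
        merged = dict(committed)
        merged['value'] = committed['value'] + (new['value'] - old['value'])
        return merged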
Example #12
    def _importDuringCommit(self, transaction, f, return_oid_list):
        """Import data during two-phase commit.

        Invoked by the transaction manager mid commit.
        Appends one item, the OID of the first object created,
        to return_oid_list.
        """
        oids = {}

        # IMPORTANT: This code should be consistent with the code in
        # serialize.py. It is currently out of date and doesn't handle
        # weak references.

        def persistent_load(ooid):
            """Remap a persistent id to a new ID and create a ghost for it."""

            klass = None
            if isinstance(ooid, tuple):
                ooid, klass = ooid

            if not isinstance(ooid, bytes):
                assert isinstance(ooid, str)
                # this happens on Python 3 when all bytes in the oid are < 0x80
                ooid = ooid.encode('ascii')

            if ooid in oids:
                oid = oids[ooid]
            else:
                if klass is None:
                    oid = self._storage.new_oid()
                else:
                    oid = self._storage.new_oid(), klass
                oids[ooid] = oid

            return Ghost(oid)

        while 1:
            header = f.read(16)
            if header == export_end_marker:
                break
            if len(header) != 16:
                raise ExportError("Truncated export file")

            # Extract header information
            ooid = header[:8]
            length = u64(header[8:16])
            data = f.read(length)

            if len(data) != length:
                raise ExportError("Truncated export file")

            if oids:
                oid = oids[ooid]
                if isinstance(oid, tuple):
                    oid = oid[0]
            else:
                oids[ooid] = oid = self._storage.new_oid()
                return_oid_list.append(oid)

            # Blob support
            blob_begin = f.read(len(blob_begin_marker))
            if blob_begin == blob_begin_marker:
                # Copy the blob data to a temporary file
                # and remember the name
                blob_len = u64(f.read(8))
                blob_filename = mktemp()
                blob_file = open(blob_filename, "wb")
                cp(f, blob_file, blob_len)
                blob_file.close()
            else:
                f.seek(-len(blob_begin_marker), 1)
                blob_filename = None

            pfile = BytesIO(data)
            unpickler = Unpickler(pfile)
            unpickler.persistent_load = persistent_load

            newp = BytesIO()
            pickler = Pickler(newp, _protocol)
            if sys.version_info[0] < 3:
                pickler.inst_persistent_id = persistent_id
            else:
                pickler.persistent_id = persistent_id

            pickler.dump(unpickler.load())
            pickler.dump(unpickler.load())
            data = newp.getvalue()

            if blob_filename is not None:
                self._storage.storeBlob(oid, None, data, blob_filename, '',
                                        transaction)
            else:
                self._storage.store(oid, None, data, '', transaction)
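
The export file parsed above frames every record as an 8-byte oid plus an 8-byte big-endian length (what u64() decodes), followed by the pickle data. A standalone framing sketch using struct, assumed equivalent to ZODB.utils.p64/u64:

import struct

def write_record(f, oid, data):
    f.write(oid + struct.pack('>Q', len(data)))  # 8-byte oid, u64 length
    f.write(data)

def read_record(f):
    header = f.read(16)
    if len(header) != 16:
        raise ValueError('Truncated export file')
    oid, length = header[:8], struct.unpack('>Q', header[8:16])[0]
    data = f.read(length)
    if len(data) != length:
        raise ValueError('Truncated export file')
    return oid, data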
Example #13
    def __init__(self,
                 storage,
                 pool_size=7,
                 pool_timeout=1 << 31,
                 cache_size=400,
                 cache_size_bytes=0,
                 historical_pool_size=3,
                 historical_cache_size=1000,
                 historical_cache_size_bytes=0,
                 historical_timeout=300,
                 database_name='unnamed',
                 databases=None,
                 xrefs=True,
                 large_record_size=1 << 24,
                 **storage_args):
        """Create an object database.

        :Parameters:
          - `storage`: the storage used by the database, e.g. FileStorage
          - `pool_size`: expected maximum number of open connections
          - `cache_size`: target size of Connection object cache
          - `cache_size_bytes`: target size measured in total estimated size
               of objects in the Connection object cache.
               "0" means unlimited.
          - `historical_pool_size`: expected maximum number of total
            historical connections
          - `historical_cache_size`: target size of Connection object cache for
            historical (`at` or `before`) connections
          - `historical_cache_size_bytes` -- similar to `cache_size_bytes` for
            the historical connection.
          - `historical_timeout`: minimum number of seconds that
            an unused historical connection will be kept, or None.
          - `xrefs` - Boolian flag indicating whether implicit cross-database
            references are allowed
        """
        if isinstance(storage, six.string_types):
            from ZODB import FileStorage
            storage = FileStorage.FileStorage(storage, **storage_args)
        elif storage is None:
            from ZODB import MappingStorage
            storage = MappingStorage.MappingStorage(**storage_args)

        # Allocate lock.
        x = threading.RLock()
        self._a = x.acquire
        self._r = x.release

        # pools and cache sizes
        self.pool = ConnectionPool(pool_size, pool_timeout)
        self.historical_pool = KeyedConnectionPool(historical_pool_size,
                                                   historical_timeout)
        self._cache_size = cache_size
        self._cache_size_bytes = cache_size_bytes
        self._historical_cache_size = historical_cache_size
        self._historical_cache_size_bytes = historical_cache_size_bytes

        # Setup storage
        self.storage = storage
        self.references = ZODB.serialize.referencesf
        try:
            storage.registerDB(self)
        except TypeError:
            storage.registerDB(self, None)  # Backward compat

        if (not hasattr(storage, 'tpc_vote')) and not storage.isReadOnly():
            warnings.warn(
                "Storage doesn't have a tpc_vote and this violates "
                "the storage API. Violently monkeypatching in a do-nothing "
                "tpc_vote.", DeprecationWarning, 2)
            storage.tpc_vote = lambda *args: None

        if IMVCCStorage.providedBy(storage):
            temp_storage = storage.new_instance()
        else:
            temp_storage = storage
        try:
            try:
                temp_storage.load(z64, '')
            except KeyError:
                # Create the database's root in the storage if it doesn't exist
                from persistent.mapping import PersistentMapping
                root = PersistentMapping()
                # Manually create a pickle for the root to put in the storage.
                # The pickle must be in the special ZODB format.
                file = BytesIO()
                p = Pickler(file, _protocol)
                p.dump((root.__class__, None))
                p.dump(root.__getstate__())
                t = transaction.Transaction()
                t.description = 'initial database creation'
                temp_storage.tpc_begin(t)
                temp_storage.store(z64, None, file.getvalue(), '', t)
                temp_storage.tpc_vote(t)
                temp_storage.tpc_finish(t)
        finally:
            if IMVCCStorage.providedBy(temp_storage):
                temp_storage.release()

        # Multi-database setup.
        if databases is None:
            databases = {}
        self.databases = databases
        self.database_name = database_name
        if database_name in databases:
            raise ValueError("database_name %r already in databases" %
                             database_name)
        databases[database_name] = self
        self.xrefs = xrefs

        self.large_record_size = large_record_size
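
Usage sketch for this constructor: per the dispatch at the top, a string becomes a FileStorage path and None selects an in-memory MappingStorage.

import transaction
from ZODB import DB

db = DB(None)  # storage=None selects an in-memory MappingStorage
conn = db.open()
root = conn.root()
root['greeting'] = 'hello'
transaction.commit()
conn.close()
db.close()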