Example #1
    def db_layer():
        """"Create a DemoStorage that wrapps the original storage if `nodemo`
        is False.

        """
        base_db = Zope2.bobo_application._stuff[0]
        blob_temp = None

        demo_storage = DemoStorage(base=base_db._storage)
        if not IBlobStorage.providedBy(demo_storage):
            from ZODB.blob import BlobStorage
            from tempfile import mkdtemp
            blob_temp = mkdtemp()
            demo_storage = BlobStorage(blob_temp, demo_storage)

        # Remove the main database from the mount table; otherwise creating
        # the new DB below would raise an error.
        base_db.databases.pop(base_db.database_name, None)

        # new database with the new storage
        wrapper_db = ZODB.DB(storage=demo_storage,
                             database_name=base_db.database_name,
                             databases=base_db.databases)

        # monkey-patch the current bobo_application to use our new database
        patch_bobo_application(wrapper_db)

        def cleanup():
            patch_bobo_application(base_db)
            if blob_temp is not None:
                import shutil
                shutil.rmtree(blob_temp)
        return cleanup, wrapper_db
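
The wrapper works because DemoStorage records all changes in its own
layer, leaving the wrapped base storage untouched. A minimal,
self-contained sketch of just that mechanism, using only stock ZODB APIs:

    import transaction
    from ZODB import DB
    from ZODB.DemoStorage import DemoStorage
    from ZODB.MappingStorage import MappingStorage

    base = MappingStorage('base')
    demo = DemoStorage(base=base)   # writes land in the demo layer only
    db = DB(demo)
    conn = db.open()
    conn.root()['scratch'] = 'temporary value'
    transaction.commit()
    conn.close()
    db.close()                      # discarding the wrapper leaves `base` clean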
Example #2
    def db_layer():
        # create a DemoStorage that wraps the old storage
        base_db = Zope2.bobo_application._stuff[0]
        demo_storage = DemoStorage(base=base_db._storage)
        if not IBlobStorage.providedBy(demo_storage):
            from ZODB.blob import BlobStorage
            from tempfile import mkdtemp
            blob_temp = mkdtemp()
            demo_storage = BlobStorage(blob_temp, demo_storage)
        else:
            blob_temp = None

        # reconstruct the mount table
        database_name = base_db.database_name
        new_databases = dict(base_db.databases)
        del new_databases[database_name]

        # new database with the new storage
        wrapper_db = ZODB.DB(storage=demo_storage,
                             database_name=database_name,
                             databases=new_databases)

        # monkey-patch the current bobo_application to use our new database
        Zope2.bobo_application._stuff = (wrapper_db, 'Application', 'Zope-Version')

        def cleanup():
            Zope2.bobo_application._stuff = (base_db, 'Application', 'Zope-Version')
            if blob_temp is not None:
                import shutil
                shutil.rmtree(blob_temp)

        return cleanup, wrapper_db
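
A hypothetical use of either db_layer variant inside a test fixture; it
assumes a running Zope test environment, and cleanup() restores the
original database and removes any temporary blob directory:

    cleanup, db = db_layer()
    try:
        connection = db.open()
        # ... exercise application code against the demo-backed database ...
        connection.close()
    finally:
        cleanup()   # re-patches bobo_application, removes blob temp files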
Example #3
    def _store_objects(self, writer, transaction):
        for obj in writer:
            oid = obj._p_oid
            serial = getattr(obj, "_p_serial", z64)

            if ((serial == z64)
                    and ((self._savepoint_storage is None) or
                         (oid not in self._savepoint_storage.creating)
                         or self._savepoint_storage.creating[oid])):

                # obj is a new object

                # Because obj was added, it is now in _creating, so it
                # can be removed from _added.  If oid wasn't in
                # _added, then we are adding it implicitly.

                implicitly_adding = self._added.pop(oid, None) is None

                self._creating[oid] = implicitly_adding

            else:
                if (oid in self._invalidated
                        and not hasattr(obj, '_p_resolveConflict')):
                    raise ConflictError(object=obj)
                self._modified.append(oid)
            p = writer.serialize(obj)  # This calls __getstate__ of obj

            if isinstance(obj, Blob):
                if not IBlobStorage.providedBy(self._storage):
                    raise Unsupported("Storing Blobs in %s is not supported." %
                                      repr(self._storage))
                if obj.opened():
                    raise ValueError("Can't commit with opened blobs.")
                s = self._storage.storeBlob(oid, serial, p, obj._uncommitted(),
                                            self._version, transaction)
                # We invalidate the object here to ensure that the next
                # attribute access unghostifies it, which causes its blob
                # data to be reattached "cleanly".
                obj._p_invalidate()
            else:
                s = self._storage.store(oid, serial, p, self._version,
                                        transaction)
            self._cache.update_object_size_estimation(oid, len(p))
            obj._p_estimated_size = len(p)
            self._store_count += 1
            # Put the object in the cache before handling the
            # response, just in case the response contains the
            # serial number for a newly created object
            try:
                self._cache[oid] = obj
            except:
                # Dang, I bet it's wrapped:
                # TODO:  Deprecate, then remove, this.
                if hasattr(obj, 'aq_base'):
                    self._cache[oid] = obj.aq_base
                else:
                    raise

            self._handle_serial(s, oid)
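
The Blob branch above is why blob-capable storages matter throughout
these examples: committing a Blob through a connection whose storage does
not provide IBlobStorage raises Unsupported. A standalone sketch of the
happy path, using only stock ZODB APIs (the temporary blob directory is
illustrative):

    import tempfile
    import transaction
    from ZODB import DB
    from ZODB.MappingStorage import MappingStorage
    from ZODB.blob import Blob, BlobStorage

    blob_dir = tempfile.mkdtemp()    # throwaway blob directory
    db = DB(BlobStorage(blob_dir, MappingStorage()))
    conn = db.open()
    blob = Blob()
    with blob.open('w') as f:
        f.write(b'blob payload')
    conn.root()['blob'] = blob
    transaction.commit()   # a storage without IBlobStorage would raise Unsupported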
Example #4
def image_update_data(self, data, content_type=None, size=None):
    if isinstance(data, unicode):
        raise TypeError('Data can only be str or file-like.  '
                        'Unicode objects are expressly forbidden.')
    
    if size is None:
        size = len(data)
    self.size = size
    if data:
        if IBlobStorage.providedBy(self._p_jar.db().storage):
            self.data = ZODB.blob.Blob(data)
        else:
            self.data = data
    else:
        self.data = ''

    ct, width, height = getImageInfo(data)
    if ct:
        content_type = ct
    if width >= 0 and height >= 0:
        self.width = width
        self.height = height

    # Now we should have the correct content type, or still None
    if content_type is not None:
        self.content_type = content_type

    self.ZCacheable_invalidate()
    self.ZCacheable_set(None)
    self.http__refreshEtag()
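
The storage check in the middle of image_update_data is a reusable
pattern. A sketch of the same decision in isolation; pack_payload is a
hypothetical helper, not part of OFS.Image:

    import ZODB.blob
    from ZODB.interfaces import IBlobStorage

    def pack_payload(obj, data):
        # Assumes obj is already persisted, i.e. has a _p_jar connection.
        jar = obj._p_jar
        if jar is not None and IBlobStorage.providedBy(jar.db().storage):
            return ZODB.blob.Blob(data)   # stored as a separate blob file
        return data                       # falls back to in-record data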
Example #5
 def loadBlob(self, oid, serial):
     """Return the filename where the blob file can be found.
     """
     if not IBlobStorage.providedBy(self._storage):
         raise Unsupported(
             "Blobs are not supported by the underlying storage %r." %
             self._storage)
     filename = self._getCleanFilename(oid, serial)
     if not os.path.exists(filename):
         return self._storage.loadBlob(oid, serial)
     return filename
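
A hypothetical call site: loadBlob returns a filename rather than file
contents, so the caller opens the committed blob file itself (`storage`,
`oid`, and `serial` are assumed to exist):

    path = storage.loadBlob(oid, serial)
    with open(path, 'rb') as f:
        data = f.read()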
Example #6
    def __init__(self):
        """Prepares for a functional test case.
        """
        transaction.abort()

        storage = DemoStorage("Demo Storage")

        if not IBlobStorage.providedBy(storage):
            raise RuntimeError('DemoStorage does not provide IBlobStorage '
                               'on this ZODB version')

        self.db = DB(storage)
        self.connection = None
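
Whether this guard fires depends on the ZODB version: recent releases
ship a blob-aware DemoStorage, while older ones (the Plone 3.x era
targeted in the last example below) do not. A quick standalone probe:

    from ZODB.DemoStorage import DemoStorage
    from ZODB.interfaces import IBlobStorage

    # True on blob-aware ZODB releases, False on old ones.
    print(IBlobStorage.providedBy(DemoStorage()))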
Example #7
def file_update_data(self, data, content_type=None, size=None):
    if isinstance(data, unicode):
        raise TypeError('Data can only be str or file-like.  '
                        'Unicode objects are expressly forbidden.')

    if content_type is not None:
        self.content_type = content_type
    if size is None:
        size = len(data)
    self.size = size
    if data:
        if IBlobStorage.providedBy(self._p_jar.db().storage):
            self.data = ZODB.blob.Blob(data)
        else:
            self.data = data
    else:
        self.data = ''
    self.ZCacheable_invalidate()
    self.ZCacheable_set(None)
    self.http__refreshEtag()
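
Because file_update_data stores either a Blob or a plain string, readers
must handle both representations. A hypothetical companion accessor
(read_payload is not part of OFS.File):

    from ZODB.blob import Blob

    def read_payload(obj):
        data = obj.data
        if isinstance(data, Blob):
            with data.open('r') as f:   # committed blob data lives in a file
                return f.read()
        return data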
Example #8
    def exportFile(self, oid, f=None):
        if f is None:
            f = TemporaryFile(prefix="EXP")
        elif isinstance(f, six.string_types):
            f = open(f, 'w+b')
        f.write(b'ZEXP')
        oids = [oid]
        done_oids = {}
        load = self._storage.load
        supports_blobs = IBlobStorage.providedBy(self._storage)
        while oids:
            oid = oids.pop(0)
            if oid in done_oids:
                continue
            done_oids[oid] = True
            try:
                p, serial = load(oid)
            except:
                logger.debug("broken reference for oid %s",
                             repr(oid),
                             exc_info=True)
            else:
                referencesf(p, oids)
                f.writelines([oid, p64(len(p)), p])

                if supports_blobs:
                    if not isinstance(self._reader.getGhost(p), Blob):
                        continue  # not a blob

                    blobfilename = self._storage.loadBlob(oid, serial)
                    f.write(blob_begin_marker)
                    f.write(p64(os.stat(blobfilename).st_size))
                    blobdata = open(blobfilename, "rb")
                    cp(blobdata, f)
                    blobdata.close()

        f.write(export_end_marker)
        return f
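
For orientation, a hypothetical round trip through the export/import API
this method backs; `connection` and `obj` are assumed to exist, and
importFile returns the imported copy:

    import transaction

    exported = connection.exportFile(obj._p_oid)   # seekable temp file
    exported.seek(0)
    copy = connection.importFile(exported)
    transaction.commit()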
Example #9
    def _store_objects(self, writer, transaction):
        for obj in writer:
            oid = obj._p_oid
            serial = getattr(obj, "_p_serial", z64)

            if ((serial == z64)
                    and ((self._savepoint_storage is None) or
                         (oid not in self._savepoint_storage.creating)
                         or self._savepoint_storage.creating[oid])):

                # obj is a new object

                # Because obj was added, it is now in _creating, so it
                # can be removed from _added.  If oid wasn't in
                # _added, then we are adding it implicitly.

                implicitly_adding = self._added.pop(oid, None) is None

                self._creating[oid] = implicitly_adding

            else:
                self._modified.append(oid)

            p = writer.serialize(obj)  # This calls __getstate__ of obj
            if len(p) >= self.large_record_size:
                warnings.warn(large_object_message % (obj.__class__, len(p)))

            if isinstance(obj, Blob):
                if not IBlobStorage.providedBy(self._storage):
                    raise Unsupported("Storing Blobs in %s is not supported." %
                                      repr(self._storage))
                if obj.opened():
                    raise ValueError("Can't commit with opened blobs.")
                blobfilename = obj._uncommitted()
                if blobfilename is None:
                    assert serial is not None  # See _uncommitted
                    self._modified.pop()  # not modified
                    continue
                s = self._storage.storeBlob(oid, serial, p, blobfilename, '',
                                            transaction)
                # We invalidate the object here to ensure that the next
                # attribute access unghostifies it, which causes its blob
                # data to be reattached "cleanly".
                obj._p_invalidate()
            else:
                s = self._storage.store(oid, serial, p, '', transaction)

            self._store_count += 1
            # Put the object in the cache before handling the
            # response, just in case the response contains the
            # serial number for a newly created object
            try:
                self._cache[oid] = obj
            except:
                # Dang, I bet it's wrapped:
                # TODO:  Deprecate, then remove, this.
                if hasattr(obj, 'aq_base'):
                    self._cache[oid] = obj.aq_base
                else:
                    raise

            self._cache.update_object_size_estimation(oid, len(p))
            obj._p_estimated_size = len(p)

            # if we write an object, we don't want to check if it was read
            # while current.  This is a convenient choke point to do this.
            self._readCurrent.pop(oid, None)
            if s:
                # savepoint
                obj._p_changed = 0  # transition from changed to up-to-date
                obj._p_serial = s
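
A detail unique to this variant is the large-record warning. The
threshold is a DB constructor argument, so the path can be exercised on
its own; the 4 KiB threshold below is arbitrary:

    import transaction
    import warnings
    from ZODB import DB
    from ZODB.MappingStorage import MappingStorage

    db = DB(MappingStorage(), large_record_size=1 << 12)   # 4 KiB threshold
    conn = db.open()
    conn.root()['big'] = 'x' * (1 << 13)    # pickles to more than 4 KiB
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter('always')
        transaction.commit()
    print(len(caught))   # expect at least one large-record warning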
Example #10
def _read_data(self, file):
    import transaction

    n = 1 << 16

    # Make sure we have an _p_jar, even if we are a new object, by
    # doing a sub-transaction commit.
    transaction.savepoint(optimistic=True)

    if isinstance(file, str):
        size = len(file)
        if size < n or IBlobStorage.providedBy(self._p_jar.db().storage):
            # Small strings, and any blob-backed storage, need no chunking.
            return file, size
        # Big string: cut it into smaller chunks
        file = StringIO(file)

    if isinstance(file, FileUpload) and not file:
        raise ValueError('File not specified')

    if hasattr(file, '__class__') and file.__class__ is Pdata:
        size = len(file)
        return file, size

    seek = file.seek
    read = file.read

    seek(0, 2)
    size = end = file.tell()

    if IBlobStorage.providedBy(self._p_jar.db().storage):
        seek(0)
        return read(size), size

    if size <= 2 * n:
        seek(0)
        if size < n:
            return read(size), size
        return Pdata(read(size)), size

    if self._p_jar is None:
        # Ugh
        seek(0)
        return Pdata(read(size)), size

    # Now we're going to build a linked list from back
    # to front to minimize the number of database updates
    # and to allow us to get things out of memory as soon as
    # possible.
    next = None
    while end > 0:
        pos = end - n
        if pos < n:
            pos = 0  # we always want at least n bytes
        seek(pos)

        # Create the object and assign it a next pointer
        # in the same transaction, so that there is only
        # a single database update for it.
        data = Pdata(read(end-pos))
        self._p_jar.add(data)
        data.next = next

        # Save the object so that we can release its memory.
        transaction.savepoint(optimistic=True)
        data._p_deactivate()
        # The object should be assigned an oid and be a ghost.
        assert data._p_oid is not None
        assert data._p_state == -1

        next = data
        end = pos

    return next, size
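
The back-to-front loop is the heart of _read_data. A plain-Python sketch
of the same chunking scheme without the persistence machinery (Node and
chunk are illustrative names):

    CHUNK = 1 << 16

    class Node:
        def __init__(self, data, next=None):
            self.data, self.next = data, next

    def chunk(payload, n=CHUNK):
        # Build the linked list from the tail forward, as _read_data does.
        head, end = None, len(payload)
        while end > 0:
            pos = end - n
            if pos < n:
                pos = 0   # the first chunk holds at least n bytes
            head = Node(payload[pos:end], head)
            end = pos
        return head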
Example #11
def resave_to_blob(self):
    """Put the current data in a blob if the storage supports blobs."""
    data = self.data
    if (data and not isinstance(data, ZODB.blob.Blob)
            and IBlobStorage.providedBy(self._p_jar.db().storage)):
        self.data = ZODB.blob.Blob(str(data))
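
A hypothetical migration pass built on resave_to_blob, walking a Zope
folder and committing the result; `folder` and its objectValues API are
assumptions here:

    import transaction

    for ob in folder.objectValues():
        if hasattr(ob, 'resave_to_blob'):
            ob.resave_to_blob()   # no-op unless data is a legacy string
    transaction.commit()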
Example #12

# Imports assumed by this excerpt (the original elides them):
from atexit import register
from os.path import dirname
from shutil import rmtree
from tempfile import mkdtemp

from ZODB.DemoStorage import DemoStorage
from ZODB.interfaces import IBlobStorage

cache = {}


def blobdirectory():
    blobdir = cache.get("blobdir")
    if blobdir is None:
        blobdir = cache["blobdir"] = mkdtemp()
        register(rmtree, blobdir)
    return blobdir


# point the test setup to our private `custom_zodb.py`, but only if we
# have to, i.e. for plone 3.x
if not IBlobStorage.providedBy(DemoStorage()):
    import Testing

    Testing  # make pyflakes happy...
    import App.config

    cfg = App.config.getConfiguration()
    cfg.testinghome = dirname(__file__)


# ZopeLite uses DemoStorage directly, so it needs monkey-patching... :(
from Testing.ZopeTestCase import ZopeLite
from ZODB.blob import BlobStorage


def sandbox(base=None):
    # NOTE: the original snippet is truncated at this point; the body below
    # is a hedged reconstruction, not the original implementation. Following
    # the comment above, the idea is to hand ZopeLite a blob-capable storage
    # by wrapping the demo storage in a BlobStorage rooted at the shared
    # blob directory.
    if base is None:
        base = DemoStorage()
    return BlobStorage(blobdirectory(), base)