コード例 #1
0
    def test_committed_when_uncommitted(self):
        """committed() and open('c') must raise while changes are pending.

        Exercises three uncommitted states: a brand-new blob, a modified
        blob registered with a connection, and a savepointed (but still
        uncommitted) blob.  Only after transaction.commit() does access
        to the committed data succeed.
        """
        # An exception is raised if we call committed on a blob that has
        # uncommitted changes:
        blob = Blob()
        with self.assertRaisesRegex(BlobError, "Uncommitted changes"):
            blob.committed()

        # 'c' mode presumably opens the committed data file directly,
        # which is equally invalid while changes are pending.
        with self.assertRaisesRegex(BlobError, "Uncommitted changes"):
            blob.open('c')

        # A blob with data, added to the database but not yet committed,
        # still counts as having uncommitted changes.
        blob = self._make_blob()
        conn = self.database.open()
        root = conn.root()
        root['blob'] = blob
        with self.assertRaisesRegex(BlobError, "Uncommitted changes"):
            blob.committed()

        with self.assertRaisesRegex(BlobError, "Uncommitted changes"):
            blob.open('c')

        # A savepoint is not a commit: the blob must still refuse
        # committed access afterwards.
        transaction.savepoint()
        with self.assertRaisesRegex(BlobError, "Uncommitted changes"):
            blob.committed()
        with self.assertRaisesRegex(BlobError, "Uncommitted changes"):
            blob.open('c')

        # Only a real commit makes the committed data readable.
        transaction.commit()
        self._check_blob_contents(blob, self.DATA1, 'c')
コード例 #2
0
ファイル: emails.py プロジェクト: lslaz1/karl
 def __init__(self, path, ct, size):
     """Record attachment metadata and attach an empty blob for the payload."""
     # Function-level import, presumably to avoid an import cycle with
     # karl.utils -- TODO confirm.
     from karl.utils import get_random_string
     self.path = path
     self.ct = ct
     self.size = size
     self.blob = Blob()
     # Random 25-character code identifying this attachment.
     self.code = get_random_string(25)
コード例 #3
0
    def testRedo(self):
        """Undo then redo a blob change; both states must read back intact.

        Fixes over the original: the computed-but-unused ``serial``
        (built with the deprecated ``base64.encodestring``) is removed,
        and every blob file handle is closed via a ``with`` block instead
        of being leaked by chained ``open().write()`` calls.
        """
        base_storage = FileStorage(self.storagefile)
        blob_storage = BlobStorage(self.blob_dir, base_storage)
        database = DB(blob_storage)
        connection = database.open()
        root = connection.root()
        blob = Blob()

        # State 1: initial blob contents.
        transaction.begin()
        with blob.open('w') as f:
            f.write('this is state 1')
        root['blob'] = blob
        transaction.commit()

        # State 2: overwrite in a second transaction.
        transaction.begin()
        blob = root['blob']
        with blob.open('w') as f:
            f.write('this is state 2')
        transaction.commit()

        # Undo the second transaction -> back to state 1.
        database.undo(database.undoLog(0, 1)[0]['id'])
        transaction.commit()

        with blob.open('r') as f:
            self.assertEqual(f.read(), 'this is state 1')

        # Undo the undo (i.e. redo) -> state 2 again.
        database.undo(database.undoLog(0, 1)[0]['id'])
        transaction.commit()

        with blob.open('r') as f:
            self.assertEqual(f.read(), 'this is state 2')

        database.close()
コード例 #4
0
    def testUndoAfterConsumption(self):
        """Undoing past a consumeFile() restores the earlier blob state.

        Fix: the scratch files are written inside ``with`` blocks so the
        handles are closed (and data flushed to disk) before consumeFile
        runs, instead of leaking the handle returned by
        ``open(...).write(...)``.
        """
        base_storage = FileStorage(self.storagefile)
        blob_storage = BlobStorage(self.blob_dir, base_storage)
        database = DB(blob_storage)
        connection = database.open()
        root = connection.root()

        transaction.begin()
        with open('consume1', 'w') as f:
            f.write('this is state 1')
        blob = Blob()
        blob.consumeFile('consume1')
        root['blob'] = blob
        transaction.commit()

        transaction.begin()
        blob = root['blob']
        with open('consume2', 'w') as f:
            f.write('this is state 2')
        blob.consumeFile('consume2')
        transaction.commit()

        # Undo the consumption of consume2: the blob must hold state 1.
        database.undo(database.undoLog(0, 1)[0]['id'])
        transaction.commit()

        with blob.open('r') as f:
            self.assertEqual(f.read(), 'this is state 1')

        database.close()
コード例 #5
0
def saveFileToBlob(filepath):
    """Copy the file at *filepath* into a new ZODB Blob and return it.

    Improvements: the data is streamed in chunks with
    ``shutil.copyfileobj`` instead of being read entirely into memory,
    and both file handles are closed by context managers even when the
    copy fails.
    """
    import shutil

    blob = Blob()
    with open(filepath, 'rb') as src, blob.open('w') as dst:
        shutil.copyfileobj(src, dst)
    return blob
コード例 #6
0
    def _populate(self):
        """
        Put some revisions of a blob object in our database and on the
        filesystem.

        Side effects on self: ``tids`` (commit serial per revision,
        oldest first), ``times`` (tid_int - 1 per revision, presumably a
        moment just before each commit -- TODO confirm against callers),
        ``oid`` (the blob's oid) and ``fns`` (on-disk blob filename for
        each revision).
        """
        from ZODB.utils import u64 as bytes8_to_int64

        connection1 = self.database.open()
        root = connection1.root()

        tids = self.tids = []
        times = self.times = []
        blob = Blob()

        for i in range(self.BLOB_REVISION_COUNT):
            transaction.begin()
            with blob.open('w') as f:
                f.write(b'this is blob data ' + str(i).encode())
            # Only the first iteration needs to attach the blob to root.
            if 'blob' not in root:
                root['blob'] = blob
            transaction.commit()

            # Re-load object state so _p_serial reflects the commit above.
            blob._p_activate()
            tid = blob._p_serial
            tids.append(tid)
            tid_int = bytes8_to_int64(tid)

            times.append(tid_int - 1)

        blob._p_activate()

        self.oid = oid = root['blob']._p_oid
        fshelper = self.blob_storage.blobhelper.fshelper
        # One committed blob file per recorded revision tid.
        self.fns = [fshelper.getBlobFilename(oid, x) for x in tids]
        connection1.close()
コード例 #7
0
ファイル: generator.py プロジェクト: rnunez80/castle.cms
def create(html, css):
    """Render *html* styled by *css* to PDF via the princexml server.

    Returns a Blob holding the PDF bytes, or ``None`` (after logging)
    when no server URL is configured.  Any conversion error is logged
    with its traceback and re-raised.

    Fixes over the original: the bare ``except:`` is narrowed to
    ``except Exception`` so SystemExit/KeyboardInterrupt pass through
    untouched, the deprecated ``logger.warn`` alias is replaced by
    ``logger.warning``, and the blob file handle is closed even if the
    write fails.
    """
    try:
        registry = getUtility(IRegistry)
        prince_server_url = registry.get('castle.princexml_server_url',
                                         'http://localhost:6543/convert')
        if prince_server_url is None:
            logger.warning('error converting pdf')
            return
        logger.info('start converting pdf')
        xml = fromstring(html)
        # save styles
        resp = requests.post(prince_server_url,
                             data={
                                 'xml': tostring(xml),
                                 'css': json.dumps(css)
                             })
        data = resp.content
        blob = Blob()
        bfile = blob.open('w')
        try:
            bfile.write(data)
        finally:
            bfile.close()
        return blob
    except Exception:
        logger.info(traceback.format_exc())
        raise
コード例 #8
0
ファイル: evolve7.py プロジェクト: timgates42/substanced
def evolve(root, registry):
    """Substanced evolve step 7: re-derive every File's mimetype via magic.

    Walks all File objects in the objectmap extent, sniffs the first
    chunk of each blob's content and stores the detected mimetype.
    Files whose blob payload is missing (POSKeyError) are replaced with
    an empty blob and the generic 'application/octet-stream' type.

    Fixes over the original: the ``magic.Magic`` instance is built once
    instead of inside the chunk loop, and the blob file handle is closed
    via ``with`` instead of being leaked.
    """
    logger.info('Running substanced evolve step 7: reset all blob mimetypes '
                'to nominal USE_MAGIC value')
    if magic:
        # One sniffer for the whole run; construction is loop-invariant.
        sniffer = magic.Magic(mime=True)
        objectmap = find_objectmap(root)
        if objectmap is not None:
            oids = objectmap.get_extent(get_dotted_name(File))
            if oids is not None:
                for oid in oids:
                    f = objectmap.object_for(oid)
                    try:
                        if f.get_size():
                            blob = f.blob
                            with blob.open('r') as fp:
                                # Only the first chunk is needed to sniff
                                # the mimetype.
                                for chunk in chunks(fp):
                                    f.mimetype = sniffer.from_buffer(chunk)
                                    break
                    except POSKeyError:
                        logger.error(
                            'Missing blob for file %s, overwriting with '
                            'empty blob' % resource_path(f))
                        f.blob = Blob()
                        f.mimetype = 'application/octet-stream'
コード例 #9
0
    def testUndoAfterConsumption(self):
        """Consume two files in successive transactions, undo the second,
        and verify the blob reads back the first file's bytes."""
        database = DB(self._storage)
        connection = database.open()
        root = connection.root()

        transaction.begin()
        with open('consume1', 'wb') as fh:
            fh.write(b'this is state 1')
        blob = Blob()
        blob.consumeFile('consume1')
        root['blob'] = blob
        transaction.commit()

        transaction.begin()
        blob = root['blob']
        with open('consume2', 'wb') as fh:
            fh.write(b'this is state 2')
        blob.consumeFile('consume2')
        transaction.commit()

        database.undo(database.undoLog(0, 1)[0]['id'])
        transaction.commit()

        with blob.open('r') as fh:
            contents = fh.read()
        self.assertEqual(contents, b'this is state 1')

        database.close()
コード例 #10
0
    def __init__(self, stream=None, mimetype=None, title=u''):
        """Construct a File object.

        ``stream`` is a filelike object (must have a ``read`` method
        taking a size argument) or ``None``; when ``None``, the blob
        attached to this file starts out empty.

        ``title`` must be a string or Unicode object.

        ``mimetype`` is one of:

        - ``None`` -- the mimetype defaults to
          ``application/octet-stream``.

        - an explicit mimetype string such as ``image/gif``.

        - the constant :attr:`substanced.file.USE_MAGIC`, which derives
          the mimetype from the stream content (when ``stream`` is
          supplied) via the ``python-magic`` library.

          .. warning::

             On non-Linux systems, successful use of
             :attr:`substanced.file.USE_MAGIC` requires the installation
             of additional dependencies.  See :ref:`optional_dependencies`.
        """
        self.blob = Blob()
        self.mimetype = mimetype or 'application/octet-stream'
        self.title = title or u''
        if stream is not None:
            # Pass USE_MAGIC through as the upload hint; otherwise no hint.
            hint = USE_MAGIC if mimetype is USE_MAGIC else None
            self.upload(stream, mimetype_hint=hint)
コード例 #11
0
 def _deserialize(cls, data):
     """Rebuild a Blob from its serialized form (base64 text under 'data')."""
     raw = base64.b64decode(data['data'])
     blob = Blob()
     out = blob.open('w')
     out.write(raw)
     out.close()
     return blob
コード例 #12
0
 def __init__(self, **kwargs):
     """Initialize the base class, then mirror all keyword arguments
     (with filename/content_type defaults) onto attributes and attach
     an empty Blob.

     Fix: ``dict.items()`` replaces the Python-2-only ``iteritems()``;
     iteration behaviour is identical on Python 2.

     NOTE(review): the setdefault() calls run *after* the superclass
     constructor has already consumed kwargs, so the superclass never
     sees the defaults -- presumably intentional, but worth confirming.
     """
     super(NyBlobFile, self).__init__(**kwargs)
     kwargs.setdefault('filename', None)
     kwargs.setdefault('content_type', 'application/octet-stream')
     for key, value in kwargs.items():
         setattr(self, key, value)
     self._blob = Blob()
コード例 #13
0
ファイル: queue.py プロジェクト: py361/repoze.postoffice
 def __init__(self, message):
     """Wrap a stdlib email Message, persisting its flattened text in a Blob."""
     assert isinstance(message, StdlibMessage), "Not a message."
     self._v_message = message  # transient attribute
     blob = Blob()
     self._blob_file = blob
     # Serialize the message into the blob via the stdlib Generator.
     outfp = blob.open('w')
     Generator(outfp).flatten(message)
     outfp.close()
コード例 #14
0
ファイル: testZEO.py プロジェクト: bendavis78/zope
    def checkStoreBlob(self):
        """storeBlob() must consume the uploaded file into the blob layout.

        Drives a full two-phase commit by hand, then checks that the
        temporary blob file is gone while the storage's canonical blob
        filename exists for the committed revision.
        """
        from ZODB.utils import oid_repr, tid_repr
        from ZODB.blob import Blob, BLOB_SUFFIX
        from ZODB.tests.StorageTestBase import zodb_pickle, ZERO, \
             handle_serials
        import transaction

        somedata = 'a' * 10

        blob = Blob()
        bd_fh = blob.open('w')
        bd_fh.write(somedata)
        bd_fh.close()
        # Path of the uncommitted blob data file on disk.
        tfname = bd_fh.name
        oid = self._storage.new_oid()
        data = zodb_pickle(blob)
        self.assert_(os.path.exists(tfname))

        # Manual two-phase commit: begin, storeBlob, vote, finish;
        # abort on any failure so the storage is not left mid-commit.
        t = transaction.Transaction()
        try:
            self._storage.tpc_begin(t)
            r1 = self._storage.storeBlob(oid, ZERO, data, tfname, '', t)
            r2 = self._storage.tpc_vote(t)
            revid = handle_serials(oid, r1, r2)
            self._storage.tpc_finish(t)
        except:
            self._storage.tpc_abort(t)
            raise
        # The source file was consumed (moved away) ...
        self.assert_(not os.path.exists(tfname))
        # ... and the committed revision is readable at the storage's
        # canonical blob filename with the same contents.
        filename = self._storage.fshelper.getBlobFilename(oid, revid)
        self.assert_(os.path.exists(filename))
        self.assertEqual(somedata, open(filename).read())
コード例 #15
0
    def test_loadblob_tmpstore(self):
        """
        This is a test for assuring that the TmpStore's loadBlob implementation
        falls back correctly to loadBlob on the backend.
        """

        # First, let's setup a regular database and store a blob:

        blob_storage = self.blob_storage
        database = self.database
        connection = database.open()
        root = connection.root()
        root['blob'] = Blob()
        # Register the new blob explicitly so it has an oid before commit.
        connection.add(root['blob'])
        with root['blob'].open('w') as f:
            f.write(b'test')
        transaction.commit()
        # Remember the identifiers needed to load the blob directly.
        blob_oid = root['blob']._p_oid
        tid = connection._storage.lastTransaction()

        # Now we open a database with a TmpStore in front:

        from ZODB.Connection import TmpStore
        tmpstore = TmpStore(blob_storage)

        # We can access the blob correctly:
        # the TmpStore holds no data of its own here, so loadBlob must
        # delegate to the backend and return the same filename.
        self.assertEqual(tmpstore.loadBlob(blob_oid, tid),
                         blob_storage.loadBlob(blob_oid, tid))

        connection.close()
        blob_storage.close()
        tmpstore.close()
        database.close()
コード例 #16
0
    def testUndo(self):
        """After undoing the second write, the blob must read back state 1."""
        database = DB(self._storage)
        connection = database.open()
        root = connection.root()

        transaction.begin()
        blob = Blob()
        with blob.open('w') as fh:
            fh.write(b'this is state 1')
        root['blob'] = blob
        transaction.commit()

        transaction.begin()
        blob = root['blob']
        with blob.open('w') as fh:
            fh.write(b'this is state 2')
        transaction.commit()

        database.undo(database.undoLog(0, 1)[0]['id'])
        transaction.commit()

        with blob.open('r') as fh:
            self.assertEqual(fh.read(), b'this is state 1')

        database.close()
コード例 #17
0
ファイル: generator.py プロジェクト: cleanclothes/castle.cms
def create(html, css):
    """Convert *html* styled by *css* into a PDF blob via the princexml
    server; raise PDFGenerationError on a non-200 response, log and
    re-raise any other failure."""
    try:
        registry = getUtility(IRegistry)
        prince_server_url = registry.get(
            'castle.princexml_server_url', 'http://localhost:6543/convert')
        if prince_server_url is None:
            logger.warning(
                'error converting pdf, no princexmlserver defined')
            return
        logger.info('start converting pdf')
        # Parse up front so malformed HTML fails before any network I/O.
        xml = fromstring(html)
        payload = {'xml': tostring(xml), 'css': json.dumps(css)}
        resp = requests.post(prince_server_url, data=payload)
        if resp.status_code != 200:
            raise PDFGenerationError('status: {}, data: {}'.format(
                resp.status_code, resp.text))
        blob = Blob()
        bfile = blob.open('w')
        bfile.write(resp.content)
        bfile.close()
        return blob
    except Exception:
        logger.info(traceback.format_exc())
        raise
コード例 #18
0
    def test_packing_with_uncommitted_data_non_undoing(self):
        """
        This covers regression for bug #130459.

        When uncommitted data exists it formerly was written to the root of the
        blob_directory and confused our packing strategy. We now use a separate
        temporary directory that is ignored while packing.
        """

        blob_storage = self.blob_storage
        database = self.database
        connection = database.open()
        root = connection.root()
        root['blob'] = Blob()
        connection.add(root['blob'])
        # Write blob data but deliberately do NOT commit -- the bytes
        # stay in the uncommitted (temporary) area on disk.
        with root['blob'].open('w') as f:
            _ = f.write(b'test')

        # Packing must succeed despite the pending uncommitted blob data.
        blob_storage.pack(time.time(), referencesf)

        # Clean up:

        transaction.abort()
        connection.close()
        blob_storage.close()
        database.close()
コード例 #19
0
    def testDeepCopyCanInvalidate(self):
        """
        Tests regression for invalidation problems related to missing
        readers and writers values in cloned objects (see
        http://mail.zope.org/pipermail/zodb-dev/2008-August/012054.html)
        """
        import ZODB.MappingStorage
        database = DB(
            ZODB.blob.BlobStorage('blobs',
                                  ZODB.MappingStorage.MappingStorage()))
        connection = database.open()
        root = connection.root()
        transaction.begin()
        root['blob'] = Blob()
        transaction.commit()

        # Deep-copy the committed blob by pickling it (protocol 1) and
        # loading it back, producing a clone detached from any connection.
        stream = StringIO()
        p = Pickler(stream, 1)
        p.dump(root['blob'])
        u = Unpickler(stream)
        stream.seek(0)
        clone = u.load()
        # Invalidation must not blow up on the clone -- the regression
        # was missing readers/writers attributes on cloned blobs.
        clone._p_invalidate()

        # it should also be possible to open the cloned blob
        # (even though it won't contain the original data)
        clone.open()

        # tearDown
        database.close()
コード例 #20
0
 def __call__(self):
     """Write a small greeting into a fresh blob and return it wrapped
     in a BlobStreamIterator."""
     from ZODB.blob import Blob
     from plone.app.blob.iterators import BlobStreamIterator
     blob = Blob()
     with blob.open('w') as handle:
         handle.write('Hi, Blob!')
     return BlobStreamIterator(blob)
コード例 #21
0
ファイル: testblob.py プロジェクト: Cykooz/relstorage
    def testRedo(self):
        """Undo then redo; the blob must read back state 1, then state 2.

        Fix: every blob file is opened in a ``with`` block so the
        handles are closed, instead of being leaked by the chained
        ``open().write()`` / ``open().read()`` calls.
        """
        database = DB(self._storage)
        connection = database.open()
        root = connection.root()
        blob = Blob()

        transaction.begin()
        with blob.open('w') as f:
            f.write(b('this is state 1'))
        root['blob'] = blob
        transaction.commit()

        transaction.begin()
        blob = root['blob']
        with blob.open('w') as f:
            f.write(b('this is state 2'))
        transaction.commit()

        # Undo the second write -> state 1.
        database.undo(database.undoLog(0, 1)[0]['id'])
        transaction.commit()

        with blob.open('r') as f:
            self.assertEqual(f.read(), b('this is state 1'))

        # Undo the undo (redo) -> state 2.
        database.undo(database.undoLog(0, 1)[0]['id'])
        transaction.commit()

        with blob.open('r') as f:
            self.assertEqual(f.read(), b('this is state 2'))

        database.close()
コード例 #22
0
ファイル: testZEO.py プロジェクト: bendavis78/zope
    def checkLoadBlob(self):
        """A committed blob must load back read-only with the stored bytes."""
        from ZODB.blob import Blob
        from ZODB.tests.StorageTestBase import zodb_pickle, ZERO, \
             handle_serials
        import transaction

        version = ''  # NOTE(review): assigned but never used in this test
        somedata = 'a' * 10

        blob = Blob()
        bd_fh = blob.open('w')
        bd_fh.write(somedata)
        bd_fh.close()
        # Path of the uncommitted blob data file handed to storeBlob.
        tfname = bd_fh.name
        oid = self._storage.new_oid()
        data = zodb_pickle(blob)

        # Manual two-phase commit; abort on any failure so the storage
        # is left in a clean state.
        t = transaction.Transaction()
        try:
            self._storage.tpc_begin(t)
            r1 = self._storage.storeBlob(oid, ZERO, data, tfname, '', t)
            r2 = self._storage.tpc_vote(t)
            serial = handle_serials(oid, r1, r2)
            self._storage.tpc_finish(t)
        except:
            self._storage.tpc_abort(t)
            raise

        # loadBlob returns the committed file: same contents, and the
        # on-disk file must be readable but not writable.
        filename = self._storage.loadBlob(oid, serial)
        self.assertEquals(somedata, open(filename, 'rb').read())
        self.assert_(not (os.stat(filename).st_mode & stat.S_IWRITE))
        self.assert_((os.stat(filename).st_mode & stat.S_IREAD))
コード例 #23
0
    def test_merge_blobs_on_open(self):
        """Chunk rows inserted behind the cache's back must be merged on open.

        Writes a one-chunk blob, inserts two extra chunk rows directly
        via SQL, then reopens through a fresh blob cache directory and
        verifies the blob reads back as the concatenation of all three
        chunks -- with only one chunk row remaining afterwards.
        """
        from ZODB.DB import DB
        from ZODB.blob import Blob
        import transaction
        storage = self._closing(self.make_storage(
            blob_dir='blobs', shared_blob_dir=False))
        db = self._closing(DB(storage))
        conn = db.open()

        blob = Blob()
        base_chunk = b"This is my base blob."
        with blob.open('w') as f:
            f.write(base_chunk)

        conn.root().blob = blob
        transaction.commit()

        # Insert some extra chunks. Get them big to be sure we loop
        # properly
        second_chunk = b'second chunk' * 800
        cursor = conn._storage._store_connection.cursor
        cursor.execute("""
        INSERT INTO blob_chunk (zoid, chunk_num, tid, chunk)
        SELECT zoid, 1, tid, lo_from_bytea(0, %s)
        FROM blob_chunk WHERE chunk_num = 0;
        """, (second_chunk,))
        third_chunk = b'third chunk' * 900
        cursor.execute("""
        INSERT INTO blob_chunk (zoid, chunk_num, tid, chunk)
        SELECT zoid, 2, tid, lo_from_bytea(0, %s)
        FROM blob_chunk WHERE chunk_num = 0;
        """, (third_chunk,))

        # Sanity check: three chunk rows now exist for the blob.
        cursor.execute('SELECT COUNT(*) FROM blob_chunk')
        self.assertEqual(3, cursor.fetchone()[0])
        cursor.connection.commit()
        # Now open again and find everything put together.
        # But we need to use a new blob dir, because
        # we changed data behind its back.
        conn.close()
        db.close()

        storage = self._closing(self.make_storage(blob_dir='blobs2',
                                                  shared_blob_dir=False,
                                                  zap=False))
        db = self._closing(DB(storage))
        conn = db.open()

        blob = conn.root().blob
        with blob.open('r') as f:
            data = f.read()

        # Opening merged the chunks: only one chunk row remains ...
        cursor = conn._storage._load_connection.cursor
        cursor.execute('SELECT COUNT(*) FROM blob_chunk')
        self.assertEqual(1, cursor.fetchone()[0])

        # ... and the data is the concatenation of all three chunks.
        self.assertEqual(data, base_chunk + second_chunk + third_chunk)
        conn.close()
        db.close()
コード例 #24
0
ファイル: copy.py プロジェクト: kkdhanesh/NBADEMO
 def _copyBlob(self, translate):
     """Copy this context's blob data into the translated target object.

     Fix: both blob file handles are closed in ``finally`` blocks so
     they are not leaked when ``shutil.copyfileobj`` raises mid-copy.
     """
     target = translate(self.context)
     target._blob = Blob()
     fsrc = self.context._blob.open('r')
     try:
         fdst = target._blob.open('w')
         try:
             shutil.copyfileobj(fsrc, fdst)
         finally:
             fdst.close()
     finally:
         fsrc.close()
コード例 #25
0
def saveFileToBlob(filepath):
    """Copy the file at *filepath* into a new ZODB Blob and return it.

    Fix: the source file is opened in binary mode (``'rb'``); the
    previous text mode (``'r'``) corrupts binary payloads on Windows
    and can raise UnicodeDecodeError on Python 3.  Both handles are
    closed via context managers even if the copy fails.
    """
    blob = Blob()
    with open(filepath, 'rb') as src:
        with blob.open('w') as bfile:
            bfile.write(src.read())
    return blob
コード例 #26
0
    def __init__(self, data=None, compress=False):
        """Create the blob wrapper; optionally seed it with *data*."""
        # Remember the compression preference before any data is written.
        self._compress = compress
        self._blob = Blob()
        if data:
            self.set(data)
コード例 #27
0
 def testBlobbableOFSFileWithoutFileName(self):
     """An OFS File with no filename still feeds its payload into a blob,
     reporting an empty filename and the original mimetype."""
     obj = File('foo', 'Foo', getFile('plone.pdf'), 'application/pdf')
     blobbable = IBlobbable(obj)
     target = Blob()
     blobbable.feed(target)
     expected = getFile('plone.pdf').read()
     self.assertEqual(target.open('r').read(), expected)
     self.assertEqual(blobbable.filename(), '')
     self.assertEqual(blobbable.mimetype(), 'application/pdf')
コード例 #28
0
 def crop_factory(fieldname, direction='keep', **parameters):
     """Scale/crop the closed-over image data into a fresh blob; return
     the blob plus the resulting image format and dimensions."""
     blob = Blob()
     writer = blob.open('w')
     # scaleImage writes the scaled bytes directly into the open blob file.
     _, image_format, dimensions = scaleImage(data['data'],
                                              result=writer,
                                              **parameters)
     writer.close()
     return blob, image_format, dimensions
コード例 #29
0
ファイル: testing.py プロジェクト: nilbacardit26/plone_prova
 def __call__(self):
     """Build a small blob and wrap it in a BlobStreamIterator."""
     from ZODB.blob import Blob
     from plone.app.blob.iterators import BlobStreamIterator
     blob = Blob()
     writer = blob.open("w")
     writer.write("Hi, Blob!")
     writer.close()
     return BlobStreamIterator(blob)
コード例 #30
0
 def addfile(fname):
     """Return a new Blob loaded with the contents of *fname*.

     Fixes: the source file is opened in binary mode ('rb') so binary
     payloads survive intact, the source handle is closed via ``with``
     instead of being leaked, and ``print`` is called as a function so
     the code also parses on Python 3 (identical output for a single
     argument on Python 2).
     """
     myblob = Blob()
     b = myblob.open('w')
     with open(fname, 'rb') as src:
         b.write(src.read())
     # Report the on-disk name of the uncommitted blob file.
     print(b.name)
     b.close()
     return myblob