Example #1
    def setUp(self):
        super(TestBlobMixin, self).setUp()
        # Module-level setUp() from ZODB.tests.util.
        setUp(self)
        self._timer = MonotonicallyIncreasingTimeLayerMixin()
        self._timer.testSetUp()
        try:
            self.blob_storage = self.create_storage(
                **self.DEFAULT_BLOB_STORAGE_KWARGS)
        except:  # pragma: no cover
            # If setUp() raises an exception, tearDown() is never called.
            # That's bad: ZODB.tests.util.setUp() changes directories and
            # monkey-patches the contents of the stdlib tempfile module.
            tearDown(self)
            raise
        self.database = DB(self.blob_storage)
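A side note on the try/except above: the same cleanup guarantee can be had from unittest's addCleanup(), whose callbacks run even when setUp() fails partway through. A minimal sketch of that alternative, assuming tearDown(self) is safe to run unconditionally:

    def setUp(self):
        super(TestBlobMixin, self).setUp()
        setUp(self)
        # Cleanups run even if setUp() raises later, unlike tearDown(),
        # so no try/except is needed around create_storage().
        self.addCleanup(tearDown, self)
        self.blob_storage = self.create_storage(
            **self.DEFAULT_BLOB_STORAGE_KWARGS)
        self.database = DB(self.blob_storage)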
Example #2
    def checkConcurrentUpdates2Storages_emulated(self):
        self._storage = storage1 = self.openClientStorage()
        storage2 = self.openClientStorage()
        db1 = DB(storage1)
        db2 = DB(storage2)

        cn = db1.open()
        tree = cn.root()["tree"] = OOBTree()
        transaction.commit()
        # DM: allow time for invalidations to come in and process them
        time.sleep(0.1)

        # Run two threads that update the BTree
        t1 = StressTask(db1, 1, 1,)
        t2 = StressTask(db2, 2, 2,)
        _runTasks(100, t1, t2)

        cn.sync()
        self._check_tree(cn, tree)
        self._check_threads(tree, t1, t2)

        cn.close()
        db1.close()
        db2.close()
Example #3
    def _populate_root_and_mapping(self):
        tx1 = transaction.TransactionManager()
        storage1 = self._storage
        db1 = self._closing(DB(storage1))
        c1 = db1.open(tx1)
        root = c1.root()
        root.myobj1 = mapping = PersistentMapping()
        root.myobj = 1
        tx1.commit()
        c1._storage._cache.clear(load_persistent=False)

        c1._storage.poll_invalidations()
        root.myobj = 2
        tx1.commit()
        c1._storage._cache.clear(load_persistent=False)

        c1._storage.poll_invalidations()
        root.myobj = 3
        tx1.commit()
        root_tid = self.assert_oid_known(ROOT_OID, c1)
        c1._storage._cache.clear(load_persistent=False)

        # Now, mutate an object that's not the root
        # so that we get a new transaction after the root was
        # modified. This transaction will be included in
        # a persistent cache.
        c1._storage.poll_invalidations()
        root.myobj1.key = PersistentMapping()
        mapping_oid = mapping._p_oid
        mapping_oid_int = bytes8_to_int64(mapping_oid)
        tx1.commit()
        mapping_tid = self.assert_oid_known(mapping_oid_int, c1)

        self.assert_checkpoints(c1, (root_tid, root_tid))
        self.assert_oid_current(mapping_oid_int, c1)

        # the root is not in a delta
        self.assert_oid_not_known(ROOT_OID, c1)
        # Nor is it in the cache, because the Connection's
        # object cache still had the root and we were never
        # asked.
        self.assert_oid_not_cached(ROOT_OID, c1)
        # So let's get it into the cache with its current TID.
        c1._storage.load(z64)
        self.assert_cached_exact(ROOT_OID, root_tid, c1)

        c1.close()
        return root_tid, mapping_tid, db1
Example #4
    def checkBTreesLengthStress(self):
        # BTrees.Length objects are unusual Persistent objects: they
        # set _p_independent and they frequently invoke conflict
        # resolution. Run a stress test on them.
        updates_per_thread = 50
        thread_count = 4

        from BTrees.Length import Length
        db = DB(self._storage)
        try:
            c = db.open()
            try:
                c.root()['length'] = Length()
                transaction.commit()
            finally:
                c.close()

            def updater():
                thread_db = DB(self._storage)
                for i in range(updates_per_thread):
                    thread_c = thread_db.open()
                    try:
                        thread_c.root()['length'].change(1)
                        time.sleep(random.random() * 0.05)
                        transaction.commit()
                    finally:
                        thread_c.close()

            import threading
            threads = []
            for i in range(thread_count):
                t = threading.Thread(target=updater)
                threads.append(t)
            for t in threads:
                t.start()
            for t in threads:
                t.join(120)

            c = db.open()
            try:
                self.assertEqual(c.root()['length'](),
                                 updates_per_thread * thread_count)
            finally:
                transaction.abort()
                c.close()

        finally:
            db.close()
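The stress test above works because BTrees.Length merges concurrent increments instead of raising ConflictError. A minimal sketch of that merge rule (resolve_length_conflict is an illustrative stand-in, not the actual Length._p_resolveConflict method):

    def resolve_length_conflict(old_state, committed_state, new_state):
        # Treat the competing writes as commutative increments: keep the
        # committed value and re-apply this transaction's own delta.
        return committed_state + (new_state - old_state)

    # Both transactions started from 10; one committed 13 (+3), ours wrote 15 (+5).
    assert resolve_length_conflict(10, 13, 15) == 18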
Example #5
    def checkBackwardTimeTravelWithoutRevertWhenStale(self):
        # If revert_when_stale is false (the default), when the database
        # connection is stale (such as through failover to an
        # asynchronous slave that is not fully up to date), the poller
        # should notice that backward time travel has occurred and
        # raise a ReadConflictError.
        self._storage = self.make_storage(revert_when_stale=False)

        import os
        import shutil
        import tempfile
        from ZODB.FileStorage import FileStorage
        db = DB(self._storage)
        try:
            c = db.open()
            r = c.root()
            r['alpha'] = PersistentMapping()
            transaction.commit()

            # To simulate failover to an out of date async slave, take
            # a snapshot of the database at this point, change some
            # object, then restore the database to its earlier state.

            d = tempfile.mkdtemp()
            try:
                fs = FileStorage(os.path.join(d, 'Data.fs'))
                fs.copyTransactionsFrom(c._storage)

                r['beta'] = PersistentMapping()
                transaction.commit()
                self.assertTrue('beta' in r)

                c._storage.zap_all(reset_oid=False)
                c._storage.copyTransactionsFrom(fs)

                fs.close()
            finally:
                shutil.rmtree(d)

            # Sync, which will call poll_invalidations().
            c.sync()

            # Try to load an object, which should cause ReadConflictError.
            r._p_deactivate()
            self.assertRaises(ReadConflictError, lambda: r['beta'])

        finally:
            db.close()
Example #6
def main():
    logging.basicConfig(
        stream=sys.stderr,
        level=logging.DEBUG,
        format='%(asctime)s [%(name)s] %(levelname)s %(message)s')
    log.info("Opening")
    adapter = PostgreSQLAdapter()
    storage = RelStorage(adapter)
    db = DB(storage)
    log.info("Filling")
    fill_db(db)
    log.info("Packing")
    start = time.time()
    db.pack()
    end = time.time()
    log.info("Packed in %0.3f seconds", end - start)
Example #7
def main(argv=sys.argv):
    if len(argv) != 2:
        usage(argv)
    config_uri = argv[1]
    setup_logging(config_uri)
    settings = get_appsettings(config_uri)
    #engine = engine_from_config(settings, 'sqlalchemy.')
    #DBSession.configure(bind=engine)
    #Base.metadata.create_all(engine)
    storage = FileStorage(settings['zodbconn.file'])
    db = DB(storage)
    conn = db.open()
    zodb_root = conn.root()
    with transaction.manager:
        populateDB(zodb_root, settings)
        transaction.commit()
Example #8
    def testUndoWithoutPreviousVersion(self):
        base_storage = FileStorage(self.storagefile)
        blob_storage = BlobStorage(self.blob_dir, base_storage)
        database = DB(blob_storage)
        connection = database.open()
        root = connection.root()
        transaction.begin()
        root['blob'] = Blob()
        transaction.commit()

        database.undo(database.undoLog(0, 1)[0]['id'])
        transaction.commit()

        # the blob entry should no longer exist
        self.assertRaises(KeyError, root.__getitem__, 'blob')
        database.close()
Example #9
    def checkHistoricalConnection(self):
        import datetime
        import persistent
        import ZODB.POSException
        db = DB(self._storage)
        conn = db.open()
        root = conn.root()

        root['first'] = persistent.mapping.PersistentMapping(count=0)
        transaction.commit()

        time.sleep(.02)
        now = datetime.datetime.utcnow()
        time.sleep(.02)

        root['second'] = persistent.mapping.PersistentMapping()
        root['first']['count'] += 1
        transaction.commit()

        transaction1 = transaction.TransactionManager()

        historical_conn = db.open(transaction_manager=transaction1, at=now)

        eq = self.assertEqual

        # regular connection sees present:

        eq(sorted(conn.root().keys()), ['first', 'second'])
        eq(conn.root()['first']['count'], 1)

        # historical connection sees past:

        eq(sorted(historical_conn.root().keys()), ['first'])
        eq(historical_conn.root()['first']['count'], 0)

        # Can't change history:

        historical_conn.root()['first']['count'] += 1
        eq(historical_conn.root()['first']['count'], 1)
        self.assertRaises(ZODB.POSException.ReadOnlyHistoryError,
                          transaction1.commit)
        transaction1.abort()
        eq(historical_conn.root()['first']['count'], 0)

        historical_conn.close()
        conn.close()
        db.close()
Example #10
    def checkCorruptionInPack(self):
        # This sets up a corrupt .fs file, with a redundant transaction
        # length mismatch.  The implementation of pack in many releases of
        # ZODB blew up if the .fs file had such damage:  it detected the
        # damage, but the code to raise CorruptedError referenced an undefined
        # global.
        import time

        from ZODB.DB import DB
        from ZODB.utils import U64, p64
        from ZODB.FileStorage.format import CorruptedError

        db = DB(self._storage)
        conn = db.open()
        conn.root()['xyz'] = 1
        transaction.commit()

        # Ensure it's all on disk.
        db.close()
        self._storage.close()

        # Reopen before damaging.
        self.open()

        # Open .fs directly, and damage content.
        f = open('FileStorageTests.fs', 'r+b')
        f.seek(0, 2)
        pos2 = f.tell() - 8
        f.seek(pos2)
        tlen2 = U64(f.read(8))  # length-8 of the last transaction
        pos1 = pos2 - tlen2 + 8 # skip over the tid at the start
        f.seek(pos1)
        tlen1 = U64(f.read(8))  # should be redundant length-8
        self.assertEqual(tlen1, tlen2)  # verify that it is redundant

        # Now damage the second copy.
        f.seek(pos2)
        f.write(p64(tlen2 - 1))
        f.close()

        # Try to pack.  This used to yield
        #     NameError: global name 's' is not defined
        try:
            self._storage.pack(time.time(), None)
        except CorruptedError as detail:
            self.assertTrue("redundant transaction length does not match "
                            "initial transaction length" in str(detail))
Example #11
    def checkIsolationLevels(self):
        def assert_storage(storage):
            load_cur = storage._load_connection.cursor
            store_cur = storage._store_connection.cursor
            version_detector = storage._adapter.version_detector
            if not version_detector.supports_transaction_isolation(load_cur):
                raise unittest.SkipTest(
                    "Needs MySQL better than %s" %
                    (version_detector.get_version(load_cur)))

            for cur, ex_iso, ex_ro, ex_timeout in (
                    # Timeout for load is mysql default.
                [load_cur, 'REPEATABLE-READ', True, 50],
                [
                    store_cur, 'READ-COMMITTED', False,
                    self.DEFAULT_COMMIT_LOCK_TIMEOUT
                ],
            ):
                cur.execute("""
                SELECT @@transaction_isolation,
                       @@transaction_read_only,
                       @@innodb_lock_wait_timeout
                """)
                row, = cur.fetchall()
                iso, ro, timeout = row
                __traceback_info__ = row
                iso = iso.decode('ascii') if not isinstance(iso, str) else iso
                self.assertEqual(iso, ex_iso)
                self.assertEqual(ro, ex_ro)
                self.assertEqual(timeout, ex_timeout)

        # By default
        assert_storage(self._storage)

        # In a new instance, and after we do a transaction with it.
        from ZODB.DB import DB
        import transaction

        db = self._closing(DB(self._storage))
        conn = self._closing(db.open())
        assert_storage(conn._storage)

        conn.root()['obj'] = 1
        transaction.commit()

        assert_storage(conn._storage)
Example #12
def ZODBDatabaseConfigurationFactory(key, dbconfig):
    config = dbconfig.get('configuration', {})
    fs = ZODB.FileStorage.FileStorage(dbconfig['path'])
    db = DB(fs)
    try:
        rootobj = db.open().root()
        if not IDatabase.providedBy(rootobj):
            alsoProvides(rootobj, IDatabase)
        transaction.commit()
        rootobj = None
    except:
        pass
    finally:
        db.close()
    # Set request aware database for app
    db = RequestAwareDB(dbconfig['path'], **config)
    return Database(key, db)
Example #13
    def __store_two_for_read_current_error(self):
        db = self._closing(DB(self._storage))
        conn = db.open()
        root = conn.root()
        root['object1'] = MinPO('object1')
        root['object2'] = MinPO('object2')
        transaction.commit()

        obj1_oid = root['object1']._p_oid
        obj2_oid = root['object2']._p_oid
        obj1_tid = root['object1']._p_serial
        assert obj1_tid == root['object2']._p_serial

        conn.close()
        # We can't close the DB, that will close the storage that we
        # still need.
        return obj1_oid, obj2_oid, obj1_tid, db
Example #14
    def test_do_not_depend_on_cwd(self):
        bs = self.blob_storage
        here = os.getcwd()
        os.mkdir('evil')
        os.chdir('evil')
        db = DB(bs)
        conn = db.open()
        conn.root()['blob'] = ZODB.blob.Blob()
        with conn.root()['blob'].open('w') as f:
            f.write(b'data')
        transaction.commit()
        os.chdir(here)
        with conn.root()['blob'].open() as f:
            data = f.read()
        self.assertEqual(data, b'data')

        bs.close()
Example #15
    def checkDoubleCommitter(self):
        # Verify we can store an object that gets committed twice in
        # a single transaction.
        db = DB(self._storage)
        try:
            conn = db.open()
            try:
                conn.root()['dc'] = DoubleCommitter()
                transaction.commit()
                conn2 = db.open()
                self.assertEqual(conn2.root()['dc'].new_attribute, 1)
                conn2.close()
            finally:
                transaction.abort()
                conn.close()
        finally:
            db.close()
Example #16
    def checkHistoryWithExtension(self):
        # Verify the history method works with transactions that have
        # extended info.
        db = DB(self._storage)
        try:
            conn = db.open()
            try:
                conn.root()['pi'] = 3.14
                transaction.get().setExtendedInfo("digits", 3)
                transaction.commit()
                history = self._storage.history(conn.root()._p_oid)
                self.assertEqual(len(history), 1)
                if self.keep_history:
                    self.assertEqual(history[0]['digits'], 3)
            finally:
                conn.close()
        finally:
            db.close()
Example #17
    def __init__(self, filename, appname, **kw):
        """ ``filename`` is a filename to the FileStorage storage,
        ``appname`` is a key name in the root of the FileStorage in
        which to store the catalog, and ``**kw`` is passed as extra
        keyword arguments to :class:`ZODB.DB.DB` when creating a
        database.  Note that when we create a :class:`ZODB.DB.DB`
        instance, if a ``cache_size`` is not passed in ``**kw``, we
        override the default ``cache_size`` value with ``50000`` in
        order to provide a more realistic cache size for modern apps."""
        cache_size = kw.get('cache_size')
        if cache_size is None:
            kw['cache_size'] = 50000

        from ZODB.FileStorage.FileStorage import FileStorage
        from ZODB.DB import DB
        f = FileStorage(filename)
        self.db = DB(f, **kw)
        self.appname = appname
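The cache_size defaulting described in the docstring is easy to exercise on its own. A minimal standalone sketch of the same pattern (make_db is a hypothetical helper, not part of the class above; only ZODB is assumed):

    from ZODB.DB import DB
    from ZODB.MappingStorage import MappingStorage

    def make_db(storage, **kw):
        # Mirror the docstring: default cache_size to 50000 unless the
        # caller supplied a value of their own.
        if kw.get('cache_size') is None:
            kw['cache_size'] = 50000
        return DB(storage, **kw)

    db = make_db(MappingStorage())                     # cache_size == 50000
    db2 = make_db(MappingStorage(), cache_size=10000)  # caller's value wins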
Example #18
def createDatabase():
    # XXX We have to import and init products in order for PluginIndexes to
    # be registered.
    OFS.Application.import_products()

    # Create a DemoStorage and put an Application in it
    db = DB(DemoStorage())
    conn = db.open()
    root = conn.root()
    app = OFS.Application.Application()
    root['Application'] = app
    transaction.commit()

    # Init products
    #OFS.Application.initialize(app)
    OFS.Application.install_products(app)  # XXX: this is still icky

    return app
Example #19
    def createTestData(cls):
        storage = FileStorage(cls.data_fs)
        db = DB(storage)
        # Create root folder and all that jazz
        bootStrapSubscriber(DatabaseOpened(db))
        connection = db.open()
        root = connection.root()
        root_folder = root[ZopePublication.root_name]
        transaction.get().note(u"setUp creating root folder")
        transaction.commit()
        # This is not a great way to set up test fixtures, but it'll do
        # for now.
        cls.createTestDataForBrowsing(root_folder)
        cls.createTestDataForRollbacking(root_folder)
        cls.createTestDataForRollbackCanBeCancelled(root_folder)
        cls.createTestDataForImplementsOnly(root_folder)
        cls.createTestDataForTruncation(root_folder)
        connection.close()
        db.close()
Example #20
    def testUndo(self):
        database = DB(self._storage)
        connection = database.open()
        root = connection.root()
        transaction.begin()
        blob = Blob()
        blob.open('w').write('this is state 1')
        root['blob'] = blob
        transaction.commit()

        transaction.begin()
        blob = root['blob']
        blob.open('w').write('this is state 2')
        transaction.commit()

        database.undo(database.undoLog(0, 1)[0]['id'])
        transaction.commit()
        self.assertEqual(blob.open('r').read(), 'this is state 1')

        database.close()
Example #21
    def checkCrossConnectionInvalidation(self):
        # Verify connections see updated state at txn boundaries.
        # This will fail if the Connection doesn't poll for changes.
        db = DB(self._storage)
        try:
            c1 = db.open(transaction.TransactionManager())
            r1 = c1.root()
            r1['myobj'] = 'yes'
            c2 = db.open(transaction.TransactionManager())
            r2 = c2.root()
            self.assertTrue('myobj' not in r2)

            c1.transaction_manager.commit()
            self.assertTrue('myobj' not in r2)

            c2.sync()
            self.assertTrue('myobj' in r2)
            self.assertTrue(r2['myobj'] == 'yes')
        finally:
            db.close()
Example #22
    def __store_two_for_read_current_error(self, release_extra_storage=False):
        db = self._closing(DB(self._storage, pool_size=1))
        conn = db.open()
        root = conn.root()
        root['object1'] = MinPO('object1')
        root['object2'] = MinPO('object2')
        transaction.commit()

        obj1_oid = root['object1']._p_oid
        obj2_oid = root['object2']._p_oid
        obj1_tid = root['object1']._p_serial
        assert obj1_tid == root['object2']._p_serial

        conn.close()
        # We can't close the DB, that will close the storage that we
        # still need. But we can release its storage, since we'll never use
        # this again.
        if release_extra_storage:
            conn._normal_storage.release()
        return obj1_oid, obj2_oid, obj1_tid, db
Example #23
    def checkUseCache(self):
        # Store an object, cache it, then retrieve it from the cache
        self._storage = self.make_storage(
            cache_servers='x:1 y:2',
            cache_module_name=fakecache.__name__,
            cache_prefix='zzz',
        )

        fakecache.data.clear()
        db = DB(self._storage)
        try:
            c1 = db.open()
            self.assertEqual(
                c1._storage._cache.clients_global_first[0].servers,
                ['x:1', 'y:2'])
            r1 = c1.root()
            # The root state and checkpoints should now be cached.
            # A commit count *might* be cached depending on the ZODB version.
            self.assertTrue('zzz:checkpoints' in fakecache.data)
            self.assertEqual(
                sorted(fakecache.data.keys())[-1][:10], 'zzz:state:')
            r1['alpha'] = PersistentMapping()
            transaction.commit()
            self.assertEqual(len(fakecache.data.keys()), 5)

            oid = r1['alpha']._p_oid
            got, serial = c1._storage.load(oid, '')
            # another state should now be cached
            self.assertEqual(len(fakecache.data.keys()), 5)

            # make a change
            r1['beta'] = 0
            transaction.commit()
            self.assertEqual(len(fakecache.data.keys()), 6)

            got, serial = c1._storage.load(oid, '')

            # try to load an object that doesn't exist
            self.assertRaises(KeyError, c1._storage.load, 'bad.oid.', '')
        finally:
            db.close()
Example #24
    def checkNonASCIITransactionMetadata(self):
        # Verify the database stores and retrieves non-ASCII text
        # in transaction metadata.
        ugly_string = ''.join(chr(c) for c in range(256))

        db = DB(self._storage)
        try:
            c1 = db.open()
            r1 = c1.root()
            r1['alpha'] = 1
            transaction.get().setUser(ugly_string)
            transaction.commit()
            r1['alpha'] = 2
            transaction.get().note(ugly_string)
            transaction.commit()

            info = self._storage.undoInfo()
            self.assertEqual(info[0]['description'], ugly_string)
            self.assertEqual(info[1]['user_name'], '/ ' + ugly_string)
        finally:
            db.close()
Example #25
    def testSimpleBlobRecovery(self):
        self.assertTrue(
            ZODB.interfaces.IBlobStorageRestoreable.providedBy(self._storage))
        db = DB(self._storage)
        conn = db.open()
        conn.root()[1] = ZODB.blob.Blob()
        transaction.commit()
        conn.root()[2] = ZODB.blob.Blob()
        conn.root()[2].open('w').write('some data')
        transaction.commit()
        conn.root()[3] = ZODB.blob.Blob()
        # Write a random blob of 10000-20000 packed 32-bit ints, truncated
        # by 1-4 bytes so its length is not a multiple of four.
        data = ''.join(
            struct.pack(">I", random.randint(0, (1 << 32) - 1))
            for i in range(random.randint(10000, 20000)))
        conn.root()[3].open('w').write(data[:-random.randint(1, 4)])
        transaction.commit()
        conn.root()[2] = ZODB.blob.Blob()
        conn.root()[2].open('w').write('some other data')
        transaction.commit()
        self._dst.copyTransactionsFrom(self._storage)
        self.compare(self._storage, self._dst)
Example #26
    def testRedoOfCreation(self):
        database = DB(self._storage)
        connection = database.open()
        root = connection.root()
        blob = Blob()

        transaction.begin()
        blob.open('w').write('this is state 1')
        root['blob'] = blob
        transaction.commit()

        database.undo(database.undoLog(0, 1)[0]['id'])
        transaction.commit()

        self.assertRaises(KeyError, root.__getitem__, 'blob')

        database.undo(database.undoLog(0, 1)[0]['id'])
        transaction.commit()

        self.assertEqual(blob.open('r').read(), 'this is state 1')

        database.close()
Example #27
    def check_index_oid_ignored(self):
        # Prior to ZODB 3.2.6, the 'oid' value stored in the .index file
        # was believed.  But there were cases where adding larger oids
        # didn't update the FileStorage ._oid attribute -- the restore()
        # method in particular didn't update it, and that's about the only
        # method copyTransactionsFrom() uses.  A database copy created that
        # way then stored an 'oid' of z64 in the .index file.  This created
        # torturous problems, as when that file was opened, "new" oids got
        # generated starting over from 0 again.
        # Now the cached 'oid' value is ignored:  verify that this is so.
        import cPickle as pickle
        from ZODB.utils import z64
        from ZODB.DB import DB

        # Create some data.
        db = DB(self._storage)
        conn = db.open()
        conn.root()['xyz'] = 1
        transaction.commit()
        true_max_oid = self._storage._oid

        # Save away the index, and poke in a bad 'oid' value by hand.
        db.close()
        f = open('FileStorageTests.fs.index', 'r+b')
        p = pickle.Unpickler(f)
        data = p.load()
        saved_oid = data['oid']
        self.assertEqual(true_max_oid, saved_oid)
        data['oid'] = z64
        f.seek(0)
        f.truncate()
        p = pickle.Pickler(f, 1)
        p.dump(data)
        f.close()

        # Verify that we get the correct oid again when we reopen, despite
        # that we stored nonsense in the .index file's 'oid'.
        self.open()
        self.assertEqual(self._storage._oid, true_max_oid)
Example #28
    def indexer(self, **kwargs):
        from ZODB.FileStorage import FileStorage  # @UnresolvedImport
        from ZODB.DB import DB  # @UnresolvedImport
        from zcatalog import catalog  # @UnresolvedImport
        import transaction  # @UnresolvedImport

        dir = os.path.join(self.options.dir,
                           "%s_zcatalog" % self.options.indexname)
        if os.path.exists(dir):
            rmtree(dir)
        os.mkdir(dir)

        storage = FileStorage(os.path.join(dir, "index"))
        db = DB(storage)
        conn = db.open()

        self.cat = catalog.Catalog()
        self.bench.spec.zcatalog_setup(self.cat)
        conn.root()["cat"] = self.cat
        transaction.commit()

        self.zcatalog_count = 0
Example #29
    def setUp(self):
        super(BasePublicationTests, self).setUp()
        from zope.security.management import endInteraction
        endInteraction()
        ztapi.provideAdapter(IHTTPRequest, IUserPreferredCharsets,
                             HTTPCharsets)
        self.policy = setSecurityPolicy(
            simplepolicies.PermissiveSecurityPolicy)
        self.storage = DemoStorage('test_storage')
        self.db = db = DB(self.storage)

        ztapi.provideUtility(IAuthentication, principalRegistry)

        connection = db.open()
        root = connection.root()
        app = getattr(root, ZopePublication.root_name, None)

        if app is None:
            from zope.app.folder import rootFolder
            app = rootFolder()
            root[ZopePublication.root_name] = app
            transaction.commit()

        connection.close()
        self.app = app

        from zope.traversing.namespace import view, resource, etc
        ztapi.provideNamespaceHandler('view', view)
        ztapi.provideNamespaceHandler('resource', resource)
        ztapi.provideNamespaceHandler('etc', etc)

        self.request = TestRequest('/f1/f2')
        self.user = Principal('test.principal')
        self.request.setPrincipal(self.user)
        from zope.interface import Interface
        self.presentation_type = Interface
        self.request._presentation_type = self.presentation_type
        self.object = object()
        self.publication = ZopePublication(self.db)
Example #30
    def test_visit_sublocations_check_class_only_not_activate(self):
        from ZODB.DB import DB
        from ZODB.DemoStorage import DemoStorage
        import transaction
        from zope.interface import alsoProvides

        cat = self._makeOne()
        content = PersistentContent()
        alsoProvides(content, INoAutoIndex)
        cat.mock_catalog_data.append((4, content))

        db = DB(DemoStorage())
        self.addCleanup(db.close)
        transaction.begin()
        conn = db.open()
        self.addCleanup(conn.close)
        conn.root.cat = cat
        transaction.commit()

        transaction.begin()
        conn.cacheMinimize()
        assert_that(conn.root.cat.mock_catalog_data[1][1], is_(NoIndexContent))
        assert_that(conn.root.cat.mock_catalog_data[1][1]._p_status,
                    is_('ghost'))
        assert_that(conn.root.cat.mock_catalog_data[-1][1],
                    is_(PersistentContent))
        assert_that(conn.root.cat.mock_catalog_data[-1][1]._p_status,
                    is_('ghost'))

        locs = list(cat._visitSublocations())
        assert_that(locs, has_length(1))
        assert_that(locs[0], contains(1, is_(Content)))
        # Still a ghost
        assert_that(conn.root.cat.mock_catalog_data[1][1]._p_status,
                    is_('ghost'))
        # But the one that alsoProvides() had to wake up
        assert_that(conn.root.cat.mock_catalog_data[-1][1]._p_status,
                    is_('saved'))