Example #1
    def _populate_root_and_mapping(self):
        """
        Creates the following structure in ``self._storage``::

            root.myobj1 = PersistentMapping()
            root.myobj1.key = PersistentMapping()
            root.myobj = 3

        Does this over several transactions. Returns the TID of the
        last transaction that changed the root, the TID of
        ``root.myobj1`` (which is later than the root TID and is
        current), and the database opened on the storage.
        """
        tx1 = transaction.TransactionManager()
        storage1 = self._storage
        db1 = self._closing(DB(storage1))
        c1 = db1.open(tx1)
        root = c1.root
        root().myobj1 = root.myobj1 = mapping = PersistentMapping()
        root().myobj = root.myobj = 1
        tx1.commit()
        c1._storage._cache.clear(load_persistent=False)

        c1._storage.poll_invalidations()
        root().myobj = root.myobj = 2
        tx1.commit()
        c1._storage._cache.clear(load_persistent=False)

        c1._storage.poll_invalidations()
        root().myobj = root.myobj = 3
        tx1.commit()
        root_tid = self.assert_oid_known(ROOT_OID, c1)
        c1._storage._cache.clear(load_persistent=False)

        # Now, mutate an object that's not the root
        # so that we get a new transaction after the root was
        # modified. This transaction will be included in
        # a persistent cache.
        c1._storage.poll_invalidations()
        root().myobj1.key = root.myobj1.key = PersistentMapping()
        mapping_oid = mapping._p_oid
        mapping_oid_int = bytes8_to_int64(mapping_oid)
        tx1.commit()
        mapping_tid = self.assert_oid_known(mapping_oid_int, c1)

        # self.assert_checkpoints(c1, (root_tid, root_tid))
        self.assert_oid_current(mapping_oid_int, c1)

        # the root is not in a delta
        self.assert_oid_not_known(ROOT_OID, c1)
        # Nor is it in the cache, because the Connection's
        # object cache still had the root and we were never
        # asked.
        self.assert_oid_not_cached(ROOT_OID, c1)
        # So let's get it into the cache with its current TID.
        c1._storage.load(z64)
        self.assert_cached_exact(ROOT_OID, root_tid, c1)

        c1.close()
        return root_tid, mapping_tid, db1
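A note on the double assignments above: ``c1.root`` is ZODB's ``RootConvenience`` wrapper, so attribute access maps to item access on the real root mapping that ``root()`` returns. A minimal sketch of that equivalence, using an in-memory DemoStorage (not part of the original test):

import transaction
from ZODB import DB
from ZODB.DemoStorage import DemoStorage
from persistent.mapping import PersistentMapping

db = DB(DemoStorage())
conn = db.open()
conn.root.myobj1 = PersistentMapping()   # attribute-style, via RootConvenience
assert conn.root()['myobj1'] is conn.root.myobj1   # same underlying object
transaction.commit()
conn.close()
db.close()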
Example #2
def tileCreated(tile, event):
    # avoid attribute acquisition
    context = aq_base(event.newParent)
    tile_id = event.newName
    if not context:
        return

    managerId = getManagerId(tile)

    new_tile = PersistentMapping()
    new_tile['tile_id'] = tile_id
    new_tile['tile_hidden'] = False
    new_tile['tile_style'] = ''

    try:
        tile_type = re.search('@@(.*?)/', tile.url).group(1)
    except AttributeError:
        tile_type = ''
    if tile_type:
        new_tile['tile_type'] = tile_type

    # store the tiles list in a persistent object attribute
    if not getattr(context, 'tiles_list', {}):
        context.tiles_list = PersistentMapping()
    if managerId not in context.tiles_list:
        context.tiles_list[managerId] = PersistentList()
    context.tiles_list[managerId].append(new_tile)
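The handler stores the tile record in a PersistentMapping/PersistentList rather than a plain dict/list so that later in-place mutations are tracked by ZODB; mutating a plain dict hanging off ``context`` would not mark anything as changed. A minimal illustration (assuming an in-memory DemoStorage; not from the original package):

import transaction
from ZODB import DB
from ZODB.DemoStorage import DemoStorage
from persistent.mapping import PersistentMapping

db = DB(DemoStorage())
conn = db.open()
conn.root.tiles_list = PersistentMapping()
transaction.commit()

conn.root.tiles_list['tile-1'] = {'tile_hidden': False}
assert conn.root.tiles_list._p_changed   # the mapping registered the change
transaction.commit()                     # ... so this commit persists it
conn.close()
db.close()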
Example #3
def change_pools_autonaming_scheme(root, registry):  # pragma: no cover
    """Change pool autonaming scheme."""
    prefixes = _get_autonaming_prefixes(registry)
    catalogs = find_service(root, 'catalogs')
    pools = _search_for_interfaces(catalogs, (IPool, IFolder))
    count = len(pools)
    for index, pool in enumerate(pools):
        logger.info('Migrating {0} of {1}: {2}'.format(index + 1, count, pool))
        if not pool:
            continue
        if hasattr(pool, '_autoname_last'):
            pool._autoname_lasts = PersistentMapping()
            for prefix in prefixes:
                pool._autoname_lasts[prefix] = Length(pool._autoname_last + 1)
            del pool._autoname_last
        elif not hasattr(pool, '_autoname_lasts'):
            pool._autoname_lasts = PersistentMapping()
            for prefix in prefixes:
                pool._autoname_lasts[prefix] = Length()
        if hasattr(pool, '_autoname_lasts'):
            # convert plain ints to Length counters
            for prefix in pool._autoname_lasts.keys():
                if isinstance(pool._autoname_lasts[prefix], int):
                    pool._autoname_lasts[prefix] \
                        = Length(pool._autoname_lasts[prefix])
                elif isinstance(pool._autoname_lasts[prefix].value, Length):
                    pool._autoname_lasts[prefix] = Length(1)
            # convert dict to PersistentMapping
            if not isinstance(pool._autoname_lasts, PersistentMapping):
                pool._autoname_lasts = PersistentMapping(pool._autoname_lasts)
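``Length`` (from ``BTrees.Length``) is used here instead of a plain int because it resolves concurrent-write conflicts by merging increments. A quick sketch of the API the migration relies on:

from BTrees.Length import Length

counter = Length()    # conflict-resolving counter, starts at 0
counter.change(1)     # increment; concurrent changes merge on conflict
assert counter() == counter.value == 1
counter.set(10)       # replace the value outright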
Example #4
    def _create_initial_state(self):
        # Given a set of referencing objects present at the beginning
        # of the pre pack:
        #          0    1    2    3
        #   T1: root -> A -> B -> C
        #
        # If a new transaction is committed such that the graph becomes:
        #
        #         0    1
        #  T2: root -> A
        #          \-> B -> D -> C
        #              2    4    3
        #
        # That is, C is no longer referenced from B but from a new
        # object D, and B is referenced not from A but from the root.
        txm = transaction.TransactionManager(explicit=True)
        conn = self.main_db.open(txm)
        txm.begin()

        A = conn.root.A = PersistentMapping()  # OID 0x1
        B = A['B'] = PersistentMapping()  # OID 0x2
        C = B['C'] = PersistentMapping()  # OID 0x3

        txm.commit()
        oids = {
            'A': A._p_oid,
            'B': B._p_oid,
            'C': C._p_oid,
        }
        conn.close()

        return oids
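The helper only commits T1. A hedged sketch of the T2 mutation the comment describes (assuming the same ``main_db`` fixture; this method is hypothetical, not part of the original test):

    def _mutate_to_T2(self):
        # Turn root -> A -> B -> C into root -> A, root -> B -> D -> C.
        txm = transaction.TransactionManager(explicit=True)
        conn = self.main_db.open(txm)
        txm.begin()
        root = conn.root
        A = root.A
        B = A['B']
        root.B = B                          # B now referenced from the root
        del A['B']                          # ... and no longer from A
        D = B['D'] = PersistentMapping()    # new object D (OID 0x4)
        D['C'] = B.pop('C')                 # C now referenced from D, not B
        txm.commit()
        conn.close()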
Example #5
    def checkPackOldUnreferenced(self):
        db = DB(self._storage)
        try:
            c1 = db.open()
            r1 = c1.root()
            r1['A'] = PersistentMapping()
            A_B = PersistentMapping()
            r1['A']['B'] = A_B
            transaction.get().note(u'add A then add B to A')
            transaction.commit()

            del r1['A']['B']
            transaction.get().note(u'remove B from A')
            transaction.commit()

            r1['A']['C'] = ''
            transaction.get().note(u'add C (non-persistent) to A')
            transaction.commit()

            packtime = c1._storage.lastTransactionInt()
            self._storage.pack(packtime, referencesf)

            # B should be gone, since nothing refers to it.
            with self.assertRaises(KeyError):
                __traceback_info__ = bytes8_to_int64(A_B._p_oid)
                self._storage.load(A_B._p_oid)

        finally:
            db.close()
Example #6
    def checkPackKeepNewObjects(self):
        # Packing should not remove objects created or modified after
        # the pack time, even if they are unreferenced.
        db = DB(self._storage)
        try:
            # add some data to be packed
            c = db.open()
            extra1 = PersistentMapping()
            c.add(extra1)
            extra2 = PersistentMapping()
            c.add(extra2)
            transaction.commit()

            # Choose the pack time: wait until the clock advances past
            # the commit above, then make sure it advances once more so
            # the next commit lands strictly after packtime.
            now = packtime = time.time()
            while packtime <= now:
                time.sleep(0.1)
                packtime = time.time()
            while packtime == time.time():
                time.sleep(0.1)

            extra2.foo = 'bar'
            extra3 = PersistentMapping()
            c.add(extra3)
            transaction.commit()

            self._storage.pack(packtime, referencesf)

            # extra1 should have been garbage collected
            self.assertRaises(KeyError, self._storage.load, extra1._p_oid, '')
            # extra2 and extra3 should both still exist
            self._storage.load(extra2._p_oid, '')
            self._storage.load(extra3._p_oid, '')
        finally:
            db.close()
Example #7
    def checkPackOldUnreferenced(self):
        db = DB(self._storage)
        try:
            c1 = db.open()
            r1 = c1.root()
            r1['A'] = PersistentMapping()
            B = PersistentMapping()
            r1['A']['B'] = B
            transaction.get().note('add A then add B to A')
            transaction.commit()

            del r1['A']['B']
            transaction.get().note('remove B from A')
            transaction.commit()

            r1['A']['C'] = ''
            transaction.get().note('add C to A')
            transaction.commit()

            # Spin (without sleeping) until the clock advances, so the
            # pack time is strictly later than the last commit.
            now = packtime = time.time()
            while packtime <= now:
                packtime = time.time()
            self._storage.pack(packtime, referencesf)

            # B should be gone, since nothing refers to it.
            self.assertRaises(KeyError, self._storage.load, B._p_oid, '')

        finally:
            db.close()
Example #8
 def __init__(
     self,
     firstname='',
     lastname='',
     email='',
     phone='',
     extension='',
     fax='',
     department='',
     position='',
     organization='',
     industry='',
     location='',
     country='US',
     websites=None,
     languages='',
     office='',
     room_no='',
     biography='',
     date_format='en-US',
     data=None,
     home_path=None,
     preferred_communities=None,
     two_factor_phone='',
     two_factor_verified=False,
 ):
     super(Profile, self).__init__(data)
     self.firstname = firstname
     self.lastname = lastname
     self.email = email
     self.phone = phone
     self.fax = fax
     self.extension = extension
     self.department = department
     self.position = position
     self.organization = organization
     self.industry = industry
     self.location = location
     if country not in countries.as_dict:
         country = 'XX'
     self.country = country
     if websites is not None:
         self.websites = websites
     self.languages = languages
     self.office = office
     self.room_no = room_no
     self.biography = biography
     if date_format not in cultures.as_dict:
         date_format = None
     self.date_format = date_format
     self.home_path = home_path
     self._alert_prefs = PersistentMapping()
     self._pending_alerts = Accumulator()
     self.categories = PersistentMapping()
     self.password_reset_key = None
     self.password_reset_time = None
     self.preferred_communities = preferred_communities
     self.last_login_time = None
     self.two_factor_phone = two_factor_phone
     self.two_factor_verified = two_factor_verified
Example #9
    def test_get_url_private_expired(self):
        # this is creating a bucket in the moto/mock s3 service
        s3conn = boto3.resource('s3')
        s3conn.create_bucket(Bucket='castletest')

        fileOb = upload_file_to_castle(self)
        aws.move_file(fileOb)
        fileOb = api.content.get(path='/file-repository/foobar.bin')

        # move the generated-on timestamp further into the past
        annotations = IAnnotations(fileOb)
        info = annotations.get(aws.STORAGE_KEY, PersistentMapping())
        newgeneratedon = time() - aws.EXPIRES_IN - 1000
        info.update({
            'generated_on': newgeneratedon,
        })
        annotations[aws.STORAGE_KEY] = info

        resulturl = aws.get_url(fileOb)
        self.assertTrue(resulturl.startswith(self.test_base_url))

        fileOb = api.content.get(path='/file-repository/foobar.bin')
        annotations = IAnnotations(fileOb)
        info = annotations.get(aws.STORAGE_KEY, PersistentMapping())
        self.assertNotEqual(info["generated_on"], newgeneratedon)
        self.assertEqual(info["expires_in"], aws.EXPIRES_IN)
Example #10
def bootstrap(zodb_root):
    if 'my_zodb' not in zodb_root:
        root = Root('firstpyramid')
        root['users'] = PersistentMapping()
        root['images'] = PersistentMapping()
        zodb_root['my_zodb'] = root
        transaction.commit()
    return zodb_root['my_zodb']
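As a usage sketch, a Pyramid application would typically call this bootstrap from its root factory (assuming pyramid_zodbconn; the wiring below is illustrative, not from the original project):

from pyramid_zodbconn import get_connection

def root_factory(request):
    # Open (or reuse) this request's ZODB connection and return
    # the bootstrapped application root for traversal.
    conn = get_connection(request)
    return bootstrap(conn.root())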
Example #11
 def test_non_ascii_zoid(self):
     root = self.root
     for i in range(200):
         self.conn.add(PersistentMapping())
     root.x = PersistentMapping()
     self.commit()
     _ = self.load()
     _ = self.load()
Example #12
 def db_setup_tickets(self, root):
     # Tickets
     root.ticket_pools = PersistentMapping()
     # Payments
     root.payments = PersistentMapping()
     # Queue
     root.queue = PersistentList()
     root.active = PersistentMapping()
Example #13
 def __init__(self, filename='data/metadatad.db'):
     storage = FileStorage(filename)
     self.db = DB(storage)
     self.connection = self.db.open()
     self.root = self.connection.root()
     self.files = self.root.setdefault(File.table, PersistentMapping())
     self.tracks = self.root.setdefault(Track.table, PersistentMapping())
     self.artists = self.root.setdefault(Artist.table, PersistentMapping())
     self.roots = self.root.setdefault(Root.table, PersistentMapping())
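A hedged usage sketch (the class name MetadataDB is hypothetical; the snippet above only shows __init__). Changes to the table mappings must be committed, and the connection and database closed when done:

import transaction
from persistent.mapping import PersistentMapping

db = MetadataDB('data/metadatad.db')   # hypothetical name for the class above
db.tracks['track-1'] = PersistentMapping({'title': 'Intro'})
transaction.commit()                   # persist the tables and the new entry
db.connection.close()
db.db.close()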
Example #14
 def populate(self):
     transaction.begin()
     conn = self._db.open()
     root = conn.root()
     root['test'] = pm = PersistentMapping()
     for n in range(100):
         pm[n] = PersistentMapping({0: 100 - n})
     transaction.get().note('created test data')
     transaction.commit()
     conn.close()
Example #15
def migration_infos(context):
    path = '/'.join(context.getPhysicalPath())
    purl = getToolByName(context, 'portal_url')
    pobj = purl.getPortalObject()
    annotations = IAnnotations(pobj)
    if PRODUCT not in annotations:
        annotations[PRODUCT] = PersistentMapping()
    if path not in annotations[PRODUCT]:
        annotations[PRODUCT][path] = PersistentMapping()
    return annotations[PRODUCT][path]
Example #16
    def checkBackwardTimeTravelWithRevertWhenStale(self):
        # If revert_when_stale is true, when the database
        # connection is stale (such as through failover to an
        # asynchronous slave that is not fully up to date), the poller
        # should notice that backward time travel has occurred and
        # invalidate all objects that have changed in the interval.
        self._storage = self.make_storage(revert_when_stale=True)

        import os
        import shutil
        import tempfile
        from ZODB.FileStorage import FileStorage
        db = DB(self._storage)
        try:
            transaction.begin()
            c = db.open()
            r = c.root()
            r['alpha'] = PersistentMapping()
            transaction.commit()

            # To simulate failover to an out of date async slave, take
            # a snapshot of the database at this point, change some
            # object, then restore the database to its earlier state.

            d = tempfile.mkdtemp()
            try:
                transaction.begin()
                fs = FileStorage(os.path.join(d, 'Data.fs'))
                fs.copyTransactionsFrom(c._storage)

                r['beta'] = PersistentMapping()
                transaction.commit()
                self.assertTrue('beta' in r)

                c._storage.zap_all(reset_oid=False, slow=True)
                c._storage.copyTransactionsFrom(fs)

                fs.close()
            finally:
                shutil.rmtree(d)

            # r should still be in the cache.
            self.assertTrue('beta' in r)

            # Now sync, which will call poll_invalidations().
            c.sync()

            # r should have been invalidated
            self.assertEqual(r._p_changed, None)

            # r should be reverted to its earlier state.
            self.assertFalse('beta' in r)

        finally:
            db.close()
Example #17
 def __init__(self):
     self.name = "New Method"
     self.short_name = "New Method"
     self.description = ""
     self.__name__ = Coding().generateUniqueCode(short=True, withdash=False)
     self.settings = PersistentMapping()
     self.enabled = False
     self.public = True
     self.deadlined = False
     self.transaction_properties = PersistentMapping()
     self.groups = PersistentList()
Example #18
def set_meeting_item_attendee_position(meeting, item_uid, hp_uid,
                                       position_type):
    """ """
    updated = False
    item_attendees_positions = meeting.item_attendees_positions.get(
        item_uid, PersistentMapping())
    if hp_uid not in item_attendees_positions:
        updated = True
        item_attendees_positions[hp_uid] = PersistentMapping(
            {'position_type': position_type})
        meeting.item_attendees_positions[item_uid] = item_attendees_positions
    return updated
Example #19
    def checkBackwardTimeTravel(self):
        # When a failover event causes the storage to switch to an
        # asynchronous slave that is not fully up to date, the poller
        # should notice that backward time travel has occurred and
        # handle the situation by invalidating all objects that have
        # changed in the interval. (Currently, we simply invalidate all
        # objects when backward time travel occurs.)
        import os
        import shutil
        import tempfile
        from ZODB.FileStorage import FileStorage
        db = DB(self._storage)
        try:
            c = db.open()
            r = c.root()
            r['alpha'] = PersistentMapping()
            transaction.commit()

            # To simulate failover to an out of date async slave, take
            # a snapshot of the database at this point, change some
            # object, then restore the database to its earlier state.

            d = tempfile.mkdtemp()
            try:
                fs = FileStorage(os.path.join(d, 'Data.fs'))
                fs.copyTransactionsFrom(c._storage)

                r['beta'] = PersistentMapping()
                transaction.commit()
                self.assertTrue('beta' in r)

                c._storage.zap_all()
                c._storage.copyTransactionsFrom(fs)

                fs.close()
            finally:
                shutil.rmtree(d)

            # r should still be in the cache.
            self.assertTrue('beta' in r)

            # Now sync, which will call poll_invalidations().
            c.sync()

            # r should have been invalidated
            self.assertEqual(r._p_changed, None)

            # r should be reverted to its earlier state.
            self.assertFalse('beta' in r)

        finally:
            db.close()
Example #20
    def update_unit_info(self):
        """returns HPs, Locs."""
        HPs = PersistentMapping()
        locs = PersistentMapping()

        for unit in self.map:
            num = self.map[unit]
            loc = unit.location
            if loc[0] >= 0:
                locs[num] = loc
                HPs[num] = unit.hp

        return HPs, locs
Example #21
    def checkLen(self):
        # Override the version from BasicStorage because we
        # actually do guarantee to keep track of the counts.

        # len(storage) reports the number of objects.
        # check it is zero when empty
        self.assertEqual(len(self._storage), 0)
        # check it is correct when the storage contains two objects
        # (the base test allows len() to be zero for storages that do
        # not track this number; this subclass guarantees the count)
        self._dostore(data=PersistentMapping())
        self._dostore(data=PersistentMapping())
        self.assertEqual(len(self._storage), 2)
Example #22
 def test_nested_persistent_mapping(self):
     input = PersistentMapping({
         'foo': 'bar',
         'bar': PersistentList(['foo', 'bar']),
         'baz': PersistentMapping({
             'foo': 'bar',
             'bar': 'baz',
         }),
     })
     output = self.transport(input)
     self.assertEqual(output, input)
     self.assertEqual(type(output), PersistentMapping)
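Side note on the assertions above: a PersistentMapping compares equal to any mapping with the same items, so the first assertEqual alone would pass even if the transport downgraded values to plain dicts; that is why the test also asserts on ``type(output)``. A quick illustration:

from persistent.mapping import PersistentMapping

pm = PersistentMapping({'foo': 'bar'})
assert pm == {'foo': 'bar'}        # equality compares items, not the type
assert not isinstance(pm, dict)    # so the explicit type check matters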
Example #23
    def checkBackwardTimeTravelWithoutRevertWhenStale(self):
        # If revert_when_stale is false (the default), when the database
        # connection is stale (such as through failover to an
        # asynchronous slave that is not fully up to date), the poller
        # should notice that backward time travel has occurred and
        # raise a ReadConflictError.
        self._storage = self.make_storage(revert_when_stale=False)

        import os
        import shutil
        import tempfile
        from ZODB.FileStorage import FileStorage
        db = DB(self._storage)
        try:
            c = db.open()
            r = c.root()
            r['alpha'] = PersistentMapping()
            transaction.commit()

            # To simulate failover to an out of date async slave, take
            # a snapshot of the database at this point, change some
            # object, then restore the database to its earlier state.

            d = tempfile.mkdtemp()
            try:
                fs = FileStorage(os.path.join(d, 'Data.fs'))
                fs.copyTransactionsFrom(c._storage)

                r['beta'] = PersistentMapping()
                transaction.commit()
                self.assertTrue('beta' in r)

                c._storage.zap_all(reset_oid=False)
                c._storage.copyTransactionsFrom(fs)

                fs.close()
            finally:
                shutil.rmtree(d)

            # Sync, which will call poll_invalidations().
            c.sync()

            # Try to load an object, which should cause ReadConflictError.
            r._p_deactivate()
            self.assertRaises(ReadConflictError, lambda: r['beta'])

        finally:
            db.close()
Example #24
 def inject_changes():
     # Change the database just after the list of objects
     # to analyze has been determined.
     child2 = PersistentMapping()
     root['child2'] = child2
     transaction.commit()
     expect_oids.append(child2._p_oid)
Example #25
    def checkPackWhileReferringObjectChanges(self):
        # Packing should not remove objects referenced by an
        # object that changes during packing.
        db = DB(self._storage)
        try:
            # add some data to be packed
            c = db.open()
            root = c.root()
            child = PersistentMapping()
            root['child'] = child
            transaction.commit()
            expect_oids = [child._p_oid]

            def inject_changes():
                # Change the database just after the list of objects
                # to analyze has been determined.
                child2 = PersistentMapping()
                root['child2'] = child2
                transaction.commit()
                expect_oids.append(child2._p_oid)

            adapter = self._storage._adapter
            adapter.packundo.on_filling_object_refs = inject_changes
            packtime = time.time()
            self._storage.pack(packtime, referencesf)

            self.assertEqual(
                len(expect_oids), 2,
                "The on_filling_object_refs hook should have been called once")
            # Both children should still exist.
            self._storage.load(expect_oids[0], '')
            self._storage.load(expect_oids[1], '')
        finally:
            db.close()
Example #26
    def acquire(self, commit=False):
        """Acquire a resolve lock for a dossier.

        Will overwrite a possibly existing expired lock.
        """
        self.log("Acquiring resolve lock for %s..." % self.context)

        if self.txn_is_dirty():
            # Acquiring and committing the lock should always be the first
            # thing that's being done when resolving the dossier, otherwise
            # we would be committing unrelated, unexpected changes.
            #
            # Detect if that happens, but still proceed and log to sentry.
            msg = 'Dirty transaction when committing resolve lock'
            self.log(msg)
            self.log('Registered objects: %r' % self._registered_objects())
            log_msg_to_sentry(
                msg,
                level='warning',
                extra={'registered_objects': repr(self._registered_objects())})

        ann = IAnnotations(self.context)
        lockinfo = PersistentMapping({
            'timestamp': datetime.now(),
            'userid': api.user.get_current().id,
        })
        ann[RESOLVE_LOCK_KEY] = lockinfo
        self.invalidate_cache()

        if commit:
            transaction.commit()

        self.log("Resolve lock acquired.")
Example #27
    def get_thread_dict(self, root):
        # This is vicious:  multiple threads are slamming changes into the
        # root object, then trying to read the root object, simultaneously
        # and without any coordination.  Conflict errors are rampant.  It
        # used to go around at most 10 times, but that fairly often failed
        # to make progress in the 7-thread tests on some test boxes.  Going
        # around (at most) 1000 times was enough so that a 100-thread test
        # reliably passed on Tim's hyperthreaded WinXP box (but at the
        # original 10 retries, the same test reliably failed with 15 threads).
        name = self.getName()
        MAXRETRIES = 1000

        for i in range(MAXRETRIES):
            try:
                root[name] = PersistentMapping()
                transaction.commit()
                break
            except ConflictError:
                root._p_jar.sync()
        else:
            raise ConflictError("Exceeded %d attempts to store" % MAXRETRIES)

        for j in range(MAXRETRIES):
            try:
                return root.get(name)
            except ConflictError:
                root._p_jar.sync()

        raise ConflictError("Exceeded %d attempts to read" % MAXRETRIES)
Example #28
    def test_uuids_converted_to_dict(self):
        title = u'Revert PersistentMapping back to dict'
        step = self._get_upgrade_step(title)
        self.assertIsNotNone(step)

        # simulate state on previous version
        cover = self._create_cover('test-cover', 'Empty layout')
        cover.cover_layout = (
            '[{"type": "row", "children": [{"column-size": 16, "type": '
            '"group", "children": [{"tile-type": '
            '"collective.cover.carousel", "type": "tile", "id": '
            '"ca6ba6675ef145e4a569c5e410af7511"}], "roles": ["Manager"]}]}]')

        tile = cover.get_tile('ca6ba6675ef145e4a569c5e410af7511')
        old_data = ITileDataManager(tile).get()
        old_dict = PersistentMapping()
        old_dict['uuid1'] = {'order': u'0'}
        old_dict['uuid2'] = {'order': u'1'}
        old_dict['uuid3'] = {'order': u'2'}
        old_data['uuids'] = old_dict
        ITileDataManager(tile).set(old_data)

        # run the upgrade step to validate the update
        self._do_upgrade_step(step)
        old_data = ITileDataManager(tile).get()
        self.assertFalse(isinstance(old_data['uuids'], PersistentMapping))
        self.assertTrue(isinstance(old_data['uuids'], dict))
        self.assertEqual(old_data['uuids']['uuid1']['order'], u'0')
        self.assertEqual(old_data['uuids']['uuid2']['order'], u'1')
        self.assertEqual(old_data['uuids']['uuid3']['order'], u'2')
Example #29
def create_root(storage, oid=z64, check_new=True):
    """
    Creates a public or private root in the storage.
    The root is a PersistentMapping.

    :param storage: ZODB storage to create the root in
    :param bytes oid: Object id to give to the root (z64 is the global root)
    :param bool check_new: If True, do nothing if the root exists
    """

    if check_new:
        try:
            storage.load(oid, '')
            return
        except KeyError:
            pass
    # Create the database's root in the storage if it doesn't exist
    from persistent.mapping import PersistentMapping
    root = PersistentMapping()
    # Manually create a pickle for the root to put in the storage.
    # The pickle must be in the special ZODB format.
    file = BytesIO()
    p = Pickler(file, _protocol)
    p.dump((root.__class__, None))
    p.dump(root.__getstate__())
    t = transaction.Transaction()
    t.description = 'initial database creation'
    storage.tpc_begin(t)
    storage.store(oid, None, file.getvalue(), '', t)
    storage.tpc_vote(t)
    storage.tpc_finish(t)
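A hedged usage sketch (MappingStorage chosen purely for illustration): the helper is idempotent thanks to ``check_new``, and a DB opened afterwards sees the hand-pickled root as an ordinary PersistentMapping:

from persistent.mapping import PersistentMapping
from ZODB import DB
from ZODB.MappingStorage import MappingStorage

storage = MappingStorage()
create_root(storage)   # writes a root PersistentMapping at z64
create_root(storage)   # no-op: check_new finds the root already present
db = DB(storage)
conn = db.open()
assert isinstance(conn.root(), PersistentMapping)
conn.close()
db.close()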
Example #30
    def checkAutoReconnectOnSync(self):
        # Verify auto-reconnect.
        db = DB(self._storage)
        try:
            c1 = db.open()
            r = c1.root()

            c1._storage._load_conn.close()
            c1._storage.sync()
            # ZODB5 calls sync when a connection is opened. Our monkey
            # patch on a Connection makes sure that works in earlier
            # versions, but we don't have that patch on ZODB5. So test
            # the storage directly. NOTE: The load connection must be open
            # to trigger the actual sync.

            r = c1.root()
            r['alpha'] = 1
            transaction.commit()
            c1.close()

            c1._storage._load_conn.close()
            c1._storage._store_conn.close()

            c2 = db.open()
            self.assertIs(c2, c1)

            r = c2.root()
            self.assertEqual(r['alpha'], 1)
            r['beta'] = PersistentMapping()
            c2.add(r['beta'])
            transaction.commit()
            c2.close()
        finally:
            db.close()