Example #1
    def test_search_eot(self):
        # Populate the tree and reopen the connection, forcing it to disk
        # and moving the records to an on-page format.
        ds = SimpleDataSet(self,
                           self.uri,
                           100,
                           key_format=self.key_format,
                           value_format=self.value_format)
        ds.populate()
        self.reopen_conn()

        # Open a cursor.
        cursor = self.session.open_cursor(self.uri, None)

        # Search for a record at the end of the table, which should succeed.
        cursor.set_key(ds.key(100))
        self.assertEqual(cursor.search(), 0)
        self.assertEqual(cursor.get_key(), ds.key(100))
        self.assertEqual(cursor.get_value(), ds.value(100))

        # Search-near for a record at the end of the table, which should
        # succeed, returning the last record.
        cursor.set_key(ds.key(100))
        self.assertEqual(cursor.search_near(), 0)
        self.assertEqual(cursor.get_key(), ds.key(100))
        self.assertEqual(cursor.get_value(), ds.value(100))

        # Search for a record past the end of the table, which should fail.
        cursor.set_key(ds.key(200))
        self.assertEqual(cursor.search(), wiredtiger.WT_NOTFOUND)

        # Search-near for a record past the end of the table, which should
        # succeed, returning the last record.
        cursor.set_key(ds.key(200))
        self.assertEqual(cursor.search_near(), -1)
        self.assertEqual(cursor.get_key(), ds.key(100))
        self.assertEqual(cursor.get_value(), ds.value(100))
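A note on the return convention used above: search_near() returns 0 for an exact match, a negative value when the cursor lands on the nearest smaller key, and a positive value when it lands on the nearest larger key. A minimal sketch of interpreting that result (the helper name is hypothetical, not part of the test suite):

    def locate_near(cursor, key):
        # Position the cursor on the closest matching key and report how the
        # match relates to the search key.
        cursor.set_key(key)
        ret = cursor.search_near()
        if ret == wiredtiger.WT_NOTFOUND:
            return None  # the table is empty
        relation = 'exact' if ret == 0 else ('smaller' if ret < 0 else 'larger')
        return cursor.get_key(), relation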
Example #2
    def test_tiered(self):
        self.flushed_objects = 0
        args = 'key_format=S,block_allocation=log-structured'
        self.verbose(3, 'Test log-structured allocation with config: ' + args)

        ds = SimpleDataSet(self, self.uri, 10, config=args)
        ds.populate()
        ds.check()
        self.session.checkpoint()
        # For some reason, not every checkpoint causes a flush.
        # As we're about to move to a new model of flushing, we're not going to chase this error.
        #self.confirm_flush()

        ds = SimpleDataSet(self, self.uri, 50, config=args)
        ds.populate()
        ds.check()
        self.session.checkpoint()
        self.confirm_flush()

        ds = SimpleDataSet(self, self.uri, 100, config=args)
        ds.populate()
        ds.check()
        self.session.checkpoint()
        self.confirm_flush()

        ds = SimpleDataSet(self, self.uri, 200, config=args)
        ds.populate()
        ds.check()
        self.close_conn()
        self.confirm_flush()  # closing the connection does a checkpoint

        self.reopen_conn()
        # Check what was there before
        ds = SimpleDataSet(self, self.uri, 200, config=args)
        ds.check()

        # Now add some more.
        ds = SimpleDataSet(self, self.uri, 300, config=args)
        ds.populate()
        ds.check()

        # We haven't done a checkpoint/flush so there should be
        # nothing extra on the shared tier.
        self.confirm_flush(increase=False)
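The confirm_flush() helper is defined elsewhere in the test class; it verifies whether the number of objects on the shared tier grew since the previous call. A minimal sketch of such a check, assuming flushed objects appear as '*.wtobj' files in the database home (the file pattern and the counting logic are assumptions, not the real helper):

    import fnmatch, os

    def confirm_flush(self, increase=True):
        # Count the objects currently on the (assumed) shared tier and
        # compare against the count recorded by the previous call.
        count = len(fnmatch.filter(os.listdir('.'), '*.wtobj'))
        if increase:
            self.assertGreater(count, self.flushed_objects)
        else:
            self.assertEqual(count, self.flushed_objects)
        self.flushed_objects = count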
Example #3
    def test_column_store_gap(self):
        uri = 'table:gap'
        # Initially just create the table.
        ds = SimpleDataSet(self, uri, 0, key_format='r')
        ds.populate()
        cursor = self.session.open_cursor(uri, None, None)
        self.nentries = 0

        # Create a column-store table with large gaps in the name-space.
        v = [ 1000, 2000000000000, 30000000000000 ]
        for i in v:
            cursor[ds.key(i)] = ds.value(i)
            self.nentries += 1

        # In-memory cursor forward, backward.
        self.forward(cursor, v)
        self.backward(cursor, list(reversed(v)))

        self.reopen_conn()
        cursor = self.session.open_cursor(uri, None, None)

        # Disk page cursor forward, backward.
        self.forward(cursor, v)
        self.backward(cursor, list(reversed(v)))
Example #4
    def test_eviction(self):
        cursors = []
        datasets = []
        for i in range(0, self.ntables):
            this_uri = 'table:%s-%05d' % (self.table_name, i)
            ds = SimpleDataSet(self, this_uri, self.nrows,
                               config='allocation_size=1KB,leaf_page_max=1KB')
            ds.populate()
            datasets.append(ds)

        # Switch over to on-disk trees with multiple leaf pages
        self.reopen_conn()

        # Make sure we have a cursor for every table so it stays in cache.
        for i in range(0, self.ntables):
            this_uri = 'table:%s-%05d' % (self.table_name, i)
            cursors.append(self.session.open_cursor(this_uri, None))

        # Make use of the cache.
        for i in range(0, self.nops):
            for j in range(0, self.ntables):
                cursors[j].set_key(datasets[j].key(random.randint(0, self.nrows - 1)))
                cursors[j].search()
                cursors[j].reset()
Example #5
    def test_reconfig_fail(self):
        uri = 'table:reconfig_fail'
        ds = SimpleDataSet(self, uri, 100, key_format='S')
        ds.populate()

        # Reconfigure to an older version.
        compat_str = 'compatibility=(release="2.6")'
        self.conn.reconfigure(compat_str)

        self.session.begin_transaction()
        c = self.session.open_cursor(uri, None)
        c.set_key(ds.key(20))
        c.set_value("abcde")
        self.assertEqual(c.update(), 0)

        # Make sure we can reconfigure unrelated things while downgraded
        # and we have an active transaction.
        self.conn.reconfigure("cache_size=100M")

        compat_str = 'compatibility=(release="3.0.0")'
        msg = '/system must be quiescent/'
        self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
                                     lambda: self.conn.reconfigure(compat_str),
                                     msg)
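Compatibility can also be requested when the connection is first opened, rather than via reconfigure(). A minimal sketch (the database home path is an assumption):

    # Open a connection pinned to an older release's compatibility mode.
    conn = wiredtiger.wiredtiger_open('WT_HOME', 'create,compatibility=(release="2.6")')
    conn.close()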
Example #6
    def test_prepare_hs(self):
        nrows = 100
        ds = SimpleDataSet(self,
                           self.uri,
                           nrows,
                           key_format="S",
                           value_format='u')
        ds.populate()
        bigvalue = b"aaaaa" * 100

        # Initially load a large amount of data.
        cursor = self.session.open_cursor(self.uri)
        for i in range(1, 10000):
            cursor.set_key(ds.key(nrows + i))
            cursor.set_value(bigvalue)
            self.assertEqual(cursor.insert(), 0)
        cursor.close()
        self.session.checkpoint()

        # We put prepared updates in multiple sessions so that we do not hang
        # because the cache fills up with uncommitted updates.
        nsessions = 3
        nkeys = 4000
        self.prepare_updates(ds, nrows, nsessions, nkeys)
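The prepare_updates() helper is defined elsewhere. Per update, it exercises the standard WiredTiger prepared-transaction sequence, sketched here with placeholder timestamps (session and cursor come from the surrounding test; Example #12 below shows the same pattern in full):

    session.begin_transaction()
    cursor[ds.key(1)] = bigvalue
    session.prepare_transaction('prepare_timestamp=' + timestamp_str(20))
    session.timestamp_transaction('commit_timestamp=' + timestamp_str(30))
    session.timestamp_transaction('durable_timestamp=' + timestamp_str(30))
    session.commit_transaction()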
Example #7
    def test_prepare_lookaside(self):
        if not wiredtiger.timestamp_build():
            self.skipTest('requires a timestamp build')

        # Create a small table.
        uri = "table:test_prepare_lookaside01"
        nrows = 100
        ds = SimpleDataSet(self, uri, nrows, key_format="S", value_format='u')
        ds.populate()
        bigvalue = "aaaaa" * 100

        # Initially load a large amount of data.
        cursor = self.session.open_cursor(uri)
        for i in range(1, 10000):
            cursor.set_key(ds.key(nrows + i))
            cursor.set_value(bigvalue)
            self.assertEqual(cursor.insert(), 0)
        cursor.close()
        self.session.checkpoint()

        # Check that lookaside works with prepared transactions.
        bigvalue1 = "bbbbb" * 100
        self.conn.set_timestamp('stable_timestamp=' + timestamp_str(1))
        self.prepare_updates(self.session, uri, bigvalue1, ds, nrows)
Example #8
    def test_modify_many(self):
        ds = SimpleDataSet(self,
                           self.uri,
                           20,
                           key_format=self.keyfmt,
                           value_format='u')
        ds.populate()

        c = self.session.open_cursor(self.uri, None)
        c.set_key(ds.key(10))
        orig = 'abcdefghijklmnopqrstuvwxyz'
        c.set_value(orig)
        self.assertEqual(c.update(), 0)
        for i in range(0, 50000):
            new = "".join([random.choice(string.digits) for _ in range(5)])
            orig = orig[:10] + new + orig[15:]
            mods = []
            mod = wiredtiger.Modify(new, 10, 5)
            mods.append(mod)
            self.assertEqual(c.modify(mods), 0)

        c.set_key(ds.key(10))
        self.assertEqual(c.search(), 0)
        self.assertEqual(c.get_value(), orig)
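Each wiredtiger.Modify(data, offset, size) entry replaces the size bytes starting at offset with data. What a single modify in the loop above does to the value can be written in pure Python (for illustration only):

    def apply_one_modify(value, data, offset, size):
        # Splice 'data' over the 'size' bytes at 'offset', mirroring the
        # loop's "orig[:10] + new + orig[15:]" bookkeeping.
        return value[:offset] + data + value[offset + size:]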
Example #9
    def test_rollback_to_stable_same_ts(self):
        nrows = 1500

        # Create a table without logging.
        self.pr("create/populate table")
        uri = "table:rollback_to_stable14"
        ds = SimpleDataSet(
            self, uri, 0, key_format="i", value_format="S", config='log=(enabled=false)')
        ds.populate()

        # Pin oldest and stable to timestamp 10.
        self.conn.set_timestamp('oldest_timestamp=' + timestamp_str(10) +
            ',stable_timestamp=' + timestamp_str(10))

        value_a = "aaaaa" * 100

        value_modQ = mod_val(value_a, 'Q', 0)
        value_modR = mod_val(value_modQ, 'R', 1)
        value_modS = mod_val(value_modR, 'S', 2)
        value_modT = mod_val(value_modS, 'T', 3)

        # Perform a combination of modifies and updates.
        self.pr("large updates and modifies")
        self.large_updates(uri, value_a, ds, nrows, 20)
        self.large_modifies(uri, 'Q', ds, 0, 1, nrows, 30)
        # Prepared transactions cannot always use the same timestamp, so use different
        # timestamps for the updates that will be rolled back.
        if self.prepare:
            self.large_modifies(uri, 'R', ds, 1, 1, nrows, 51)
            self.large_modifies(uri, 'S', ds, 2, 1, nrows, 55)
            self.large_modifies(uri, 'T', ds, 3, 1, nrows, 60)
        else:
            self.large_modifies(uri, 'R', ds, 1, 1, nrows, 60)
            self.large_modifies(uri, 'S', ds, 2, 1, nrows, 60)
            self.large_modifies(uri, 'T', ds, 3, 1, nrows, 60)

        # Verify data is visible and correct.
        self.check(value_a, uri, nrows, 20)
        self.check(value_modQ, uri, nrows, 30)
        self.check(value_modT, uri, nrows, 60)

        self.conn.set_timestamp('stable_timestamp=' + timestamp_str(50))

        # Create a checkpoint thread
        done = threading.Event()
        ckpt = checkpoint_thread(self.conn, done)
        try:
            self.pr("start checkpoint")
            ckpt.start()

            # Perform several modifies in parallel with checkpoint.
            # Rollbacks may occur when checkpoint is running, so retry as needed.
            self.pr("modifies")
            retry_rollback(self, 'modify ds1, W', None,
                           lambda: self.large_modifies(uri, 'W', ds, 4, 1, nrows, 70))
            retry_rollback(self, 'modify ds1, X', None,
                           lambda: self.large_modifies(uri, 'X', ds, 5, 1, nrows, 80))
            retry_rollback(self, 'modify ds1, Y', None,
                           lambda: self.large_modifies(uri, 'Y', ds, 6, 1, nrows, 90))
            retry_rollback(self, 'modify ds1, Z', None,
                           lambda: self.large_modifies(uri, 'Z', ds, 7, 1, nrows, 100))
        finally:
            done.set()
            ckpt.join()

        # Simulate a server crash and restart.
        self.pr("restart")
        self.simulate_crash_restart(".", "RESTART")
        self.pr("restart complete")

        stat_cursor = self.session.open_cursor('statistics:', None, None)
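        # Each statistics-cursor entry is a (description, printable-value,
        # numeric-value) triple; index [2] extracts the numeric count.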
        calls = stat_cursor[stat.conn.txn_rts][2]
        hs_removed = stat_cursor[stat.conn.txn_rts_hs_removed][2]
        hs_restore_updates = stat_cursor[stat.conn.txn_rts_hs_restore_updates][2]
        hs_sweep = stat_cursor[stat.conn.txn_rts_sweep_hs_keys][2]
        keys_removed = stat_cursor[stat.conn.txn_rts_keys_removed][2]
        keys_restored = stat_cursor[stat.conn.txn_rts_keys_restored][2]
        pages_visited = stat_cursor[stat.conn.txn_rts_pages_visited][2]
        upd_aborted = stat_cursor[stat.conn.txn_rts_upd_aborted][2]
        stat_cursor.close()

        self.assertEqual(calls, 0)
        self.assertEqual(keys_removed, 0)
        self.assertEqual(hs_restore_updates, nrows)
        self.assertEqual(keys_restored, 0)
        self.assertEqual(upd_aborted, 0)
        self.assertGreater(pages_visited, 0)
        self.assertGreaterEqual(hs_removed, nrows * 3)
        self.assertGreaterEqual(hs_sweep, 0)

        # Check that the correct data is seen at and after the stable timestamp.
        self.check(value_a, uri, nrows, 20)
        self.check(value_modQ, uri, nrows, 30)

        # The test may output the following message in eviction under cache pressure. Ignore that.
        self.ignoreStdoutPatternIfExists("oldest pinned transaction ID rolled back for eviction")
Example #10
    def test_rollback_to_stable(self):
        nrows = 10000

        # Create a table.
        uri = "table:rollback_to_stable02"
        ds_config = self.extraconfig
        ds_config += ',log=(enabled=false)' if self.in_memory else ''
        ds = SimpleDataSet(self,
                           uri,
                           0,
                           key_format=self.key_format,
                           value_format=self.value_format,
                           config=ds_config)
        ds.populate()

        if self.value_format == '8t':
            valuea = 97
            valueb = 98
            valuec = 99
            valued = 100
        else:
            valuea = "aaaaa" * 100
            valueb = "bbbbb" * 100
            valuec = "ccccc" * 100
            valued = "ddddd" * 100

        # Pin oldest and stable to timestamp 1.
        self.conn.set_timestamp('oldest_timestamp=' + self.timestamp_str(1) +
                                ',stable_timestamp=' + self.timestamp_str(1))

        self.large_updates(uri, valuea, ds, nrows, self.prepare, 10)
        # Check that all updates are seen.
        self.check(valuea, uri, nrows, None, 10)

        self.large_updates(uri, valueb, ds, nrows, self.prepare, 20)
        # Check that the new updates are only seen after the update timestamp.
        self.check(valueb, uri, nrows, None, 20)

        self.large_updates(uri, valuec, ds, nrows, self.prepare, 30)
        # Check that the new updates are only seen after the update timestamp.
        self.check(valuec, uri, nrows, None, 30)

        self.large_updates(uri, valued, ds, nrows, self.prepare, 40)
        # Check that the new updates are only seen after the update timestamp.
        self.check(valued, uri, nrows, None, 40)

        # Pin stable to timestamp 30 if prepare, otherwise 20.
        if self.prepare:
            self.conn.set_timestamp('stable_timestamp=' +
                                    self.timestamp_str(30))
        else:
            self.conn.set_timestamp('stable_timestamp=' +
                                    self.timestamp_str(20))
        # Checkpoint to ensure that all the data is flushed.
        self.session.breakpoint()
        if not self.in_memory:
            self.session.checkpoint()

        self.conn.rollback_to_stable()
        # Check that only the stable data is seen at and after the stable timestamp.
        self.session.breakpoint()
        self.check(valueb, uri, nrows, None, 40)
        self.check(valueb, uri, nrows, None, 20)
        self.check(valuea, uri, nrows, None, 10)

        stat_cursor = self.session.open_cursor('statistics:', None, None)
        calls = stat_cursor[stat.conn.txn_rts][2]
        upd_aborted = (stat_cursor[stat.conn.txn_rts_upd_aborted][2] +
                       stat_cursor[stat.conn.txn_rts_hs_removed][2])
        keys_removed = stat_cursor[stat.conn.txn_rts_keys_removed][2]
        keys_restored = stat_cursor[stat.conn.txn_rts_keys_restored][2]
        pages_visited = stat_cursor[stat.conn.txn_rts_pages_visited][2]
        stat_cursor.close()

        self.assertEqual(calls, 1)
        self.assertEqual(keys_removed, 0)
        self.assertEqual(keys_restored, 0)
        self.assertGreater(pages_visited, 0)
        self.assertGreaterEqual(upd_aborted, nrows * 2)
Example #11
    def test_timestamp_usage(self):
        if wiredtiger.diagnostic_build():
            self.skipTest('requires a non-diagnostic build')

        # Create an object that's never written; it's just used to generate valid k/v pairs.
        ds = SimpleDataSet(self,
                           'file:notused',
                           10,
                           key_format=self.key_format,
                           value_format=self.value_format)

        # Create the table with the key consistency checking turned on. That checking verifies
        # that any individual key is always or never used with a timestamp, and, when a key is
        # used with timestamps, that those timestamps are applied in increasing order.
        uri = 'file:assert06'
        self.session.create(
            uri, 'key_format={},value_format={},'.format(
                self.key_format, self.value_format) +
            'write_timestamp_usage=ordered,assert=(write_timestamp=on)')
        c = self.session.open_cursor(uri)

        # Insert a data item at timestamp 2.
        self.session.begin_transaction()
        c[ds.key(1)] = ds.value(1)
        self.apply_timestamps(2, True)
        self.session.commit_transaction()

        # Make sure we can successfully add a different key at timestamp 1.
        self.session.begin_transaction()
        c[ds.key(2)] = ds.value(2)
        self.apply_timestamps(1, True)
        self.session.commit_transaction()

        # Insert key 3 at timestamp 10 and key 4 at timestamp 15, then modify both keys in one
        # transaction at timestamp 13, which should result in an error message.
        c = self.session.open_cursor(uri)
        self.session.begin_transaction()
        c[ds.key(3)] = ds.value(3)
        self.apply_timestamps(10, True)
        self.session.commit_transaction()
        self.session.begin_transaction()
        c[ds.key(4)] = ds.value(4)
        self.apply_timestamps(15, True)
        self.session.commit_transaction()
        self.session.begin_transaction()
        c[ds.key(3)] = ds.value(5)
        c[ds.key(4)] = ds.value(6)
        self.apply_timestamps(13, False)
        self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
                                     lambda: self.session.commit_transaction(),
                                     '/unexpected timestamp usage/')
        self.assertEqual(c[ds.key(3)], ds.value(3))
        self.assertEqual(c[ds.key(4)], ds.value(4))

        # Modify a key previously used with timestamps without one. We should get the inconsistent
        # usage message.
        key = ds.key(5)
        self.session.begin_transaction()
        c[key] = ds.value(7)
        self.apply_timestamps(14, True)
        self.session.commit_transaction()
        self.session.begin_transaction()
        c[key] = ds.value(8)
        self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
                                     lambda: self.session.commit_transaction(),
                                     self.msg_usage)

        # Set the timestamp at the beginning, middle, or end of the transaction.
        key = ds.key(6)
        self.session.begin_transaction()
        self.session.timestamp_transaction('commit_timestamp=' +
                                           self.timestamp_str(16))
        c[key] = ds.value(9)
        self.session.commit_transaction()
        self.assertEqual(c[key], ds.value(9))

        key = ds.key(7)
        self.session.begin_transaction()
        c[key] = ds.value(10)
        c[key] = ds.value(11)
        self.session.timestamp_transaction('commit_timestamp=' +
                                           self.timestamp_str(17))
        c[key] = ds.value(12)
        c[key] = ds.value(13)
        self.session.commit_transaction()
        self.assertEqual(c[key], ds.value(13))

        key = ds.key(8)
        self.session.begin_transaction()
        c[key] = ds.value(14)
        self.apply_timestamps(18, True)
        self.session.commit_transaction()
        self.assertEqual(c[key], ds.value(14))

        # Confirm it is okay to set the durable timestamp on the commit call.
        key = ds.key(9)
        self.session.begin_transaction()
        c[key] = ds.value(15)
        c[key] = ds.value(16)
        self.session.prepare_transaction('prepare_timestamp=' +
                                         self.timestamp_str(22))
        self.session.timestamp_transaction('commit_timestamp=' +
                                           self.timestamp_str(22))
        self.session.timestamp_transaction('durable_timestamp=' +
                                           self.timestamp_str(22))
        self.session.commit_transaction()

        # Confirm that rolling back after preparing doesn't fire an assertion.
        key = ds.key(10)
        self.session.begin_transaction()
        c[key] = ds.value(17)
        self.session.prepare_transaction('prepare_timestamp=' +
                                         self.timestamp_str(30))
        self.session.rollback_transaction()
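The apply_timestamps() helper is not shown here. Judging only from its call sites, it assigns the commit timestamp inside the open transaction, and its boolean argument controls whether the timestamp is applied so that the commit can succeed. A rough sketch of the commit-timestamp part (an assumption, not the real helper):

    def apply_timestamps(self, timestamp, commit_ok):
        # Sketch: set a commit timestamp on the open transaction. The real
        # helper presumably does more, e.g. varying where in the transaction
        # the timestamp is applied.
        self.session.timestamp_transaction(
            'commit_timestamp=' + self.timestamp_str(timestamp))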
Example #12
    def test_rollback_to_stable(self):
        nrows = 1000000

        # Create a table without logging.
        uri = "table:rollback_to_stable12"
        ds = SimpleDataSet(self,
                           uri,
                           0,
                           key_format="i",
                           value_format="S",
                           config='split_pct=50,log=(enabled=false)')
        ds.populate()

        # Pin oldest and stable to timestamp 10.
        self.conn.set_timestamp('oldest_timestamp=' + timestamp_str(10) +
                                ',stable_timestamp=' + timestamp_str(10))

        value_a = "aaaaa" * 100
        value_b = "bbbbb" * 100

        # Perform several updates.
        self.large_updates(uri, value_a, ds, nrows, 20)

        # Verify data is visible and correct.
        self.check(value_a, uri, nrows, 20)

        # Pin stable to timestamp 30 if prepare, otherwise 20.
        if self.prepare:
            self.conn.set_timestamp('stable_timestamp=' + timestamp_str(30))
        else:
            self.conn.set_timestamp('stable_timestamp=' + timestamp_str(20))

        # Load a single row modification to be removed.
        commit_ts = 30
        cursor = self.session.open_cursor(uri)
        self.session.begin_transaction()
        cursor[ds.key(1)] = value_b
        if self.prepare:
            self.session.prepare_transaction('prepare_timestamp=' +
                                             timestamp_str(commit_ts - 1))
            self.session.timestamp_transaction('commit_timestamp=' +
                                               timestamp_str(commit_ts))
            self.session.timestamp_transaction('durable_timestamp=' +
                                               timestamp_str(commit_ts + 1))
            self.session.commit_transaction()
        else:
            self.session.commit_transaction('commit_timestamp=' +
                                            timestamp_str(commit_ts))
        cursor.close()

        self.session.checkpoint()

        # Simulate a server crash and restart.
        simulate_crash_restart(self, ".", "RESTART")

        # Check that the correct data is seen at and after the stable timestamp.
        self.check(value_a, uri, nrows, 30)

        stat_cursor = self.session.open_cursor('statistics:', None, None)
        calls = stat_cursor[stat.conn.txn_rts][2]
        hs_removed = stat_cursor[stat.conn.txn_rts_hs_removed][2]
        hs_sweep = stat_cursor[stat.conn.txn_rts_sweep_hs_keys][2]
        keys_removed = stat_cursor[stat.conn.txn_rts_keys_removed][2]
        keys_restored = stat_cursor[stat.conn.txn_rts_keys_restored][2]
        pages_visited = stat_cursor[stat.conn.txn_rts_pages_visited][2]
        pages_walk_skipped = stat_cursor[
            stat.conn.txn_rts_tree_walk_skip_pages][2]
        upd_aborted = stat_cursor[stat.conn.txn_rts_upd_aborted][2]
        stat_cursor.close()

        self.assertEqual(calls, 0)
        self.assertEqual(keys_removed, 0)
        self.assertEqual(keys_restored, 0)
        self.assertGreaterEqual(upd_aborted, 0)
        self.assertGreater(pages_visited, 0)
        self.assertGreaterEqual(hs_removed, 0)
        self.assertEqual(hs_sweep, 0)
        self.assertGreaterEqual(pages_walk_skipped, 0)
Example #13
    def test_rollback_to_stable(self):
        nrows = 10000

        # Prepared transactions are not yet supported for column-store tables.
        if self.prepare and self.key_format == 'r':
            self.skipTest(
                'Prepared transactions are not yet supported for column-store tables'
            )

        # Create a table without logging.
        uri = "table:rollback_to_stable01"
        ds = SimpleDataSet(self,
                           uri,
                           0,
                           key_format=self.key_format,
                           value_format="S",
                           config='log=(enabled=false)')
        ds.populate()

        # Pin oldest and stable to timestamp 1.
        self.conn.set_timestamp('oldest_timestamp=' + timestamp_str(1) +
                                ',stable_timestamp=' + timestamp_str(1))

        valuea = "aaaaa" * 100
        self.large_updates(uri, valuea, ds, nrows, self.prepare, 10)
        # Check that all updates are seen.
        self.check(valuea, uri, nrows, 10)

        # Remove all keys with a newer timestamp.
        self.large_removes(uri, ds, nrows, self.prepare, 20)
        # Check that no keys are visible.
        self.check(valuea, uri, 0, 20)

        # Pin stable to timestamp 20 if prepare, otherwise 10.
        if self.prepare:
            self.conn.set_timestamp('stable_timestamp=' + timestamp_str(20))
        else:
            self.conn.set_timestamp('stable_timestamp=' + timestamp_str(10))
        # Checkpoint to ensure that all the updates are flushed to disk.
        if not self.in_memory:
            self.session.checkpoint()

        self.conn.rollback_to_stable()
        # Check that the removes were rolled back and the original updates are visible again.
        self.check(valuea, uri, nrows, 20)

        stat_cursor = self.session.open_cursor('statistics:', None, None)
        calls = stat_cursor[stat.conn.txn_rts][2]
        hs_removed = stat_cursor[stat.conn.txn_rts_hs_removed][2]
        keys_removed = stat_cursor[stat.conn.txn_rts_keys_removed][2]
        keys_restored = stat_cursor[stat.conn.txn_rts_keys_restored][2]
        pages_visited = stat_cursor[stat.conn.txn_rts_pages_visited][2]
        upd_aborted = stat_cursor[stat.conn.txn_rts_upd_aborted][2]
        stat_cursor.close()

        self.assertEqual(calls, 1)
        self.assertEqual(hs_removed, 0)
        self.assertEqual(keys_removed, 0)
        if self.in_memory:
            self.assertEqual(upd_aborted, nrows)
        else:
            self.assertEqual(upd_aborted + keys_restored, nrows)
        self.assertGreaterEqual(keys_restored, 0)
        self.assertGreater(pages_visited, 0)
Example #14
    def test_hs(self):

        # Create a file that contains active history (content newer than the oldest timestamp).
        table_uri = 'table:hs27'
        ds = SimpleDataSet(self,
                           table_uri,
                           0,
                           key_format='r',
                           value_format='S',
                           config='log=(enabled=false)')
        ds.populate()
        self.session.checkpoint()

        # Pin oldest and stable to timestamp 1.
        self.conn.set_timestamp('oldest_timestamp=' + self.timestamp_str(1) +
                                ',stable_timestamp=' + self.timestamp_str(1))

        # Write the initial values, if requested.
        if self.doinit:
            self.initialize(ds.uri, ds)

        # Create a long running read transaction in a separate session.
        # (Is it necessary for it to be separate? Not sure.)
        session_read = self.conn.open_session()
        session_read.begin_transaction('read_timestamp=' +
                                       self.timestamp_str(2))

        # Check that the initial writes (at timestamp 1) are seen (at timestamp 2).
        self.check(session_read, ds.uri, ds, 2, make_own_txn=False)

        # Write more values at assorted timestamps.
        self.updateall(ds.uri, ds)

        # Check that the new updates are appropriately visible.
        self.checkall(self.session, ds.uri, ds)

        self.session.breakpoint()

        # Now forcibly evict, so that all the pages are RLE-encoded and then read back in.
        # There doesn't seem to be any way to just forcibly evict an entire table, so what
        # I'm going to do is assume that what we care about is evicting the updates (the
        # initial values are not so interesting) and they are on a maximum of two pages,
        # so we can evict the first and last key. If this evicts the same page twice, it
        # won't really hurt anything. (This also avoids having to worry about whether we
        # wrote initial values or not.)

        evict_cursor = self.session.open_cursor(ds.uri, None,
                                                "debug=(release_evict)")
        self.session.begin_transaction()
        firstkey = self.get_key(0, 0)
        lastkey = self.get_key(self.nkeys - 1, self.ntimes - 1)
        for k in [firstkey, lastkey]:
            # Search the key to evict it.
            v = evict_cursor[ds.key(k)]
            self.assertEqual(v, self.value_2)
        self.assertEqual(evict_cursor.reset(), 0)
        self.session.rollback_transaction()

        # Check that the long-running read transaction still reads the correct data.
        self.check(session_read, ds.uri, ds, 2, make_own_txn=False)

        # Check that our main session reads the correct data.
        self.checkall(self.session, ds.uri, ds)

        # Drop the long-running read transaction.
        session_read.rollback_transaction()

        # Check that our main session can still read the latest data.
        self.check(self.session, ds.uri, ds, 100)
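The debug=(release_evict) cursor configuration used above (and in later examples) evicts a page when the cursor releases it. The minimal pattern, as used by the tests in this collection:

    # Force-evict the page holding 'key': the read positions the cursor on
    # the page, and reset() releases, and therefore evicts, it.
    evict_cursor = session.open_cursor(uri, None, "debug=(release_evict)")
    session.begin_transaction()
    v = evict_cursor[key]
    evict_cursor.reset()
    session.rollback_transaction()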
Example #15
    def test_checkpoint_snapshot_with_txnid_and_timestamp(self):
        self.moresetup()

        ds = SimpleDataSet(self, self.uri, 0, \
                key_format=self.key_format, value_format=self.value_format, \
                config='log=(enabled=false)'+self.extraconfig)
        ds.populate()

        # Pin oldest and stable timestamps to 10.
        self.conn.set_timestamp('oldest_timestamp=' + self.timestamp_str(10) +
            ',stable_timestamp=' + self.timestamp_str(10))

        session1 = self.conn.open_session()
        session1.begin_transaction()

        self.large_updates(self.uri, self.valuea, ds, self.nrows, 20)
        self.check(self.valuea, self.uri, self.nrows, 20, False)

        session2 = self.conn.open_session()
        session2.begin_transaction()
        cursor2 = session2.open_cursor(self.uri)

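        # Insert a second batch of keys (nrows+1 .. 2*nrows) that remains
        # uncommitted until after the checkpoint has started.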
        for i in range((self.nrows+1), (self.nrows*2)+1):
            cursor2.set_key(ds.key(i))
            cursor2.set_value(self.valuea)
            self.assertEqual(cursor2.insert(), 0)
        session1.timestamp_transaction('commit_timestamp=' + self.timestamp_str(30))

        # Set stable timestamp to 40
        self.conn.set_timestamp('stable_timestamp=' + self.timestamp_str(40))

        # Create a checkpoint thread
        done = threading.Event()
        ckpt = checkpoint_thread(self.conn, done)
        try:
            ckpt.start()
            # Sleep for some time so that the checkpoint starts before the last transaction commits.
            time.sleep(2)
            session2.commit_transaction()

        finally:
            done.set()
            ckpt.join()

        session1.rollback_transaction()
        # Simulate a crash by copying to a new directory (RESTART).
        simulate_crash_restart(self, ".", "RESTART")

        # Check the table contains the last checkpointed value.
        self.check(self.valuea, self.uri, self.nrows, 30, True)

        stat_cursor = self.session.open_cursor('statistics:', None, None)
        inconsistent_ckpt = stat_cursor[stat.conn.txn_rts_inconsistent_ckpt][2]
        keys_removed = stat_cursor[stat.conn.txn_rts_keys_removed][2]
        stat_cursor.close()

        self.assertGreater(inconsistent_ckpt, 0)
        self.assertGreaterEqual(keys_removed, 0)

        simulate_crash_restart(self, "RESTART", "RESTART2")
        # Check the table contains the last checkpointed value.
        self.check(self.valuea, self.uri, self.nrows, 30, True)

        stat_cursor = self.session.open_cursor('statistics:', None, None)
        inconsistent_ckpt = stat_cursor[stat.conn.txn_rts_inconsistent_ckpt][2]
        keys_removed = stat_cursor[stat.conn.txn_rts_keys_removed][2]
        stat_cursor.close()

        self.assertGreaterEqual(inconsistent_ckpt, 0)
        self.assertEqual(keys_removed, 0)
Example #16
    def test_hs(self):

        # Create a file that contains active history (content newer than the oldest timestamp).
        table_uri = 'table:hs26'
        ds = SimpleDataSet(self,
                           table_uri,
                           0,
                           key_format='r',
                           value_format='S',
                           config='log=(enabled=false)')
        ds.populate()
        self.session.checkpoint()

        # Pin oldest and stable to timestamp 1.
        self.conn.set_timestamp('oldest_timestamp=' + self.timestamp_str(1) +
                                ',stable_timestamp=' + self.timestamp_str(1))

        # Write the first set of values at timestamp_1.
        self.make_updates(ds.uri, ds, self.value_1, self.value_modulus_1,
                          self.nrows_1, self.timestamp_1)

        # Optionally make the first set of values globally visible (and stable).
        if self.ts1_globally_visible:
            self.conn.set_timestamp('oldest_timestamp=' +
                                    self.timestamp_str(self.timestamp_1) +
                                    ',stable_timestamp=' +
                                    self.timestamp_str(self.timestamp_1))

        # Create a long running read transaction in a separate session.
        session_read = self.conn.open_session()
        session_read.begin_transaction('read_timestamp=' +
                                       self.timestamp_str(self.timestamp_1))

        # Check that the initial writes (at timestamp_1) are seen.
        self.check(session_read, ds.uri, self.timestamp_1)

        # Write different values at a later timestamp.
        self.make_updates(ds.uri, ds, self.value_2, self.value_modulus_2,
                          self.nrows_2, self.timestamp_2)

        # Check that the new updates are only seen after the update timestamp.
        self.check(self.session, ds.uri, self.timestamp_1, self.timestamp_1)
        self.check(self.session, ds.uri, self.timestamp_2, self.timestamp_2)

        # Now forcibly evict, so that all the pages are RLE-encoded and then read back in.
        # There doesn't seem to be any way to just forcibly evict an entire table, so what
        # I'm going to do is assume that each page can hold at least 41 values, and evict
        # every 41st key. If this evicts pages repeatedly it won't really hurt anything,
        # just waste some time.

        evict_cursor = self.session.open_cursor(ds.uri, None,
                                                "debug=(release_evict)")
        self.session.begin_transaction()
        for k in range(1, max(self.nrows_1, self.nrows_2) + 1, 41):
            # Search the key to evict it.
            v = evict_cursor[ds.key(k)]
            xv = self.expected_value(k, self.timestamp_2)
            self.assertEqual(v, xv)
        self.assertEqual(evict_cursor.reset(), 0)
        self.session.rollback_transaction()

        # Using the long running read transaction, check that the correct data can still be read.
        # It should see all the updates at timestamp_1.
        self.check(session_read, ds.uri, self.timestamp_1)
        session_read.rollback_transaction()

        # Also check that the most recent transaction has the later data.
        self.check(self.session, ds.uri, self.timestamp_2, self.timestamp_2)
Example #17
    def test_modify_smoke_single(self):
        ds = SimpleDataSet(self,
            self.uri, 100, key_format=self.keyfmt, value_format=self.valuefmt)
        ds.populate()
        self.modify_load(ds, True)
Example #18
    def test_rollback_to_stable(self):
        nrows = 1000

        # Create a table without logging.
        uri = "table:rollback_to_stable03"
        ds = SimpleDataSet(self,
                           uri,
                           0,
                           key_format=self.key_format,
                           value_format="S",
                           config='log=(enabled=false)')
        ds.populate()

        # Pin oldest and stable to timestamp 1.
        self.conn.set_timestamp('oldest_timestamp=' + self.timestamp_str(1) +
                                ',stable_timestamp=' + self.timestamp_str(1))

        valuea = "aaaaa" * 100
        valueb = "bbbbb" * 100
        valuec = "ccccc" * 100
        self.large_updates(uri, valuea, ds, nrows, self.prepare, 10)
        # Check that all updates are seen.
        self.check(valuea, uri, nrows, 10)

        self.large_updates(uri, valueb, ds, nrows, self.prepare, 20)
        # Check that all updates are seen.
        self.check(valueb, uri, nrows, 20)

        self.large_updates(uri, valuec, ds, nrows, self.prepare, 30)
        # Check that all updates are seen.
        self.check(valuec, uri, nrows, 30)

        # Pin stable to timestamp 30 if prepare, otherwise 20.
        if self.prepare:
            self.conn.set_timestamp('stable_timestamp=' +
                                    self.timestamp_str(30))
        else:
            self.conn.set_timestamp('stable_timestamp=' +
                                    self.timestamp_str(20))
        # Checkpoint to ensure that all the updates are flushed to disk.
        if not self.in_memory:
            self.session.checkpoint()

        self.conn.rollback_to_stable()
        # Check that the stable updates are still seen and the rolled-back update is not.
        self.check(valueb, uri, nrows, 20)
        self.check(valuea, uri, nrows, 10)

        stat_cursor = self.session.open_cursor('statistics:', None, None)
        calls = stat_cursor[stat.conn.txn_rts][2]
        hs_removed = stat_cursor[stat.conn.txn_rts_hs_removed][2]
        keys_removed = stat_cursor[stat.conn.txn_rts_keys_removed][2]
        keys_restored = stat_cursor[stat.conn.txn_rts_keys_restored][2]
        pages_visited = stat_cursor[stat.conn.txn_rts_pages_visited][2]
        upd_aborted = stat_cursor[stat.conn.txn_rts_upd_aborted][2]
        stat_cursor.close()

        self.assertEqual(calls, 1)
        self.assertEqual(keys_removed, 0)
        self.assertEqual(keys_restored, 0)
        if self.in_memory:
            self.assertEqual(upd_aborted, nrows)
            self.assertEqual(hs_removed, 0)
        else:
            self.assertGreaterEqual(upd_aborted + hs_removed, nrows)
        self.assertGreater(pages_visited, 0)
Example #19
    def test_rollback_to_stable(self):
        nrows = 1000

        # Create two tables without logging.
        uri_1 = "table:rollback_to_stable05_1"
        ds_1 = SimpleDataSet(self,
                             uri_1,
                             0,
                             key_format="i",
                             value_format="S",
                             config='log=(enabled=false)')
        ds_1.populate()

        uri_2 = "table:rollback_to_stable05_2"
        ds_2 = SimpleDataSet(self,
                             uri_2,
                             0,
                             key_format="i",
                             value_format="S",
                             config='log=(enabled=false)')
        ds_2.populate()

        valuea = "aaaaa" * 100
        valueb = "bbbbb" * 100
        valuec = "ccccc" * 100
        valued = "ddddd" * 100
        self.large_updates(uri_1, valuea, ds_1, nrows, 0)
        self.check(valuea, uri_1, nrows, 0)

        self.large_updates(uri_2, valuea, ds_2, nrows, 0)
        self.check(valuea, uri_2, nrows, 0)

        # Start a long-running transaction and keep it open.
        session_2 = self.conn.open_session()
        session_2.begin_transaction('isolation=snapshot')

        self.large_updates(uri_1, valueb, ds_1, nrows, 0)
        self.check(valueb, uri_1, nrows, 0)

        self.large_updates(uri_1, valuec, ds_1, nrows, 0)
        self.check(valuec, uri_1, nrows, 0)

        self.large_updates(uri_1, valued, ds_1, nrows, 0)
        self.check(valued, uri_1, nrows, 0)

        # Add updates to the other table.
        self.large_updates(uri_2, valueb, ds_2, nrows, 0)
        self.check(valueb, uri_2, nrows, 0)

        self.large_updates(uri_2, valuec, ds_2, nrows, 0)
        self.check(valuec, uri_2, nrows, 0)

        self.large_updates(uri_2, valued, ds_2, nrows, 0)
        self.check(valued, uri_2, nrows, 0)

        # Checkpoint to ensure that all the data is flushed.
        if not self.in_memory:
            self.session.checkpoint()

        # Clear all running transactions before rollback to stable.
        session_2.commit_transaction()
        session_2.close()

        self.conn.rollback_to_stable()
        self.check(valued, uri_1, nrows, 0)
        self.check(valued, uri_2, nrows, 0)

        stat_cursor = self.session.open_cursor('statistics:', None, None)
        calls = stat_cursor[stat.conn.txn_rts][2]
        upd_aborted = stat_cursor[stat.conn.txn_rts_upd_aborted][2]
        hs_removed = stat_cursor[stat.conn.txn_rts_hs_removed][2]
        keys_removed = stat_cursor[stat.conn.txn_rts_keys_removed][2]
        keys_restored = stat_cursor[stat.conn.txn_rts_keys_restored][2]
        pages_visited = stat_cursor[stat.conn.txn_rts_pages_visited][2]
        stat_cursor.close()

        self.assertEqual(calls, 1)
        self.assertEqual(keys_removed, 0)
        self.assertEqual(keys_restored, 0)
        self.assertGreaterEqual(pages_visited, 0)
        # Nothing is aborted or removed, whether in-memory or not.
        self.assertEqual(upd_aborted, 0)
        self.assertEqual(hs_removed, 0)
Example #20
    def test_gc(self):
        nrows = 100000

        # Create a table without logging.
        uri = "table:gc02"
        ds = SimpleDataSet(
            self, uri, 0, key_format="i", value_format="S", config='log=(enabled=false)')
        ds.populate()

        # Pin oldest and stable to timestamp 1.
        self.conn.set_timestamp('oldest_timestamp=' + timestamp_str(1) +
            ',stable_timestamp=' + timestamp_str(1))

        bigvalue = "aaaaa" * 100
        bigvalue2 = "ddddd" * 100
        self.large_updates(uri, bigvalue, ds, nrows, 10)

        # Check that all updates are seen.
        self.check(bigvalue, uri, nrows, 10)

        self.large_updates(uri, bigvalue2, ds, nrows, 100)

        # Check that the new updates are only seen after the update timestamp.
        self.check(bigvalue2, uri, nrows, 100)

        # Check that old updates are seen.
        self.check(bigvalue, uri, nrows, 10)

        # Checkpoint to ensure that the history store is checkpointed and not cleaned.
        self.session.checkpoint()
        c = self.session.open_cursor('statistics:')
        self.assertEqual(c[stat.conn.cc_pages_evict][2], 0)
        self.assertEqual(c[stat.conn.cc_pages_removed][2], 0)
        self.assertGreater(c[stat.conn.cc_pages_visited][2], 0)
        c.close()

        # Pin oldest and stable to timestamp 100.
        self.conn.set_timestamp('oldest_timestamp=' + timestamp_str(100) +
            ',stable_timestamp=' + timestamp_str(100))

        # Check that the new updates are only seen after the update timestamp.
        self.check(bigvalue2, uri, nrows, 100)

        # Load slight modifications with later timestamps.
        self.large_modifies(uri, 'A', ds, 10, 1, nrows, 110)
        self.large_modifies(uri, 'B', ds, 20, 1, nrows, 120)
        self.large_modifies(uri, 'C', ds, 30, 1, nrows, 130)

        # Set of update operations with increased timestamp.
        self.large_updates(uri, bigvalue, ds, nrows, 150)

        # Set of update operations with increased timestamp.
        self.large_updates(uri, bigvalue2, ds, nrows, 180)

        # Set of update operations with increased timestamp.
        self.large_updates(uri, bigvalue, ds, nrows, 200)

        # Check that the modifies are seen.
        bigvalue_modA = bigvalue2[0:10] + 'A' + bigvalue2[11:]
        bigvalue_modB = bigvalue_modA[0:20] + 'B' + bigvalue_modA[21:]
        bigvalue_modC = bigvalue_modB[0:30] + 'C' + bigvalue_modB[31:]
        self.check(bigvalue_modA, uri, nrows, 110)
        self.check(bigvalue_modB, uri, nrows, 120)
        self.check(bigvalue_modC, uri, nrows, 130)

        # Check that the new updates are only seen after the update timestamp.
        self.check(bigvalue, uri, nrows, 150)

        # Check that the new updates are only seen after the update timestamp.
        self.check(bigvalue2, uri, nrows, 180)

        # Check that the new updates are only seen after the update timestamp.
        self.check(bigvalue, uri, nrows, 200)

        # Pin oldest and stable to timestamp 200.
        self.conn.set_timestamp('oldest_timestamp=' + timestamp_str(200) +
            ',stable_timestamp=' + timestamp_str(200))

        # Checkpoint to ensure that the history store is cleaned.
        self.session.checkpoint()
        self.check_gc_stats()

        # Check that the new updates are only seen after the update timestamp.
        self.check(bigvalue, uri, nrows, 200)
Example #21
    def test_checkpoint_snapshot(self):

        ds = SimpleDataSet(self,
                           self.uri,
                           0,
                           key_format="S",
                           value_format="S",
                           config='log=(enabled=false),leaf_page_max=4k')
        ds.populate()
        valuea = "aaaaa" * 100
        valueb = "bbbbb" * 100
        valuec = "ccccc" * 100

        session1 = self.conn.open_session()
        session1.begin_transaction()
        cursor1 = session1.open_cursor(self.uri)
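        # The single-iteration loop below inserts exactly one key just past
        # the populated range; the transaction deliberately stays open across
        # the checkpoint that follows.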
        for i in range(self.nrows, self.nrows + 1):
            cursor1.set_key(ds.key(i))
            cursor1.set_value(valueb)
            self.assertEqual(cursor1.insert(), 0)

        self.large_updates(self.uri, valuea, ds, self.nrows)
        self.check(valuea, self.uri, self.nrows)

        self.session.checkpoint()
        session1.rollback_transaction()
        self.reopen_conn()

        # Check the table contains the last checkpointed value.
        self.check(valuea, self.uri, self.nrows)

        session1 = self.conn.open_session()
        session1.begin_transaction()
        cursor1 = session1.open_cursor(self.uri)
        for i in range(self.nrows, self.nrows + 1):
            cursor1.set_key(ds.key(i))
            cursor1.set_value(valueb)
            self.assertEqual(cursor1.insert(), 0)

        self.session.begin_transaction()
        cursor = self.session.open_cursor(self.uri)
        for i in range(0, 1):
            cursor.set_key(ds.key(i))
            cursor.set_value(valuec)
            self.assertEqual(cursor.update(), 0)
        self.session.commit_transaction()

        self.session.checkpoint()
        session1.rollback_transaction()

        # Simulate a server crash and restart.
        simulate_crash_restart(self, ".", "RESTART")

        stat_cursor = self.session.open_cursor('statistics:', None, None)
        inconsistent_ckpt = stat_cursor[stat.conn.txn_rts_inconsistent_ckpt][2]
        keys_removed = stat_cursor[stat.conn.txn_rts_keys_removed][2]
        keys_restored = stat_cursor[stat.conn.txn_rts_keys_restored][2]
        pages_skipped = stat_cursor[stat.conn.txn_rts_tree_walk_skip_pages][2]
        upd_aborted = stat_cursor[stat.conn.txn_rts_upd_aborted][2]
        stat_cursor.close()

        self.assertGreater(inconsistent_ckpt, 0)
        self.assertEqual(upd_aborted, 0)
        self.assertGreaterEqual(keys_removed, 0)
        self.assertEqual(keys_restored, 0)
        self.assertGreater(pages_skipped, 0)
Example #22
    def test_rollback_to_stable(self):
        nrows = 1000

        # Create a table without logging.
        uri_1 = "table:rollback_to_stable10_1"
        ds_1 = SimpleDataSet(self,
                             uri_1,
                             0,
                             key_format="i",
                             value_format="S",
                             config='log=(enabled=false)')
        ds_1.populate()

        # Create another table without logging.
        uri_2 = "table:rollback_to_stable10_2"
        ds_2 = SimpleDataSet(self,
                             uri_2,
                             0,
                             key_format="i",
                             value_format="S",
                             config='log=(enabled=false)')
        ds_2.populate()

        # Pin oldest and stable to timestamp 10.
        self.conn.set_timestamp('oldest_timestamp=' + timestamp_str(10) +
                                ',stable_timestamp=' + timestamp_str(10))

        value_a = "aaaaa" * 100
        value_b = "bbbbb" * 100
        value_c = "ccccc" * 100
        value_d = "ddddd" * 100
        value_e = "eeeee" * 100
        value_f = "fffff" * 100

        # Perform several updates.
        self.large_updates(uri_1, value_d, ds_1, nrows, 20)
        self.large_updates(uri_1, value_c, ds_1, nrows, 30)
        self.large_updates(uri_1, value_b, ds_1, nrows, 40)
        self.large_updates(uri_1, value_a, ds_1, nrows, 50)

        self.large_updates(uri_2, value_d, ds_2, nrows, 20)
        self.large_updates(uri_2, value_c, ds_2, nrows, 30)
        self.large_updates(uri_2, value_b, ds_2, nrows, 40)
        self.large_updates(uri_2, value_a, ds_2, nrows, 50)

        # Verify data is visible and correct.
        self.check(value_d, uri_1, nrows, 20)
        self.check(value_c, uri_1, nrows, 30)
        self.check(value_b, uri_1, nrows, 40)
        self.check(value_a, uri_1, nrows, 50)

        self.check(value_d, uri_2, nrows, 20)
        self.check(value_c, uri_2, nrows, 30)
        self.check(value_b, uri_2, nrows, 40)
        self.check(value_a, uri_2, nrows, 50)

        # Pin stable to timestamp 60 if prepare, otherwise 50.
        if self.prepare:
            self.conn.set_timestamp('stable_timestamp=' + timestamp_str(60))
        else:
            self.conn.set_timestamp('stable_timestamp=' + timestamp_str(50))

        # Create a checkpoint thread
        done = threading.Event()
        ckpt = checkpoint_thread(self.conn, done)
        ckpt.start()

        # Perform several updates in parallel with checkpoint.
        self.large_updates(uri_1, value_e, ds_1, nrows, 70)
        self.large_updates(uri_2, value_e, ds_2, nrows, 70)
        self.large_updates(uri_1, value_f, ds_1, nrows, 80)
        self.large_updates(uri_2, value_f, ds_2, nrows, 80)

        done.set()
        ckpt.join()

        # Simulate a server crash and restart.
        self.simulate_crash_restart(".", "RESTART")

        # Check that the correct data is seen at and after the stable timestamp.
        self.check(value_a, uri_1, nrows, 50)
        self.check(value_a, uri_1, nrows, 80)
        self.check(value_b, uri_1, nrows, 40)
        self.check(value_c, uri_1, nrows, 30)
        self.check(value_d, uri_1, nrows, 20)

        # Check that the correct data is seen at and after the stable timestamp.
        self.check(value_a, uri_2, nrows, 50)
        self.check(value_a, uri_2, nrows, 80)
        self.check(value_b, uri_2, nrows, 40)
        self.check(value_c, uri_2, nrows, 30)
        self.check(value_d, uri_2, nrows, 20)

        stat_cursor = self.session.open_cursor('statistics:', None, None)
        calls = stat_cursor[stat.conn.txn_rts][2]
        hs_removed = stat_cursor[stat.conn.txn_rts_hs_removed][2]
        hs_sweep = stat_cursor[stat.conn.txn_rts_sweep_hs_keys][2]
        keys_removed = stat_cursor[stat.conn.txn_rts_keys_removed][2]
        keys_restored = stat_cursor[stat.conn.txn_rts_keys_restored][2]
        pages_visited = stat_cursor[stat.conn.txn_rts_pages_visited][2]
        upd_aborted = stat_cursor[stat.conn.txn_rts_upd_aborted][2]
        stat_cursor.close()

        self.assertEqual(calls, 0)
        self.assertEqual(keys_removed, 0)
        self.assertEqual(keys_restored, 0)
        self.assertGreaterEqual(upd_aborted, 0)
        self.assertGreater(pages_visited, 0)
        self.assertGreaterEqual(hs_removed, 0)
        self.assertGreater(hs_sweep, 0)
Example #23
    def test_readonly(self):
        create_params = 'key_format=i,value_format=i'
        entries = 10
        # Create a database and a table.
        SimpleDataSet(self,
                      self.uri,
                      entries,
                      key_format='i',
                      value_format='i').populate()

        #
        # Now close and reopen.  Note that the connection function
        # above will reopen it readonly.
        self.reopen_conn()
        msg = '/Unsupported/'
        c = self.session.open_cursor(self.uri, None, None)
        for op in self.cursor_ops:
            c.set_key(1)
            c.set_value(1)
            if op == 'insert':
                self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
                                             lambda: c.insert(), msg)
            elif op == 'remove':
                self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
                                             lambda: c.remove(), msg)
            elif op == 'update':
                self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
                                             lambda: c.update(), msg)
            else:
                self.fail('Unknown cursor operation: ' + op)
        c.close()
        for op in self.session_ops:
            if op == 'alter':
                self.assertRaisesWithMessage(
                    wiredtiger.WiredTigerError,
                    lambda: self.session.alter(self.uri, None), msg)
            elif op == 'create':
                self.assertRaisesWithMessage(
                    wiredtiger.WiredTigerError,
                    lambda: self.session.create(self.uri2, create_params), msg)
            elif op == 'compact':
                self.assertRaisesWithMessage(
                    wiredtiger.WiredTigerError,
                    lambda: self.session.compact(self.uri, None), msg)
            elif op == 'drop':
                self.assertRaisesWithMessage(
                    wiredtiger.WiredTigerError,
                    lambda: self.session.drop(self.uri, None), msg)
            elif op == 'log_flush':
                self.assertRaisesWithMessage(
                    wiredtiger.WiredTigerError,
                    lambda: self.session.log_flush(None), msg)
            elif op == 'log_printf':
                self.assertRaisesWithMessage(
                    wiredtiger.WiredTigerError,
                    lambda: self.session.log_printf("test"), msg)
            elif op == 'rename':
                self.assertRaisesWithMessage(
                    wiredtiger.WiredTigerError,
                    lambda: self.session.rename(self.uri, self.uri2, None),
                    msg)
            elif op == 'salvage':
                self.assertRaisesWithMessage(
                    wiredtiger.WiredTigerError,
                    lambda: self.session.salvage(self.uri, None), msg)
            elif op == 'truncate':
                self.assertRaisesWithMessage(
                    wiredtiger.WiredTigerError,
                    lambda: self.session.truncate(self.uri, None, None, None),
                    msg)
            elif op == 'upgrade':
                self.assertRaisesWithMessage(
                    wiredtiger.WiredTigerError,
                    lambda: self.session.upgrade(self.uri, None), msg)
            else:
                self.fail('Unknown session method: ' + op)
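The reopen is read-only because the test's connection setup requests it. A minimal sketch of such a hook, assuming the standard Python test-harness convention (the real test's logic may differ, e.g. it may apply readonly only after the first open):

    def conn_config(self):
        # Reopen connections in readonly mode; the database must already
        # exist on disk.
        return 'readonly=true'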
Example #24
    def test_rollback_to_stable(self):
        uri = 'table:test_rollback_to_stable37'
        nrows = 1000

        if self.value_format == '8t':
            value_a = 97
            value_b = 98
            value_c = 99
            value_d = 100
        else:
            value_a = 'a' * 10
            value_b = 'b' * 10
            value_c = 'c' * 10
            value_d = 'd' * 10

        # Create our table.
        ds = SimpleDataSet(self, uri, 0, key_format=self.key_format, value_format=self.value_format)
        ds.populate()

        # Pin oldest and stable to timestamp 1.
        self.conn.set_timestamp('oldest_timestamp=' + self.timestamp_str(1) +
            ',stable_timestamp=' + self.timestamp_str(1))

        # Insert 300 rounds of updates to the same keys, at timestamps 20-319.
        for i in range(20, 320):
            if self.value_format == '8t':
                self.large_updates(uri, value_a, ds, nrows, False, i)
            else:
                self.large_updates(uri, value_a + str(i), ds, nrows, False, i)

        old_reader_session = self.conn.open_session()
        old_reader_session.begin_transaction('read_timestamp=' + self.timestamp_str(10))

        self.large_updates(uri, value_b, ds, nrows, False, 2000)
        self.check(value_b, uri, nrows, None, 2000)

        self.evict_cursor(uri, nrows, value_b)

        # Insert updates without a timestamp.
        self.large_updates(uri, value_c, ds, nrows, False, 0)
        self.check(value_c, uri, nrows, None, 0)

        self.evict_cursor(uri, nrows, value_c)

        self.large_updates(uri, value_d, ds, nrows, False, 3000)
        self.check(value_d, uri, nrows, None, 3000)

        old_reader_session.rollback_transaction()
        self.session.checkpoint()

        self.conn.set_timestamp('stable_timestamp=' + self.timestamp_str(2000))
        self.session.checkpoint()

        self.conn.rollback_to_stable()

        # The untimestamped value_c update wiped out the timestamped history,
        # so after rollback-to-stable value_c is visible at every timestamp.
        self.check(value_c, uri, nrows, None, 1000)
        self.check(value_c, uri, nrows, None, 2000)
        self.check(value_c, uri, nrows, None, 3000)

        stat_cursor = self.session.open_cursor('statistics:', None, None)
        keys_removed = stat_cursor[stat.conn.txn_rts_keys_removed][2]
        stat_cursor.close()

        self.assertEqual(keys_removed, 0)
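
These rollback-to-stable examples call large_updates() and check() helpers that the excerpt doesn't show. Below is a plausible sketch modeled on the pattern the WiredTiger Python tests use; the exact signatures are inferred from the calls above, and the FLCS extra-rows argument to check() is accepted but unused in this sketch:

    # Sketch (assumed) of large_updates(): write one value to keys 1..nrows,
    # one transaction per key, at the given commit timestamp.
    def large_updates(self, uri, value, ds, nrows, prepare, commit_ts):
        cursor = self.session.open_cursor(uri)
        for i in range(1, nrows + 1):
            self.session.begin_transaction()
            cursor[ds.key(i)] = value
            if commit_ts == 0:
                # Timestamp 0 means commit without a timestamp.
                self.session.commit_transaction()
            elif prepare:
                # Prepare just below the commit timestamp; durable lands one
                # past it, which is why checks read at commit_ts + 1.
                self.session.prepare_transaction(
                    'prepare_timestamp=' + self.timestamp_str(commit_ts - 1))
                self.session.timestamp_transaction(
                    'commit_timestamp=' + self.timestamp_str(commit_ts))
                self.session.timestamp_transaction(
                    'durable_timestamp=' + self.timestamp_str(commit_ts + 1))
                self.session.commit_transaction()
            else:
                self.session.commit_transaction(
                    'commit_timestamp=' + self.timestamp_str(commit_ts))
        cursor.close()

    # Sketch (assumed) of check(): read everything at read_ts and verify
    # both the value and the row count.
    def check(self, check_value, uri, nrows, flcs_extra, read_ts):
        if read_ts == 0:
            self.session.begin_transaction()
        else:
            self.session.begin_transaction(
                'read_timestamp=' + self.timestamp_str(read_ts))
        cursor = self.session.open_cursor(uri)
        count = 0
        for _, v in cursor:
            self.assertEqual(v, check_value)
            count += 1
        cursor.close()
        self.session.rollback_transaction()
        self.assertEqual(count, nrows)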
Example #25
    def test_timestamp_alter(self):
        if wiredtiger.diagnostic_build():
            self.skipTest('requires a non-diagnostic build')

        # Create an object that's never written, it's just used to generate valid k/v pairs.
        ds = SimpleDataSet(self,
                           'file:notused',
                           10,
                           key_format=self.key_format,
                           value_format=self.value_format)

        cfg_on = 'write_timestamp_usage=ordered'
        cfg_off = 'write_timestamp_usage=none'

        # Create the table without the key consistency checking turned on.
        # Create a few items breaking the rules.
        # Then alter the setting and verify the inconsistent usage is detected.
        uri = 'file:assert06'
        self.session.create(
            uri,
            'key_format={},value_format={}'.format(self.key_format,
                                                   self.value_format))
        c = self.session.open_cursor(uri)

        # Insert a data item at timestamp 2.
        key = ds.key(1)
        self.session.begin_transaction()
        c[key] = ds.value(1)
        self.apply_timestamps(2, True)
        self.session.commit_transaction()

        # Modify the data item at timestamp 1, illegally moving the timestamp backward.
        self.session.begin_transaction()
        c[key] = ds.value(2)
        self.apply_timestamps(1, True)
        self.session.commit_transaction()

        # Insert a non-timestamped item.
        # Then illegally modify with a timestamp.
        # Then illegally modify without a timestamp.
        key = ds.key(2)
        self.session.begin_transaction()
        c[key] = ds.value(3)
        self.session.commit_transaction()
        self.session.begin_transaction()
        c[key] = ds.value(4)
        self.apply_timestamps(2, True)
        self.session.commit_transaction()
        self.session.begin_transaction()
        c[key] = ds.value(5)
        self.session.commit_transaction()

        # Now alter the setting and make sure we detect incorrect usage.
        # We must move the oldest timestamp forward in order to alter, otherwise alter closing the
        # file will fail with EBUSY.
        self.conn.set_timestamp('oldest_timestamp=' + self.timestamp_str(2))
        c.close()
        self.session.alter(uri, cfg_on)
        c = self.session.open_cursor(uri)

        # Update at timestamp 5, then detect an update that omits a timestamp.
        key = ds.key(3)
        self.session.begin_transaction()
        c[key] = ds.value(6)
        self.apply_timestamps(5, True)
        self.session.commit_transaction()
        self.session.begin_transaction()
        c[key] = ds.value(6)
        self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
                                     lambda: self.session.commit_transaction(),
                                     self.msg_usage)

        # Detect using a timestamp on a non-timestamp key. We must first use a non-timestamped
        # operation on the key in order to violate the key consistency condition in the following
        # transaction.
        key = ds.key(4)
        self.session.begin_transaction()
        c[key] = ds.value(7)
        self.session.commit_transaction()
        self.session.begin_transaction()
        c[key] = ds.value(8)
        self.session.commit_transaction('commit_timestamp=' +
                                        self.timestamp_str(3))

        # Test to make sure that key consistency can be turned off after turning it on.
        self.conn.set_timestamp('oldest_timestamp=' + self.timestamp_str(4))
        c.close()
        self.session.alter(uri, cfg_off)
        c = self.session.open_cursor(uri)

        # Detection is off, so we can successfully change the same key with and without a timestamp.
        key = ds.key(5)
        self.session.begin_transaction()
        c[key] = ds.value(9)
        self.session.commit_transaction()
        self.session.begin_transaction()
        c[key] = ds.value(1)
        self.apply_timestamps(6, True)
        self.session.commit_transaction()
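
apply_timestamps() above is another helper the excerpt omits. A sketch consistent with how it's called, taking the second argument to be a prepare flag (an assumption):

    # Sketch (assumed): set the transaction's timestamps, optionally
    # preparing first; a prepared transaction also needs a durable timestamp.
    def apply_timestamps(self, timestamp, prepare):
        if prepare:
            self.session.prepare_transaction(
                'prepare_timestamp=' + self.timestamp_str(timestamp))
        self.session.timestamp_transaction(
            'commit_timestamp=' + self.timestamp_str(timestamp))
        if prepare:
            self.session.timestamp_transaction(
                'durable_timestamp=' + self.timestamp_str(timestamp))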
Example #26
    def test_rollback_to_stable(self):
        nrows = 1

        # Create a table without logging.
        uri = "table:rollback_to_stable11"
        ds = SimpleDataSet(self,
                           uri,
                           0,
                           key_format=self.key_format,
                           value_format=self.value_format,
                           config='log=(enabled=false)')
        ds.populate()

        if self.value_format == '8t':
            value_a = 97
            value_b = 98
            value_c = 99
            value_d = 100
        else:
            value_a = "aaaaa" * 100
            value_b = "bbbbb" * 100
            value_c = "ccccc" * 100
            value_d = "ddddd" * 100

        # Pin oldest and stable to timestamp 10.
        self.conn.set_timestamp('oldest_timestamp=' + self.timestamp_str(10) +
                                ',stable_timestamp=' + self.timestamp_str(10))

        # Perform several updates.
        self.large_updates(uri, value_a, ds, nrows, self.prepare, 12)
        self.large_updates(uri, value_a, ds, nrows, self.prepare, 14)
        self.large_updates(uri, value_a, ds, nrows, self.prepare, 16)
        self.large_updates(uri, value_b, ds, nrows, self.prepare, 20)

        # Verify data is visible and correct.
        self.check(value_b, uri, nrows, None, 21 if self.prepare else 20)

        # Pin stable to timestamp 28 if prepare, otherwise 20. With prepare,
        # large_updates() makes the durable timestamp one past the commit
        # timestamp (21 here), and stable must cover the durable timestamp
        # for these updates to survive the restart.
        if self.prepare:
            self.conn.set_timestamp('stable_timestamp=' +
                                    self.timestamp_str(28))
        else:
            self.conn.set_timestamp('stable_timestamp=' +
                                    self.timestamp_str(20))

        # Checkpoint to ensure that all the updates are flushed to disk.
        self.session.checkpoint()

        # Simulate a server crash and restart.
        simulate_crash_restart(self, ".", "RESTART")

        # Check that the correct data is seen at and after the stable timestamp.
        self.check(value_b, uri, nrows, None, 20)

        # Perform several updates.
        self.large_updates(uri, value_c, ds, nrows, self.prepare, 30)
        self.large_updates(uri, value_c, ds, nrows, self.prepare, 32)
        self.large_updates(uri, value_c, ds, nrows, self.prepare, 34)
        self.large_updates(uri, value_d, ds, nrows, self.prepare, 36)

        # Verify data is visible and correct.
        self.check(value_d, uri, nrows, None, 37 if self.prepare else 36)

        # Checkpoint to ensure that all the updates are flushed to disk.
        self.session.checkpoint()

        # Simulate a server crash and restart.
        simulate_crash_restart(self, "RESTART", "RESTART2")

        # Check that the correct data is seen at and after the stable timestamp.
        self.check(value_b, uri, nrows, None, 20)
        self.check(value_b, uri, nrows, None, 40)

        stat_cursor = self.session.open_cursor('statistics:', None, None)
        calls = stat_cursor[stat.conn.txn_rts][2]
        hs_removed = stat_cursor[stat.conn.txn_rts_hs_removed][2]
        hs_sweep = stat_cursor[stat.conn.txn_rts_sweep_hs_keys][2]
        keys_removed = stat_cursor[stat.conn.txn_rts_keys_removed][2]
        keys_restored = stat_cursor[stat.conn.txn_rts_keys_restored][2]
        pages_visited = stat_cursor[stat.conn.txn_rts_pages_visited][2]
        upd_aborted = stat_cursor[stat.conn.txn_rts_upd_aborted][2]
        stat_cursor.close()

        self.assertEqual(calls, 0)
        self.assertEqual(keys_removed, 0)
        self.assertEqual(keys_restored, 0)
        self.assertEqual(upd_aborted, 0)
        self.assertGreater(pages_visited, 0)
        self.assertEqual(hs_removed, 4)
        self.assertEqual(hs_sweep, 0)
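
simulate_crash_restart() comes from the test-suite helpers. The usual trick is to copy the database files while the old connection is still open, so the copy never sees a clean shutdown, then reopen from the copy and let recovery run. A sketch under those assumptions:

import os, shutil

def simulate_crash_restart(self, olddir, newdir):
    # Copy the files with the connection still open; the copy looks like a
    # crashed database because it was never checkpoint-closed.
    shutil.rmtree(newdir, ignore_errors=True)
    os.mkdir(newdir)
    for fname in os.listdir(olddir):
        fullname = os.path.join(olddir, fname)
        # Skip the lock file; the open connection may hold it.
        if os.path.isfile(fullname) and "WiredTiger.lock" not in fullname:
            shutil.copy(fullname, newdir)
    # Close the original connection and reopen on the copy, which forces
    # WiredTiger to run recovery as it would after a real crash.
    self.close_conn()
    self.conn = self.setUpConnectionOpen(newdir)
    self.session = self.setUpSessionOpen(self.conn)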
Example #27
    def test_search_invisible_one(self):
        # Populate the tree.
        ds = SimpleDataSet(self,
                           self.uri,
                           100,
                           key_format=self.key_format,
                           value_format=self.value_format)
        ds.populate()

        # Delete a range of records.
        for i in range(5, 10):
            cursor = self.session.open_cursor(self.uri, None)
            cursor.set_key(ds.key(i))
            self.assertEqual(cursor.remove(), 0)

        # Reopen the connection, forcing it to disk and moving the records to
        # an on-page format.
        self.reopen_conn()

        # Add updates to the existing records (in both the deleted and
        # undeleted ranges), as well as some new records after the end. Put
        # the updates in a separate transaction so they're invisible to
        # another cursor.
        self.session.begin_transaction()
        cursor = self.session.open_cursor(self.uri, None)
        for i in range(5, 10):
            cursor[ds.key(i)] = ds.value(i + 1000)
        for i in range(30, 40):
            cursor[ds.key(i)] = ds.value(i + 1000)
        for i in range(100, 140):
            cursor[ds.key(i)] = ds.value(i + 1000)

        # Open a separate session and cursor.
        s = self.conn.open_session()
        cursor = s.open_cursor(self.uri, None)

        # Search for an existing record in the deleted range, should not find
        # it.
        for i in range(5, 10):
            cursor.set_key(ds.key(i))
            if self.empty:
                # Fixed-length column-store rows always exist.
                self.assertEqual(cursor.search(), 0)
                self.assertEqual(cursor.get_key(), i)
                self.assertEqual(cursor.get_value(), 0)
            else:
                self.assertEqual(cursor.search(), wiredtiger.WT_NOTFOUND)

        # Search for an existing record in the updated range, should see the
        # original value.
        for i in range(30, 40):
            cursor.set_key(ds.key(i))
            self.assertEqual(cursor.search(), 0)
            self.assertEqual(cursor.get_key(), ds.key(i))

        # Search for an added record, should not find it.
        for i in range(120, 130):
            cursor.set_key(ds.key(i))
            if self.empty:
                # Invisible updates to fixed-length column-store objects are
                # invisible to the reader, but the fact that they exist past
                # the end of the initial records causes the instantiation of
                # empty records: confirm successful return of an empty row.
                self.assertEqual(cursor.search(), 0)
                self.assertEqual(cursor.get_key(), i)
                self.assertEqual(cursor.get_value(), 0)
            else:
                # Otherwise, we should not find any matching records.
                self.assertEqual(cursor.search(), wiredtiger.WT_NOTFOUND)

        # Search-near for an existing record in the deleted range, should find
        # the next largest record. (This depends on the implementation behavior
        # which currently includes a bias to prefix search.)
        for i in range(5, 10):
            cursor.set_key(ds.key(i))
            if self.empty:
                # Fixed-length column-store rows always exist.
                self.assertEqual(cursor.search_near(), 0)
                self.assertEqual(cursor.get_key(), i)
                self.assertEqual(cursor.get_value(), 0)
            else:
                self.assertEqual(cursor.search_near(), 1)
                self.assertEqual(cursor.get_key(), ds.key(10))

        # Search-near for an existing record in the updated range, should see
        # the original value.
        for i in range(30, 40):
            cursor.set_key(ds.key(i))
            self.assertEqual(cursor.search_near(), 0)
            self.assertEqual(cursor.get_key(), ds.key(i))

        # Search-near for an added record, should find the previous largest
        # record.
        for i in range(120, 130):
            cursor.set_key(ds.key(i))
            if self.empty:
                # Invisible updates to fixed-length column-store objects are
                # invisible to the reader, but the fact that they exist past
                # the end of the initial records causes the instantiation of
                # empty records: confirm successful return of an empty row.
                self.assertEqual(cursor.search_near(), 0)
                self.assertEqual(cursor.get_key(), i)
                self.assertEqual(cursor.get_value(), 0)
            else:
                self.assertEqual(cursor.search_near(), -1)
                self.assertEqual(cursor.get_key(), ds.key(100))
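
For reference, the search_near() contract these assertions lean on: 0 means an exact match, 1 means the cursor landed on the nearest larger key, -1 the nearest smaller key. A small illustrative fragment in the same test context, not part of the original test:

        cursor.set_key(ds.key(7))
        ret = cursor.search_near()
        # On a non-empty table the cursor ends up positioned on some record.
        self.assertIn(ret, (-1, 0, 1))
        if ret != 0:
            # No exact match: positioned just after (1) or just before (-1)
            # where key 7 would have been.
            self.assertNotEqual(cursor.get_key(), ds.key(7))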
Example #28
    def test_timestamp(self):

        # Create a file that contains active history (content newer than the oldest timestamp).
        table_uri = 'table:timestamp23'
        ds = SimpleDataSet(self,
                           table_uri,
                           0,
                           key_format=self.key_format,
                           value_format='S',
                           config='log=(enabled=false)')
        ds.populate()
        self.session.checkpoint()

        key = 5
        value_1 = 'a' * 500
        value_2 = 'b' * 500
        value_3 = 'c' * 500

        # Pin oldest and stable to timestamp 1.
        self.conn.set_timestamp('oldest_timestamp=' + self.timestamp_str(1) +
                                ',stable_timestamp=' + self.timestamp_str(1))

        cursor = self.session.open_cursor(ds.uri)

        # Write two values at timestamp 10. We'll muck with the first value
        # and use the second to reference the page for eviction.
        self.session.begin_transaction('read_timestamp=10')
        cursor[key] = value_1
        cursor[key + 1] = value_2
        self.session.commit_transaction('commit_timestamp=11')

        # Delete the first value at timestamp 20.
        self.session.begin_transaction('read_timestamp=20')
        cursor.set_key(key)
        cursor.remove()
        self.session.commit_transaction('commit_timestamp=21')

        # Put it back at timestamp 30.
        self.session.begin_transaction('read_timestamp=30')
        cursor[key] = value_3
        self.session.commit_transaction('commit_timestamp=31')

        # Delete it again at timestamp 40.
        self.session.begin_transaction('read_timestamp=40')
        cursor.set_key(key)
        cursor.remove()
        self.session.commit_transaction('commit_timestamp=41')

        # Evict the page using the second key.
        evict_cursor = self.session.open_cursor(ds.uri, None,
                                                "debug=(release_evict)")
        self.session.begin_transaction()
        v = evict_cursor[key + 1]
        self.assertEqual(v, value_2)
        self.assertEqual(evict_cursor.reset(), 0)
        self.session.rollback_transaction()

        # Create a separate session and a cursor to read the original value at timestamp 12.
        session2 = self.conn.open_session()
        cursor2 = session2.open_cursor(ds.uri)
        session2.begin_transaction('read_timestamp=12')
        v = cursor2[key]
        self.assertEqual(v, value_1)

        self.session.breakpoint()

        # Now delete the original value. This _should_ cause WT_ROLLBACK, but with a column
        # store bug seen and fixed in August 2021, it succeeds, and the resulting invalid
        # tombstone will cause reconciliation to assert. (To see this behavior, comment out the
        # self.fail call and let the transaction commit.)
        try:
            cursor2.remove()
            self.fail("Conflicting remove did not fail")
            session2.commit_transaction('commit_timestamp=50')
        except wiredtiger.WiredTigerError as e:
            self.assertIn(
                wiredtiger.wiredtiger_strerror(wiredtiger.WT_ROLLBACK), str(e))

        cursor.close()
        cursor2.close()
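
Example #24 evicted pages through an evict_cursor() helper; the debug=(release_evict) cursor used inline here is the standard building block for it. A sketch of what that helper plausibly looks like, inferred from the call evict_cursor(uri, nrows, value) and assuming integer keys:

    # Sketch (assumed): read each key through a release_evict cursor so the
    # page it sits on is evicted when the cursor is reset.
    def evict_cursor(self, uri, nrows, check_value):
        evict_cursor = self.session.open_cursor(
            uri, None, "debug=(release_evict)")
        self.session.begin_transaction("ignore_prepare=true")
        for i in range(1, nrows + 1):
            evict_cursor.set_key(i)
            self.assertEqual(evict_cursor.search(), 0)
            self.assertEqual(evict_cursor.get_value(), check_value)
            self.assertEqual(evict_cursor.reset(), 0)
        self.session.rollback_transaction()
        evict_cursor.close()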
Example #29
    def test_checkpoint_las_reads(self):
        # Create a small table.
        uri = "table:test_las05"
        nrows = 100
        ds = SimpleDataSet(self, uri, nrows, key_format="S", value_format='u')
        ds.populate()
        bigvalue = b"aaaaa" * 100

        # Initially load a large amount of data: roughly 10,000 items with
        # 500-byte values, about 5MB in total, enough to fill the cache.
        # Then checkpoint so that none of it is required to stay in cache.
        cursor = self.session.open_cursor(uri)
        for i in range(1, 10000):
            cursor[ds.key(nrows + i)] = bigvalue
        cursor.close()
        self.session.checkpoint()

        # Pin the oldest timestamp so that all history has to stay.
        self.conn.set_timestamp('oldest_timestamp=' + timestamp_str(1))
        # Loop several times, partly filling the cache but not overfilling
        # it, to see the lookaside score change even while lookaside is not
        # yet in use.
        #
        # Use smaller, 50-byte values and fill eight times, staying under a
        # full cache.
        valstr = 'abcdefghijklmnopqrstuvwxyz'
        loop_start = self.get_stat(stat.conn.cache_lookaside_score)
        for i in range(1, 9):
            bigvalue2 = valstr[i].encode() * 50
            self.conn.set_timestamp('stable_timestamp=' +
                                    timestamp_str(self.stable))
            entries_start = self.get_stat(stat.conn.cache_lookaside_entries)
            score_start = self.get_stat(stat.conn.cache_lookaside_score)
            self.pr("Update iteration: " + str(i) + " Value: " +
                    str(bigvalue2))
            self.pr("Update iteration: " + str(i) + " Score: " +
                    str(score_start))
            self.large_updates(self.session, uri, bigvalue2, ds, nrows, nrows)
            self.stable += nrows
            score_end = self.get_stat(stat.conn.cache_lookaside_score)
            entries_end = self.get_stat(stat.conn.cache_lookaside_entries)
            # We expect the lookaside score to increase without any new
            # entries being written to lookaside.
            self.assertGreaterEqual(score_end, score_start)
            self.assertEqual(entries_end, entries_start)

        # While each iteration may or may not increase the score, we expect the
        # score to have strictly increased from before the loop started.
        loop_end = self.get_stat(stat.conn.cache_lookaside_score)
        self.assertGreater(loop_end, loop_start)

        # Now move the oldest timestamp forward and insert a couple more
        # large updates; the score should drop because advancing oldest
        # allows the history to be discarded. Moving oldest after each
        # update should drive the score down to zero.
        score_start = loop_end
        self.conn.set_timestamp('stable_timestamp=' +
                                timestamp_str(self.stable))
        self.conn.set_timestamp('oldest_timestamp=' +
                                timestamp_str(self.stable))
        for i in range(9, 11):
            bigvalue2 = valstr[i].encode() * 50
            self.pr("Update iteration with oldest: " + str(i) + " Value: " +
                    str(bigvalue2))
            self.large_updates(self.session, uri, bigvalue2, ds, nrows, nrows)
            self.conn.set_timestamp('stable_timestamp=' +
                                    timestamp_str(self.stable))
            self.conn.set_timestamp('oldest_timestamp=' +
                                    timestamp_str(self.stable))
            self.stable += nrows
        score_end = self.get_stat(stat.conn.cache_lookaside_score)
        self.assertLess(score_end, score_start)
        self.assertEqual(score_end, 0)
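
get_stat() here wraps the one-shot statistics-cursor pattern already used inline elsewhere in these examples. A sketch matching the calls above (an assumption):

    # Sketch (assumed): open a statistics cursor, read one value, close.
    # Index [2] selects the numeric value of the statistic.
    def get_stat(self, stat_key):
        stat_cursor = self.session.open_cursor('statistics:', None, None)
        val = stat_cursor[stat_key][2]
        stat_cursor.close()
        return val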
Example #30
    def test_rollback_to_stable(self):
        nrows = 1000

        # Create a table without logging.
        uri = "table:rollback_to_stable23"
        ds = SimpleDataSet(
            self, uri, 0, key_format=self.key_format, value_format=self.value_format,
            config='log=(enabled=false)')
        ds.populate()

        # Pin oldest and stable to timestamp 10.
        self.conn.set_timestamp('oldest_timestamp=' + self.timestamp_str(10) +
            ',stable_timestamp=' + self.timestamp_str(10))

        value_a = "aaaaa" * 100

        value_modQ = mod_val(value_a, 'Q', 0)
        value_modR = mod_val(value_modQ, 'R', 1)
        value_modS = mod_val(value_modR, 'S', 2)
        value_modT = mod_val(value_modS, 'T', 3)

        # Perform a combination of modifies and updates.
        self.large_updates(uri, value_a, ds, nrows, self.prepare, 20)
        self.large_modifies(uri, 'Q', ds, 0, 1, nrows, self.prepare, 30)
        self.large_modifies(uri, 'R', ds, 1, 1, nrows, self.prepare, 40)
        self.large_modifies(uri, 'S', ds, 2, 1, nrows, self.prepare, 50)
        self.large_modifies(uri, 'T', ds, 3, 1, nrows, self.prepare, 60)

        # Verify data is visible and correct.
        self.check(value_a, uri, nrows, None, 20)
        self.check(value_modQ, uri, nrows, None, 30)
        self.check(value_modR, uri, nrows, None, 40)
        self.check(value_modS, uri, nrows, None, 50)
        self.check(value_modT, uri, nrows, None, 60)

        # Pin stable to timestamp 60 if prepare, otherwise 50.
        if self.prepare:
            self.conn.set_timestamp('stable_timestamp=' + self.timestamp_str(60))
        else:
            self.conn.set_timestamp('stable_timestamp=' + self.timestamp_str(50))

        # Checkpoint the database.
        self.session.checkpoint()

        # Simulate a server crash and restart.
        simulate_crash_restart(self, ".", "RESTART")

        # Check that the correct data is seen at and after the stable timestamp.
        self.check_with_set_key(ds, value_a, uri, nrows, 20)
        self.check_with_set_key(ds, value_modQ, uri, nrows, 30)
        self.check_with_set_key(ds, value_modR, uri, nrows, 40)
        self.check_with_set_key(ds, value_modS, uri, nrows, 50)

        stat_cursor = self.session.open_cursor('statistics:', None, None)
        hs_removed = stat_cursor[stat.conn.txn_rts_hs_removed][2]
        hs_restore_updates = stat_cursor[stat.conn.txn_rts_hs_restore_updates][2]
        upd_aborted = stat_cursor[stat.conn.txn_rts_upd_aborted][2]
        stat_cursor.close()

        self.assertEqual(hs_restore_updates, nrows)
        if self.prepare:
            self.assertGreaterEqual(upd_aborted, 0)
        else:
            self.assertEqual(upd_aborted, 0)
        self.assertGreaterEqual(hs_removed, nrows)
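
Example #30 builds its expected values with mod_val() and applies them with large_modifies(); check_with_set_key() is presumably a check() variant that positions with set_key()/search() rather than iterating. Sketches of the first two follow, with signatures inferred from the calls above; mod_val() is taken to be a module-level helper and large_modifies() a test method, and the prepare path is omitted for brevity:

# Sketch (assumed): splice `char` into `value` at `location`, mirroring what
# a one-byte WT_MODIFY does to the stored value.
def mod_val(value, char, location, nbytes=1):
    return value[0:location] + char + value[location + nbytes:]

    # Sketch (assumed): apply a one-byte cursor.modify() to keys 1..nrows
    # at the given commit timestamp.
    def large_modifies(self, uri, char, ds, location, nbytes, nrows,
                       prepare, commit_ts):
        cursor = self.session.open_cursor(uri)
        for i in range(1, nrows + 1):
            self.session.begin_transaction()
            cursor.set_key(ds.key(i))
            mods = [wiredtiger.Modify(char, location, nbytes)]
            self.assertEqual(cursor.modify(mods), 0)
            self.session.commit_transaction(
                'commit_timestamp=' + self.timestamp_str(commit_ts))
        cursor.close()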