Example #1
    def backup_check(self, check_value, expected_ts_log, expected_ts_nolog):
        newdir = "BACKUP"
        copy_wiredtiger_home('.', newdir, True)

        conn = self.setUpConnectionOpen(newdir)
        session = self.setUpSessionOpen(conn)
        cur_ts_log = session.open_cursor(self.table_ts_log)
        cur_ts_nolog = session.open_cursor(self.table_ts_nolog)
        # Count how many times the given value is present in the
        # logged timestamp table.
        actual_ts_log = 0
        for k, v in cur_ts_log:
            if check_value == v:
                actual_ts_log += 1
        cur_ts_log.close()
        # Count how many times the given value is present in the
        # not logged timestamp table
        actual_ts_nolog = 0
        for k, v in cur_ts_nolog:
            if check_value == v:
                actual_ts_nolog += 1
        cur_ts_nolog.close()
        conn.close()
        self.assertEqual(actual_ts_log, expected_ts_log)
        self.assertEqual(actual_ts_nolog, expected_ts_nolog)
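Every example on this page exercises the test-suite helper copy_wiredtiger_home, which clones a live database home directory; some examples pass the test object as the first argument and some do not, reflecting different versions of the helper. As an illustration only, a minimal stand-in could copy every file except the connection's lock file (the real helper also implements aligned and unaligned buffered copies, which this sketch merely stubs out):

import os
import shutil

def copy_wiredtiger_home_sketch(olddir, newdir, aligned=False):
    # Hypothetical stand-in for the suite's helper: copy every file in the
    # WiredTiger home directory so the copy looks like a crashed (or hot
    # backed-up) database that can be reopened and recovered.
    os.makedirs(newdir, exist_ok=True)
    for name in os.listdir(olddir):
        if name == 'WiredTiger.lock':
            continue  # the lock file belongs to the still-open connection
        src = os.path.join(olddir, name)
        if os.path.isfile(src):
            shutil.copy2(src, os.path.join(newdir, name))
    # 'aligned' is accepted for signature compatibility only; the real helper
    # uses it to choose between aligned and unaligned copy strategies.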
Example #2
    def test_bug018(self):
        '''Test closing multiple tables'''

        self.close_conn()
        subdir = 'SUBPROCESS'
        [ignore_result, new_home_dir] = self.run_subprocess_function(
            subdir, 'test_bug018.test_bug018.subprocess_bug018')

        # Make a backup for forensics in case something goes wrong.
        backup_dir = 'BACKUP'
        copy_wiredtiger_home(self, new_home_dir, backup_dir, True)

        # After reopening and running recovery both tables should be in
        # sync even though table 1 was successfully written and table 2
        # had an error on close.
        self.open_conn(new_home_dir)

        results1 = list(self.session.open_cursor(self.uri1))

        # It's possible the second table can't even be opened.
        # That can happen only if the root page was not pushed out.
        # We can't depend on the text of a particular error message to be
        # emitted, so we'll just ignore the error.
        self.captureerr.check(self)  # check there is no error output so far
        try:
            results2 = list(self.session.open_cursor(self.uri2))
        except:
            # Make sure there's some error, but we don't care what.
            self.captureerr.checkAdditionalPattern(self, '.')
            results2 = []
        self.assertEqual(results1, results2)
Example #3
    def test_compress02(self):
        ds = SimpleDataSet(self,
                           self.uri,
                           0,
                           key_format="S",
                           value_format="S",
                           config='block_compressor=zstd')
        ds.populate()
        valuea = "aaaaa" * 100

        cursor = self.session.open_cursor(self.uri)
        self.large_updates(self.uri, valuea, ds, self.nrows)

        self.check(valuea, self.uri, self.nrows)
        self.session.checkpoint()

        # Simulate a crash by copying to a new directory (RESTART).
        copy_wiredtiger_home(self, ".", "RESTART")

        # Close the connection and reopen it with a different zstd compression level configuration.
        restart_config = 'builtin_extension_config={zstd={compression_level=9}},cache_size=10MB'
        self.close_conn()
        self.reopen_conn("RESTART", restart_config)

        # Open the new directory.
        self.session = self.setUpSessionOpen(self.conn)

        # Check the table contains the last checkpointed value.
        self.check(valuea, self.uri, self.nrows)
Example #4
    def test_modify_smoke_recover(self):
        # Close the original database.
        self.conn.close()

        # Open a new database with logging configured.
        self.conn_config = \
            'log=(enabled=true),transaction_sync=(method=dsync,enabled)'
        self.conn = self.setUpConnectionOpen(".")
        self.session = self.setUpSessionOpen(self.conn)

        # Populate a database, and checkpoint it so it exists after recovery.
        ds = SimpleDataSet(self,
            self.uri, 100, key_format=self.keyfmt, value_format=self.valuefmt)
        ds.populate()
        self.session.checkpoint()
        self.modify_load(ds, False)

        # Crash and recover in a new directory.
        newdir = 'RESTART'
        copy_wiredtiger_home('.', newdir)
        self.conn.close()
        self.conn = self.setUpConnectionOpen(newdir)
        self.session = self.setUpSessionOpen(self.conn)
        self.session.verify(self.uri)

        self.modify_confirm(ds, False)
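Examples #3 and #4 follow the same crash-recovery pattern: checkpoint, copy the home directory while the connection is still open, then open the copy, which forces WiredTiger to run recovery. Distilled into one hypothetical helper (the import follows Example #19's helper module and the test-object-first signature follows Example #2; both are assumptions, and the names are illustrative):

from helper import copy_wiredtiger_home

def simulate_crash_and_recover(test, uri, newdir='RESTART'):
    # Checkpoint so there is something durable to recover to.
    test.session.checkpoint()
    # Copy the live home directory: this is the simulated "crash".
    copy_wiredtiger_home(test, '.', newdir)
    # Opening the copy runs recovery, exactly as a post-crash restart would.
    conn = test.setUpConnectionOpen(newdir)
    session = test.setUpSessionOpen(conn)
    session.verify(uri)  # verification succeeds only if recovery worked
    conn.close()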
Example #5
    def backup_check(self, check_value, expected_ts_log, expected_ts_nolog):
        newdir = "BACKUP"
        copy_wiredtiger_home('.', newdir, True)

        conn = self.setUpConnectionOpen(newdir)
        session = self.setUpSessionOpen(conn)
        cur_ts_log = session.open_cursor(self.table_ts_log)
        cur_ts_nolog = session.open_cursor(self.table_ts_nolog)
        # Count how many times the given value is present in the
        # logged timestamp table.
        actual_ts_log = 0
        for k, v in cur_ts_log:
            if check_value == v:
                actual_ts_log += 1
        cur_ts_log.close()
        # Count how many times the given value is present in the
        # not logged timestamp table
        actual_ts_nolog = 0
        for k, v in cur_ts_nolog:
            if check_value == v:
                actual_ts_nolog += 1
        cur_ts_nolog.close()
        conn.close()
        self.assertEqual(actual_ts_log, expected_ts_log)
        self.assertEqual(actual_ts_nolog, expected_ts_nolog)
Example #6
    def check_unclean(self):
        backup = "WT_COPYDIR"
        copy_wiredtiger_home(self, self.home, backup, True)
        msg = '/needs recovery/'
        #   2. an unclean shutdown and reopening readonly
        self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
            lambda: self.wiredtiger_open(backup, self.conn_params_rd), msg)
Example #7
    def test_checkpoint_snapshot(self):

        ds = SimpleDataSet(self,
                           self.uri,
                           self.nrows,
                           key_format="S",
                           value_format='u')
        ds.populate()
        value = b"aaaaa" * 100

        sessions = [0] * self.nsessions
        cursors = [0] * self.nsessions

        for j in range(0, self.nsessions):
            sessions[j] = self.conn.open_session()
            cursors[j] = sessions[j].open_cursor(self.uri)
            sessions[j].begin_transaction('isolation=snapshot')

            start = (j * self.nkeys)
            end = start + self.nkeys

            for i in range(start, end):
                cursors[j].set_key(ds.key(self.nrows + i))
                cursors[j].set_value(value)
                self.assertEquals(cursors[j].insert(), 0)

        session_p2 = self.conn.open_session()
        session_p2.checkpoint()

        # Simulate a crash by copying to a new directory (RESTART).
        copy_wiredtiger_home(".", "RESTART")

        # Open the new directory.
        self.conn = self.setUpConnectionOpen("RESTART")
        self.session = self.setUpSessionOpen(self.conn)
Example #8
    def check_unclean(self):
        backup = "WT_COPYDIR"
        copy_wiredtiger_home(self.home, backup, True)
        msg = '/needs recovery/'
        #   2. an unclean shutdown and reopening readonly
        self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
            lambda: self.wiredtiger_open(backup, self.conn_params_rd), msg)
Example #9
    def test_bug018(self):
        '''Test closing multiple tables'''

        self.close_conn()
        subdir = 'SUBPROCESS'
        [ignore_result, new_home_dir] = self.run_subprocess_function(
            subdir, 'test_bug018.test_bug018.subprocess_bug018')

        # Make a backup for forensics in case something goes wrong.
        backup_dir = 'BACKUP'
        copy_wiredtiger_home(new_home_dir, backup_dir, True)

        # After reopening and running recovery both tables should be in
        # sync even though table 1 was successfully written and table 2
        # had an error on close.
        self.open_conn(new_home_dir)

        results1 = list(self.session.open_cursor(self.uri1))

        # It's possible the second table can't even be opened.
        # That can happen only if the root page was not pushed out.
        # So if we get an error, make sure we're getting the right
        # error message.

        self.captureerr.check(self)  # check error messages until now
        try:
            results2 = list(self.session.open_cursor(self.uri2))
        except:
            self.captureerr.checkAdditionalPattern(self,
                                                   'unable to read root page')
            results2 = []
        self.assertEqual(results1, results2)
Example #10
    def test_modify_smoke_recover(self):
        # Close the original database.
        self.conn.close()

        # Open a new database with logging configured.
        self.conn_config = \
            'log=(enabled=true),transaction_sync=(method=dsync,enabled)'
        self.conn = self.setUpConnectionOpen(".")
        self.session = self.setUpSessionOpen(self.conn)

        # Populate a database, and checkpoint it so it exists after recovery.
        ds = SimpleDataSet(self,
            self.uri, 100, key_format=self.keyfmt, value_format=self.valuefmt)
        ds.populate()
        self.session.checkpoint()
        self.modify_load(ds, False)

        # Crash and recover in a new directory.
        newdir = 'RESTART'
        copy_wiredtiger_home('.', newdir)
        self.conn.close()
        self.conn = self.setUpConnectionOpen(newdir)
        self.session = self.setUpSessionOpen(self.conn)
        self.session.verify(self.uri)

        self.modify_confirm(ds, False)
Example #11
    def test_checkpoint_snapshot(self):

        ds = SimpleDataSet(self, self.uri, 0, key_format="S", value_format="S",
                           config='log=(enabled=false)')
        ds.populate()
        valuea = "aaaaa" * 100
        valueb = "bbbbb" * 100
        valuec = "ccccc" * 100
        valued = "ddddd" * 100

        cursor = self.session.open_cursor(self.uri)
        self.large_updates(self.uri, valuea, ds, self.nrows)

        self.check(valuea, self.uri, self.nrows)

        session1 = self.conn.open_session()
        session1.begin_transaction()
        cursor1 = session1.open_cursor(self.uri)

        for i in range(self.nrows, self.nrows*2):
            cursor1.set_key(ds.key(i))
            cursor1.set_value(valuea)
            self.assertEqual(cursor1.insert(), 0)

        # Create a checkpoint thread
        done = threading.Event()
        ckpt = checkpoint_thread(self.conn, done)
        try:
            ckpt.start()
            # Sleep for some time so that the checkpoint starts before committing the last transaction.
            time.sleep(2)
            session1.commit_transaction()

        finally:
            done.set()
            ckpt.join()

        # Simulate a crash by copying to a new directory (RESTART).
        copy_wiredtiger_home(self, ".", "RESTART")

        # Open the new directory.
        self.conn = self.setUpConnectionOpen("RESTART")
        self.session = self.setUpSessionOpen(self.conn)

        # Check the table contains the last checkpointed value.
        self.check(valuea, self.uri, self.nrows)

        stat_cursor = self.session.open_cursor('statistics:', None, None)
        inconsistent_ckpt = stat_cursor[stat.conn.txn_rts_inconsistent_ckpt][2]
        keys_removed = stat_cursor[stat.conn.txn_rts_keys_removed][2]
        keys_restored = stat_cursor[stat.conn.txn_rts_keys_restored][2]
        pages_visited = stat_cursor[stat.conn.txn_rts_pages_visited][2]
        upd_aborted = stat_cursor[stat.conn.txn_rts_upd_aborted][2]
        stat_cursor.close()

        self.assertGreater(inconsistent_ckpt, 0)
        self.assertEqual(upd_aborted, 0)
        self.assertGreaterEqual(keys_removed, 0)
        self.assertEqual(keys_restored, 0)
        self.assertGreaterEqual(pages_visited, 0)
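Several examples read rollback-to-stable and checkpoint statistics with the same open/index/close dance. It can be wrapped in a small helper; the stat.conn.* keys come from the wiredtiger stat module as used above, and index [2] selects the numeric value from the three-field statistics record (description, print value, value):

from wiredtiger import stat

def get_conn_stat(session, stat_key):
    # Open a statistics cursor on the connection, read one statistic's
    # numeric value, and close the cursor again.
    stat_cursor = session.open_cursor('statistics:', None, None)
    value = stat_cursor[stat_key][2]
    stat_cursor.close()
    return value

# e.g. get_conn_stat(self.session, stat.conn.txn_rts_inconsistent_ckpt)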
Example #12
    def check_crash_restart(self, olddir, newdir):
        ''' Simulate a crash from olddir and restart in newdir. '''
        # with the connection still open, copy files to new directory
        copy_wiredtiger_home(olddir, newdir)

        # Open the new directory
        conn = self.setUpConnectionOpen(newdir)
        session = self.setUpSessionOpen(conn)
        session.verify(self.uri)
        conn.close()
Example #13
    def check_crash_restart(self, olddir, newdir):
        ''' Simulate a crash from olddir and restart in newdir. '''
        # with the connection still open, copy files to new directory
        copy_wiredtiger_home(olddir, newdir)

        # Open the new directory
        conn = self.setUpConnectionOpen(newdir)
        session = self.setUpSessionOpen(conn)
        session.verify(self.uri)
        conn.close()
Example #14
    def test_backup25(self):
        log2 = "WiredTigerLog.0000000002"

        # Create a logged table.
        self.session.create(self.uri, self.config_log)

        # Insert small amounts of data at a time, stopping just after we
        # cross into log file 2.
        while not os.path.exists(log2):
            self.add_data(self.uri, 'key', 'value')

        self.session.checkpoint()
        # Add more data after the checkpoint.
        c = self.session.open_cursor(self.uri)
        c["newkey"] = "newvalue"
        c.close()

        # Open the backup cursor and then add new data to the table.
        bkup_c = self.session.open_cursor('backup:', None, None)

        # Add new data twice and checkpoint to have later checkpoints after the backup
        # cursor is open. Add an uncheckpointed but logged modification too.
        c = self.session.open_cursor(self.uri)
        c["bkupkey1"] = "bkupvalue1"
        c.close()
        self.session.checkpoint()
        c = self.session.open_cursor(self.uri)
        c["bkupkey2"] = "bkupvalue2"
        c.close()
        self.session.checkpoint()
        c = self.session.open_cursor(self.uri)
        c["bkupkey3"] = "bkupvalue3"
        c.close()

        # Make sure any data log records are on disk.
        self.session.log_flush('sync=on')

        # Make a copy of the database to another directory to restart after the "crash"
        # with the backup cursor open.
        os.mkdir(self.dir)
        copy_wiredtiger_home(self, '.', self.dir)
        bkup_c.close()

        # Open the new directory and verify we can see the data after the backup cursor was opened.
        with self.expectedStdoutPattern(
                'Both WiredTiger.turtle and WiredTiger.backup exist.*'):
            new_conn = self.wiredtiger_open(self.dir)
        new_sess = new_conn.open_session()
        c = new_sess.open_cursor(self.uri)
        self.assertEqual(c["bkupkey1"], "bkupvalue1")
        self.assertEqual(c["bkupkey2"], "bkupvalue2")
        self.assertEqual(c["bkupkey3"], "bkupvalue3")
        c.close()

        new_conn.close()
Example #15
    def backup_check(self, check_value, expected_ts_log, expected_ts_nolog,
                     expected_nots_log, expected_nots_nolog):

        newdir = "BACKUP"
        copy_wiredtiger_home(self, '.', newdir, True)

        conn = self.setUpConnectionOpen(newdir)
        session = self.setUpSessionOpen(conn)
        cur_ts_log = session.open_cursor(self.uri + self.table_ts_log, None)
        cur_ts_nolog = session.open_cursor(self.uri + self.table_ts_nolog,
                                           None)
        cur_nots_log = session.open_cursor(self.uri + self.table_nots_log,
                                           None)
        cur_nots_nolog = session.open_cursor(self.uri + self.table_nots_nolog,
                                             None)

        # In FLCS the values are bytes, which are numbers, but the tests below are via
        # string inclusion rather than just equality of values. Not sure why that is, but
        # I'm going to assume there's a reason for it and not change things. Compensate.
        if self.value_format == '8t':
            check_value = str(check_value)

        # Count how many times the check_value is present in the
        # logged timestamp table.
        actual_ts_log = 0
        for k, v in cur_ts_log:
            if check_value in str(v):
                actual_ts_log += 1
        cur_ts_log.close()
        # Count how many times the check_value is present in the
        # not logged timestamp table
        actual_ts_nolog = 0
        for k, v in cur_ts_nolog:
            if check_value in str(v):
                actual_ts_nolog += 1
        cur_ts_nolog.close()
        # Count how many times the check_value is present in the
        # logged non-timestamp table.
        actual_nots_log = 0
        for k, v in cur_nots_log:
            if check_value in str(v):
                actual_nots_log += 1
        cur_nots_log.close()
        # Count how many times the check_value is present in the
        # not logged non-timestamp table.
        actual_nots_nolog = 0
        for k, v in cur_nots_nolog:
            if check_value in str(v):
                actual_nots_nolog += 1
        cur_nots_nolog.close()
        conn.close()
        self.assertEqual(actual_ts_log, expected_ts_log)
        self.assertEqual(actual_ts_nolog, expected_ts_nolog)
        self.assertEqual(actual_nots_log, expected_nots_log)
        self.assertEqual(actual_nots_nolog, expected_nots_nolog)
Example #16
    def durable_check(self, check_value, uri, ds):
        # Simulating recovery.
        newdir = "BACKUP"
        copy_wiredtiger_home(self, '.', newdir, True)
        conn = self.setUpConnectionOpen(newdir)
        session = self.setUpSessionOpen(conn)
        cursor = session.open_cursor(uri, None)

        cursor.next()
        self.assertTrue(
            check_value == cursor.get_value(),
            "for key " + str(1) + ", expected " + str(check_value) + ", got " +
            str(cursor.get_value()))
        cursor.close()
        session.close()
        conn.close()
Example #17
    def backup_check(self, check_value, valcnt, valcnt2, valcnt3):
        newdir = "BACKUP"
        copy_wiredtiger_home(self, '.', newdir, True)

        conn = self.setUpConnectionOpen(newdir)
        session = self.setUpSessionOpen(conn)
        c = session.open_cursor(self.uri + self.tablename, None)
        c2 = session.open_cursor(self.uri + self.tablename2, None)
        c3 = session.open_cursor(self.uri + self.tablename3, None)

        # In FLCS the values are bytes, which are numbers, but the tests below are via
        # string inclusion rather than just equality of values. Not sure why that is, but
        # I'm going to assume there's a reason for it and not change things. Compensate.
        if self.value_format == '8t':
            check_value = str(check_value)

        # Count how many times the second value is present
        count = 0
        for k, v in c:
            if check_value in str(v):
                # print "check_value found in key " + str(k)
                count += 1
        c.close()
        # Count how many times the second value is present in the
        # non-timestamp table.
        count2 = 0
        for k, v in c2:
            if check_value in str(v):
                # print "check_value found in key " + str(k)
                count2 += 1
        c2.close()
        # Count how many times the second value is present in the
        # logged timestamp table.
        count3 = 0
        for k, v in c3:
            if check_value in str(v):
                count3 += 1
        c3.close()
        conn.close()
        # print "CHECK BACKUP: Count " + str(count) + " Count2 " + str(count2) + " Count3 " + str(count3)
        # print "CHECK BACKUP: Expect value2 count " + str(valcnt)
        # print "CHECK BACKUP: 2nd table Expect value2 count " + str(valcnt2)
        # print "CHECK BACKUP: 3rd table Expect value2 count " + str(valcnt3)
        self.assertEqual(count, valcnt)
        self.assertEqual(count2, valcnt2)
        self.assertEqual(count3, valcnt3)
Example #18
    def test_bug018(self):
        '''Test closing multiple tables'''
        basename = 'bug018.'
        baseuri = 'file:' + basename
        c1 = self.create_table(baseuri + '01.wt')
        c2 = self.create_table(baseuri + '02.wt')

        self.session.begin_transaction()
        c1['key'] = 'value'
        c2['key'] = 'value'
        self.session.commit_transaction()

        # Simulate a write failure by closing the file descriptor for the second
        # table out from underneath WiredTiger.  We do this right before
        # closing the connection so that the write error happens during close
        # when writing out the final data.  Allow table 1 to succeed and force
        # an error writing out table 2.
        #
        # This is Linux-specific code to figure out the file descriptor.
        for f in os.listdir('/proc/self/fd'):
            try:
                if os.readlink('/proc/self/fd/' + f).endswith(basename +
                                                              '02.wt'):
                    os.close(int(f))
            except OSError:
                pass

        # Expect an error and messages, so turn off stderr checking.
        with self.expectedStderrPattern(''):
            try:
                self.close_conn()
            except wiredtiger.WiredTigerError:
                self.conn = None

        # Make a backup for forensics in case something goes wrong.
        backup_dir = 'BACKUP'
        copy_wiredtiger_home('.', backup_dir, True)

        # After reopening and running recovery both tables should be in
        # sync even though table 1 was successfully written and table 2
        # had an error on close.
        self.open_conn()
        c1 = self.session.open_cursor(baseuri + '01.wt')
        c2 = self.session.open_cursor(baseuri + '02.wt')
        self.assertEqual(list(c1), list(c2))
Example #19
    def copy_and_restore(self, backup_cursor, last_doc_in_backup,
                         last_doc_in_data):
        log_files_to_copy = 0
        os.mkdir(self.backup_dir)
        if self.all_log_files:
            helper.copy_wiredtiger_home(self, '.', self.backup_dir)
            log_files_copied = [
                x for x in os.listdir(self.backup_dir)
                if x.startswith('WiredTigerLog.')
            ]
            self.assertEqual(len(log_files_copied), 2)
        else:
            while True:
                ret = backup_cursor.next()
                if ret != 0:
                    break
                shutil.copy(backup_cursor.get_key(), self.backup_dir)
                if backup_cursor.get_key().startswith('WiredTigerLog.'):
                    log_files_to_copy += 1

            self.assertEqual(ret, wiredtiger.WT_NOTFOUND)
            self.assertEqual(log_files_to_copy, 1)

        backup_conn = self.wiredtiger_open(self.backup_dir, self.conn_config)
        if self.all_log_files:
            self.captureout.checkAdditionalPattern(
                self, 'Both WiredTiger.turtle and WiredTiger.backup exist.*')

        session = backup_conn.open_session()
        cursor = session.open_cursor(self.uri)

        if self.all_log_files:
            doc_cnt = 0
            for key, val in cursor:
                doc_cnt += 1
                self.assertLessEqual(key, last_doc_in_data)

            self.assertEqual(doc_cnt, last_doc_in_data)
        else:
            doc_cnt = 0
            for key, val in cursor:
                doc_cnt += 1
                self.assertLessEqual(key, last_doc_in_backup)

            self.assertEqual(doc_cnt, last_doc_in_backup)
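When all_log_files is false, Example #19 copies only the files a backup cursor enumerates: each key the cursor returns names a file that belongs to a consistent backup of the most recent checkpoint, and the scan ends with WT_NOTFOUND. The loop distills to this sketch (URI and return-code handling as in the example above):

import os
import shutil

def backup_via_cursor(session, backup_dir):
    # Keeping the backup cursor open prevents WiredTiger from removing
    # files the backup still needs (log files, for example) until close.
    os.mkdir(backup_dir)
    cursor = session.open_cursor('backup:', None, None)
    while cursor.next() == 0:
        shutil.copy(cursor.get_key(), backup_dir)
    cursor.close()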
Example #20
    def durable_check(self, check_value, uri, ds, nrows):
        # Checkpoint and backup so as to simulate recovery
        self.session.checkpoint()
        newdir = "BACKUP"
        copy_wiredtiger_home('.', newdir, True)

        conn = self.setUpConnectionOpen(newdir)
        session = self.setUpSessionOpen(conn)
        cursor = session.open_cursor(uri, None)
        # Skip the initial rows, which were not updated
        for i in range(0, nrows+1):
            self.assertEquals(cursor.next(), 0)
        #print "Check value : " + str(check_value)
        #print "value : " + str(cursor.get_value())
        self.assertTrue(check_value == cursor.get_value())
        cursor.close()
        session.close()
        conn.close()
Example #21
    def check_manual_backup(self, i, olddir, newdir):
        ''' Simulate a manual backup from olddir and restart in newdir. '''
        self.session.checkpoint()
        cbkup = self.session.open_cursor('backup:', None, None)

        # With the connection still open, copy files to new directory.
        # Half the time use an unaligned copy.
        aligned = (i % (self.freq * 2) != 0) or os.name == "nt"
        copy_wiredtiger_home(olddir, newdir, aligned)

        # Now simulate fsyncUnlock by closing the backup cursor.
        cbkup.close()

        # Open the new directory and verify
        conn = self.setUpConnectionOpen(newdir)
        session = self.setUpSessionOpen(conn)
        session.verify(self.uri)
        conn.close()
Example #22
    def durable_check(self, check_value, uri, ds, nrows):
        # Checkpoint and backup so as to simulate recovery
        self.session.checkpoint()
        newdir = "BACKUP"
        copy_wiredtiger_home('.', newdir, True)

        conn = self.setUpConnectionOpen(newdir)
        session = self.setUpSessionOpen(conn)
        cursor = session.open_cursor(uri, None)
        # Skip the initial rows, which were not updated
        for i in range(0, nrows + 1):
            self.assertEquals(cursor.next(), 0)
        #print "Check value : " + str(check_value)
        #print "value : " + str(cursor.get_value())
        self.assertTrue(check_value == cursor.get_value())
        cursor.close()
        session.close()
        conn.close()
Example #23
    def check_manual_backup(self, i, olddir, newdir):
        ''' Simulate a manual backup from olddir and restart in newdir. '''
        self.session.checkpoint()
        cbkup = self.session.open_cursor('backup:', None, None)

        # With the connection still open, copy files to new directory.
        # Half the time use an unaligned copy.
        aligned = (i % (self.freq * 2) != 0) or os.name == "nt"
        copy_wiredtiger_home(olddir, newdir, aligned)

        # Now simulate fsyncUnlock by closing the backup cursor.
        cbkup.close()

        # Open the new directory and verify
        conn = self.setUpConnectionOpen(newdir)
        session = self.setUpSessionOpen(conn)
        session.verify(self.uri)
        conn.close()
Example #24
    def test_bug014(self):
        # Populate a table with 1000 keys on small pages.
        uri = 'table:test_bug014'
        ds = SimpleDataSet(self,
                           uri,
                           1000,
                           key_format=self.key_format,
                           value_format=self.value_format,
                           config='allocation_size=512,leaf_page_max=512')

        ds.populate()

        # Reopen it so we can fast-delete pages.
        self.reopen_conn()

        # Truncate a chunk of the key/value pairs inside a transaction.
        self.session.begin_transaction(None)
        start = self.session.open_cursor(uri, None)
        start.set_key(ds.key(250))
        end = self.session.open_cursor(uri, None)
        end.set_key(ds.key(500))
        self.session.truncate(None, start, end, None)
        start.close()
        end.close()

        # With the truncation uncommitted, checkpoint the database.
        ckpt_session = self.conn.open_session()
        ckpt_session.checkpoint(None)
        ckpt_session.close()

        # Simulate a crash by copying to a new directory.
        copy_wiredtiger_home(self, ".", "RESTART")

        # Open the new directory.
        conn = self.setUpConnectionOpen("RESTART")
        session = self.setUpSessionOpen(conn)
        cursor = session.open_cursor(uri)

        # Confirm all of the records are there.
        for i in range(1, 1001):
            cursor.set_key(ds.key(i))
            self.assertEqual(cursor.search(), 0)

        conn.close()
Example #25
    def test_bug018(self):
        '''Test closing multiple tables'''
        basename = 'bug018.'
        baseuri = 'file:' + basename
        c1 = self.create_table(baseuri + '01.wt')
        c2 = self.create_table(baseuri + '02.wt')

        self.session.begin_transaction()
        c1['key'] = 'value'
        c2['key'] = 'value'
        self.session.commit_transaction()

        # Simulate a write failure by closing the file descriptor for the second
        # table out from underneath WiredTiger.  We do this right before
        # closing the connection so that the write error happens during close
        # when writing out the final data.  Allow table 1 to succeed and force
        # an error writing out table 2.
        #
        # This is Linux-specific code to figure out the file descriptor.
        for f in os.listdir('/proc/self/fd'):
            try:
                if os.readlink('/proc/self/fd/' + f).endswith(basename + '02.wt'):
                    os.close(int(f))
            except OSError:
                pass

        # Expect an error and messages, so turn off stderr checking.
        with self.expectedStderrPattern(''):
            try:
                self.close_conn()
            except wiredtiger.WiredTigerError:
                self.conn = None

        # Make a backup for forensics in case something goes wrong.
        backup_dir = 'BACKUP'
        copy_wiredtiger_home('.', backup_dir, True)

        # After reopening and running recovery both tables should be in
        # sync even though table 1 was successfully written and table 2
        # had an error on close.
        self.open_conn()
        c1 = self.session.open_cursor(baseuri + '01.wt')
        c2 = self.session.open_cursor(baseuri + '02.wt')
        self.assertEqual(list(c1), list(c2))
Example #26
    def test_export(self):
        uri_a = self.type + "exporta"
        uri_b = self.type + "exportb"
        uri_c = self.type + "exportc"

        # Create a few tables.
        self.session.create(uri_a)
        self.session.create(uri_b)
        self.session.create(uri_c)

        # Insert some records.
        c1 = self.session.open_cursor(uri_a)
        c1["k1"] = "v1"
        c1.close()

        c2 = self.session.open_cursor(uri_b)
        c2["k2"] = "v2"
        c2.close()

        c3 = self.session.open_cursor(uri_c)
        c3["k3"] = "v3"
        c3.close()

        self.session.checkpoint()

        if self.is_tiered_scenario():
            self.session.flush_tier(None)

        # Open a special backup cursor for export operation.
        export_cursor = self.session.open_cursor('backup:export', None, None)

        os.mkdir(self.dir)
        copy_wiredtiger_home(self, '.', self.dir)
        self.assertTrue(os.path.isfile("WiredTiger.export"))

        export_cursor.close()

        # The export file should be removed from the home directory.
        self.assertFalse(os.path.isfile("WiredTiger.export"))

        # The export file should exist in the backup directory.
        self.assertTrue(
            os.path.isfile(os.path.join(self.dir, "WiredTiger.export")))
Example #27
    def check_manual_backup(self, i, olddir, newdir):
        ''' Simulate a manual backup from olddir and restart in newdir. '''
        self.session.checkpoint()
        cbkup = self.session.open_cursor('backup:', None, None)

        # With the connection still open, copy files to new directory.
        # Half the time use an unaligned copy.
        even = i % (self.freq * 2) == 0
        aligned = even or os.name == "nt"
        copy_wiredtiger_home(olddir, newdir, aligned)

        # Half the time try to rename a table and the other half try
        # to remove a table.  They should fail.
        if not even:
            self.assertRaises(wiredtiger.WiredTigerError,
                lambda: self.session.rename(
                self.emptyuri, self.newuri, None))
        else:
            self.assertRaises(wiredtiger.WiredTigerError,
                lambda: self.session.drop(self.emptyuri, None))

        # Now simulate fsyncUnlock by closing the backup cursor.
        cbkup.close()

        # Once the backup cursor is closed we should be able to perform
        # schema operations.  Test that and then reset the files to their
        # expected initial names.
        if not even:
            self.session.rename(self.emptyuri, self.newuri, None)
            self.session.drop(self.newuri, None)
            self.session.create(self.emptyuri, self.create_params)
        else:
            self.session.drop(self.emptyuri, None)
            self.session.create(self.emptyuri, self.create_params)


        # Open the new directory and verify
        conn = self.setUpConnectionOpen(newdir)
        session = self.setUpSessionOpen(conn)
        session.verify(self.uri)
        conn.close()
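Example #27 also exercises a constraint these tests rely on: while a backup cursor is open, schema operations such as rename and drop fail, and they succeed again once the cursor is closed. A distilled check of that behavior (the table URI is illustrative):

import wiredtiger

def schema_ops_blocked_during_backup(self):
    # Intended as a method on a test class, as in the examples above.
    bkup = self.session.open_cursor('backup:', None, None)
    self.assertRaises(wiredtiger.WiredTigerError,
        lambda: self.session.drop('table:example', None))
    bkup.close()
    # With the backup cursor closed, the same drop should succeed.
    self.session.drop('table:example', None)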
Example #28
    def backup_check(self, check_value, valcnt, valcnt2, valcnt3):
        newdir = "BACKUP"
        copy_wiredtiger_home('.', newdir, True)

        conn = self.setUpConnectionOpen(newdir)
        session = self.setUpSessionOpen(conn)
        c = session.open_cursor(self.uri + self.tablename, None)
        c2 = session.open_cursor(self.uri + self.tablename2, None)
        c3 = session.open_cursor(self.uri + self.tablename3, None)
        # Count how many times the second value is present
        count = 0
        for k, v in c:
            if check_value in str(v):
                # print "check_value found in key " + str(k)
                count += 1
        c.close()
        # Count how many times the second value is present in the
        # non-timestamp table.
        count2 = 0
        for k, v in c2:
            if check_value in str(v):
                # print "check_value found in key " + str(k)
                count2 += 1
        c2.close()
        # Count how many times the second value is present in the
        # logged timestamp table.
        count3 = 0
        for k, v in c3:
            if check_value in str(v):
                count3 += 1
        c3.close()
        conn.close()
        # print "CHECK BACKUP: Count " + str(count) + " Count2 " + str(count2) + " Count3 " + str(count3)
        # print "CHECK BACKUP: Expect value2 count " + str(valcnt)
        # print "CHECK BACKUP: 2nd table Expect value2 count " + str(valcnt2)
        # print "CHECK BACKUP: 3rd table Expect value2 count " + str(valcnt3)
        # print "CHECK BACKUP: config " + str(self.ckptcfg)
        self.assertEqual(count, valcnt)
        self.assertEqual(count2, valcnt2)
        self.assertEqual(count3, valcnt3)
Example #29
    def copy_and_restore(self, backup_cursor, last_doc_in_backup, last_doc_in_data):
        log_files_to_copy = 0
        os.mkdir(self.backup_dir)
        if self.all_log_files:
            helper.copy_wiredtiger_home('.', self.backup_dir)
            log_files_copied = [x for x in os.listdir(self.backup_dir)
                                if x.startswith('WiredTigerLog.')]
            self.assertEqual(len(log_files_copied), 2)
        else:
            while True:
                ret = backup_cursor.next()
                if ret != 0:
                    break
                shutil.copy(backup_cursor.get_key(), self.backup_dir)
                if backup_cursor.get_key().startswith('WiredTigerLog.'):
                    log_files_to_copy += 1

            self.assertEqual(ret, wiredtiger.WT_NOTFOUND)
            self.assertEqual(log_files_to_copy, 1)

        backup_conn = self.wiredtiger_open(self.backup_dir, self.conn_config)
        if self.all_log_files:
            self.captureout.checkAdditionalPattern(self, 'Both WiredTiger.turtle and WiredTiger.backup exist.*')

        session = backup_conn.open_session()
        cursor = session.open_cursor(self.uri)

        if self.all_log_files:
            doc_cnt = 0
            for key, val in cursor:
                doc_cnt += 1
                self.assertLessEqual(key, last_doc_in_data)

            self.assertEqual(doc_cnt, last_doc_in_data)
        else:
            doc_cnt = 0
            for key, val in cursor:
                doc_cnt += 1
                self.assertLessEqual(key, last_doc_in_backup)

            self.assertEqual(doc_cnt, last_doc_in_backup)
Example #30
    def check_manual_backup(self, i, olddir, newdir):
        ''' Simulate a manual backup from olddir and restart in newdir. '''
        self.session.checkpoint()
        cbkup = self.session.open_cursor('backup:', None, None)

        # With the connection still open, copy files to new directory.
        # Half the time use an unaligned copy.
        even = i % (self.freq * 2) == 0
        aligned = even or os.name == "nt"
        copy_wiredtiger_home(self, olddir, newdir, aligned)

        # Half the time try to rename a table and the other half try
        # to remove a table.  They should fail.
        if not even:
            self.assertRaises(
                wiredtiger.WiredTigerError,
                lambda: self.session.rename(self.emptyuri, self.newuri, None))
        else:
            self.assertRaises(wiredtiger.WiredTigerError,
                              lambda: self.session.drop(self.emptyuri, None))

        # Now simulate fsyncUnlock by closing the backup cursor.
        cbkup.close()

        # Once the backup cursor is closed we should be able to perform
        # schema operations.  Test that and then reset the files to their
        # expected initial names.
        if not even:
            self.session.rename(self.emptyuri, self.newuri, None)
            self.session.drop(self.newuri, None)
            self.session.create(self.emptyuri, self.create_params)
        else:
            self.session.drop(self.emptyuri, None)
            self.session.create(self.emptyuri, self.create_params)

        # Open the new directory and verify
        conn = self.setUpConnectionOpen(newdir)
        session = self.setUpSessionOpen(conn)
        session.verify(self.uri)
        conn.close()
Example #31
    def backup_check(self, check_value, valcnt, valcnt2, valcnt3):
        newdir = "BACKUP"
        copy_wiredtiger_home('.', newdir, True)

        conn = self.setUpConnectionOpen(newdir)
        session = self.setUpSessionOpen(conn)
        c = session.open_cursor(self.uri + self.tablename, None)
        c2 = session.open_cursor(self.uri + self.tablename2, None)
        c3 = session.open_cursor(self.uri + self.tablename3, None)
        # Count how many times the second value is present
        count = 0
        for k, v in c:
            if check_value in str(v):
                # print "check_value found in key " + str(k)
                count += 1
        c.close()
        # Count how many times the second value is present in the
        # non-timestamp table.
        count2 = 0
        for k, v in c2:
            if check_value in str(v):
                # print "check_value found in key " + str(k)
                count2 += 1
        c2.close()
        # Count how many times the second value is present in the
        # logged timestamp table.
        count3 = 0
        for k, v in c3:
            if check_value in str(v):
                count3 += 1
        c3.close()
        conn.close()
        # print "CHECK BACKUP: Count " + str(count) + " Count2 " + str(count2) + " Count3 " + str(count3)
        # print "CHECK BACKUP: Expect value2 count " + str(valcnt)
        # print "CHECK BACKUP: 2nd table Expect value2 count " + str(valcnt2)
        # print "CHECK BACKUP: 3rd table Expect value2 count " + str(valcnt3)
        # print "CHECK BACKUP: config " + str(self.ckptcfg)
        self.assertEqual(count, valcnt)
        self.assertEqual(count2, valcnt2)
        self.assertEqual(count3, valcnt3)
Example #32
    def test_bug014(self):
        # Populate a table with 1000 keys on small pages.
        uri = 'table:test_bug014'
        ds = SimpleDataSet(self, uri, 1000,
                           config='allocation_size=512,leaf_page_max=512')
        ds.populate()

        # Reopen it so we can fast-delete pages.
        self.reopen_conn()

        # Truncate a chunk of the key/value pairs inside a transaction.
        self.session.begin_transaction(None)
        start = self.session.open_cursor(uri, None)
        start.set_key(ds.key(250))
        end = self.session.open_cursor(uri, None)
        end.set_key(ds.key(500))
        self.session.truncate(None, start, end, None)
        start.close()
        end.close()

        # With the truncation uncommitted, checkpoint the database.
        ckpt_session = self.conn.open_session()
        ckpt_session.checkpoint(None)
        ckpt_session.close()

        # Simulate a crash by copying to a new directory.
        copy_wiredtiger_home(".", "RESTART")

        # Open the new directory.
        conn = self.setUpConnectionOpen("RESTART")
        session = self.setUpSessionOpen(conn)
        cursor = session.open_cursor(uri)

        # Confirm all of the records are there.
        for i in range(1, 1001):
            cursor.set_key(ds.key(i))
            self.assertEqual(cursor.search(), 0)

        conn.close()
Example #33
    def test_export_restart(self):
        uri_a = self.type + "exporta"
        uri_b = self.type + "exportb"
        uri_c = self.type + "exportc"

        # Create two tables.
        self.session.create(uri_a)
        self.session.create(uri_b)

        # Insert some records.
        c4 = self.session.open_cursor(uri_a)
        c4["k4"] = "v4"
        c4.close()

        c5 = self.session.open_cursor(uri_b)
        c5["k5"] = "v5"
        c5.close()

        self.session.checkpoint()

        if self.is_tiered_scenario():
            self.session.flush_tier(None)

        # Open a special backup cursor for export operation.
        main_cursor = self.session.open_cursor('backup:export', None, None)

        # Copy the file so that we have more information if WT-9203 ever happens again.
        shutil.copyfile('WiredTiger.export', 'WiredTiger.export.original')

        # Copy the main database to another directory, including the WiredTiger.export file.
        os.mkdir(self.dir)
        copy_wiredtiger_home(self, '.', self.dir)

        main_cursor.close()
        self.close_conn()

        # Open a connection and session on the directory copy.
        self.conn = self.setUpConnectionOpen(self.dir)
        self.session = self.setUpSessionOpen(self.conn)

        # Create a third table and drop the second table.
        self.session.create(uri_c)
        c6 = self.session.open_cursor(uri_c)
        c6["k6"] = "k6"
        c6.close()

        self.session.checkpoint()

        if self.is_tiered_scenario():
            self.session.flush_tier(None)

        self.session.drop(uri_b)

        # Open an export cursor on the database copy.
        wt_export_path = os.path.join(self.dir, "WiredTiger.export")
        export_cursor = self.session.open_cursor('backup:export', None, None)

        # Copy the file so that we have more information if WT-9203 ever happens again.
        shutil.copyfile(wt_export_path,
                        os.path.join(self.dir, "WiredTiger.export.backup"))

        self.assertTrue(os.path.isfile(wt_export_path))

        # The information for the third table should exist in the WiredTiger.export file
        # but the information for the second table should not exist in the file.
        with open(wt_export_path, "r") as export_file:
            export_file_string = export_file.read()
            self.assertFalse("exportb" in export_file_string)
            self.assertTrue("exportc" in export_file_string)

        export_cursor.close()
Example #34
    def test_rollback_to_stable(self):
        nrows = 10

        # Create two tables.
        uri_1 = "table:rollback_to_stable35_1"
        ds_1 = SimpleDataSet(self,
                             uri_1,
                             0,
                             key_format=self.key_format,
                             value_format=self.value_format)
        ds_1.populate()

        uri_2 = "table:rollback_to_stable35_2"
        ds_2 = SimpleDataSet(self,
                             uri_2,
                             0,
                             key_format=self.key_format,
                             value_format=self.value_format)
        ds_2.populate()

        if self.value_format == '8t':
            valuea = 97
            valueb = 98
            valuec = 99
        else:
            valuea = "aaaaa" * 100
            valueb = "bbbbb" * 100
            valuec = "ccccc" * 100

        self.large_updates(uri_1, uri_2, valuea, ds_1, ds_2, nrows)
        self.check(valuea, uri_1, uri_2, nrows)

        # Start a long running transaction and keep it open.
        session_2 = self.conn.open_session()
        session_2.begin_transaction()

        self.large_updates(uri_1, uri_2, valueb, ds_1, ds_2, nrows)
        self.check(valueb, uri_1, uri_2, nrows)

        # Create a checkpoint thread
        done = threading.Event()
        ckpt = checkpoint_thread(self.conn, done)
        try:
            ckpt.start()
            # Wait for checkpoint to start before committing.
            ckpt_started = 0
            while not ckpt_started:
                stat_cursor = self.session.open_cursor('statistics:', None,
                                                       None)
                ckpt_started = stat_cursor[stat.conn.txn_checkpoint_running][2]
                stat_cursor.close()
                time.sleep(1)

            self.large_updates(uri_1, uri_2, valuec, ds_1, ds_2, nrows)
            self.check(valuec, uri_1, uri_2, nrows)

            # Evict the data.
            self.evict_cursor(uri_1, nrows, valuec)

            # Wait for the checkpoint-stop timing stress point to be active, then copy the database.
            ckpt_stop_timing_stress = 0
            while not ckpt_stop_timing_stress:
                time.sleep(1)
                stat_cursor = self.session.open_cursor('statistics:', None,
                                                       None)
                ckpt_stop_timing_stress = stat_cursor[
                    stat.conn.txn_checkpoint_stop_stress_active][2]
                stat_cursor.close()

            copy_wiredtiger_home(self, '.', "RESTART")

        finally:
            done.set()
            ckpt.join()
        self.session.checkpoint()

        # Clear all running transactions before rollback to stable.
        session_2.commit_transaction()
        session_2.close()

        # Open the new directory
        self.close_conn()
        self.conn_config = 'cache_size=50MB,statistics=(all),log=(enabled)'
        conn = self.setUpConnectionOpen("RESTART")
        self.session = self.setUpSessionOpen(conn)

        self.check(valuec, uri_1, uri_2, nrows)

        stat_cursor = self.session.open_cursor('statistics:', None, None)
        calls = stat_cursor[stat.conn.txn_rts][2]
        upd_aborted = stat_cursor[stat.conn.txn_rts_upd_aborted][2]
        hs_removed = stat_cursor[stat.conn.txn_rts_hs_removed][2]
        keys_removed = stat_cursor[stat.conn.txn_rts_keys_removed][2]
        keys_restored = stat_cursor[stat.conn.txn_rts_keys_restored][2]
        pages_visited = stat_cursor[stat.conn.txn_rts_pages_visited][2]
        stat_cursor.close()

        self.assertEqual(calls, 0)
        self.assertEqual(keys_removed, 0)
        self.assertEqual(keys_restored, 0)
        self.assertEqual(pages_visited, 0)
        self.assertEqual(upd_aborted, 0)
        self.assertGreaterEqual(hs_removed, 0)
Example #35
    def test_rollback_to_stable_prepare(self):
        nrows = 1000

        # Create a table without logging.
        self.pr("create/populate tables")
        uri_1 = "table:rollback_to_stable10_1"
        ds_1 = SimpleDataSet(
            self, uri_1, 0, key_format="i", value_format="S", config='log=(enabled=false)')
        ds_1.populate()

        # Create another table without logging.
        uri_2 = "table:rollback_to_stable10_2"
        ds_2 = SimpleDataSet(
            self, uri_2, 0, key_format="i", value_format="S", config='log=(enabled=false)')
        ds_2.populate()

        # Pin oldest and stable to timestamp 10.
        self.conn.set_timestamp('oldest_timestamp=' + self.timestamp_str(10) +
            ',stable_timestamp=' + self.timestamp_str(10))

        value_a = "aaaaa" * 100
        value_b = "bbbbb" * 100
        value_c = "ccccc" * 100
        value_d = "ddddd" * 100
        value_e = "eeeee" * 100

        # Perform several updates.
        self.pr("large updates")
        self.large_updates(uri_1, value_d, ds_1, nrows, self.prepare, 20)
        self.large_updates(uri_1, value_c, ds_1, nrows, self.prepare, 30)
        self.large_updates(uri_1, value_b, ds_1, nrows, self.prepare, 40)
        self.large_updates(uri_1, value_a, ds_1, nrows, self.prepare, 50)

        self.large_updates(uri_2, value_d, ds_2, nrows, self.prepare, 20)
        self.large_updates(uri_2, value_c, ds_2, nrows, self.prepare, 30)
        self.large_updates(uri_2, value_b, ds_2, nrows, self.prepare, 40)
        self.large_updates(uri_2, value_a, ds_2, nrows, self.prepare, 50)

        # Verify data is visible and correct.
        self.check(value_d, uri_1, nrows, 20)
        self.check(value_c, uri_1, nrows, 30)
        self.check(value_b, uri_1, nrows, 40)
        self.check(value_a, uri_1, nrows, 50)

        self.check(value_d, uri_2, nrows, 20)
        self.check(value_c, uri_2, nrows, 30)
        self.check(value_b, uri_2, nrows, 40)
        self.check(value_a, uri_2, nrows, 50)

        # Pin stable to timestamp 60 if prepare, otherwise 50.
        if self.prepare:
            self.conn.set_timestamp('stable_timestamp=' + self.timestamp_str(60))
        else:
            self.conn.set_timestamp('stable_timestamp=' + self.timestamp_str(50))

        # Here are the update operations we'll perform, encapsulated so we can
        # easily retry them if we get a rollback. Rollbacks may occur when a
        # checkpoint is running.
        def prepare_range_updates(session, cursor, ds, value, nrows, prepare_config):
            self.pr("updates")
            for i in range(1, nrows):
                key = ds.key(i)
                cursor.set_key(key)
                cursor.set_value(value)
                self.assertEquals(cursor.update(), 0)
            self.pr("prepare")
            session.prepare_transaction(prepare_config)

        # Create a checkpoint thread
        done = threading.Event()
        ckpt = checkpoint_thread(self.conn, done)
        try:
            self.pr("start checkpoint")
            ckpt.start()
            # Sleep for some time so that the checkpoint starts.
            time.sleep(2)

            # Perform several updates in parallel with checkpoint.
            session_p1 = self.conn.open_session()
            cursor_p1 = session_p1.open_cursor(uri_1)
            session_p1.begin_transaction('isolation=snapshot')
            self.retry_rollback('update ds1', session_p1,
                           lambda: prepare_range_updates(
                               session_p1, cursor_p1, ds_1, value_e, nrows,
                               'prepare_timestamp=' + self.timestamp_str(69)))
            self.evict_cursor(uri_1, nrows, value_a)

            # Perform several updates in parallel with checkpoint.
            session_p2 = self.conn.open_session()
            cursor_p2 = session_p2.open_cursor(uri_2)
            session_p2.begin_transaction('isolation=snapshot')
            self.retry_rollback('update ds2', session_p2,
                           lambda: prepare_range_updates(
                               session_p2, cursor_p2, ds_2, value_e, nrows,
                               'prepare_timestamp=' + self.timestamp_str(69)))
            self.evict_cursor(uri_2, nrows, value_a)
        finally:
            done.set()
            ckpt.join()

        # Check that the history store file has been used and has non-zero size before the simulated
        # crash.
        stat_cursor = self.session.open_cursor('statistics:', None, None)
        cache_hs_ondisk = stat_cursor[stat.conn.cache_hs_ondisk][2]
        stat_cursor.close()
        self.assertGreater(cache_hs_ondisk, 0)

        # Simulate a crash by copying to a new directory (RESTART).
        copy_wiredtiger_home(self, ".", "RESTART")

        # Commit the prepared transaction.
        session_p1.commit_transaction('commit_timestamp=' + self.timestamp_str(70) + ',durable_timestamp=' + self.timestamp_str(71))
        session_p2.commit_transaction('commit_timestamp=' + self.timestamp_str(70) + ',durable_timestamp=' + self.timestamp_str(71))
        session_p1.close()
        session_p2.close()

        # Open the new directory.
        self.pr("restart")
        self.conn = self.setUpConnectionOpen("RESTART")
        self.session = self.setUpSessionOpen(self.conn)
        self.pr("restart complete")

        # The history store file size should be greater than zero after the restart.
        stat_cursor = self.session.open_cursor('statistics:', None, None)
        cache_hs_ondisk = stat_cursor[stat.conn.cache_hs_ondisk][2]
        stat_cursor.close()
        self.assertGreater(cache_hs_ondisk, 0)

        # Check that the correct data is seen at and after the stable timestamp.
        self.check(value_a, uri_1, nrows, 50)
        self.check(value_a, uri_1, nrows, 80)
        self.check(value_b, uri_1, nrows, 40)
        self.check(value_c, uri_1, nrows, 30)
        self.check(value_d, uri_1, nrows, 20)

        # Check that the correct data is seen at and after the stable timestamp.
        self.check(value_a, uri_2, nrows, 50)
        self.check(value_a, uri_2, nrows, 80)
        self.check(value_b, uri_2, nrows, 40)
        self.check(value_c, uri_2, nrows, 30)
        self.check(value_d, uri_2, nrows, 20)

        stat_cursor = self.session.open_cursor('statistics:', None, None)
        calls = stat_cursor[stat.conn.txn_rts][2]
        hs_removed = stat_cursor[stat.conn.txn_rts_hs_removed][2]
        hs_sweep = stat_cursor[stat.conn.txn_rts_sweep_hs_keys][2]
        keys_removed = stat_cursor[stat.conn.txn_rts_keys_removed][2]
        keys_restored = stat_cursor[stat.conn.txn_rts_keys_restored][2]
        pages_visited = stat_cursor[stat.conn.txn_rts_pages_visited][2]
        upd_aborted = stat_cursor[stat.conn.txn_rts_upd_aborted][2]
        stat_cursor.close()

        self.assertEqual(calls, 0)
        self.assertEqual(keys_removed, 0)
        self.assertEqual(keys_restored, 0)
        self.assertGreaterEqual(upd_aborted, 0)
        self.assertGreater(pages_visited, 0)
        self.assertGreaterEqual(hs_removed, 0)
        self.assertGreater(hs_sweep, 0)

        # The test may output the following message in eviction under cache pressure. Ignore that.
        self.ignoreStdoutPatternIfExists("oldest pinned transaction ID rolled back for eviction")
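Example #35 calls a retry_rollback helper that is not shown on this page. A plausible minimal version, assuming conflicts with the concurrent checkpoint surface as a WiredTigerError whose message contains the WT_ROLLBACK error string, might look like this (the suite's real helper may differ in retry limits and logging):

import time
from wiredtiger import WiredTigerError, WT_ROLLBACK, wiredtiger_strerror

def retry_rollback_sketch(self, name, txn_session, code, retry_limit=100):
    # Run 'code'; if the transaction loses a conflict and gets WT_ROLLBACK,
    # roll it back, restart it, and run 'code' again from scratch.
    for attempt in range(retry_limit):
        try:
            code()
            return
        except WiredTigerError as e:
            if wiredtiger_strerror(WT_ROLLBACK) not in str(e):
                raise
            self.pr('retrying %s (attempt %d)' % (name, attempt + 1))
            txn_session.rollback_transaction()
            time.sleep(0.1)
            txn_session.begin_transaction('isolation=snapshot')
    raise AssertionError('%s: retry limit reached' % name)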
Example #36
    def prepare_updates(self, ds, nrows, nsessions, nkeys):
        # Insert some records with a commit timestamp, corrupt the file, call salvage, and verify before checkpointing.

        # Commit some updates to get eviction and history store fired up
        commit_value = b"bbbbb" * 100
        cursor = self.session.open_cursor(self.uri)
        for i in range(1, nsessions * nkeys):
            self.session.begin_transaction('isolation=snapshot')
            cursor.set_key(ds.key(nrows + i))
            cursor.set_value(commit_value)
            self.assertEquals(cursor.insert(), 0)
            self.session.commit_transaction('commit_timestamp=' + timestamp_str(1))
        cursor.close()

        # Corrupt the table, call salvage to recover data from it, and then verify.
        self.corrupt_salvage_verify()

        # Call checkpoint
        self.session.checkpoint()

        hs_writes_start = self.get_stat(stat.conn.cache_write_hs)

        # Have prepared updates in multiple sessions. This should ensure
        # writing prepared updates to the history store.
        sessions = [0] * nsessions
        cursors = [0] * nsessions
        prepare_value = b"ccccc" * 100
        for j in range (0, nsessions):
            sessions[j] = self.conn.open_session()
            sessions[j].begin_transaction('isolation=snapshot')
            cursors[j] = sessions[j].open_cursor(self.uri)
            # Each session will update many consecutive keys.
            start = (j * nkeys)
            end = start + nkeys
            for i in range(start, end):
                cursors[j].set_key(ds.key(nrows + i))
                cursors[j].set_value(prepare_value)
                self.assertEquals(cursors[j].insert(), 0)
            sessions[j].prepare_transaction('prepare_timestamp=' + timestamp_str(4))

        hs_writes = self.get_stat(stat.conn.cache_write_hs) - hs_writes_start

        # Check how much was written to the history store; the count must not have decreased.
        self.assertGreaterEqual(hs_writes, 0)

        # Test if we can read prepared updates from the history store.
        cursor = self.session.open_cursor(self.uri)
        self.session.begin_transaction('read_timestamp=' + timestamp_str(3))
        for i in range(1, nsessions * nkeys):
            cursor.set_key(ds.key(nrows + i))
            # The search should pass.
            self.assertEqual(cursor.search(), 0)
            # Correctness test: commit_value should be visible.
            self.assertEqual(cursor.get_value(), commit_value)
            # Correctness test: prepare_value should NOT be visible.
            self.assertNotEqual(cursor.get_value(), prepare_value)
        cursor.close()

        # Close all cursors and sessions; this causes the prepared updates to be
        # rolled back.
        for j in range(0, nsessions):
            cursors[j].close()
            sessions[j].close()

        # Corrupt the table, call salvage to recover data from the corrupted table, and then verify.
        self.corrupt_salvage_verify()

        self.session.commit_transaction()
        self.session.checkpoint()

        # Corrupt the table, call salvage to recover data from the corrupted table, and then verify.
        self.corrupt_salvage_verify()

        # Finally, search for the keys inserted with commit timestamp
        cursor = self.session.open_cursor(self.uri)
        self.pr('Read Keys')
        self.session.begin_transaction('read_timestamp=' + timestamp_str(4))
        for i in range(1, nkeys):
            cursor.set_key(ds.key(nrows + i))
            # The search should pass
            self.assertEqual(cursor.search(), 0)
            # Correctness test: commit_value should be visible.
            self.assertEqual(cursor.get_value(), commit_value)
        cursor.close()

        self.session.commit_transaction()
        self.session.checkpoint()

        # Simulate a crash by copying to a new directory (RESTART).
        copy_wiredtiger_home(".", "RESTART")

        # Open the new directory.
        self.conn = self.setUpConnectionOpen("RESTART")
        self.session = self.setUpSessionOpen(self.conn)
        cursor = self.session.open_cursor(self.uri)

        # Search the keys inserted with commit timestamp after crash
        self.session.begin_transaction('read_timestamp=' + timestamp_str(4))
        for i in range(1, nkeys):
            cursor.set_key(ds.key(nrows + i))
            # The search should pass
            self.assertEqual(cursor.search(), 0)
            # Correctness test: commit_value should be visible.
            self.assertEqual(cursor.get_value(), commit_value)
            # Correctness test: prepare_value should NOT be visible.
            self.assertNotEqual(cursor.get_value(), prepare_value)
        cursor.close()
        self.session.commit_transaction()

        # After simulating a crash, corrupt the table, call salvage to recover data
        # from the corrupted table, and then verify.
        self.corrupt_salvage_verify()
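
The prepare_updates example above reads statistics through a get_stat helper. A minimal sketch, assuming it wraps a statistics cursor the same way the inline statistics reads elsewhere in these examples do:

    def get_stat(self, stat_name):
        # Open a statistics cursor, read one value, and clean up.
        stat_cursor = self.session.open_cursor('statistics:')
        value = stat_cursor[stat_name][2]
        stat_cursor.close()
        return value
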
Example #37
    def test_truncate09(self):
        # Create a large table with lots of pages.
        uri = "table:test_truncate09"
        format = 'key_format={},value_format=S'.format(self.key_format)
        self.session.create(uri,
                            'allocation_size=512,leaf_page_max=512,' + format)

        cursor = self.session.open_cursor(uri)
        for i in range(1, 80000):
            cursor[simple_key(cursor, i)] = simple_value(cursor, i)
        cursor.close()

        # Force to disk.
        self.reopen_conn()

        # Set the oldest timestamp and the stable timestamp.
        self.conn.set_timestamp('oldest_timestamp=' + self.timestamp_str(100))
        self.conn.set_timestamp('stable_timestamp=' + self.timestamp_str(100))

        # Start a transaction.
        self.session.begin_transaction()

        # Truncate a chunk.
        c1 = self.session.open_cursor(uri, None)
        c1.set_key(simple_key(c1, 20000))
        c2 = self.session.open_cursor(uri, None)
        c2.set_key(simple_key(c2, 40000))
        self.session.truncate(None, c1, c2, None)

        # Commit the transaction.
        self.session.timestamp_transaction('commit_timestamp=' +
                                           self.timestamp_str(150))
        self.session.commit_transaction()

        # Move the stable timestamp to make the previous truncate operation permanent.
        self.conn.set_timestamp('stable_timestamp=' + self.timestamp_str(200))

        # Checkpoint
        self.session.checkpoint()

        # Start a transaction.
        self.session.begin_transaction()

        # Truncate a chunk.
        c1.set_key(simple_key(c1, 50000))
        c2.set_key(simple_key(c2, 70000))
        self.session.truncate(None, c1, c2, None)

        # Remove a single row.
        c1.set_key(simple_key(c1, 75000))
        c1.remove()

        # Commit the transaction.
        self.session.timestamp_transaction('commit_timestamp=' +
                                           self.timestamp_str(250))
        self.session.commit_transaction()

        # Checkpoint
        self.session.checkpoint()

        # Simulate a crash and restart, testing RTS on the copy; simulate_crash_restart
        # copies the home directory to RESTART and reopens the connection there.
        simulate_crash_restart(self, ".", "RESTART")

        # Search for a key in the first truncated range; that truncate was made stable, so the key should not be found.
        cursor = self.session.open_cursor(uri)
        cursor.set_key(simple_key(cursor, 30000))
        self.assertNotEqual(cursor.search(), 0)

        # Search for a key in the second truncated range; that truncate was not stable, so the key should be found.
        cursor.set_key(simple_key(cursor, 60000))
        self.assertEqual(cursor.search(), 0)

        # Search for the removed key; the remove was not stable, so the key should be found.
        cursor.set_key(simple_key(cursor, 75000))
        self.assertEqual(cursor.search(), 0)
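
test_truncate09 relies on simulate_crash_restart. A sketch of what such a helper typically does in this test suite, under the assumption that it copies the live home directory (so the copy looks like a crashed database) and then reopens from the copy; details such as which files are skipped are assumptions:

import os, shutil

def simulate_crash_restart(self, olddir, newdir):
    # Copy the database files while the connection is still open, so the
    # copy looks like a crashed home directory.
    shutil.rmtree(newdir, ignore_errors=True)
    os.mkdir(newdir)
    for fname in os.listdir(olddir):
        fullname = os.path.join(olddir, fname)
        # Skip the lock file: it may be held open by the live connection.
        if os.path.isfile(fullname) and "WiredTiger.lock" not in fullname:
            shutil.copy(fullname, newdir)
    # Close the original connection and reopen from the copy; recovery
    # (including rollback-to-stable) runs on open.
    self.close_conn()
    self.conn = self.setUpConnectionOpen(newdir)
    self.session = self.setUpSessionOpen(self.conn)
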
Example #38
    def prepare_updates(self, ds):

        # Set oldest and stable timestamp for the database.
        self.conn.set_timestamp('oldest_timestamp=' + timestamp_str(1))
        self.conn.set_timestamp('stable_timestamp=' + timestamp_str(1))

        # Commit some updates to get eviction and history store fired up.
        # Insert a key at timestamp 1.
        commit_key = "C"
        commit_value = b"bbbbb" * 100
        cursor = self.session.open_cursor(self.uri)
        for i in range(1, self.nsessions * self.nkeys):
            self.session.begin_transaction('isolation=snapshot')
            key = commit_key + ds.key(self.nrows + i)
            cursor.set_key(key)
            cursor.set_value(commit_value)
            self.assertEqual(cursor.insert(), 0)
            self.session.commit_transaction('commit_timestamp=' +
                                            timestamp_str(1))
        cursor.close()

        # Call checkpoint.
        self.session.checkpoint()

        cursor = self.session.open_cursor(self.uri)
        for i in range(1, self.nsessions * self.nkeys):
            self.session.begin_transaction('isolation=snapshot')
            key = commit_key + ds.key(self.nrows + i)
            cursor.set_key(key)
            self.assertEqual(cursor.remove(), 0)
            self.session.commit_transaction('commit_timestamp=' +
                                            timestamp_str(10))
        cursor.close()

        # Move the stable timestamp to match the timestamp for the last update.
        self.conn.set_timestamp('stable_timestamp=' + timestamp_str(10))

        hs_writes_start = self.get_stat(stat.conn.cache_write_hs)
        # Have prepared updates in multiple sessions. This should ensure writing prepared updates to
        # the data store. Insert the same key at timestamp 20, but with prepare updates.
        sessions = [0] * self.nsessions
        cursors = [0] * self.nsessions
        prepare_value = b"ccccc" * 100
        for j in range(0, self.nsessions):
            sessions[j] = self.conn.open_session()
            sessions[j].begin_transaction('isolation=snapshot')
            cursors[j] = sessions[j].open_cursor(self.uri)
            # Each session will update many consecutive keys.
            start = (j * self.nkeys)
            end = start + self.nkeys
            for i in range(start, end):
                cursors[j].set_key(commit_key + ds.key(self.nrows + i))
                cursors[j].set_value(prepare_value)
                self.assertEqual(cursors[j].insert(), 0)
            sessions[j].prepare_transaction('prepare_timestamp=' +
                                            timestamp_str(20))

        hs_writes = self.get_stat(stat.conn.cache_write_hs) - hs_writes_start
        # Check how much was written to the history store; the count must not have decreased.
        self.assertGreaterEqual(hs_writes, 0)

        # Search keys with timestamp 5, ignore_prepare=true; expect the cursor search to
        # return 0 (key found).
        txn_config = 'read_timestamp=' + timestamp_str(5) + ',ignore_prepare=true'
        self.search_keys_timestamp_and_ignore(ds, txn_config, commit_value)

        # Search keys with timestamp 20, ignore_prepare=true; expect the cursor search to
        # return wiredtiger.WT_NOTFOUND.
        txn_config = 'read_timestamp=' + timestamp_str(20) + ',ignore_prepare=true'
        self.search_keys_timestamp_and_ignore(ds, txn_config, None)

        # Search keys with timestamp 20, ignore_prepare=false; expect the cursor search to
        # raise a prepare-conflict error.
        prepare_conflict_msg = '/conflict with a prepared update/'
        txn_config = 'read_timestamp=' + timestamp_str(20) + ',ignore_prepare=false'
        self.search_keys_timestamp_and_ignore(ds, txn_config, prepare_conflict_msg, True)

        # If commit is True, commit the prepared transactions before the simulated crash;
        # otherwise the crash will eventually roll them back.
        if self.commit:
            # Commit the prepared_transactions with timestamp 30.
            for j in range(0, self.nsessions):
                sessions[j].commit_transaction('commit_timestamp=' +
                                               timestamp_str(30) +
                                               ',durable_timestamp=' +
                                               timestamp_str(30))
            # Move the stable timestamp to match the durable timestamp for prepared updates.
            self.conn.set_timestamp('stable_timestamp=' + timestamp_str(30))

        self.session.checkpoint()

        # Simulate a crash by copying to a new directory (RESTART).
        copy_wiredtiger_home(self, ".", "RESTART")

        # Open the new directory.
        self.conn = self.setUpConnectionOpen("RESTART")
        self.session = self.setUpSessionOpen(self.conn)

        # After simulating a crash, search for the keys inserted. Rollback-to-stable,
        # run as part of recovery, restores values according to the last known stable
        # timestamp.

        # Search keys with timestamp 5, ignore_prepare=false; expect the cursor search
        # to return the value committed before the prepared updates.
        txn_config = 'read_timestamp=' + timestamp_str(5) + ',ignore_prepare=false'
        self.search_keys_timestamp_and_ignore(ds, txn_config, commit_value)

        # Search keys with timestamp 20, ignore_prepare=true; expect the cursor search to
        # return WT_NOTFOUND.
        txn_config = 'read_timestamp=' + timestamp_str(20) + ',ignore_prepare=true'
        self.search_keys_timestamp_and_ignore(ds, txn_config, None)

        # Search keys with timestamp 20, ignore_prepare=false; expect the cursor search to
        # return WT_NOTFOUND.
        txn_config = 'read_timestamp=' + timestamp_str(20) + ',ignore_prepare=false'
        self.search_keys_timestamp_and_ignore(ds, txn_config, None)

        if self.commit:
            # commit_transaction was called, so searching with timestamp 30 and
            # ignore_prepare=true should find prepare_value.
            txn_config = 'read_timestamp=' + timestamp_str(30) + ',ignore_prepare=true'
            self.search_keys_timestamp_and_ignore(ds, txn_config, prepare_value)
        else:
            # The simulated crash/restart rolled the transactions back, so the same
            # search should return WT_NOTFOUND.
            txn_config = 'read_timestamp=' + timestamp_str(30) + ',ignore_prepare=true'
            self.search_keys_timestamp_and_ignore(ds, txn_config, None)

        if self.commit:
            # Searching with timestamp 30 and ignore_prepare=false should likewise find
            # prepare_value.
            txn_config = 'read_timestamp=' + timestamp_str(30) + ',ignore_prepare=false'
            self.search_keys_timestamp_and_ignore(ds, txn_config, prepare_value)
        else:
            # The rolled-back transactions mean the search should return WT_NOTFOUND.
            txn_config = 'read_timestamp=' + timestamp_str(30) + ',ignore_prepare=false'
            self.search_keys_timestamp_and_ignore(ds, txn_config, None)
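
prepare_updates drives all of its visibility checks through search_keys_timestamp_and_ignore. Its shape can be inferred from the call sites; a sketch, assuming the commit_key prefix used above and treating the optional fourth argument as an "expect a prepare conflict" flag (assertRaisesWithMessage is assumed to be the wttest assertion for matching an error message):

    def search_keys_timestamp_and_ignore(self, ds, txn_config, expected_value, conflict=False):
        cursor = self.session.open_cursor(self.uri)
        self.session.begin_transaction(txn_config)
        for i in range(1, self.nsessions * self.nkeys):
            cursor.set_key("C" + ds.key(self.nrows + i))
            if conflict:
                # The search must fail with the prepare-conflict message.
                self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
                    lambda: cursor.search(), expected_value)
            elif expected_value is None:
                self.assertEqual(cursor.search(), wiredtiger.WT_NOTFOUND)
            else:
                self.assertEqual(cursor.search(), 0)
                self.assertEqual(cursor.get_value(), expected_value)
        cursor.close()
        self.session.rollback_transaction()
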
Example #39
    def test_rollback_to_stable_prepare(self):
        nrows = 1000

        # Create a table without logging.
        self.pr("create/populate tables")
        uri_1 = "table:rollback_to_stable10_1"
        ds_1 = SimpleDataSet(self,
                             uri_1,
                             0,
                             key_format="i",
                             value_format="S",
                             config='log=(enabled=false)')
        ds_1.populate()

        # Create another table without logging.
        uri_2 = "table:rollback_to_stable10_2"
        ds_2 = SimpleDataSet(self,
                             uri_2,
                             0,
                             key_format="i",
                             value_format="S",
                             config='log=(enabled=false)')
        ds_2.populate()

        # Pin oldest and stable to timestamp 10.
        self.conn.set_timestamp('oldest_timestamp=' + timestamp_str(10) +
                                ',stable_timestamp=' + timestamp_str(10))

        value_a = "aaaaa" * 100
        value_b = "bbbbb" * 100
        value_c = "ccccc" * 100
        value_d = "ddddd" * 100
        value_e = "eeeee" * 100
        value_f = "fffff" * 100

        # Perform several updates.
        self.pr("large updates")
        self.large_updates(uri_1, value_d, ds_1, nrows, 20)
        self.large_updates(uri_1, value_c, ds_1, nrows, 30)
        self.large_updates(uri_1, value_b, ds_1, nrows, 40)
        self.large_updates(uri_1, value_a, ds_1, nrows, 50)

        self.large_updates(uri_2, value_d, ds_2, nrows, 20)
        self.large_updates(uri_2, value_c, ds_2, nrows, 30)
        self.large_updates(uri_2, value_b, ds_2, nrows, 40)
        self.large_updates(uri_2, value_a, ds_2, nrows, 50)

        # Verify data is visible and correct.
        self.check(value_d, uri_1, nrows, 20)
        self.check(value_c, uri_1, nrows, 30)
        self.check(value_b, uri_1, nrows, 40)
        self.check(value_a, uri_1, nrows, 50)

        self.check(value_d, uri_2, nrows, 20)
        self.check(value_c, uri_2, nrows, 30)
        self.check(value_b, uri_2, nrows, 40)
        self.check(value_a, uri_2, nrows, 50)

        # Pin stable to timestamp 60 if prepare, otherwise 50.
        if self.prepare:
            self.conn.set_timestamp('stable_timestamp=' + timestamp_str(60))
        else:
            self.conn.set_timestamp('stable_timestamp=' + timestamp_str(50))

        # Here's the update operation we'll perform, encapsulated so we can easily retry
        # it if we get a rollback. Rollbacks may occur when checkpoint is running.
        def simple_update(cursor, key, value):
            cursor.set_key(key)
            cursor.set_value(value)
            self.assertEqual(cursor.update(), 0)

        # Create a checkpoint thread
        done = threading.Event()
        ckpt = checkpoint_thread(self.conn, done)
        try:
            self.pr("start checkpoint")
            ckpt.start()

            # Perform several updates in parallel with checkpoint.
            session_p1 = self.conn.open_session()
            cursor_p1 = session_p1.open_cursor(uri_1)
            session_p1.begin_transaction('isolation=snapshot')
            self.pr("updates 1")
            for i in range(1, nrows):
                retry_rollback(
                    self, 'update ds1',
                    lambda: simple_update(cursor_p1, ds_1.key(i), value_e))
            self.pr("prepare 1")
            session_p1.prepare_transaction('prepare_timestamp=' +
                                           timestamp_str(69))

            # Perform several updates in parallel with checkpoint.
            session_p2 = self.conn.open_session()
            cursor_p2 = session_p2.open_cursor(uri_2)
            session_p2.begin_transaction('isolation=snapshot')
            self.pr("updates 2")
            for i in range(1, nrows):
                retry_rollback(
                    self, 'update ds2',
                    lambda: simple_update(cursor_p2, ds_2.key(i), value_e))
            self.pr("prepare 2")
            session_p2.prepare_transaction('prepare_timestamp=' +
                                           timestamp_str(69))
        finally:
            done.set()
            ckpt.join()

        # Simulate a crash by copying to a new directory (RESTART).
        copy_wiredtiger_home(".", "RESTART")

        # Commit the prepared transactions.
        session_p1.commit_transaction('commit_timestamp=' + timestamp_str(70) +
                                      ',durable_timestamp=' +
                                      timestamp_str(71))
        session_p2.commit_transaction('commit_timestamp=' + timestamp_str(70) +
                                      ',durable_timestamp=' +
                                      timestamp_str(71))
        session_p1.close()
        session_p2.close()

        # Open the new directory.
        self.pr("restart")
        self.conn = self.setUpConnectionOpen("RESTART")
        self.session = self.setUpSessionOpen(self.conn)
        self.pr("restart complete")

        # Check that the correct data is seen at and after the stable timestamp.
        self.check(value_a, uri_1, nrows, 50)
        self.check(value_a, uri_1, nrows, 80)
        self.check(value_b, uri_1, nrows, 40)
        self.check(value_c, uri_1, nrows, 30)
        self.check(value_d, uri_1, nrows, 20)

        # Check that the correct data is seen at and after the stable timestamp.
        self.check(value_a, uri_2, nrows, 50)
        self.check(value_a, uri_2, nrows, 80)
        self.check(value_b, uri_2, nrows, 40)
        self.check(value_c, uri_2, nrows, 30)
        self.check(value_d, uri_2, nrows, 20)

        stat_cursor = self.session.open_cursor('statistics:', None, None)
        calls = stat_cursor[stat.conn.txn_rts][2]
        hs_removed = stat_cursor[stat.conn.txn_rts_hs_removed][2]
        hs_sweep = stat_cursor[stat.conn.txn_rts_sweep_hs_keys][2]
        keys_removed = stat_cursor[stat.conn.txn_rts_keys_removed][2]
        keys_restored = stat_cursor[stat.conn.txn_rts_keys_restored][2]
        pages_visited = stat_cursor[stat.conn.txn_rts_pages_visited][2]
        upd_aborted = stat_cursor[stat.conn.txn_rts_upd_aborted][2]
        stat_cursor.close()

        self.assertEqual(calls, 0)
        self.assertEqual(keys_removed, 0)
        self.assertEqual(keys_restored, 0)
        self.assertGreaterEqual(upd_aborted, 0)
        self.assertGreater(pages_visited, 0)
        self.assertGreaterEqual(hs_removed, 0)
        self.assertGreater(hs_sweep, 0)
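
retry_rollback in the example above is a small helper that retries an operation that loses a race with the concurrent checkpoint. A sketch matching the three-argument form used here, assuming WT_ROLLBACK surfaces as a WiredTigerError whose message contains the rollback error string; the real helper may also restart the enclosing transaction:

from time import sleep
import wiredtiger

def retry_rollback(self, name, code, retry_limit=100):
    # Run code(); on a WT_ROLLBACK conflict, wait briefly and try again.
    for retries in range(retry_limit):
        try:
            code()
            return
        except wiredtiger.WiredTigerError as e:
            if 'WT_ROLLBACK' not in str(e) or retries == retry_limit - 1:
                raise
            self.pr('retrying: ' + name)
            sleep(0.1)
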
Example #40
    def test_rollback_to_stable_prepare(self):
        nrows = 1000

        # Create a table.
        self.pr("create/populate tables")
        uri_1 = "table:rollback_to_stable10_1"
        ds_1 = SimpleDataSet(
            self, uri_1, 0, key_format=self.key_format, value_format=self.value_format,
            config=self.prepare_extraconfig)
        ds_1.populate()

        # Create another table.
        uri_2 = "table:rollback_to_stable10_2"
        ds_2 = SimpleDataSet(
            self, uri_2, 0, key_format=self.key_format, value_format=self.value_format,
            config=self.prepare_extraconfig)
        ds_2.populate()

        if self.value_format == '8t':
            nrows *= 2
            value_a = 97
            value_b = 98
            value_c = 99
            value_d = 100
            value_e = 101
        else:
            value_a = "aaaaa" * 100
            value_b = "bbbbb" * 100
            value_c = "ccccc" * 100
            value_d = "ddddd" * 100
            value_e = "eeeee" * 100

        # Pin oldest and stable to timestamp 10.
        self.conn.set_timestamp('oldest_timestamp=' + self.timestamp_str(10) +
            ',stable_timestamp=' + self.timestamp_str(10))

        # Perform several updates.
        self.pr("large updates")
        self.large_updates(uri_1, value_d, ds_1, nrows, self.prepare, 20)
        self.large_updates(uri_1, value_c, ds_1, nrows, self.prepare, 30)
        self.large_updates(uri_1, value_b, ds_1, nrows, self.prepare, 40)
        self.large_updates(uri_1, value_a, ds_1, nrows, self.prepare, 50)

        self.large_updates(uri_2, value_d, ds_2, nrows, self.prepare, 20)
        self.large_updates(uri_2, value_c, ds_2, nrows, self.prepare, 30)
        self.large_updates(uri_2, value_b, ds_2, nrows, self.prepare, 40)
        self.large_updates(uri_2, value_a, ds_2, nrows, self.prepare, 50)

        # Verify data is visible and correct.
        self.check(value_d, uri_1, nrows, None, 20)
        self.check(value_c, uri_1, nrows, None, 30)
        self.check(value_b, uri_1, nrows, None, 40)
        self.check(value_a, uri_1, nrows, None, 50)

        self.check(value_d, uri_2, nrows, None, 20)
        self.check(value_c, uri_2, nrows, None, 30)
        self.check(value_b, uri_2, nrows, None, 40)
        self.check(value_a, uri_2, nrows, None, 50)

        # Pin stable to timestamp 60 if prepare, otherwise 50.
        if self.prepare:
            self.conn.set_timestamp('stable_timestamp=' + self.timestamp_str(60))
        else:
            self.conn.set_timestamp('stable_timestamp=' + self.timestamp_str(50))

        # Do an explicit checkpoint first, before starting the background checkpointer.
        # Otherwise (depending on timing and load), because there is a lot to write for
        # the first checkpoint, the background checkpointer tends to manage only that one
        # checkpoint; sometimes (especially on FLCS) it ends up containing none of the
        # concurrent updates, and the test then fails because RTS correctly notices it
        # has no work to do and doesn't visit any of the pages or update anything in the
        # history store.
        self.session.checkpoint()

        # Here are the update operations we'll perform, encapsulated so we can easily
        # retry them if we get a rollback. Rollbacks may occur when checkpoint is running.
        def prepare_range_updates(session, cursor, ds, value, nrows, prepare_config):
            self.pr("updates")
            for i in range(1, nrows):
                key = ds.key(i)
                cursor.set_key(key)
                cursor.set_value(value)
                self.assertEqual(cursor.update(), 0)
            self.pr("prepare")
            session.prepare_transaction(prepare_config)

        # Create a checkpoint thread
        done = threading.Event()
        ckpt = checkpoint_thread(self.conn, done)
        try:
            self.pr("start checkpoint")
            ckpt.start()
            # Sleep for some time so that checkpoint starts.
            time.sleep(5)

            # Perform several updates in parallel with checkpoint.
            session_p1 = self.conn.open_session()
            cursor_p1 = session_p1.open_cursor(uri_1)
            session_p1.begin_transaction()
            self.retry_rollback('update ds1', session_p1,
                           lambda: prepare_range_updates(
                               session_p1, cursor_p1, ds_1, value_e, nrows,
                               'prepare_timestamp=' + self.timestamp_str(69)))
            self.evict_cursor(uri_1, nrows, value_a)

            # Perform several updates in parallel with checkpoint.
            session_p2 = self.conn.open_session()
            cursor_p2 = session_p2.open_cursor(uri_2)
            session_p2.begin_transaction()
            self.retry_rollback('update ds2', session_p2,
                           lambda: prepare_range_updates(
                               session_p2, cursor_p2, ds_2, value_e, nrows,
                               'prepare_timestamp=' + self.timestamp_str(69)))
            self.evict_cursor(uri_2, nrows, value_a)
        finally:
            done.set()
            ckpt.join()

        # Check that the history store file has been used and has non-zero size before the simulated
        # crash.
        stat_cursor = self.session.open_cursor('statistics:', None, None)
        cache_hs_ondisk = stat_cursor[stat.conn.cache_hs_ondisk][2]
        stat_cursor.close()
        self.assertGreater(cache_hs_ondisk, 0)

        # Simulate a crash by copying to a new directory (RESTART).
        copy_wiredtiger_home(self, ".", "RESTART")

        # Commit the prepared transactions.
        session_p1.commit_transaction('commit_timestamp=' + self.timestamp_str(70) + ',durable_timestamp=' + self.timestamp_str(71))
        session_p2.commit_transaction('commit_timestamp=' + self.timestamp_str(70) + ',durable_timestamp=' + self.timestamp_str(71))
        session_p1.close()
        session_p2.close()

        # Open the new directory.
        self.pr("restart")
        self.conn = self.setUpConnectionOpen("RESTART")
        self.session = self.setUpSessionOpen(self.conn)
        self.pr("restart complete")

        # The history store file size should be greater than zero after the restart.
        stat_cursor = self.session.open_cursor('statistics:', None, None)
        cache_hs_ondisk = stat_cursor[stat.conn.cache_hs_ondisk][2]
        stat_cursor.close()
        self.assertGreater(cache_hs_ondisk, 0)

        # Check that the correct data is seen at and after the stable timestamp.
        self.check(value_a, uri_1, nrows, None, 50)
        self.check(value_a, uri_1, nrows, None, 80)
        self.check(value_b, uri_1, nrows, None, 40)
        self.check(value_c, uri_1, nrows, None, 30)
        self.check(value_d, uri_1, nrows, None, 20)

        # Check that the correct data is seen at and after the stable timestamp.
        self.check(value_a, uri_2, nrows, None, 50)
        self.check(value_a, uri_2, nrows, None, 80)
        self.check(value_b, uri_2, nrows, None, 40)
        self.check(value_c, uri_2, nrows, None, 30)
        self.check(value_d, uri_2, nrows, None, 20)

        stat_cursor = self.session.open_cursor('statistics:', None, None)
        calls = stat_cursor[stat.conn.txn_rts][2]
        hs_removed = stat_cursor[stat.conn.txn_rts_hs_removed][2]
        hs_sweep = stat_cursor[stat.conn.txn_rts_sweep_hs_keys][2]
        keys_removed = stat_cursor[stat.conn.txn_rts_keys_removed][2]
        keys_restored = stat_cursor[stat.conn.txn_rts_keys_restored][2]
        pages_visited = stat_cursor[stat.conn.txn_rts_pages_visited][2]
        upd_aborted = stat_cursor[stat.conn.txn_rts_upd_aborted][2]
        stat_cursor.close()

        self.assertEqual(calls, 0)
        self.assertEqual(keys_removed, 0)
        self.assertEqual(keys_restored, 0)
        self.assertGreaterEqual(upd_aborted, 0)
        self.assertGreater(pages_visited, 0)
        # Each row that gets processed by RTS can be counted by either hs_removed or hs_sweep,
        # but not both. If the data store page for the row appears in the last checkpoint, it
        # gets counted in hs_removed; if not, it gets counted in hs_sweep, unless the history
        # store page for the row didn't make it out, in which case nothing gets counted at all.
        # We expect at least some history store pages to appear, so assert that some rows get
        # processed, but the balance between the two counts depends on test timing and we
        # should not depend on it.
        self.assertGreater(hs_removed + hs_sweep, 0)

        # Under cache pressure, eviction may emit the following message; ignore it.
        self.ignoreStdoutPatternIfExists("oldest pinned transaction ID rolled back for eviction")
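
evict_cursor, used above while the prepared transactions are pending, forces pages through eviction so history store content gets written out. A sketch, assuming integer keys and using WiredTiger's debug=(release_evict) cursor configuration, which evicts the page a cursor is positioned on when the cursor is reset:

    def evict_cursor(self, uri, nrows, check_value):
        # ignore_prepare lets the read proceed past the pending prepared updates.
        evict_cursor = self.session.open_cursor(uri, None, "debug=(release_evict)")
        self.session.begin_transaction("ignore_prepare=true")
        for i in range(1, nrows + 1):
            evict_cursor.set_key(i)
            self.assertEqual(evict_cursor.search(), 0)
            self.assertEqual(evict_cursor.get_value(), check_value)
            # Resetting the cursor triggers eviction of the current page.
            evict_cursor.reset()
        evict_cursor.close()
        self.session.rollback_transaction()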