Example #1
    def test_tiered08(self):

        # FIXME-WT-7833
        #     This test can trigger races in file handle access during flush_tier.
        #     We will re-enable it when that is fixed.
        self.skipTest('Concurrent flush_tier and insert operations not supported yet.')

        cfg = self.conn_config()
        self.pr('Config is: ' + cfg)
        intl_page = 'internal_page_max=16K'
        base_create = 'key_format=S,value_format=S,' + intl_page
        self.session.create(self.uri, base_create)

        done = threading.Event()
        ckpt = checkpoint_thread(self.conn, done)
        flush = flush_tier_thread(self.conn, done)

        # Start background threads and give them a chance to start.
        ckpt.start()
        flush.start()
        time.sleep(0.5)

        key_count = self.populate()

        done.set()
        flush.join()
        ckpt.join()

        self.verify(key_count)

        self.close_conn()
        self.pr('Reopening tiered table')
        self.reopen_conn()

        self.verify(key_count)
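
The `checkpoint_thread` and `flush_tier_thread` helpers used above come from the test harness (wtthread.py) and are not shown in these examples. A minimal sketch of the pattern such a helper follows, looping until the shared `done` event is set; the class body and the `flush_tier=(enabled)` checkpoint config string are assumptions for illustration, not the harness code:

    import threading

    class flush_tier_thread(threading.Thread):
        # Hypothetical reconstruction of the background flush helper.
        def __init__(self, conn, done):
            self.conn = conn    # WT_CONNECTION shared with the main test thread
            self.done = done    # threading.Event used as a stop flag
            threading.Thread.__init__(self)

        def run(self):
            session = self.conn.open_session()
            # Loop until the main thread sets 'done'; wait() doubles as a sleep.
            while not self.done.wait(timeout=1):
                session.checkpoint('flush_tier=(enabled)')
            session.close()
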
Example #2
    def test_checkpoint_snapshot(self):

        ds = SimpleDataSet(self, self.uri, 0, key_format="S", value_format="S",
            config='log=(enabled=false)')
        ds.populate()
        valuea = "aaaaa" * 100
        valueb = "bbbbb" * 100
        valuec = "ccccc" * 100
        valued = "ddddd" * 100

        cursor = self.session.open_cursor(self.uri)
        self.large_updates(self.uri, valuea, ds, self.nrows)

        self.check(valuea, self.uri, self.nrows)

        session1 = self.conn.open_session()
        session1.begin_transaction()
        cursor1 = session1.open_cursor(self.uri)

        for i in range(self.nrows, self.nrows*2):
            cursor1.set_key(ds.key(i))
            cursor1.set_value(valuea)
            self.assertEqual(cursor1.insert(), 0)

        # Create a checkpoint thread
        done = threading.Event()
        ckpt = checkpoint_thread(self.conn, done)
        try:
            ckpt.start()
            # Sleep for some time so that the checkpoint starts before committing the last transaction.
            time.sleep(2)
            session1.commit_transaction()

        finally:
            done.set()
            ckpt.join()

        # Simulate a crash by copying to a new directory (RESTART).
        copy_wiredtiger_home(self, ".", "RESTART")

        # Open the new directory.
        self.conn = self.setUpConnectionOpen("RESTART")
        self.session = self.setUpSessionOpen(self.conn)

        # Check the table contains the last checkpointed value.
        self.check(valuea, self.uri, self.nrows)

        stat_cursor = self.session.open_cursor('statistics:', None, None)
        inconsistent_ckpt = stat_cursor[stat.conn.txn_rts_inconsistent_ckpt][2]
        keys_removed = stat_cursor[stat.conn.txn_rts_keys_removed][2]
        keys_restored = stat_cursor[stat.conn.txn_rts_keys_restored][2]
        pages_visited = stat_cursor[stat.conn.txn_rts_pages_visited][2]
        upd_aborted = stat_cursor[stat.conn.txn_rts_upd_aborted][2]
        stat_cursor.close()

        self.assertGreater(inconsistent_ckpt, 0)
        self.assertEqual(upd_aborted, 0)
        self.assertGreaterEqual(keys_removed, 0)
        self.assertEqual(keys_restored, 0)
        self.assertGreaterEqual(pages_visited, 0)
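
The statistics-cursor reads above index with `[2]` because each entry is a (description, value-as-string, value-as-int) triple. A small illustrative helper capturing that idiom; the function name is chosen here, only the cursor behavior comes from WiredTiger:

    def get_conn_stat(session, stat_key):
        # A statistics cursor maps a statistics key (e.g.
        # stat.conn.txn_rts_keys_removed) to a (description, value-as-string,
        # value-as-int) triple; index [2] is the raw counter the assertions use.
        stat_cursor = session.open_cursor('statistics:', None, None)
        value = stat_cursor[stat_key][2]
        stat_cursor.close()
        return value
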
Example #3
    def test_checkpoint02(self):
        done = threading.Event()
        self.session.create(
            self.uri,
            "key_format={},value_format={}".format(self.key_format,
                                                   self.value_format))

        if self.value_format == '8t':
            self.nops *= 2
            my_data = 97
        else:
            my_data = 'a' * self.dsize

        ckpt = checkpoint_thread(self.conn, done)
        work_queue = queue.Queue()
        opthreads = []
        try:
            ckpt.start()

            uris = list()
            uris.append(self.uri)
            for i in range(self.nops):
                if i % 191 == 0 and i != 0:
                    work_queue.put_nowait(('b', i + 1, my_data))
                work_queue.put_nowait(('i', i + 1, my_data))

            for i in range(self.nthreads):
                t = op_thread(self.conn, uris, self.key_format, work_queue,
                              done)
                opthreads.append(t)
                t.start()
        except:
            # Deplete the work queue if there's an error.
            while not work_queue.empty():
                work_queue.get()
                work_queue.task_done()
            raise
        finally:
            work_queue.join()
            done.set()
            for t in opthreads:
                t.join()
            ckpt.join()

        # Create a cursor - ensure all items have been put.
        cursor = self.session.open_cursor(self.uri, None, None)
        i = 1
        while True:
            nextret = cursor.next()
            if nextret != 0:
                break
            key = cursor.get_key()
            value = cursor.get_value()
            self.assertEqual(key, i)
            self.assertEqual(value, my_data)
            i += 1

        self.assertEqual(i, self.nops + 1)
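
`op_thread` is another harness helper: it drains (op, key, value) tuples from the shared work queue. A hypothetical consumer loop showing the queue protocol the producer above relies on; only the 'i' (insert) opcode is handled, and the exact semantics of 'b' are internal to the harness:

    import queue

    def drain_ops(session, uri, work_queue, done):
        # Hypothetical consumer of the (op, key, value) tuples queued above.
        cursor = session.open_cursor(uri, None, None)
        while not (done.is_set() and work_queue.empty()):
            try:
                op, key, value = work_queue.get(timeout=0.1)
            except queue.Empty:
                continue
            if op == 'i':
                cursor[key] = value
            # task_done() is what lets the producer's work_queue.join() return.
            work_queue.task_done()
        cursor.close()
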
Example #4
    def test_checkpoint_snapshot(self):
        self.moresetup()

        ds = SimpleDataSet(self, self.uri, 0,
                key_format=self.key_format, value_format=self.value_format,
                config='log=(enabled=false)' + self.extraconfig)
        ds.populate()

        cursor = self.session.open_cursor(self.uri, None, "bulk")
        for i in range(1, self.nrows + 1):
            if self.value_format == '8t':
                cursor[i] = self.valuea
            else:
                cursor[i] = self.valuea + str(i)
        cursor.close()

        self.check(self.valuea, self.uri, self.nrows)

        session1 = self.conn.open_session()
        session1.begin_transaction()
        cursor1 = session1.open_cursor(self.uri)

        for i in range(1, self.nrows + 1):
            cursor1.set_key(ds.key(i))
            if self.value_format == '8t':
                cursor1.set_value(self.valueb)
            else:
                cursor1.set_value(self.valueb + str(i))
            self.assertEqual(cursor1.update(), 0)

        # Create a checkpoint thread
        done = threading.Event()
        ckpt = checkpoint_thread(self.conn, done)
        try:
            ckpt.start()
            # Sleep for some time so that the checkpoint starts before committing the last transaction.
            time.sleep(2)
            session1.commit_transaction()
            self.evict(self.uri, ds, self.nrows)
        finally:
            done.set()
            ckpt.join()

        # Take a backup and restore it.
        self.take_full_backup(".", self.backup_dir)
        self.reopen_conn(self.backup_dir)

        # Check the table contains the last checkpointed value.
        self.check(self.valuea, self.uri, self.nrows)

        stat_cursor = self.session.open_cursor('statistics:', None, None)
        inconsistent_ckpt = stat_cursor[stat.conn.txn_rts_inconsistent_ckpt][2]
        keys_removed = stat_cursor[stat.conn.txn_rts_keys_removed][2]
        stat_cursor.close()

        self.assertGreater(inconsistent_ckpt, 0)
        self.assertEqual(keys_removed, 0)
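
`take_full_backup` is a harness method; the underlying mechanism is WiredTiger's backup cursor, which enumerates the files a consistent backup must copy. A minimal sketch under that assumption:

    import os, shutil

    def take_full_backup(session, home, backup_dir):
        # A 'backup:' cursor pins a consistent set of files; each key it
        # returns is a file name that must be copied into the backup.
        os.makedirs(backup_dir, exist_ok=True)
        backup_cursor = session.open_cursor('backup:', None, None)
        while backup_cursor.next() == 0:
            fname = backup_cursor.get_key()
            shutil.copy(os.path.join(home, fname), os.path.join(backup_dir, fname))
        backup_cursor.close()
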
Example #5
    def test_checkpoint_snapshot_with_timestamp(self):

        ds = SimpleDataSet(self,
                           self.uri,
                           0,
                           key_format="S",
                           value_format="S",
                           config='log=(enabled=false)')
        ds.populate()
        valuea = "aaaaa" * 100

        # Pin oldest and stable timestamps to 10.
        self.conn.set_timestamp('oldest_timestamp=' + self.timestamp_str(10) +
                                ',stable_timestamp=' + self.timestamp_str(10))

        self.large_updates(self.uri, valuea, ds, self.nrows, 20)
        self.check(valuea, self.uri, self.nrows, 20)

        session1 = self.conn.open_session()
        session1.begin_transaction()
        cursor1 = session1.open_cursor(self.uri)

        for i in range(self.nrows + 1, (self.nrows * 2) + 1):
            cursor1.set_key(ds.key(i))
            cursor1.set_value(valuea)
            self.assertEqual(cursor1.insert(), 0)
        session1.timestamp_transaction('commit_timestamp=' +
                                       self.timestamp_str(30))

        # Set stable timestamp to 40
        self.conn.set_timestamp('stable_timestamp=' + self.timestamp_str(40))

        # Create a checkpoint thread
        done = threading.Event()
        ckpt = checkpoint_thread(self.conn, done)
        try:
            ckpt.start()
            # Sleep for some time so that the checkpoint starts before committing the last transaction.
            time.sleep(2)
            session1.commit_transaction()

        finally:
            done.set()
            ckpt.join()

        # Simulate a crash by copying to a new directory (RESTART).
        simulate_crash_restart(self, ".", "RESTART")

        # Check the table contains the last checkpointed value.
        self.check(valuea, self.uri, self.nrows, 30)

        stat_cursor = self.session.open_cursor('statistics:', None, None)
        inconsistent_ckpt = stat_cursor[stat.conn.txn_rts_inconsistent_ckpt][2]
        keys_removed = stat_cursor[stat.conn.txn_rts_keys_removed][2]
        stat_cursor.close()

        self.assertGreater(inconsistent_ckpt, 0)
        self.assertGreaterEqual(keys_removed, 0)
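
The `self.timestamp_str` calls above are a one-line harness helper worth knowing: WiredTiger timestamp configuration strings take hexadecimal values, so the helper is essentially the following (a sketch of the harness function, not a verbatim copy):

    def timestamp_str(t):
        # Timestamps in WiredTiger config strings are hexadecimal,
        # so timestamp 40 is rendered as '28'.
        return '%x' % t
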
Example #6
    def test_checkpoint_snapshot_with_timestamp(self):
        self.moresetup()

        ds = SimpleDataSet(self, self.uri, 0,
                key_format=self.key_format, value_format=self.value_format,
                config='log=(enabled=false)' + self.extraconfig)
        ds.populate()

        # Pin oldest and stable timestamps to 10.
        self.conn.set_timestamp('oldest_timestamp=' + self.timestamp_str(10) +
                                ',stable_timestamp=' + self.timestamp_str(10))

        self.large_updates(self.uri, self.valuea, ds, self.nrows, 20)
        self.check(self.valuea, self.uri, self.nrows, 20, False)

        session1 = self.conn.open_session()
        session1.begin_transaction()
        cursor1 = session1.open_cursor(self.uri)

        for i in range(self.nrows + 1, (self.nrows * 2) + 1):
            cursor1.set_key(ds.key(i))
            cursor1.set_value(self.valuea)
            self.assertEqual(cursor1.insert(), 0)
        session1.timestamp_transaction('commit_timestamp=' +
                                       self.timestamp_str(30))

        # Set stable timestamp to 25
        self.conn.set_timestamp('stable_timestamp=' + self.timestamp_str(25))

        # Create a checkpoint thread
        done = threading.Event()
        ckpt = checkpoint_thread(self.conn, done)
        try:
            ckpt.start()
            # Sleep for some time so that the checkpoint starts before committing the last transaction.
            time.sleep(2)
            session1.commit_transaction()

        finally:
            done.set()
            ckpt.join()

        self.perform_backup_or_crash_restart(".", self.backup_dir)

        # Check the table contains the last checkpointed value.
        self.check(self.valuea, self.uri, self.nrows, 30, True)

        stat_cursor = self.session.open_cursor('statistics:', None, None)
        inconsistent_ckpt = stat_cursor[stat.conn.txn_rts_inconsistent_ckpt][2]
        keys_removed = stat_cursor[stat.conn.txn_rts_keys_removed][2]
        stat_cursor.close()

        self.assertGreater(inconsistent_ckpt, 0)
        self.assertGreaterEqual(keys_removed, 0)
Example #7
    def test_checkpoint_snapshot(self):
        self.moresetup()

        ds = SimpleDataSet(self, self.uri, 0,
                key_format=self.key_format, value_format=self.value_format,
                config='log=(enabled=false)' + self.extraconfig)
        ds.populate()

        self.large_updates(self.uri, self.valuea, ds, self.nrows, 0)
        self.check(self.valuea, self.uri, self.nrows, 0, False)

        session1 = self.conn.open_session()
        session1.begin_transaction()
        cursor1 = session1.open_cursor(self.uri)

        for i in range(self.nrows + 1, (self.nrows * 2) + 1):
            cursor1.set_key(ds.key(i))
            cursor1.set_value(self.valuea)
            self.assertEqual(cursor1.insert(), 0)

        # Create a checkpoint thread
        done = threading.Event()
        ckpt = checkpoint_thread(self.conn, done)
        try:
            ckpt.start()

            # Wait for checkpoint to start before committing.
            ckpt_started = 0
            while not ckpt_started:
                stat_cursor = self.session.open_cursor('statistics:', None,
                                                       None)
                ckpt_started = stat_cursor[stat.conn.txn_checkpoint_running][2]
                stat_cursor.close()
                time.sleep(1)

            session1.commit_transaction()

        finally:
            done.set()
            ckpt.join()

        self.perform_backup_or_crash_restart(".", self.backup_dir)

        # Check the table contains the last checkpointed value.
        self.check(self.valuea, self.uri, self.nrows, 0, True)

        stat_cursor = self.session.open_cursor('statistics:', None, None)
        inconsistent_ckpt = stat_cursor[stat.conn.txn_rts_inconsistent_ckpt][2]
        keys_removed = stat_cursor[stat.conn.txn_rts_keys_removed][2]
        stat_cursor.close()

        self.assertGreater(inconsistent_ckpt, 0)
        self.assertGreaterEqual(keys_removed, 0)
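
The polling loop above (wait until txn_checkpoint_running is nonzero) recurs in several of these tests. A generalized sketch of the same idea; the helper name is chosen here for illustration:

    import time

    def wait_for_stat(session, stat_key, target=1, interval=1.0):
        # Poll a connection statistic until it reaches 'target'; used to make
        # sure a background checkpoint is running before committing.
        while True:
            stat_cursor = session.open_cursor('statistics:', None, None)
            value = stat_cursor[stat_key][2]
            stat_cursor.close()
            if value >= target:
                return value
            time.sleep(interval)
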
Example #8
    def test_checkpoint_dirty(self):
        # Create a lot of tables and insert the same item in each.
        # Start a checkpoint with some of the updates in flight, then create
        # another checkpoint that should contain all data consistently.
        # Read from the checkpoint and make sure the data is consistent.
        for i in range(0, self.num_tables):
            self.printVerbose(3, 'Creating table ' + str(i))
            self.session.create(self.uri + str(i),
                                'key_format=S,value_format=i')
            c = self.session.open_cursor(self.uri + str(i), None)
            c.set_key('a')
            c.set_value(0)
            c.insert()
            c.close()

        self.session.checkpoint()

        iterations = 1
        expected_val = 0
        for its in range(1, 10):
            self.printVerbose(3, 'Doing iteration ' + str(its))

            # Create a checkpoint thread
            done = threading.Event()
            ckpt = wtthread.checkpoint_thread(self.conn, done)
            ckpt.start()
            try:
                expected_val += 1
                for i in range(0, self.num_tables):
                    c = self.session.open_cursor(self.uri + str(i), None)
                    c.set_key('a')
                    c.set_value(expected_val)
                    c.insert()
                    c.close()
            finally:
                done.set()
                ckpt.join()

            # Execute another checkpoint, to make sure we have a consistent
            # view of the data.
            self.session.checkpoint()
            for i in range(0, self.num_tables):
                c = self.session.open_cursor(
                    self.uri + str(i), None, 'checkpoint=WiredTigerCheckpoint')
                c.next()
                self.assertEqual(c.get_value(), expected_val,
                    msg='Mismatch on iteration ' + str(its) +
                        ' for table ' + str(i))
                c.close()
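
The 'checkpoint=WiredTigerCheckpoint' cursor config above opens the most recent checkpoint rather than the live tree. A small reader sketch; the function name is illustrative:

    def read_checkpoint(session, uri, ckpt_name='WiredTigerCheckpoint'):
        # 'WiredTigerCheckpoint' is the reserved name of the most recent
        # internal checkpoint; a named checkpoint created with
        # session.checkpoint('name=ckpt1') would be opened as 'checkpoint=ckpt1'.
        cursor = session.open_cursor(uri, None, 'checkpoint=' + ckpt_name)
        rows = []
        while cursor.next() == 0:
            rows.append((cursor.get_key(), cursor.get_value()))
        cursor.close()
        return rows
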
Example #9
    def test_checkpoint_dirty(self):
        # Create a lot of tables and insert the same item in each.
        # Start a checkpoint with some of the updates in flight, then create
        # another checkpoint that should contain all data consistently.
        # Read from the checkpoint and make sure the data is consistent.
        for i in range(0, self.num_tables):
            self.printVerbose(3, 'Creating table ' + str(i))
            self.session.create(self.uri + str(i),
                                'key_format=S,value_format=i')
            c = self.session.open_cursor(self.uri + str(i), None)
            c.set_key('a')
            c.set_value(0)
            c.insert()
            c.close()

        self.session.checkpoint()

        iterations = 1
        expected_val = 0
        for its in range(1, 10):
            self.printVerbose(3, 'Doing iteration ' + str(its))

            # Create a checkpoint thread
            done = threading.Event()
            ckpt = wtthread.checkpoint_thread(self.conn, done)
            ckpt.start()
            try:
                expected_val += 1
                for i in range(0, self.num_tables):
                    c = self.session.open_cursor(self.uri + str(i), None)
                    c.set_key('a')
                    c.set_value(expected_val)
                    c.insert()
                    c.close()
            finally:
                done.set()
                ckpt.join()

            # Execute another checkpoint, to make sure we have a consistent
            # view of the data.
            self.session.checkpoint()
            for i in range(0, self.num_tables):
                c = self.session.open_cursor(
                    self.uri + str(i), None, 'checkpoint=WiredTigerCheckpoint')
                c.next()
                self.assertEqual(c.get_value(), expected_val,
                    msg='Mismatch on iteration ' + str(its) +
                        ' for table ' + str(i))
                c.close()
Example #10
    def test_checkpoint_snapshot(self):

        ds = SimpleDataSet(self,
                           self.uri,
                           0,
                           key_format=self.key_format,
                           value_format="S",
                           config='log=(enabled=false)')
        ds.populate()
        valuea = "aaaaa" * 100

        self.large_updates(self.uri, valuea, ds, self.nrows, 0)
        self.check(valuea, self.uri, self.nrows, 0)

        session1 = self.conn.open_session()
        session1.begin_transaction()
        cursor1 = session1.open_cursor(self.uri)

        for i in range(self.nrows + 1, (self.nrows * 2) + 1):
            cursor1.set_key(ds.key(i))
            cursor1.set_value(valuea)
            self.assertEqual(cursor1.insert(), 0)

        # Create a checkpoint thread
        done = threading.Event()
        ckpt = checkpoint_thread(self.conn, done)
        try:
            ckpt.start()
            # Sleep for some time so that the checkpoint starts before committing the last transaction.
            time.sleep(2)
            session1.commit_transaction()

        finally:
            done.set()
            ckpt.join()

        self.perform_backup_or_crash_restart(".", self.backup_dir)

        # Check the table contains the last checkpointed value.
        self.check(valuea, self.uri, self.nrows, 0)

        stat_cursor = self.session.open_cursor('statistics:', None, None)
        inconsistent_ckpt = stat_cursor[stat.conn.txn_rts_inconsistent_ckpt][2]
        keys_removed = stat_cursor[stat.conn.txn_rts_keys_removed][2]
        stat_cursor.close()

        self.assertGreater(inconsistent_ckpt, 0)
        self.assertGreaterEqual(keys_removed, 0)
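
`large_updates` and `check` are harness methods shared by these tests. A plausible shape for `large_updates`, updating every row in one transaction with an optional commit timestamp; the real helper may differ in detail:

    def large_updates(self, uri, value, ds, nrows, commit_ts=0):
        # Update rows 1..nrows, committing at 'commit_ts' when one is given.
        session = self.session
        cursor = session.open_cursor(uri)
        session.begin_transaction()
        for i in range(1, nrows + 1):
            cursor[ds.key(i)] = value
        if commit_ts == 0:
            session.commit_transaction()
        else:
            session.commit_transaction(
                'commit_timestamp=' + self.timestamp_str(commit_ts))
        cursor.close()
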
Example #11
    def test_checkpoint02(self):
        done = threading.Event()
        self.session.create(self.uri,
            "key_format=" + self.fmt + ",value_format=S")
        ckpt = checkpoint_thread(self.conn, done)
        ckpt.start()

        uris = list()
        uris.append(self.uri)
        work_queue = queue.Queue()
        my_data = 'a' * self.dsize
        for i in range(self.nops):
            if i % 191 == 0 and i != 0:
                work_queue.put_nowait(('b', i, my_data))
#            if i % 257 == 0 and i != 0:
#                work_queue.put_nowait(('t', i, my_data))
#            # Wait another 200 operations, then delete the above table. This
#            # does not guarantee that the initial operations on the table
#            # will have been finished.
#            if (i - 100) % 257 == 0 and (i - 100) != 0:
#                work_queue.put_nowait(('d', i - 100, my_data))
            work_queue.put_nowait(('i', i, my_data))

        for i in range(self.nthreads):
            t = op_thread(self.conn, uris, self.fmt, work_queue, done)
            t.start()

        work_queue.join()
        done.set()
        # Wait for checkpoint thread to notice status change.
        while ckpt.is_alive():
            time.sleep(0.01)

        # Create a cursor - ensure all items have been put.
        cursor = self.session.open_cursor(self.uri, None, None)
        i = 0
        while True:
            nextret = cursor.next()
            if nextret != 0:
                break
            key = cursor.get_key()
            value = cursor.get_value()
            self.assertEqual(key, i)
            self.assertEqual(value, my_data)
            i += 1

        self.assertEqual(i, self.nops)
Example #12
    def test_checkpoint_snapshot(self):
        self.moresetup()

        ds = SimpleDataSet(self, self.uri, 0,
                key_format=self.key_format, value_format=self.value_format,
                config='log=(enabled=false)' + self.extraconfig)
        ds.populate()

        self.large_updates(self.uri, self.valuea, ds, self.nrows, 0)
        self.check(self.valuea, self.uri, self.nrows, 0, False)

        session1 = self.conn.open_session()
        session1.begin_transaction()
        cursor1 = session1.open_cursor(self.uri)

        for i in range(self.nrows+1, (self.nrows*2)+1):
            cursor1.set_key(ds.key(i))
            cursor1.set_value(self.valuea)
            self.assertEqual(cursor1.insert(), 0)

        # Create a checkpoint thread
        done = threading.Event()
        ckpt = checkpoint_thread(self.conn, done)
        try:
            ckpt.start()
            # Sleep for some time so that the checkpoint starts before committing the last transaction.
            time.sleep(2)
            session1.commit_transaction()

        finally:
            done.set()
            ckpt.join()

        # Simulate a crash by copying to a new directory (RESTART).
        simulate_crash_restart(self, ".", "RESTART")

        # Check the table contains the last checkpointed value.
        self.check(self.valuea, self.uri, self.nrows, 0, True)

        stat_cursor = self.session.open_cursor('statistics:', None, None)
        inconsistent_ckpt = stat_cursor[stat.conn.txn_rts_inconsistent_ckpt][2]
        keys_removed = stat_cursor[stat.conn.txn_rts_keys_removed][2]
        stat_cursor.close()

        self.assertGreater(inconsistent_ckpt, 0)
        self.assertGreaterEqual(keys_removed, 0)
Example #13
    def test_checkpoint02(self):
        done = threading.Event()
        self.session.create(self.uri,
            "key_format=" + self.fmt + ",value_format=S")
        ckpt = checkpoint_thread(self.conn, done)
        ckpt.start()

        uris = list()
        uris.append(self.uri)
        work_queue = queue.Queue()
        my_data = 'a' * self.dsize
        for i in range(self.nops):
            if i % 191 == 0 and i != 0:
                work_queue.put_nowait(('b', i, my_data))
            work_queue.put_nowait(('i', i, my_data))

        opthreads = []
        for i in range(self.nthreads):
            t = op_thread(self.conn, uris, self.fmt, work_queue, done)
            opthreads.append(t)
            t.start()

        work_queue.join()
        done.set()
        for t in opthreads:
            t.join()
        ckpt.join()

        # Create a cursor - ensure all items have been put.
        cursor = self.session.open_cursor(self.uri, None, None)
        i = 0
        while True:
            nextret = cursor.next()
            if nextret != 0:
                break
            key = cursor.get_key()
            value = cursor.get_value()
            self.assertEqual(key, i)
            self.assertEqual(value, my_data)
            i += 1

        self.assertEqual(i, self.nops)
Example #14
    def test_rollback_to_stable(self):
        nrows = 10

        # Create a table without logging.
        uri = "table:rollback_to_stable26"
        ds = SimpleDataSet(self,
                           uri,
                           0,
                           key_format=self.key_format,
                           value_format="S",
                           config='log=(enabled=false)')
        ds.populate()

        # Pin oldest and stable to timestamp 10.
        self.conn.set_timestamp('oldest_timestamp=' + self.timestamp_str(10) +
                                ',stable_timestamp=' + self.timestamp_str(10))

        value_a = "aaaaa" * 100
        value_b = "bbbbb" * 100
        value_c = "ccccc" * 100
        value_d = "ddddd" * 100
        value_e = "eeeee" * 100

        self.large_updates(uri, value_a, ds, nrows, False, 20)
        self.large_updates(uri, value_b, ds, nrows, False, 30)

        if self.hs_remove:
            self.large_removes(uri, ds, nrows, False, 40)

        prepare_session = self.conn.open_session()
        prepare_session.begin_transaction()
        cursor = prepare_session.open_cursor(uri)
        for i in range(1, nrows + 1):
            cursor[i] = value_c
            if self.prepare_remove:
                cursor.set_key(i)
                self.assertEqual(cursor.remove(), 0)
        cursor.close()
        prepare_session.prepare_transaction('prepare_timestamp=' +
                                            self.timestamp_str(50))

        # Verify data is visible and correct.
        self.check(value_a, uri, nrows, 20)
        self.check(value_b, uri, nrows, 30)

        self.evict_cursor(uri, nrows)

        # Pin stable to timestamp 40.
        self.conn.set_timestamp('stable_timestamp=' + self.timestamp_str(40))

        # Create a checkpoint thread
        done = threading.Event()
        ckpt = checkpoint_thread(self.conn, done)
        try:
            ckpt.start()
            # Sleep for some time so that the checkpoint starts before committing the last transaction.
            time.sleep(5)
            prepare_session.rollback_transaction()
        finally:
            done.set()
            ckpt.join()

        self.large_updates(uri, value_d, ds, nrows, False, 60)

        # Check that the data is correct.
        self.check(value_a, uri, nrows, 20)
        self.check(value_b, uri, nrows, 30)
        self.check(value_d, uri, nrows, 60)

        # Simulate a server crash and restart.
        simulate_crash_restart(self, ".", "RESTART")

        stat_cursor = self.session.open_cursor('statistics:', None, None)
        hs_removed = stat_cursor[stat.conn.txn_rts_hs_removed][2]
        hs_restore_updates = stat_cursor[
            stat.conn.txn_rts_hs_restore_updates][2]
        keys_removed = stat_cursor[stat.conn.txn_rts_keys_removed][2]
        stat_cursor.close()

        self.assertEqual(keys_removed, 0)
        self.assertEqual(hs_restore_updates, nrows)
        self.assertEqual(hs_removed, nrows)

        # Check that the data is correct.
        self.check(value_a, uri, nrows, 20)
        self.check(value_b, uri, nrows, 30)

        self.large_updates(uri, value_e, ds, nrows, False, 60)

        self.evict_cursor(uri, nrows)

        # Check that the data is correct.
        self.check(value_a, uri, nrows, 20)
        self.check(value_b, uri, nrows, 30)
        self.check(value_e, uri, nrows, 60)
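
`evict_cursor` is another harness method. The usual WiredTiger test-suite trick is a cursor opened with debug=(release_evict), which evicts a page when the cursor position is released; a sketch under that assumption, with the dataset's integer keys assumed:

    def evict_cursor(self, uri, nrows):
        # A debug=(release_evict) cursor evicts each page as the cursor
        # position is reset, forcing the data through reconciliation.
        session = self.conn.open_session()
        session.begin_transaction('ignore_prepare=true')
        evict_cursor = session.open_cursor(uri, None, 'debug=(release_evict)')
        for i in range(1, nrows + 1):
            evict_cursor.set_key(i)
            self.assertEqual(evict_cursor.search(), 0)
            evict_cursor.reset()
        evict_cursor.close()
        session.rollback_transaction()
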
Example #15
    def test_rollback_to_stable_prepare(self):
        nrows = 1000

        # Create a table without logging.
        self.pr("create/populate tables")
        uri_1 = "table:rollback_to_stable10_1"
        ds_1 = SimpleDataSet(
            self, uri_1, 0, key_format="i", value_format="S", config='log=(enabled=false)')
        ds_1.populate()

        # Create another table without logging.
        uri_2 = "table:rollback_to_stable10_2"
        ds_2 = SimpleDataSet(
            self, uri_2, 0, key_format="i", value_format="S", config='log=(enabled=false)')
        ds_2.populate()

        # Pin oldest and stable to timestamp 10.
        self.conn.set_timestamp('oldest_timestamp=' + self.timestamp_str(10) +
            ',stable_timestamp=' + self.timestamp_str(10))

        value_a = "aaaaa" * 100
        value_b = "bbbbb" * 100
        value_c = "ccccc" * 100
        value_d = "ddddd" * 100
        value_e = "eeeee" * 100

        # Perform several updates.
        self.pr("large updates")
        self.large_updates(uri_1, value_d, ds_1, nrows, self.prepare, 20)
        self.large_updates(uri_1, value_c, ds_1, nrows, self.prepare, 30)
        self.large_updates(uri_1, value_b, ds_1, nrows, self.prepare, 40)
        self.large_updates(uri_1, value_a, ds_1, nrows, self.prepare, 50)

        self.large_updates(uri_2, value_d, ds_2, nrows, self.prepare, 20)
        self.large_updates(uri_2, value_c, ds_2, nrows, self.prepare, 30)
        self.large_updates(uri_2, value_b, ds_2, nrows, self.prepare, 40)
        self.large_updates(uri_2, value_a, ds_2, nrows, self.prepare, 50)

        # Verify data is visible and correct.
        self.check(value_d, uri_1, nrows, 20)
        self.check(value_c, uri_1, nrows, 30)
        self.check(value_b, uri_1, nrows, 40)
        self.check(value_a, uri_1, nrows, 50)

        self.check(value_d, uri_2, nrows, 20)
        self.check(value_c, uri_2, nrows, 30)
        self.check(value_b, uri_2, nrows, 40)
        self.check(value_a, uri_2, nrows, 50)

        # Pin stable to timestamp 60 if prepare, otherwise 50.
        if self.prepare:
            self.conn.set_timestamp('stable_timestamp=' + self.timestamp_str(60))
        else:
            self.conn.set_timestamp('stable_timestamp=' + self.timestamp_str(50))

        # Here are the update operations we'll perform, encapsulated so we can easily retry
        # them if we get a rollback. Rollbacks may occur when checkpoint is running.
        def prepare_range_updates(session, cursor, ds, value, nrows, prepare_config):
            self.pr("updates")
            for i in range(1, nrows):
                key = ds.key(i)
                cursor.set_key(key)
                cursor.set_value(value)
                self.assertEqual(cursor.update(), 0)
            self.pr("prepare")
            session.prepare_transaction(prepare_config)

        # Create a checkpoint thread
        done = threading.Event()
        ckpt = checkpoint_thread(self.conn, done)
        try:
            self.pr("start checkpoint")
            ckpt.start()
            # Sleep for some time so that the checkpoint starts.
            time.sleep(2)

            # Perform several updates in parallel with checkpoint.
            session_p1 = self.conn.open_session()
            cursor_p1 = session_p1.open_cursor(uri_1)
            session_p1.begin_transaction('isolation=snapshot')
            self.retry_rollback('update ds1', session_p1,
                           lambda: prepare_range_updates(
                               session_p1, cursor_p1, ds_1, value_e, nrows,
                               'prepare_timestamp=' + self.timestamp_str(69)))
            self.evict_cursor(uri_1, nrows, value_a)

            # Perform several updates in parallel with checkpoint.
            session_p2 = self.conn.open_session()
            cursor_p2 = session_p2.open_cursor(uri_2)
            session_p2.begin_transaction('isolation=snapshot')
            self.retry_rollback('update ds2', session_p2,
                           lambda: prepare_range_updates(
                               session_p2, cursor_p2, ds_2, value_e, nrows,
                               'prepare_timestamp=' + self.timestamp_str(69)))
            self.evict_cursor(uri_2, nrows, value_a)
        finally:
            done.set()
            ckpt.join()

        # Check that the history store file has been used and has non-zero size before the simulated
        # crash.
        stat_cursor = self.session.open_cursor('statistics:', None, None)
        cache_hs_ondisk = stat_cursor[stat.conn.cache_hs_ondisk][2]
        stat_cursor.close()
        self.assertGreater(cache_hs_ondisk, 0)

        # Simulate a crash by copying to a new directory (RESTART).
        copy_wiredtiger_home(self, ".", "RESTART")

        # Commit the prepared transaction.
        session_p1.commit_transaction('commit_timestamp=' + self.timestamp_str(70) + ',durable_timestamp=' + self.timestamp_str(71))
        session_p2.commit_transaction('commit_timestamp=' + self.timestamp_str(70) + ',durable_timestamp=' + self.timestamp_str(71))
        session_p1.close()
        session_p2.close()

        # Open the new directory.
        self.pr("restart")
        self.conn = self.setUpConnectionOpen("RESTART")
        self.session = self.setUpSessionOpen(self.conn)
        self.pr("restart complete")

        # The history store file size should be greater than zero after the restart.
        stat_cursor = self.session.open_cursor('statistics:', None, None)
        cache_hs_ondisk = stat_cursor[stat.conn.cache_hs_ondisk][2]
        stat_cursor.close()
        self.assertGreater(cache_hs_ondisk, 0)

        # Check that the correct data is seen at and after the stable timestamp.
        self.check(value_a, uri_1, nrows, 50)
        self.check(value_a, uri_1, nrows, 80)
        self.check(value_b, uri_1, nrows, 40)
        self.check(value_c, uri_1, nrows, 30)
        self.check(value_d, uri_1, nrows, 20)

        # Check that the correct data is seen at and after the stable timestamp.
        self.check(value_a, uri_2, nrows, 50)
        self.check(value_a, uri_2, nrows, 80)
        self.check(value_b, uri_2, nrows, 40)
        self.check(value_c, uri_2, nrows, 30)
        self.check(value_d, uri_2, nrows, 20)

        stat_cursor = self.session.open_cursor('statistics:', None, None)
        calls = stat_cursor[stat.conn.txn_rts][2]
        hs_removed = stat_cursor[stat.conn.txn_rts_hs_removed][2]
        hs_sweep = stat_cursor[stat.conn.txn_rts_sweep_hs_keys][2]
        keys_removed = stat_cursor[stat.conn.txn_rts_keys_removed][2]
        keys_restored = stat_cursor[stat.conn.txn_rts_keys_restored][2]
        pages_visited = stat_cursor[stat.conn.txn_rts_pages_visited][2]
        upd_aborted = stat_cursor[stat.conn.txn_rts_upd_aborted][2]
        stat_cursor.close()

        self.assertEqual(calls, 0)
        self.assertEqual(keys_removed, 0)
        self.assertEqual(keys_restored, 0)
        self.assertGreaterEqual(upd_aborted, 0)
        self.assertGreater(pages_visited, 0)
        self.assertGreaterEqual(hs_removed, 0)
        self.assertGreater(hs_sweep, 0)

        # The test may output the following message in eviction under cache pressure. Ignore that.
        self.ignoreStdoutPatternIfExists("oldest pinned transaction ID rolled back for eviction")
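
`retry_rollback` wraps an operation that may be killed by the concurrent checkpoint. A plausible shape, assuming the wiredtiger module's WiredTigerError carries the WT_ROLLBACK text; the real harness helper may differ:

    import wiredtiger

    def retry_rollback(self, name, txn_session, code):
        # Run 'code'; if it loses a conflict with the running checkpoint
        # (WT_ROLLBACK), roll the transaction back and retry a few times.
        for _ in range(10):
            try:
                code()
                return
            except wiredtiger.WiredTigerError as e:
                if 'WT_ROLLBACK' not in str(e):
                    raise
                self.pr(name + ': retry after rollback')
                if txn_session:
                    txn_session.rollback_transaction()
                    txn_session.begin_transaction()
        self.fail(name + ': too many rollback retries')
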
Example #16
    def test_rollback_to_stable_prepare(self):
        nrows = 1000

        # Create a table without logging.
        self.pr("create/populate tables")
        uri_1 = "table:rollback_to_stable10_1"
        ds_1 = SimpleDataSet(self,
                             uri_1,
                             0,
                             key_format="i",
                             value_format="S",
                             config='log=(enabled=false)')
        ds_1.populate()

        # Create another table without logging.
        uri_2 = "table:rollback_to_stable10_2"
        ds_2 = SimpleDataSet(self,
                             uri_2,
                             0,
                             key_format="i",
                             value_format="S",
                             config='log=(enabled=false)')
        ds_2.populate()

        # Pin oldest and stable to timestamp 10.
        self.conn.set_timestamp('oldest_timestamp=' + timestamp_str(10) +
                                ',stable_timestamp=' + timestamp_str(10))

        value_a = "aaaaa" * 100
        value_b = "bbbbb" * 100
        value_c = "ccccc" * 100
        value_d = "ddddd" * 100
        value_e = "eeeee" * 100
        value_f = "fffff" * 100

        # Perform several updates.
        self.pr("large updates")
        self.large_updates(uri_1, value_d, ds_1, nrows, 20)
        self.large_updates(uri_1, value_c, ds_1, nrows, 30)
        self.large_updates(uri_1, value_b, ds_1, nrows, 40)
        self.large_updates(uri_1, value_a, ds_1, nrows, 50)

        self.large_updates(uri_2, value_d, ds_2, nrows, 20)
        self.large_updates(uri_2, value_c, ds_2, nrows, 30)
        self.large_updates(uri_2, value_b, ds_2, nrows, 40)
        self.large_updates(uri_2, value_a, ds_2, nrows, 50)

        # Verify data is visible and correct.
        self.check(value_d, uri_1, nrows, 20)
        self.check(value_c, uri_1, nrows, 30)
        self.session.breakpoint()
        self.check(value_b, uri_1, nrows, 40)
        self.check(value_a, uri_1, nrows, 50)

        self.check(value_d, uri_2, nrows, 20)
        self.check(value_c, uri_2, nrows, 30)
        self.session.breakpoint()
        self.check(value_b, uri_2, nrows, 40)
        self.check(value_a, uri_2, nrows, 50)

        # Pin stable to timestamp 60 if prepare, otherwise 50.
        if self.prepare:
            self.conn.set_timestamp('stable_timestamp=' + timestamp_str(60))
        else:
            self.conn.set_timestamp('stable_timestamp=' + timestamp_str(50))

        # Here's the update operation we'll perform, encapsulated so we can easily retry
        # it if we get a rollback. Rollbacks may occur when checkpoint is running.
        def simple_update(cursor, key, value):
            cursor.set_key(key)
            cursor.set_value(value)
            self.assertEqual(cursor.update(), 0)

        # Create a checkpoint thread
        done = threading.Event()
        ckpt = checkpoint_thread(self.conn, done)
        try:
            self.pr("start checkpoint")
            ckpt.start()

            # Perform several updates in parallel with checkpoint.
            session_p1 = self.conn.open_session()
            cursor_p1 = session_p1.open_cursor(uri_1)
            session_p1.begin_transaction('isolation=snapshot')
            self.pr("updates 1")
            for i in range(1, nrows):
                retry_rollback(
                    self, 'update ds1',
                    lambda: simple_update(cursor_p1, ds_1.key(i), value_e))
            self.pr("prepare 1")
            session_p1.prepare_transaction('prepare_timestamp=' +
                                           timestamp_str(69))

            # Perform several updates in parallel with checkpoint.
            session_p2 = self.conn.open_session()
            cursor_p2 = session_p2.open_cursor(uri_2)
            session_p2.begin_transaction('isolation=snapshot')
            self.pr("updates 2")
            for i in range(1, nrows):
                retry_rollback(
                    self, 'update ds2',
                    lambda: simple_update(cursor_p2, ds_2.key(i), value_e))
            self.pr("prepare 2")
            session_p2.prepare_transaction('prepare_timestamp=' +
                                           timestamp_str(69))
        finally:
            done.set()
            ckpt.join()

        # Simulate a crash by copying to a new directory (RESTART).
        copy_wiredtiger_home(".", "RESTART")

        # Commit the prepared transaction.
        session_p1.commit_transaction('commit_timestamp=' + timestamp_str(70) +
                                      ',durable_timestamp=' +
                                      timestamp_str(71))
        session_p2.commit_transaction('commit_timestamp=' + timestamp_str(70) +
                                      ',durable_timestamp=' +
                                      timestamp_str(71))
        session_p1.close()
        session_p2.close()

        # Open the new directory.
        self.pr("restart")
        self.conn = self.setUpConnectionOpen("RESTART")
        self.session = self.setUpSessionOpen(self.conn)
        self.pr("restart complete")

        # Check that the correct data is seen at and after the stable timestamp.
        self.check(value_a, uri_1, nrows, 50)
        self.check(value_a, uri_1, nrows, 80)
        self.check(value_b, uri_1, nrows, 40)
        self.check(value_c, uri_1, nrows, 30)
        self.check(value_d, uri_1, nrows, 20)

        # Check that the correct data is seen at and after the stable timestamp.
        self.check(value_a, uri_2, nrows, 50)
        self.check(value_a, uri_2, nrows, 80)
        self.check(value_b, uri_2, nrows, 40)
        self.check(value_c, uri_2, nrows, 30)
        self.check(value_d, uri_2, nrows, 20)

        stat_cursor = self.session.open_cursor('statistics:', None, None)
        calls = stat_cursor[stat.conn.txn_rts][2]
        hs_removed = stat_cursor[stat.conn.txn_rts_hs_removed][2]
        hs_sweep = stat_cursor[stat.conn.txn_rts_sweep_hs_keys][2]
        keys_removed = stat_cursor[stat.conn.txn_rts_keys_removed][2]
        keys_restored = stat_cursor[stat.conn.txn_rts_keys_restored][2]
        pages_visited = stat_cursor[stat.conn.txn_rts_pages_visited][2]
        upd_aborted = stat_cursor[stat.conn.txn_rts_upd_aborted][2]
        stat_cursor.close()

        self.assertEqual(calls, 0)
        self.assertEqual(keys_removed, 0)
        self.assertEqual(keys_restored, 0)
        self.assertGreaterEqual(upd_aborted, 0)
        self.assertGreater(pages_visited, 0)
        self.assertGreaterEqual(hs_removed, 0)
        self.assertGreater(hs_sweep, 0)
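
For reference, the prepared-transaction timestamp ordering these tests rely on: WiredTiger requires commit_timestamp >= prepare_timestamp and durable_timestamp >= commit_timestamp, which is why the tests prepare at 69 and commit/make durable at 70/71. A minimal standalone lifecycle, with `conn` and `uri` (a string-keyed table) assumed to exist:

    session = conn.open_session()
    cursor = session.open_cursor(uri)
    session.begin_transaction()
    cursor['k'] = 'v'    # hypothetical key/value for an S/S table
    session.prepare_transaction('prepare_timestamp=' + ('%x' % 69))
    # After prepare, reads by other transactions hit a prepare conflict
    # until the transaction resolves.
    session.commit_transaction('commit_timestamp=' + ('%x' % 70) +
                               ',durable_timestamp=' + ('%x' % 71))
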
Example #17
    def test_rollback_to_stable(self):
        nrows = 10

        # Create two tables.
        uri_1 = "table:rollback_to_stable35_1"
        ds_1 = SimpleDataSet(self,
                             uri_1,
                             0,
                             key_format=self.key_format,
                             value_format=self.value_format)
        ds_1.populate()

        uri_2 = "table:rollback_to_stable35_2"
        ds_2 = SimpleDataSet(self,
                             uri_2,
                             0,
                             key_format=self.key_format,
                             value_format=self.value_format)
        ds_2.populate()

        if self.value_format == '8t':
            valuea = 97
            valueb = 98
            valuec = 99
        else:
            valuea = "aaaaa" * 100
            valueb = "bbbbb" * 100
            valuec = "ccccc" * 100

        self.large_updates(uri_1, uri_2, valuea, ds_1, ds_2, nrows)
        self.check(valuea, uri_1, uri_2, nrows)

        # Start a long running transaction and keep it open.
        session_2 = self.conn.open_session()
        session_2.begin_transaction()

        self.large_updates(uri_1, uri_2, valueb, ds_1, ds_2, nrows)
        self.check(valueb, uri_1, uri_2, nrows)

        # Create a checkpoint thread
        done = threading.Event()
        ckpt = checkpoint_thread(self.conn, done)
        try:
            ckpt.start()
            # Wait for checkpoint to start before committing.
            ckpt_started = 0
            while not ckpt_started:
                stat_cursor = self.session.open_cursor('statistics:', None,
                                                       None)
                ckpt_started = stat_cursor[stat.conn.txn_checkpoint_running][2]
                stat_cursor.close()
                time.sleep(1)

            self.large_updates(uri_1, uri_2, valuec, ds_1, ds_2, nrows)
            self.check(valuec, uri_1, uri_2, nrows)

            # Evict the data.
            self.evict_cursor(uri_1, nrows, valuec)

            # Wait for checkpoint stop timing stress to copy the database.
            ckpt_stop_timing_stress = 0
            while not ckpt_stop_timing_stress:
                time.sleep(1)
                stat_cursor = self.session.open_cursor('statistics:', None,
                                                       None)
                ckpt_stop_timing_stress = stat_cursor[
                    stat.conn.txn_checkpoint_stop_stress_active][2]
                stat_cursor.close()

            copy_wiredtiger_home(self, '.', "RESTART")

        finally:
            done.set()
            ckpt.join()
        self.session.checkpoint()

        # Clear all running transactions before rollback to stable.
        session_2.commit_transaction()
        session_2.close()

        # Open the new directory
        self.close_conn()
        self.conn_config = 'cache_size=50MB,statistics=(all),log=(enabled)'
        conn = self.setUpConnectionOpen("RESTART")
        self.session = self.setUpSessionOpen(conn)

        self.check(valuec, uri_1, uri_2, nrows)

        stat_cursor = self.session.open_cursor('statistics:', None, None)
        calls = stat_cursor[stat.conn.txn_rts][2]
        upd_aborted = stat_cursor[stat.conn.txn_rts_upd_aborted][2]
        hs_removed = stat_cursor[stat.conn.txn_rts_hs_removed][2]
        keys_removed = stat_cursor[stat.conn.txn_rts_keys_removed][2]
        keys_restored = stat_cursor[stat.conn.txn_rts_keys_restored][2]
        pages_visited = stat_cursor[stat.conn.txn_rts_pages_visited][2]
        stat_cursor.close()

        self.assertEqual(calls, 0)
        self.assertEqual(keys_removed, 0)
        self.assertEqual(keys_restored, 0)
        self.assertEqual(pages_visited, 0)
        self.assertEqual(upd_aborted, 0)
        self.assertGreaterEqual(hs_removed, 0)
Example #18
    def test_prepare_rollback(self):
        nrows = 10

        # Create a table.
        uri = "table:prepare21"
        ds = SimpleDataSet(self,
                           uri,
                           0,
                           key_format=self.key_format,
                           value_format=self.value_format)
        ds.populate()

        if self.value_format == '8t':
            value_a = 97
            value_b = 98
            value_c = 99
            value_d = 100
        else:
            value_a = "aaaaa" * 100
            value_b = "bbbbb" * 100
            value_c = "ccccc" * 100
            value_d = "ddddd" * 100

        # Pin oldest and stable to timestamp 10.
        self.conn.set_timestamp('oldest_timestamp=' + self.timestamp_str(10) +
                                ',stable_timestamp=' + self.timestamp_str(10))

        self.large_updates(uri, value_a, ds, nrows, False, 20)
        self.large_updates(uri, value_b, ds, nrows, False, 30)
        self.large_removes(uri, ds, nrows, False, 40)

        prepare_session = self.conn.open_session()
        prepare_session.begin_transaction()
        cursor = prepare_session.open_cursor(uri)
        for i in range(1, nrows + 1):
            cursor[i] = value_c
        cursor.close()
        prepare_session.prepare_transaction('prepare_timestamp=' +
                                            self.timestamp_str(50))

        # Verify data is visible and correct.
        self.check(value_a, uri, nrows, None, 20)
        self.check(value_b, uri, nrows, None, 30)

        self.evict_cursor(uri, nrows)

        # Pin stable to timestamp 40.
        self.conn.set_timestamp('stable_timestamp=' + self.timestamp_str(40))

        # Rollback the prepared update
        prepare_session.rollback_transaction()
        self.large_updates(uri, value_d, ds, nrows, False, 60)

        done = threading.Event()
        ckpt = checkpoint_thread(self.conn, done)
        try:
            ckpt.start()

            # Wait for checkpoint to start before committing the last transaction.
            ckpt_started = 0
            while not ckpt_started:
                stat_cursor = self.session.open_cursor('statistics:', None,
                                                       None)
                ckpt_started = stat_cursor[stat.conn.txn_checkpoint_running][2]
                stat_cursor.close()
                time.sleep(1)

            self.evict_cursor(uri, nrows)
        finally:
            done.set()
            ckpt.join()

        # Verify data is visible and correct.
        self.check(value_a, uri, nrows, None, 20)
        self.check(value_b, uri, nrows, None, 30)
        self.check(value_d, uri, nrows, None, 60)
Example #19
    def test_checkpoint(self):
        uri = 'table:checkpoint14'
        nrows = 10000

        # Create a table.
        ds = SimpleDataSet(
            self, uri, 0, key_format=self.key_format, value_format=self.value_format,
            config=self.extraconfig)
        ds.populate()

        if self.value_format == '8t':
            nrows *= 5
            value_a = 97
            value_b = 98
            value_c = 99
        else:
            value_a = "aaaaa" * 100
            value_b = "bbbbb" * 100
            value_c = "ccccc" * 100

        # Write some baseline data.
        self.large_updates(uri, ds, nrows, value_a)
        # Write this data out now so we aren't waiting for it while trying to
        # race with the later data.
        self.session.checkpoint()

        # Write some more data, and hold the transaction open.
        session2 = self.conn.open_session()
        cursor2 = session2.open_cursor(uri)
        session2.begin_transaction()
        for i in range(1, nrows + 1):
            cursor2[ds.key(i)] = value_b

        # Checkpoint in the background.
        done = threading.Event()
        if self.first_checkpoint is None:
            ckpt = checkpoint_thread(self.conn, done)
        else:
            ckpt = named_checkpoint_thread(self.conn, done, self.first_checkpoint)
        try:
            ckpt.start()

            # Wait for checkpoint to start before committing.
            ckpt_started = 0
            while not ckpt_started:
                stat_cursor = self.session.open_cursor('statistics:', None, None)
                ckpt_started = stat_cursor[stat.conn.txn_checkpoint_running][2]
                stat_cursor.close()
                time.sleep(1)

            session2.commit_transaction()
        finally:
            done.set()
            ckpt.join()

        # Rinse and repeat.
        session2.begin_transaction()
        for i in range(1, nrows + 1):
            cursor2[ds.key(i)] = value_c

        # Checkpoint in the background.
        done = threading.Event()
        if self.second_checkpoint is None:
            ckpt = checkpoint_thread(self.conn, done)
        else:
            ckpt = named_checkpoint_thread(self.conn, done, self.second_checkpoint)
        try:
            ckpt.start()
            
            # Wait for checkpoint to start before committing.
            ckpt_started = 0
            while not ckpt_started:
                stat_cursor = self.session.open_cursor('statistics:', None, None)
                ckpt_started = stat_cursor[stat.conn.txn_checkpoint_running][2]
                stat_cursor.close()
                time.sleep(1)

            session2.commit_transaction()
        finally:
            done.set()
            ckpt.join()

        # Other tests check for whether the visibility of a partially-written transaction
        # is handled correctly. Here we're interested in whether the visibility mechanism
        # is using the right snapshot for the checkpoint we're reading. So insist that we
        # not see the value_b transaction in the first checkpoint, or the value_c transaction
        # in the second checkpoint. If test machine lag causes either transaction to commit
        # before the checkpoint starts, we'll see value_b in the first checkpoint and/or
        # value_c in the second. But also, if we end up using the second checkpoint's snapshot
        # for the first checkpoint, we'll see value_b. So if this happens more than once in a
        # blue moon we should probably strengthen the test so we can more reliably distinguish
        # the cases, probably by doing a third transaction/checkpoint pair.
        #
        # If we end up using the first checkpoint's snapshot for reading the second checkpoint,
        # we'll most likely see no data at all; that would be a serious failure if it happened.

        # Read the checkpoints.
        self.check(ds, self.first_checkpoint, nrows, value_a)
        self.check(ds, self.second_checkpoint, nrows, value_b)

        # If we haven't died yet, pretend to crash, and run RTS to see if the
        # (second) checkpoint was inconsistent. Unfortunately we can't readily
        # check on both.
        simulate_crash_restart(self, ".", "RESTART")
Example #20
    def test_rollback_to_stable_prepare(self):
        nrows = 1000

        # Create a table.
        self.pr("create/populate tables")
        uri_1 = "table:rollback_to_stable10_1"
        ds_1 = SimpleDataSet(
            self, uri_1, 0, key_format=self.key_format, value_format=self.value_format,
            config=self.prepare_extraconfig)
        ds_1.populate()

        # Create another table.
        uri_2 = "table:rollback_to_stable10_2"
        ds_2 = SimpleDataSet(
            self, uri_2, 0, key_format=self.key_format, value_format=self.value_format,
            config=self.prepare_extraconfig)
        ds_2.populate()

        if self.value_format == '8t':
            nrows *= 2
            value_a = 97
            value_b = 98
            value_c = 99
            value_d = 100
            value_e = 101
        else:
            value_a = "aaaaa" * 100
            value_b = "bbbbb" * 100
            value_c = "ccccc" * 100
            value_d = "ddddd" * 100
            value_e = "eeeee" * 100

        # Pin oldest and stable to timestamp 10.
        self.conn.set_timestamp('oldest_timestamp=' + self.timestamp_str(10) +
            ',stable_timestamp=' + self.timestamp_str(10))

        # Perform several updates.
        self.pr("large updates")
        self.large_updates(uri_1, value_d, ds_1, nrows, self.prepare, 20)
        self.large_updates(uri_1, value_c, ds_1, nrows, self.prepare, 30)
        self.large_updates(uri_1, value_b, ds_1, nrows, self.prepare, 40)
        self.large_updates(uri_1, value_a, ds_1, nrows, self.prepare, 50)

        self.large_updates(uri_2, value_d, ds_2, nrows, self.prepare, 20)
        self.large_updates(uri_2, value_c, ds_2, nrows, self.prepare, 30)
        self.large_updates(uri_2, value_b, ds_2, nrows, self.prepare, 40)
        self.large_updates(uri_2, value_a, ds_2, nrows, self.prepare, 50)

        # Verify data is visible and correct.
        self.check(value_d, uri_1, nrows, None, 20)
        self.check(value_c, uri_1, nrows, None, 30)
        self.check(value_b, uri_1, nrows, None, 40)
        self.check(value_a, uri_1, nrows, None, 50)

        self.check(value_d, uri_2, nrows, None, 20)
        self.check(value_c, uri_2, nrows, None, 30)
        self.check(value_b, uri_2, nrows, None, 40)
        self.check(value_a, uri_2, nrows, None, 50)

        # Pin stable to timestamp 60 if prepare, otherwise 50.
        if self.prepare:
            self.conn.set_timestamp('stable_timestamp=' + self.timestamp_str(60))
        else:
            self.conn.set_timestamp('stable_timestamp=' + self.timestamp_str(50))

        # Do an explicit checkpoint first, before starting the background checkpointer.
        # Otherwise (depending on timing and load) because there's a lot to write for the
        # first checkpoint there's a tendency for the background checkpointer to only
        # manage to do the one checkpoint; and sometimes (especially on FLCS) it ends up
        # not containing any of the concurrent updates, and then the test fails because
        # RTS correctly notices it has no work to do and doesn't visit any of the pages
        # or update anything in the history store.
        self.session.checkpoint()

        # Here are the update operations we'll perform, encapsulated so we can easily retry
        # them if we get a rollback. Rollbacks may occur when checkpoint is running.
        def prepare_range_updates(session, cursor, ds, value, nrows, prepare_config):
            self.pr("updates")
            for i in range(1, nrows):
                key = ds.key(i)
                cursor.set_key(key)
                cursor.set_value(value)
                self.assertEqual(cursor.update(), 0)
            self.pr("prepare")
            session.prepare_transaction(prepare_config)

        # Create a checkpoint thread
        done = threading.Event()
        ckpt = checkpoint_thread(self.conn, done)
        try:
            self.pr("start checkpoint")
            ckpt.start()
            # Sleep for some time so that checkpoint starts.
            time.sleep(5)

            # Perform several updates in parallel with checkpoint.
            session_p1 = self.conn.open_session()
            cursor_p1 = session_p1.open_cursor(uri_1)
            session_p1.begin_transaction()
            self.retry_rollback('update ds1', session_p1,
                           lambda: prepare_range_updates(
                               session_p1, cursor_p1, ds_1, value_e, nrows,
                               'prepare_timestamp=' + self.timestamp_str(69)))
            self.evict_cursor(uri_1, nrows, value_a)

            # Perform several updates in parallel with checkpoint.
            session_p2 = self.conn.open_session()
            cursor_p2 = session_p2.open_cursor(uri_2)
            session_p2.begin_transaction()
            self.retry_rollback('update ds2', session_p2,
                           lambda: prepare_range_updates(
                               session_p2, cursor_p2, ds_2, value_e, nrows,
                               'prepare_timestamp=' + self.timestamp_str(69)))
            self.evict_cursor(uri_2, nrows, value_a)
        finally:
            done.set()
            ckpt.join()

        # Check that the history store file has been used and has non-zero size before the simulated
        # crash.
        stat_cursor = self.session.open_cursor('statistics:', None, None)
        cache_hs_ondisk = stat_cursor[stat.conn.cache_hs_ondisk][2]
        stat_cursor.close()
        self.assertGreater(cache_hs_ondisk, 0)

        # Simulate a crash by copying to a new directory (RESTART).
        copy_wiredtiger_home(self, ".", "RESTART")

        # Commit the prepared transactions.
        session_p1.commit_transaction('commit_timestamp=' + self.timestamp_str(70) + ',durable_timestamp=' + self.timestamp_str(71))
        session_p2.commit_transaction('commit_timestamp=' + self.timestamp_str(70) + ',durable_timestamp=' + self.timestamp_str(71))
        session_p1.close()
        session_p2.close()

        # Open the new directory.
        self.pr("restart")
        self.conn = self.setUpConnectionOpen("RESTART")
        self.session = self.setUpSessionOpen(self.conn)
        self.pr("restart complete")

        # The history store file size should be greater than zero after the restart.
        stat_cursor = self.session.open_cursor('statistics:', None, None)
        cache_hs_ondisk = stat_cursor[stat.conn.cache_hs_ondisk][2]
        stat_cursor.close()
        self.assertGreater(cache_hs_ondisk, 0)

        # Check that the correct data is seen at and after the stable timestamp.
        self.check(value_a, uri_1, nrows, None, 50)
        self.check(value_a, uri_1, nrows, None, 80)
        self.check(value_b, uri_1, nrows, None, 40)
        self.check(value_c, uri_1, nrows, None, 30)
        self.check(value_d, uri_1, nrows, None, 20)

        # Check that the correct data is seen at and after the stable timestamp.
        self.check(value_a, uri_2, nrows, None, 50)
        self.check(value_a, uri_2, nrows, None, 80)
        self.check(value_b, uri_2, nrows, None, 40)
        self.check(value_c, uri_2, nrows, None, 30)
        self.check(value_d, uri_2, nrows, None, 20)

        stat_cursor = self.session.open_cursor('statistics:', None, None)
        calls = stat_cursor[stat.conn.txn_rts][2]
        hs_removed = stat_cursor[stat.conn.txn_rts_hs_removed][2]
        hs_sweep = stat_cursor[stat.conn.txn_rts_sweep_hs_keys][2]
        keys_removed = stat_cursor[stat.conn.txn_rts_keys_removed][2]
        keys_restored = stat_cursor[stat.conn.txn_rts_keys_restored][2]
        pages_visited = stat_cursor[stat.conn.txn_rts_pages_visited][2]
        upd_aborted = stat_cursor[stat.conn.txn_rts_upd_aborted][2]
        stat_cursor.close()

        self.assertEqual(calls, 0)
        self.assertEqual(keys_removed, 0)
        self.assertEqual(keys_restored, 0)
        self.assertGreaterEqual(upd_aborted, 0)
        self.assertGreater(pages_visited, 0)
        # Each row that gets processed by RTS can be counted by either hs_removed or hs_sweep,
        # but not both. If the data store page for the row appears in the last checkpoint, it
        # gets counted in hs_removed; if not, it gets counted in hs_sweep, unless the history
        # store page for the row didn't make it out, in which case nothing gets counted at all.
        # We expect at least some history store pages to appear, so assert that some rows get
        # processed, but the balance between the two counts depends on test timing and we
        # should not depend on it.
        self.assertGreater(hs_removed + hs_sweep, 0)

        # The test may output the following message in eviction under cache pressure. Ignore that.
        self.ignoreStdoutPatternIfExists("oldest pinned transaction ID rolled back for eviction")
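The `retry_rollback` wrapper used throughout these examples (both as `self.retry_rollback(...)` and, in later examples, as a module-level `retry_rollback(self, ...)`) is defined elsewhere in the test suite. Below is a minimal sketch of the retry loop, assuming the standard `wiredtiger` Python bindings; the retry limit and sleep interval are illustrative.

import time
from wiredtiger import wiredtiger_strerror, WiredTigerError, WT_ROLLBACK

def retry_rollback(self, name, txn_session, code, retry_limit=100):
    # Run 'code' until it succeeds, retrying only on WT_ROLLBACK errors,
    # which a concurrent checkpoint can provoke.
    retries = 0
    while True:
        try:
            code()
            return
        except WiredTigerError as e:
            # Re-raise anything that is not a rollback error.
            if wiredtiger_strerror(WT_ROLLBACK) not in str(e):
                raise
            retries += 1
            if retries >= retry_limit:
                raise
            self.pr('retrying: ' + name)
            # Abort the failed transaction before retrying, if the caller
            # passed in the session that owns it.
            if txn_session:
                txn_session.rollback_transaction()
            time.sleep(0.1)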
Example #21
    def test_rollback_to_stable(self):
        nrows = 100

        # Create a table.
        self.pr("create/populate table")
        uri = "table:rollback_to_stable14"
        ds = SimpleDataSet(self, uri, 0, key_format=self.key_format, value_format=self.value_format)
        ds.populate()

        # Pin oldest and stable to timestamp 10.
        self.conn.set_timestamp('oldest_timestamp=' + self.timestamp_str(10) +
            ',stable_timestamp=' + self.timestamp_str(10))

        value_a = "aaaaa" * 100

        value_modQ = mod_val(value_a, 'Q', 0)
        value_modR = mod_val(value_modQ, 'R', 1)
        value_modS = mod_val(value_modR, 'S', 2)
        value_modT = mod_val(value_modS, 'T', 3)
        value_modW = mod_val(value_modT, 'W', 4)
        value_modX = mod_val(value_modW, 'X', 5)
        value_modY = mod_val(value_modX, 'Y', 6)
        value_modZ = mod_val(value_modY, 'Z', 7)

        # Perform a combination of modifies and updates.
        self.pr("large updates and modifies")
        self.large_updates(uri, value_a, ds, nrows, self.prepare, 20)
        self.large_modifies(uri, 'Q', ds, 0, 1, nrows, self.prepare, 30)
        self.large_modifies(uri, 'R', ds, 1, 1, nrows, self.prepare, 40)
        self.large_modifies(uri, 'S', ds, 2, 1, nrows, self.prepare, 50)
        self.large_modifies(uri, 'T', ds, 3, 1, nrows, self.prepare, 60)

        # Verify data is visible and correct.
        self.check(value_a, uri, nrows, None, 20)
        self.check(value_modQ, uri, nrows, None, 30)
        self.check(value_modR, uri, nrows, None, 40)
        self.check(value_modS, uri, nrows, None, 50)
        self.check(value_modT, uri, nrows, None, 60)

        # Pin stable to timestamp 60 if prepare, otherwise 50.
        if self.prepare:
            self.conn.set_timestamp('stable_timestamp=' + self.timestamp_str(60))
        else:
            self.conn.set_timestamp('stable_timestamp=' + self.timestamp_str(50))

        # Create a checkpoint thread
        done = threading.Event()
        ckpt = checkpoint_thread(self.conn, done)
        try:
            self.pr("start checkpoint")
            ckpt.start()
            # Sleep for some time so that checkpoint starts.
            time.sleep(2)

            # Perform several modifies in parallel with checkpoint.
            # Rollbacks may occur when checkpoint is running, so retry as needed.
            self.pr("modifies")
            self.retry_rollback('modify ds1, W', None,
                           lambda: self.large_modifies(uri, 'W', ds, 4, 1, nrows, self.prepare, 70))
            self.evict_cursor(uri, nrows, value_modW)
            self.retry_rollback('modify ds1, X', None,
                           lambda: self.large_modifies(uri, 'X', ds, 5, 1, nrows, self.prepare, 80))
            self.evict_cursor(uri, nrows, value_modX)
            self.retry_rollback('modify ds1, Y', None,
                           lambda: self.large_modifies(uri, 'Y', ds, 6, 1, nrows, self.prepare, 90))
            self.evict_cursor(uri, nrows, value_modY)
            self.retry_rollback('modify ds1, Z', None,
                           lambda: self.large_modifies(uri, 'Z', ds, 7, 1, nrows, self.prepare, 100))
            self.evict_cursor(uri, nrows, value_modZ)
        finally:
            done.set()
            ckpt.join()

        # Simulate a server crash and restart.
        self.pr("restart")
        simulate_crash_restart(self, ".", "RESTART")
        self.pr("restart complete")

        stat_cursor = self.session.open_cursor('statistics:', None, None)
        calls = stat_cursor[stat.conn.txn_rts][2]
        hs_removed = stat_cursor[stat.conn.txn_rts_hs_removed][2]
        hs_sweep = stat_cursor[stat.conn.txn_rts_sweep_hs_keys][2]
        hs_restore_updates = stat_cursor[stat.conn.txn_rts_hs_restore_updates][2]
        keys_removed = stat_cursor[stat.conn.txn_rts_keys_removed][2]
        keys_restored = stat_cursor[stat.conn.txn_rts_keys_restored][2]
        pages_visited = stat_cursor[stat.conn.txn_rts_pages_visited][2]
        upd_aborted = stat_cursor[stat.conn.txn_rts_upd_aborted][2]
        stat_cursor.close()

        self.assertEqual(calls, 0)
        self.assertEqual(keys_removed, 0)
        self.assertEqual(hs_restore_updates, nrows)
        self.assertEqual(keys_restored, 0)
        if self.prepare:
            self.assertGreaterEqual(upd_aborted, 0)
        else:
            self.assertEqual(upd_aborted, 0)
        self.assertGreater(pages_visited, 0)
        self.assertGreaterEqual(hs_removed, nrows)
        self.assertGreaterEqual(hs_sweep, 0)

        # Check that the correct data is seen at and after the stable timestamp.
        self.check(value_a, uri, nrows, None, 20)
        self.check(value_modQ, uri, nrows, None, 30)
        self.check(value_modR, uri, nrows, None, 40)
        self.check(value_modS, uri, nrows, None, 50)

        # The test may output the following message in eviction under cache pressure. Ignore that.
        self.ignoreStdoutPatternIfExists("oldest pinned transaction ID rolled back for eviction")
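The `mod_val` helper that builds these expected values is defined elsewhere in the suite; a plausible sketch is a one-line string splice matching what the corresponding modify writes:

def mod_val(value, char, location, nbytes=1):
    # Replace 'nbytes' bytes of 'value' at offset 'location' with 'char',
    # mirroring the WT_MODIFY applied by large_modifies.
    return value[0:location] + char + value[location + nbytes:]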
Example #22
    def test_rollback_to_stable(self):
        nrows = 1000

        # Create a table.
        self.pr("create/populate tables")
        uri_1 = "table:rollback_to_stable10_1"
        ds_1 = SimpleDataSet(
            self, uri_1, 0, key_format=self.key_format, value_format=self.value_format)
        ds_1.populate()

        # Create another table.
        uri_2 = "table:rollback_to_stable10_2"
        ds_2 = SimpleDataSet(
            self, uri_2, 0, key_format=self.key_format, value_format=self.value_format)
        ds_2.populate()

        if self.value_format == '8t':
            value_a = 97
            value_b = 98
            value_c = 99
            value_d = 100
            value_e = 101
            value_f = 102
        else:
            value_a = "aaaaa" * 100
            value_b = "bbbbb" * 100
            value_c = "ccccc" * 100
            value_d = "ddddd" * 100
            value_e = "eeeee" * 100
            value_f = "fffff" * 100

        # Pin oldest and stable to timestamp 10.
        self.conn.set_timestamp('oldest_timestamp=' + self.timestamp_str(10) +
            ',stable_timestamp=' + self.timestamp_str(10))

        # Perform several updates.
        self.pr("large updates")
        self.large_updates(uri_1, value_d, ds_1, nrows, self.prepare, 20)
        self.large_updates(uri_1, value_c, ds_1, nrows, self.prepare, 30)
        self.large_updates(uri_1, value_b, ds_1, nrows, self.prepare, 40)
        self.large_updates(uri_1, value_a, ds_1, nrows, self.prepare, 50)

        self.large_updates(uri_2, value_d, ds_2, nrows, self.prepare, 20)
        self.large_updates(uri_2, value_c, ds_2, nrows, self.prepare, 30)
        self.large_updates(uri_2, value_b, ds_2, nrows, self.prepare, 40)
        self.large_updates(uri_2, value_a, ds_2, nrows, self.prepare, 50)

        # Verify data is visible and correct.
        self.check(value_d, uri_1, nrows, None, 20)
        self.check(value_c, uri_1, nrows, None, 30)
        self.check(value_b, uri_1, nrows, None, 40)
        self.check(value_a, uri_1, nrows, None, 50)

        self.check(value_d, uri_2, nrows, None, 20)
        self.check(value_c, uri_2, nrows, None, 30)
        self.check(value_b, uri_2, nrows, None, 40)
        self.check(value_a, uri_2, nrows, None, 50)

        # Pin stable to timestamp 60 if prepare, otherwise 50.
        if self.prepare:
            self.conn.set_timestamp('stable_timestamp=' + self.timestamp_str(60))
        else:
            self.conn.set_timestamp('stable_timestamp=' + self.timestamp_str(50))

        # Create a checkpoint thread
        done = threading.Event()
        ckpt = checkpoint_thread(self.conn, done)
        try:
            self.pr("start checkpoint")
            ckpt.start()
            # Sleep for some time so that checkpoint starts.
            time.sleep(2)

            # Perform several updates in parallel with checkpoint.
            # Rollbacks may occur when checkpoint is running, so retry as needed.
            self.pr("updates")
            self.retry_rollback('update ds1, e', None,
                           lambda: self.large_updates(uri_1, value_e, ds_1, nrows, self.prepare, 70))
            self.retry_rollback('update ds2, e', None,
                           lambda: self.large_updates(uri_2, value_e, ds_2, nrows, self.prepare, 70))
            self.evict_cursor(uri_1, nrows, value_e)
            self.evict_cursor(uri_2, nrows, value_e)
            self.retry_rollback('update ds1, f', None,
                           lambda: self.large_updates(uri_1, value_f, ds_1, nrows, self.prepare, 80))
            self.retry_rollback('update ds2, f', None,
                           lambda: self.large_updates(uri_2, value_f, ds_2, nrows, self.prepare, 80))
            self.evict_cursor(uri_1, nrows, value_f)
            self.evict_cursor(uri_2, nrows, value_f)
        finally:
            done.set()
            ckpt.join()

        # Simulate a server crash and restart.
        self.pr("restart")
        simulate_crash_restart(self, ".", "RESTART")
        self.pr("restart complete")

        # Check that the correct data is seen at and after the stable timestamp.
        self.check(value_a, uri_1, nrows, None, 50)
        self.check(value_a, uri_1, nrows, None, 80)
        self.check(value_b, uri_1, nrows, None, 40)
        self.check(value_c, uri_1, nrows, None, 30)
        self.check(value_d, uri_1, nrows, None, 20)

        # Check that the correct data is seen at and after the stable timestamp.
        self.check(value_a, uri_2, nrows, None, 50)
        self.check(value_a, uri_2, nrows, None, 80)
        self.check(value_b, uri_2, nrows, None, 40)
        self.check(value_c, uri_2, nrows, None, 30)
        self.check(value_d, uri_2, nrows, None, 20)

        stat_cursor = self.session.open_cursor('statistics:', None, None)
        calls = stat_cursor[stat.conn.txn_rts][2]
        hs_removed = stat_cursor[stat.conn.txn_rts_hs_removed][2]
        hs_sweep = stat_cursor[stat.conn.txn_rts_sweep_hs_keys][2]
        keys_removed = stat_cursor[stat.conn.txn_rts_keys_removed][2]
        keys_restored = stat_cursor[stat.conn.txn_rts_keys_restored][2]
        pages_visited = stat_cursor[stat.conn.txn_rts_pages_visited][2]
        upd_aborted = stat_cursor[stat.conn.txn_rts_upd_aborted][2]
        stat_cursor.close()

        self.assertEqual(calls, 0)
        self.assertEqual(keys_removed, 0)
        self.assertEqual(keys_restored, 0)
        self.assertGreaterEqual(upd_aborted, 0)
        self.assertGreater(pages_visited, 0)
        self.assertGreaterEqual(hs_removed, 0)
        self.assertGreater(hs_sweep, 0)
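`evict_cursor` pushes the rows just written out of the cache so later reads must reconstruct them (through the history store where applicable). A sketch of the usual pattern, using WiredTiger's `debug=(release_evict)` cursor configuration; the `ignore_prepare` setting and plain integer keys are simplifying assumptions (the real helper may use `ds.key(i)`):

    def evict_cursor(self, uri, nrows, check_value):
        # A cursor opened with debug=(release_evict) evicts the page it is
        # positioned on when the cursor is reset.
        evict_cursor = self.session.open_cursor(uri, None, 'debug=(release_evict)')
        self.session.begin_transaction('ignore_prepare=true')
        for i in range(1, nrows + 1):
            evict_cursor.set_key(i)
            self.assertEqual(evict_cursor.search(), 0)
            self.assertEqual(evict_cursor.get_value(), check_value)
            evict_cursor.reset()
        evict_cursor.close()
        self.session.rollback_transaction()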
Example #23
    def test_checkpoint(self):
        uri = 'table:checkpoint10'
        nrows = 10000
        overlap = 5000 if self.do_overlap else 0
        morerows = 10000

        # Create a table.
        ds = SimpleDataSet(
            self, uri, 0, key_format=self.key_format, value_format=self.value_format,
            config=self.extraconfig)
        ds.populate()

        if self.value_format == '8t':
            morerows *= 5
            value_a = 97
            value_b = 98
        else:
            value_a = "aaaaa" * 100
            value_b = "bbbbb" * 100

        # Write some data.
        self.large_updates(uri, ds, nrows, value_a)
        # Write this data out now so we aren't waiting for it while trying to
        # race with the later data.
        self.session.checkpoint()

        # Write some more data, and hold the transaction open.
        session2 = self.conn.open_session()
        cursor2 = session2.open_cursor(uri)
        session2.begin_transaction()
        for i in range(nrows - overlap + 1, nrows + morerows + 1):
            cursor2[ds.key(i)] = value_b

        # Checkpoint in the background.
        done = threading.Event()
        if self.second_checkpoint is None:
            ckpt = checkpoint_thread(self.conn, done)
        else:
            ckpt = named_checkpoint_thread(self.conn, done, self.second_checkpoint)
        try:
            ckpt.start()

            # Wait for checkpoint to start before committing.
            ckpt_started = 0
            while not ckpt_started:
                stat_cursor = self.session.open_cursor('statistics:', None, None)
                ckpt_started = stat_cursor[stat.conn.txn_checkpoint_running][2]
                stat_cursor.close()
                time.sleep(1)

            session2.commit_transaction()
        finally:
            done.set()
            ckpt.join()

        # Reopen if desired to cycle the write generations.
        if self.do_reopen:
            self.reopen_conn()

        # There are two states we should be able to produce: one with the original
        # data and one with the additional data.
        #
        # It is ok to see either in the checkpoint (since the checkpoint could
        # reasonably include or not include the second txn) but not ok to see
        # an intermediate state.
        expected_a = { value_a: nrows }
        expected_b = { value_a: nrows - overlap, value_b: overlap + morerows }
        expected = [expected_a, expected_b]

        # For FLCS, because the table expands under uncommitted data, we should
        # see zeros once the additional data's been written (that is, always strictly
        # before the checkpoint) if we don't see the actual values.
        expected_flcs_a = { value_a: nrows, 0: morerows }
        expected_flcs = [expected_flcs_a, expected_b]

        # Now read the checkpoint.
        self.check(ds, self.second_checkpoint, expected_flcs if self.value_format == '8t' else expected)

        # If we haven't died yet, pretend to crash and run RTS to see if the
        # checkpoint was inconsistent.
        # (This only works if we didn't reopen the connection, so don't bother if we did.)
        if not self.do_reopen:
            simulate_crash_restart(self, ".", "RESTART")

            # Make sure we did get an inconsistent checkpoint.
            stat_cursor = self.session.open_cursor('statistics:', None, None)
            inconsistent_ckpt = stat_cursor[stat.conn.txn_rts_inconsistent_ckpt][2]
            stat_cursor.close()
            self.assertGreater(inconsistent_ckpt, 0)
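`checkpoint_thread` and `named_checkpoint_thread` come from the test-suite thread helpers; conceptually, each loops taking checkpoints until the `done` event is set. A minimal sketch:

import threading

class checkpoint_thread(threading.Thread):
    def __init__(self, conn, done):
        self.conn = conn
        self.done = done
        threading.Thread.__init__(self)

    def run(self):
        # Keep checkpointing in our own session until the test signals done.
        session = self.conn.open_session()
        while not self.done.is_set():
            session.checkpoint()
        session.close()

class named_checkpoint_thread(checkpoint_thread):
    def __init__(self, conn, done, ckpt_name):
        checkpoint_thread.__init__(self, conn, done)
        self.ckpt_name = ckpt_name

    def run(self):
        # Same loop, but take named checkpoints instead.
        session = self.conn.open_session()
        while not self.done.is_set():
            session.checkpoint('name=' + self.ckpt_name)
        session.close()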
Example #24
    def test_checkpoint(self):
        uri = 'table:checkpoint11'
        nrows = 10000
        overlap = 5000 if self.do_overlap else 0
        morerows = 10000

        # Create a table.
        ds = SimpleDataSet(
            self, uri, 0, key_format=self.key_format, value_format=self.value_format,
            config=self.extraconfig)
        ds.populate()

        if self.value_format == '8t':
            morerows *= 5
            value_a = 97
            value_b = 98
            value_c = 99
        else:
            value_a = "aaaaa" * 100
            value_b = "bbbbb" * 100
            value_c = "ccccc" * 100

        # Pin oldest and stable timestamps to 5.
        self.conn.set_timestamp('oldest_timestamp=' + self.timestamp_str(5) +
            ',stable_timestamp=' + self.timestamp_str(5))

        # Write some data at time 10.
        self.large_updates(uri, ds, nrows, value_a, 10)

        # Write some more data at time 20.
        self.large_updates(uri, ds, nrows, value_b, 20)

        # Write this data out now so we aren't waiting for it while trying to
        # race with the later data.
        self.session.checkpoint()

        # Write some further data, and hold the transaction open. Eventually commit at time 30.
        session2 = self.conn.open_session()
        cursor2 = session2.open_cursor(uri)
        session2.begin_transaction()
        for i in range(nrows - overlap + 1, nrows + morerows + 1):
            cursor2[ds.key(i)] = value_c

        # Optionally move stable forward.
        self.conn.set_timestamp('stable_timestamp=' + self.timestamp_str(self.stable_ts))

        # Checkpoint in the background.
        done = threading.Event()
        if self.second_checkpoint is None:
            ckpt = checkpoint_thread(self.conn, done)
        else:
            ckpt = named_checkpoint_thread(self.conn, done, self.second_checkpoint)
        try:
            ckpt.start()

            # Wait for checkpoint to start before committing.
            ckpt_started = 0
            while not ckpt_started:
                stat_cursor = self.session.open_cursor('statistics:', None, None)
                ckpt_started = stat_cursor[stat.conn.txn_checkpoint_running][2]
                stat_cursor.close()
                time.sleep(1)

            session2.commit_transaction('commit_timestamp=' + self.timestamp_str(30))
        finally:
            done.set()
            ckpt.join()

        # Reopen if desired to cycle the write generations.
        if self.do_reopen:
            self.reopen_conn()

        # There are two states we should be able to produce. In all cases we should
        # see all the original data (value_a at time 10, value_b at time 20). If
        # the value_c transaction appears in the checkpoint we should see all the
        # additional data as well (value_c at time 30); otherwise reading past time 20
        # should yield value_b.
        #
        # It is ok to see either state in the checkpoint (since the checkpoint could
        # reasonably include or not include the second txn) but not ok to see
        # an intermediate state, and in particular we must not see _part_ of the value_c
        # data.
        expected_5 = {}
        expected_15 = { value_a: nrows }
        expected_25 = { value_b: nrows }
        expected_35_a = { value_b: nrows }
        expected_35_b = { value_b: nrows - overlap, value_c: overlap + morerows }
        expected = {
            5: [expected_5],
            15: [expected_15],
            25: [expected_25],
            35: [expected_35_a, expected_35_b]
        }
        # When reading without an explicit timestamp, we should see the state as of
        # the stable timestamp when the checkpoint was taken.
        expected[None] = expected[self.stable_ts]

        # For FLCS, because the table expands under uncommitted data, we should
        # see zeros once the additional data's been written (that is, always strictly
        # before the checkpoint) if we don't see the actual values.
        expected_5_flcs = { 0: nrows + morerows }
        expected_15_flcs = { value_a: nrows, 0: morerows }
        expected_25_flcs = { value_b: nrows, 0: morerows }
        expected_35_flcs_a = { value_b: nrows, 0: morerows }
        expected_35_flcs_b = { value_b: nrows - overlap, value_c: overlap + morerows }
        expected_flcs = {
            5: [expected_5_flcs],
            15: [expected_15_flcs],
            25: [expected_25_flcs],
            35: [expected_35_flcs_a, expected_35_flcs_b]
        }
        expected_flcs[None] = expected_flcs[self.stable_ts]

        if self.do_advance:
            # Move stable and oldest up in case that interferes with handling the checkpoint.
            self.conn.set_timestamp('stable_timestamp=' + self.timestamp_str(50))
            self.conn.set_timestamp('oldest_timestamp=' + self.timestamp_str(50))

        # Now read the checkpoint.
        self.check(ds, self.second_checkpoint, expected_flcs if self.value_format == '8t' else expected)
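The `large_updates` used by these checkpoint tests takes the dataset first and an optional commit timestamp. A sketch consistent with the call sites above; the single-transaction batching is a simplification (the real helper may commit in chunks):

    def large_updates(self, uri, ds, nrows, value, commit_ts=0):
        # Write 'value' into every row, committing at 'commit_ts' when given.
        cursor = self.session.open_cursor(uri)
        self.session.begin_transaction()
        for i in range(1, nrows + 1):
            cursor[ds.key(i)] = value
        if commit_ts == 0:
            self.session.commit_transaction()
        else:
            self.session.commit_transaction(
                'commit_timestamp=' + self.timestamp_str(commit_ts))
        cursor.close()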
Example #25
    def test_rollback_to_stable_same_ts_append(self):
        nrows = 100

        # Create a table.
        self.pr("create/populate table")
        uri = "table:rollback_to_stable14"
        ds = SimpleDataSet(self,
                           uri,
                           0,
                           key_format=self.key_format,
                           value_format=self.value_format)
        ds.populate()

        # Pin oldest and stable to timestamp 10.
        self.conn.set_timestamp('oldest_timestamp=' + self.timestamp_str(10) +
                                ',stable_timestamp=' + self.timestamp_str(10))

        value_a = "aaaaa" * 100

        value_modQ = append_val(value_a, 'Q')
        value_modR = append_val(value_modQ, 'R')
        value_modS = append_val(value_modR, 'S')
        value_modT = append_val(value_modS, 'T')
        value_modW = append_val(value_modT, 'W')
        value_modX = append_val(value_modW, 'X')
        value_modY = append_val(value_modX, 'Y')
        value_modZ = append_val(value_modY, 'Z')

        # Perform a combination of modifies and updates.
        self.pr("large updates and modifies")
        self.large_updates(uri, value_a, ds, nrows, self.prepare, 20)
        self.large_modifies(uri, 'Q', ds, len(value_a), 1, nrows, self.prepare,
                            30)
        # Prepare cannot always use the same timestamp, so use different timestamps for the updates that will be aborted.
        if self.prepare:
            self.large_modifies(uri, 'R', ds, len(value_modQ), 1, nrows,
                                self.prepare, 51)
            self.large_modifies(uri, 'S', ds, len(value_modR), 1, nrows,
                                self.prepare, 55)
            self.large_modifies(uri, 'T', ds, len(value_modS), 1, nrows,
                                self.prepare, 60)
        else:
            self.large_modifies(uri, 'R', ds, len(value_modQ), 1, nrows,
                                self.prepare, 60)
            self.large_modifies(uri, 'S', ds, len(value_modR), 1, nrows,
                                self.prepare, 60)
            self.large_modifies(uri, 'T', ds, len(value_modS), 1, nrows,
                                self.prepare, 60)

        # Verify data is visible and correct.
        self.check(value_a, uri, nrows, None, 21 if self.prepare else 20)
        self.check(value_modQ, uri, nrows, None, 31 if self.prepare else 30)
        self.check(value_modT, uri, nrows, None, 61 if self.prepare else 60)

        self.conn.set_timestamp('stable_timestamp=' + self.timestamp_str(50))

        # Create a checkpoint thread
        done = threading.Event()
        ckpt = checkpoint_thread(self.conn, done)
        try:
            self.pr("start checkpoint")
            ckpt.start()

            # Wait for checkpoint to start before committing.
            ckpt_started = 0
            while not ckpt_started:
                stat_cursor = self.session.open_cursor('statistics:', None,
                                                       None)
                ckpt_started = stat_cursor[stat.conn.txn_checkpoint_running][2]
                stat_cursor.close()
                time.sleep(1)

            # Perform several modifies in parallel with checkpoint.
            # Rollbacks may occur when checkpoint is running, so retry as needed.
            self.pr("modifies")
            self.retry_rollback(
                'modify ds1, W', None, lambda: self.large_modifies(
                    uri, 'W', ds, len(value_modT), 1, nrows, self.prepare, 70))
            self.retry_rollback(
                'modify ds1, X', None, lambda: self.large_modifies(
                    uri, 'X', ds, len(value_modW), 1, nrows, self.prepare, 80))
            self.evict_cursor(uri, nrows, value_modX)
            self.retry_rollback(
                'modify ds1, Y', None, lambda: self.large_modifies(
                    uri, 'Y', ds, len(value_modX), 1, nrows, self.prepare, 90))
            self.retry_rollback(
                'modify ds1, Z', None,
                lambda: self.large_modifies(uri, 'Z', ds, len(value_modY), 1,
                                            nrows, self.prepare, 100))
            self.evict_cursor(uri, nrows, value_modZ)
        finally:
            done.set()
            ckpt.join()

        # Simulate a server crash and restart.
        self.pr("restart")
        simulate_crash_restart(self, ".", "RESTART")
        self.pr("restart complete")

        stat_cursor = self.session.open_cursor('statistics:', None, None)
        calls = stat_cursor[stat.conn.txn_rts][2]
        hs_removed = stat_cursor[stat.conn.txn_rts_hs_removed][2]
        hs_restore_updates = stat_cursor[
            stat.conn.txn_rts_hs_restore_updates][2]
        hs_sweep = stat_cursor[stat.conn.txn_rts_sweep_hs_keys][2]
        keys_removed = stat_cursor[stat.conn.txn_rts_keys_removed][2]
        keys_restored = stat_cursor[stat.conn.txn_rts_keys_restored][2]
        pages_visited = stat_cursor[stat.conn.txn_rts_pages_visited][2]
        upd_aborted = stat_cursor[stat.conn.txn_rts_upd_aborted][2]
        stat_cursor.close()

        self.assertEqual(calls, 0)
        self.assertEqual(keys_removed, 0)
        self.assertEqual(hs_restore_updates, nrows)
        self.assertEqual(keys_restored, 0)
        if self.prepare:
            self.assertGreaterEqual(upd_aborted, 0)
        else:
            self.assertEqual(upd_aborted, 0)
        self.assertGreater(pages_visited, 0)
        self.assertGreaterEqual(hs_removed, nrows * 3)
        self.assertGreaterEqual(hs_sweep, 0)

        # Check that the correct data is seen at and after the stable timestamp.
        self.check(value_a, uri, nrows, None, 20)
        self.check(value_modQ, uri, nrows, None, 30)
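`append_val` is the append-flavored counterpart of `mod_val`: each modify grows the value by one byte at the end, which is why the call sites pass `len(value)` as the modify offset. A plausible sketch:

def append_val(value, char):
    # The matching modify writes 'char' at offset len(value), so the
    # expected value is simply the old value with one byte appended.
    return value + char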
Example #26
    def test_rollback_to_stable(self):
        nrows = 1000

        # Create a table without logging.
        uri_1 = "table:rollback_to_stable10_1"
        ds_1 = SimpleDataSet(self,
                             uri_1,
                             0,
                             key_format="i",
                             value_format="S",
                             config='log=(enabled=false)')
        ds_1.populate()

        # Create another table without logging.
        uri_2 = "table:rollback_to_stable10_2"
        ds_2 = SimpleDataSet(self,
                             uri_2,
                             0,
                             key_format="i",
                             value_format="S",
                             config='log=(enabled=false)')
        ds_2.populate()

        # Pin oldest and stable to timestamp 10.
        self.conn.set_timestamp('oldest_timestamp=' + timestamp_str(10) +
                                ',stable_timestamp=' + timestamp_str(10))

        value_a = "aaaaa" * 100
        value_b = "bbbbb" * 100
        value_c = "ccccc" * 100
        value_d = "ddddd" * 100
        value_e = "eeeee" * 100
        value_f = "fffff" * 100

        # Perform several updates.
        self.large_updates(uri_1, value_d, ds_1, nrows, 20)
        self.large_updates(uri_1, value_c, ds_1, nrows, 30)
        self.large_updates(uri_1, value_b, ds_1, nrows, 40)
        self.large_updates(uri_1, value_a, ds_1, nrows, 50)

        self.large_updates(uri_2, value_d, ds_2, nrows, 20)
        self.large_updates(uri_2, value_c, ds_2, nrows, 30)
        self.large_updates(uri_2, value_b, ds_2, nrows, 40)
        self.large_updates(uri_2, value_a, ds_2, nrows, 50)

        # Verify data is visible and correct.
        self.check(value_d, uri_1, nrows, 20)
        self.check(value_c, uri_1, nrows, 30)
        self.check(value_b, uri_1, nrows, 40)
        self.check(value_a, uri_1, nrows, 50)

        self.check(value_d, uri_2, nrows, 20)
        self.check(value_c, uri_2, nrows, 30)
        self.check(value_b, uri_2, nrows, 40)
        self.check(value_a, uri_2, nrows, 50)

        # Pin stable to timestamp 60 if prepare, otherwise 50.
        if self.prepare:
            self.conn.set_timestamp('stable_timestamp=' + timestamp_str(60))
        else:
            self.conn.set_timestamp('stable_timestamp=' + timestamp_str(50))

        # Create a checkpoint thread
        done = threading.Event()
        ckpt = checkpoint_thread(self.conn, done)
        ckpt.start()

        # Perform several updates in parallel with checkpoint.
        self.large_updates(uri_1, value_e, ds_1, nrows, 70)
        self.large_updates(uri_2, value_e, ds_2, nrows, 70)
        self.large_updates(uri_1, value_f, ds_1, nrows, 80)
        self.large_updates(uri_2, value_f, ds_2, nrows, 80)

        done.set()
        ckpt.join()

        # Simulate a server crash and restart.
        self.simulate_crash_restart(".", "RESTART")

        # Check that the correct data is seen at and after the stable timestamp.
        self.check(value_a, uri_1, nrows, 50)
        self.check(value_a, uri_1, nrows, 80)
        self.check(value_b, uri_1, nrows, 40)
        self.check(value_c, uri_1, nrows, 30)
        self.check(value_d, uri_1, nrows, 20)

        # Check that the correct data is seen at and after the stable timestamp.
        self.check(value_a, uri_2, nrows, 50)
        self.check(value_a, uri_2, nrows, 80)
        self.check(value_b, uri_2, nrows, 40)
        self.check(value_c, uri_2, nrows, 30)
        self.check(value_d, uri_2, nrows, 20)

        stat_cursor = self.session.open_cursor('statistics:', None, None)
        calls = stat_cursor[stat.conn.txn_rts][2]
        hs_removed = stat_cursor[stat.conn.txn_rts_hs_removed][2]
        hs_sweep = stat_cursor[stat.conn.txn_rts_sweep_hs_keys][2]
        keys_removed = stat_cursor[stat.conn.txn_rts_keys_removed][2]
        keys_restored = stat_cursor[stat.conn.txn_rts_keys_restored][2]
        pages_visited = stat_cursor[stat.conn.txn_rts_pages_visited][2]
        upd_aborted = stat_cursor[stat.conn.txn_rts_upd_aborted][2]
        stat_cursor.close()

        self.assertEqual(calls, 0)
        self.assertEqual(keys_removed, 0)
        self.assertEqual(keys_restored, 0)
        self.assertGreaterEqual(upd_aborted, 0)
        self.assertGreater(pages_visited, 0)
        self.assertGreaterEqual(hs_removed, 0)
        self.assertGreater(hs_sweep, 0)
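This older example uses a four-argument `check` with no timestamp-config placeholder. A sketch of a timestamped read-and-verify helper matching that signature, assuming the module-level `timestamp_str` used elsewhere in this example:

    def check(self, check_value, uri, nrows, read_ts):
        # Read every row as of 'read_ts' and verify it holds 'check_value'.
        session = self.session
        session.begin_transaction('read_timestamp=' + timestamp_str(read_ts))
        cursor = session.open_cursor(uri)
        count = 0
        for k, v in cursor:
            self.assertEqual(v, check_value)
            count += 1
        cursor.close()
        session.rollback_transaction()
        self.assertEqual(count, nrows)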
Example #27
    def test_rollback_to_stable(self):
        nrows = 10

        # Create a table.
        uri = "table:rollback_to_stable26"
        ds = SimpleDataSet(self,
                           uri,
                           0,
                           key_format=self.key_format,
                           value_format=self.value_format)
        ds.populate()

        if self.value_format == '8t':
            value_a = 97
            value_b = 98
            value_c = 99
            value_d = 100
            value_e = 101
        else:
            value_a = "aaaaa" * 100
            value_b = "bbbbb" * 100
            value_c = "ccccc" * 100
            value_d = "ddddd" * 100
            value_e = "eeeee" * 100

        # Pin oldest and stable to timestamp 10.
        self.conn.set_timestamp('oldest_timestamp=' + self.timestamp_str(10) +
                                ',stable_timestamp=' + self.timestamp_str(10))

        self.large_updates(uri, value_a, ds, nrows, self.prepare, 20)
        self.large_updates(uri, value_b, ds, nrows, self.prepare, 30)

        if self.hs_remove:
            self.large_removes(uri, ds, nrows, self.prepare, 40)

        prepare_session = self.conn.open_session()
        prepare_session.begin_transaction()
        cursor = prepare_session.open_cursor(uri)
        for i in range(1, nrows + 1):
            cursor[i] = value_c
            if self.prepare_remove:
                cursor.set_key(i)
                self.assertEqual(cursor.remove(), 0)
        cursor.close()
        prepare_session.prepare_transaction('prepare_timestamp=' +
                                            self.timestamp_str(50))

        # Verify data is visible and correct.
        self.check(value_a, uri, nrows, None, 21 if self.prepare else 20)
        self.check(value_b, uri, nrows, None, 31 if self.prepare else 30)

        self.evict_cursor(uri, nrows)

        # Pin stable to timestamp 40.
        self.conn.set_timestamp('stable_timestamp=' + self.timestamp_str(40))

        # Create a checkpoint thread
        done = threading.Event()
        ckpt = checkpoint_thread(self.conn, done)
        try:
            ckpt.start()

            # Wait for checkpoint to start before committing last transaction.
            ckpt_started = 0
            while not ckpt_started:
                stat_cursor = self.session.open_cursor('statistics:', None,
                                                       None)
                ckpt_started = stat_cursor[stat.conn.txn_checkpoint_running][2]
                stat_cursor.close()
                time.sleep(1)

            prepare_session.rollback_transaction()
        finally:
            done.set()
            ckpt.join()

        self.large_updates(uri, value_d, ds, nrows, self.prepare, 60)

        # Check that the correct data is seen.
        self.check(value_a, uri, nrows, None, 21 if self.prepare else 20)
        self.check(value_b, uri, nrows, None, 31 if self.prepare else 30)
        self.check(value_d, uri, nrows, None, 61 if self.prepare else 60)

        # Simulate a server crash and restart.
        simulate_crash_restart(self, ".", "RESTART")

        stat_cursor = self.session.open_cursor('statistics:', None, None)
        hs_removed = stat_cursor[stat.conn.txn_rts_hs_removed][2]
        hs_restore_updates = stat_cursor[
            stat.conn.txn_rts_hs_restore_updates][2]
        keys_removed = stat_cursor[stat.conn.txn_rts_keys_removed][2]
        stat_cursor.close()

        self.assertEqual(keys_removed, 0)
        self.assertEqual(hs_restore_updates, nrows)
        self.assertEqual(hs_removed, nrows)

        # Check that the correct data is seen.
        self.check(value_a, uri, nrows, None, 21 if self.prepare else 20)
        self.check(value_b, uri, nrows, None, 31 if self.prepare else 30)

        self.large_updates(uri, value_e, ds, nrows, self.prepare, 70)

        self.evict_cursor(uri, nrows)

        # Check that the correct data is seen.
        self.check(value_a, uri, nrows, None, 21 if self.prepare else 20)
        self.check(value_b, uri, nrows, None, 31 if self.prepare else 30)
        self.check(value_e, uri, nrows, None, 71 if self.prepare else 70)
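`simulate_crash_restart` copies the database files aside while the original connection is still open, so the copy looks like the aftermath of an unclean shutdown, then reopens on the copy so recovery and rollback-to-stable run. A sketch under those assumptions; the lock-file exclusion mirrors the usual test-suite pattern:

import os, shutil

def simulate_crash_restart(self, olddir, newdir):
    # Copy files with the connection still open: the copy has no clean
    # shutdown checkpoint, just like a crash image.
    shutil.rmtree(newdir, ignore_errors=True)
    os.mkdir(newdir)
    for fname in os.listdir(olddir):
        fullname = os.path.join(olddir, fname)
        # Skip the lock file; it may be held open on some platforms.
        if os.path.isfile(fullname) and 'WiredTiger.lock' not in fullname:
            shutil.copy(fullname, newdir)
    # Close the original connection and reopen on the copy, which runs
    # recovery (and RTS) against the crash image.
    self.close_conn()
    self.conn = self.setUpConnectionOpen(newdir)
    self.session = self.setUpSessionOpen(self.conn)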
Example #28
    def test_rollback_to_stable_same_ts(self):
        nrows = 1500

        # Create a table without logging.
        self.pr("create/populate table")
        uri = "table:rollback_to_stable14"
        ds = SimpleDataSet(
            self, uri, 0, key_format="i", value_format="S", config='log=(enabled=false)')
        ds.populate()

        # Pin oldest and stable to timestamp 10.
        self.conn.set_timestamp('oldest_timestamp=' + timestamp_str(10) +
            ',stable_timestamp=' + timestamp_str(10))

        value_a = "aaaaa" * 100

        value_modQ = mod_val(value_a, 'Q', 0)
        value_modR = mod_val(value_modQ, 'R', 1)
        value_modS = mod_val(value_modR, 'S', 2)
        value_modT = mod_val(value_modS, 'T', 3)

        # Perform a combination of modifies and updates.
        self.pr("large updates and modifies")
        self.large_updates(uri, value_a, ds, nrows, 20)
        self.large_modifies(uri, 'Q', ds, 0, 1, nrows, 30)
        # Prepare cannot always use the same timestamp, so use different timestamps for the updates that will be aborted.
        if self.prepare:
            self.large_modifies(uri, 'R', ds, 1, 1, nrows, 51)
            self.large_modifies(uri, 'S', ds, 2, 1, nrows, 55)
            self.large_modifies(uri, 'T', ds, 3, 1, nrows, 60)
        else:
            self.large_modifies(uri, 'R', ds, 1, 1, nrows, 60)
            self.large_modifies(uri, 'S', ds, 2, 1, nrows, 60)
            self.large_modifies(uri, 'T', ds, 3, 1, nrows, 60)

        # Verify data is visible and correct.
        self.check(value_a, uri, nrows, 20)
        self.check(value_modQ, uri, nrows, 30)
        self.check(value_modT, uri, nrows, 60)

        self.conn.set_timestamp('stable_timestamp=' + timestamp_str(50))

        # Create a checkpoint thread
        done = threading.Event()
        ckpt = checkpoint_thread(self.conn, done)
        try:
            self.pr("start checkpoint")
            ckpt.start()

            # Perform several modifies in parallel with checkpoint.
            # Rollbacks may occur when checkpoint is running, so retry as needed.
            self.pr("modifies")
            retry_rollback(self, 'modify ds1, W', None,
                           lambda: self.large_modifies(uri, 'W', ds, 4, 1, nrows, 70))
            retry_rollback(self, 'modify ds1, X', None,
                           lambda: self.large_modifies(uri, 'X', ds, 5, 1, nrows, 80))
            retry_rollback(self, 'modify ds1, Y', None,
                           lambda: self.large_modifies(uri, 'Y', ds, 6, 1, nrows, 90))
            retry_rollback(self, 'modify ds1, Z', None,
                           lambda: self.large_modifies(uri, 'Z', ds, 7, 1, nrows, 100))
        finally:
            done.set()
            ckpt.join()

        # Simulate a server crash and restart.
        self.pr("restart")
        self.simulate_crash_restart(".", "RESTART")
        self.pr("restart complete")

        stat_cursor = self.session.open_cursor('statistics:', None, None)
        calls = stat_cursor[stat.conn.txn_rts][2]
        hs_removed = stat_cursor[stat.conn.txn_rts_hs_removed][2]
        hs_restore_updates = stat_cursor[stat.conn.txn_rts_hs_restore_updates][2]
        hs_sweep = stat_cursor[stat.conn.txn_rts_sweep_hs_keys][2]
        keys_removed = stat_cursor[stat.conn.txn_rts_keys_removed][2]
        keys_restored = stat_cursor[stat.conn.txn_rts_keys_restored][2]
        pages_visited = stat_cursor[stat.conn.txn_rts_pages_visited][2]
        upd_aborted = stat_cursor[stat.conn.txn_rts_upd_aborted][2]
        stat_cursor.close()

        self.assertEqual(calls, 0)
        self.assertEqual(keys_removed, 0)
        self.assertEqual(hs_restore_updates, nrows)
        self.assertEqual(keys_restored, 0)
        self.assertEqual(upd_aborted, 0)
        self.assertGreater(pages_visited, 0)
        self.assertGreaterEqual(hs_removed, nrows * 3)
        self.assertGreaterEqual(hs_sweep, 0)

        # Check that the correct data is seen at and after the stable timestamp.
        self.check(value_a, uri, nrows, 20)
        self.check(value_modQ, uri, nrows, 30)

        # The test may output the following message in eviction under cache pressure. Ignore that.
        self.ignoreStdoutPatternIfExists("oldest pinned transaction ID rolled back for eviction")
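`large_modifies` applies an `nbytes`-byte WT_MODIFY at `location` to every row, preparing first when requested. A sketch consistent with the call sites, assuming `import wiredtiger` at module scope for the `wiredtiger.Modify` binding; the prepared-case timestamp offsets are illustrative:

    def large_modifies(self, uri, value, ds, location, nbytes, nrows, prepare, commit_ts):
        cursor = self.session.open_cursor(uri)
        for i in range(1, nrows + 1):
            self.session.begin_transaction()
            cursor.set_key(ds.key(i))
            # Apply a partial-value update rather than rewriting the row.
            mods = [wiredtiger.Modify(value, location, nbytes)]
            self.assertEqual(cursor.modify(mods), 0)
            if prepare:
                self.session.prepare_transaction(
                    'prepare_timestamp=' + self.timestamp_str(commit_ts))
                self.session.timestamp_transaction(
                    'commit_timestamp=' + self.timestamp_str(commit_ts + 1))
                self.session.timestamp_transaction(
                    'durable_timestamp=' + self.timestamp_str(commit_ts + 1))
                self.session.commit_transaction()
            else:
                self.session.commit_transaction(
                    'commit_timestamp=' + self.timestamp_str(commit_ts))
        cursor.close()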
Example #29
    def test_checkpoint(self):
        uri = 'table:checkpoint18'
        nrows = 10000

        # Create a table.
        ds = SimpleDataSet(self,
                           uri,
                           0,
                           key_format=self.key_format,
                           value_format=self.value_format,
                           config=self.extraconfig)
        ds.populate()

        if self.value_format == '8t':
            nrows *= 5
            value_a = 97
            value_b = 98
            value_c = 99
        else:
            value_a = "aaaaa" * 100
            value_b = "bbbbb" * 100
            value_c = "ccccc" * 100

        # Write some baseline data and checkpoint it.
        self.large_updates(ds, nrows, value_a)
        self.session.checkpoint()

        # Write more data. Touch odd keys only. Hold the transaction open.
        session2 = self.conn.open_session()
        cursor2 = session2.open_cursor(uri)
        session2.begin_transaction()
        for i in range(1, nrows + 1, 2):
            cursor2[ds.key(i)] = value_b

        # Commit the transaction with a background checkpoint so we get part of it
        # in the checkpoint.
        done = threading.Event()
        ckpt = checkpoint_thread(self.conn, done)
        try:
            ckpt.start()

            # Wait for checkpoint to start before committing.
            ckpt_started = 0
            while not ckpt_started:
                stat_cursor = self.session.open_cursor('statistics:', None,
                                                       None)
                ckpt_started = stat_cursor[stat.conn.txn_checkpoint_running][2]
                stat_cursor.close()
                time.sleep(1)

            session2.commit_transaction()
        finally:
            done.set()
            ckpt.join()

        # Now write the other keys.
        session2.begin_transaction()
        for i in range(2, nrows + 2, 2):
            cursor2[ds.key(i)] = value_c
        session2.commit_transaction()

        # Open the checkpoint now.
        ckpt = self.session.open_cursor(uri, None,
                                        'checkpoint=WiredTigerCheckpoint')

        # Take another checkpoint. This will write out the rest of the partial
        # transaction and remove its history store footprint, so reading from the
        # first checkpoint will fail if the cursor hasn't secured itself a reference
        # to its matching history store checkpoint.
        self.session.checkpoint()

        # Make sure we can read the table from the second checkpoint.
        # We shouldn't see either value_b or value_c.
        self.check(ckpt, nrows, value_a)
        ckpt.close()
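For reference, the `check` used in this last example takes an already-open checkpoint cursor rather than a URI; a sketch is just a scan-and-count over the cursor:

    def check(self, cursor, nrows, value):
        # Every row visible in the checkpoint should hold 'value'.
        count = 0
        for k, v in cursor:
            self.assertEqual(v, value)
            count += 1
        self.assertEqual(count, nrows)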