Example #1
0
    def test_truncate_cursor_order(self):
        uri = self.type + self.name

        # A simple, single-file 'file' or 'table' object.
        ds = SimpleDataSet(self, uri, 100, key_format=self.keyfmt)
        ds.populate()
        c1 = self.session.open_cursor(uri, None)
        c1.set_key(ds.key(1000))
        c2 = self.session.open_cursor(uri, None)
        c2.set_key(ds.key(2000))
        self.session.truncate(None, c1, c2, None)
        self.assertEqual(c1.close(), 0)
        self.assertEqual(c2.close(), 0)
        self.session.drop(uri)

        if self.type == "table:":
            ds = ComplexDataSet(self, uri, 100, key_format=self.keyfmt)
            ds.populate()
            c1 = self.session.open_cursor(uri, None)
            c1.set_key(ds.key(1000))
            c2 = self.session.open_cursor(uri, None)
            c2.set_key(ds.key(2000))
            self.session.truncate(None, c1, c2, None)
            self.assertEqual(c1.close(), 0)
            self.assertEqual(c2.close(), 0)
            self.session.drop(uri)
Example #2
0
    def test_prepare_lookaside(self):
        if not wiredtiger.timestamp_build():
            self.skipTest('requires a timestamp build')

        # Create a small table.
        uri = "table:test_prepare_lookaside01"
        nrows = 100
        ds = SimpleDataSet(self, uri, nrows, key_format="S", value_format='u')
        ds.populate()
        bigvalue = "aaaaa" * 100

        # Initially load a large amount of data.
        cursor = self.session.open_cursor(uri)
        for i in range(1, 10000):
            cursor.set_key(ds.key(nrows + i))
            cursor.set_value(bigvalue)
            self.assertEqual(cursor.insert(), 0)
        cursor.close()
        self.session.checkpoint()

        # Check that lookaside works properly with prepared transactions.
        # We put prepared updates in multiple sessions so that we do not hang
        # because the cache is full of uncommitted updates.
        nsessions = 3
        nkeys = 4000
        self.prepare_updates(uri, ds, nrows, nsessions, nkeys)
Example #3
0
    def test_drop(self):
        uri = 'lsm:' + self.name
        ds = SimpleDataSet(self, uri, 100000)
        ds.populate()
        self.reopen_conn()

        self.session.drop(uri, None)
Example #4
0
    def test_insert_over_delete_replace(self):
        msg = '/WT_CACHE_FULL.*/'
        ds = SimpleDataSet(self, self.uri, 10000000, key_format=self.keyfmt,
            value_format=self.valuefmt, config=self.table_config)
        self.assertRaisesHavingMessage(wiredtiger.WiredTigerError,
            ds.populate, msg)

        cursor = self.session.open_cursor(self.uri, None)
        cursor.prev()
        last_key = int(cursor.get_key())

        # Now that the database contains as much data as will fit into
        # the configured cache, verify removes succeed.
        cursor = self.session.open_cursor(self.uri, None)
        for i in range(1, last_key // 4):
            cursor.set_key(ds.key(i))
            cursor.remove()

        cursor.reset()
        # Spin inserting to give eviction a chance to reclaim space
        inserted = False
        for i in range(1, 1000):
            try:
                cursor[ds.key(1)] = ds.value(1)
            except wiredtiger.WiredTigerError:
                cursor.reset()
                sleep(1)
                continue
            inserted = True
            break
        self.assertTrue(inserted)
Example #5
0
    def test_modify_smoke_recover(self):
        # Close the original database.
        self.conn.close()

        # Open a new database with logging configured.
        self.conn_config = \
            'log=(enabled=true),transaction_sync=(method=dsync,enabled)'
        self.conn = self.setUpConnectionOpen(".")
        self.session = self.setUpSessionOpen(self.conn)

        # Populate a database, and checkpoint it so it exists after recovery.
        ds = SimpleDataSet(self,
            self.uri, 100, key_format=self.keyfmt, value_format=self.valuefmt)
        ds.populate()
        self.session.checkpoint()
        self.modify_load(ds, False)

        # Crash and recover in a new directory.
        newdir = 'RESTART'
        copy_wiredtiger_home('.', newdir)
        self.conn.close()
        self.conn = self.setUpConnectionOpen(newdir)
        self.session = self.setUpSessionOpen(self.conn)
        self.session.verify(self.uri)

        self.modify_confirm(ds, False)
Example #6
0
 def test_basic(self):
     ds = SimpleDataSet(self, self.uri, self.nentries,
         config=self.config, key_format=self.keyfmt)
     ds.populate()
     self.reopen_conn()
     c = self.session.open_cursor(self.uri, None)
     self.forward(c, ds, self.nentries, [])
     self.backward(c, ds, self.nentries, [])
Example #7
0
    def test_modify_smoke_single(self):
        if self.skip():
            return

        ds = SimpleDataSet(self,
            self.uri, 100, key_format=self.keyfmt, value_format='u')
        ds.populate()
        self.modify_load(ds, True)
Example #8
0
    def test_modify_smoke_reopen(self):
        ds = SimpleDataSet(self,
            self.uri, 100, key_format=self.keyfmt, value_format=self.valuefmt)
        ds.populate()
        self.modify_load(ds, False)

        # Flush to disk, forcing reconciliation.
        self.reopen_conn()

        self.modify_confirm(ds, False)
Example #9
0
    def test_checkpoint_target(self):
        # Create 3 objects, change one record to an easily recognizable string.
        uri = self.uri + '1'
        ds1 = SimpleDataSet(self, uri, 100, key_format=self.fmt)
        ds1.populate()
        self.update(uri, ds1, 'ORIGINAL')

        uri = self.uri + '2'
        ds2 = SimpleDataSet(self, uri, 100, key_format=self.fmt)
        ds2.populate()
        self.update(uri, ds2, 'ORIGINAL')

        uri = self.uri + '3'
        ds3 = SimpleDataSet(self, uri, 100, key_format=self.fmt)
        ds3.populate()
        self.update(uri, ds3, 'ORIGINAL')

        # Checkpoint all three objects.
        self.session.checkpoint("name=checkpoint-1")

        # Update all 3 objects, then checkpoint two of the objects with the
        # same checkpoint name.
        self.update(self.uri + '1', ds1, 'UPDATE')
        self.update(self.uri + '2', ds2, 'UPDATE')
        self.update(self.uri + '3', ds3, 'UPDATE')
        target = 'target=("' + self.uri + '1"' + ',"' + self.uri + '2")'
        self.session.checkpoint("name=checkpoint-1," + target)

        # Confirm the checkpoint has the old value in objects that weren't
        # checkpointed, and the new value in objects that were checkpointed.
        self.check(self.uri + '1', ds1, 'UPDATE')
        self.check(self.uri + '2', ds2, 'UPDATE')
        self.check(self.uri + '3', ds3, 'ORIGINAL')
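Note (not from the test suite): the check() helper above presumably reads the named checkpoint back through a checkpoint cursor. A minimal hedged sketch of that pattern, reusing names from the example:

    # Sketch only: read record 10 of the first object as of "checkpoint-1".
    ckpt_cursor = self.session.open_cursor(
        self.uri + '1', None, 'checkpoint=checkpoint-1')
    value = ckpt_cursor[ds1.key(10)]
    ckpt_cursor.close()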
Example #10
0
 def test_checkpoint_stats(self):
     ds = SimpleDataSet(self, self.uri, self.nentries,
         config=self.config, key_format=self.keyfmt)
     for name in ('first', 'second', 'third'):
         ds.populate()
         self.session.checkpoint('name=' + name)
         cursor = self.session.open_cursor(
             'statistics:' + self.uri, None, 'checkpoint=' + name)
         self.assertEqual(
             cursor[stat.dsrc.btree_entries][2], self.nentries)
         cursor.close()
Example #11
0
    def test_insert_over_delete(self):
        msg = '/WT_CACHE_FULL.*/'
        ds = SimpleDataSet(self, self.uri, 10000000, key_format=self.keyfmt,
            value_format=self.valuefmt, config=self.table_config)
        self.assertRaisesHavingMessage(wiredtiger.WiredTigerError,
            ds.populate, msg)

        # Now that the database contains as much data as will fit into
        # the configured cache, verify removes succeed.
        cursor = self.session.open_cursor(self.uri, None)
        for i in range(1, 100):
            cursor.set_key(ds.key(i))
            cursor.remove()
Example #12
0
    def test_truncate_cursor_order(self):
        uri = self.type + self.name
        ds = SimpleDataSet(self, uri, 100, key_format=self.keyfmt)
        ds.populate()
        c1 = self.session.open_cursor(uri, None)
        c2 = self.session.open_cursor(uri, None)

        c1.set_key(ds.key(20))
        c2.set_key(ds.key(10))
        msg = "/the start cursor position is after the stop cursor position/"
        self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
            lambda: self.session.truncate(None, c1, c2, None), msg)
        c2.set_key(ds.key(20))
        self.session.truncate(None, c1, c2, None)
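Note (not from the test suite): a caller can avoid the cursor-order error above by ordering the cursors before truncating; WT_CURSOR::compare reports the relative position of two cursors on the same object. A hedged sketch with a hypothetical helper name:

    def truncate_ordered(session, c_start, c_stop):
        # Both cursors must have their keys set on the same object.
        if c_start.compare(c_stop) > 0:
            c_start, c_stop = c_stop, c_start
        return session.truncate(None, c_start, c_stop, None)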
Example #13
0
    def test_search_empty(self):
        # Create the object and open a cursor.
        ds = SimpleDataSet(self, self.uri, 0, key_format=self.key_format,
                           value_format=self.value_format)
        ds.create()
        cursor = self.session.open_cursor(self.uri, None)

        # Search for a record past the end of the table, which should fail.
        cursor.set_key(ds.key(100))
        self.assertEqual(cursor.search(), wiredtiger.WT_NOTFOUND)

        # Search-near for a record past the end of the table, which should fail.
        cursor.set_key(ds.key(100))
        self.assertEqual(cursor.search_near(), wiredtiger.WT_NOTFOUND)
Example #14
0
    def test_truncate_cursor_notset(self):
        uri = self.type + self.name
        msg = "/requires key be set/"

        ds = SimpleDataSet(self, uri, 100)
        ds.populate()

        c1 = self.session.open_cursor(uri, None)
        c2 = self.session.open_cursor(uri, None)
        c2.set_key(ds.key(10))
        self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
            lambda: self.session.truncate(None, c1, c2, None), msg)
        self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
            lambda: self.session.truncate(None, c2, c1, None), msg)
        c1.close()
        c2.close()
Example #15
0
    def test_las(self):
        # Create a small table.
        uri = "table:test_las"
        nrows = 100
        ds = SimpleDataSet(self, uri, nrows, key_format="S")
        ds.populate()
        bigvalue = "aaaaa" * 100

        # Initially load a large amount of data.
        cursor = self.session.open_cursor(uri)
        for i in range(1, 10000):
            cursor.set_key(ds.key(nrows + i))
            cursor.set_value(bigvalue)
            self.assertEqual(cursor.insert(), 0)
        cursor.close()
        self.session.checkpoint()

        # Scenario 1:
        # Check that lookaside works with an old snapshot.
        bigvalue1 = "bbbbb" * 100
        self.session.snapshot("name=xxx")
        # Update the values in a different session after taking the snapshot.
        self.large_updates(self.session, uri, bigvalue1, ds, nrows)
        # Check the value as it will appear after recovery.
        self.durable_check(bigvalue1, uri, ds, nrows)
        self.session.snapshot("drop=(all)")

        # Scenario 2:
        # Check that lookaside works with an old reader.
        bigvalue2 = "ccccc" * 100
        session2 = self.conn.open_session()
        session2.begin_transaction('isolation=snapshot')
        self.large_updates(self.session, uri, bigvalue2, ds, nrows)
        # Check the value as it will appear after recovery.
        self.durable_check(bigvalue2, uri, ds, nrows)
        session2.rollback_transaction()
        session2.close()

        # Scenario 3:
        # Check that lookaside works with an old timestamp.
        bigvalue3 = "ddddd" * 100
        self.conn.set_timestamp('stable_timestamp=' + timestamp_str(1))
        self.large_updates(self.session, uri, bigvalue3, ds, nrows, timestamp=True)
        # Check that data is visible only up to the stable timestamp.
        self.durable_check(bigvalue2, uri, ds, nrows)

        self.conn.set_timestamp('stable_timestamp=' + timestamp_str(i + 1))
        # Check that the latest data is now visible.
        self.durable_check(bigvalue3, uri, ds, nrows)
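Note (not from the test suite): the timestamp_str helper used here is not shown. In the wttest harness it is, to the best of my knowledge, plain hexadecimal formatting, since WiredTiger configuration strings parse timestamps as hex:

    def timestamp_str(t):
        # WiredTiger config strings take timestamps in hexadecimal.
        return '%x' % t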
Example #16
0
    def test_reconfig_fail(self):
        uri = 'table:reconfig_fail'
        ds = SimpleDataSet(self, uri, 100, key_format='S')
        ds.populate()

        self.session.begin_transaction("isolation=snapshot")
        c = self.session.open_cursor(uri, None)
        c.set_key(ds.key(20))
        c.set_value("abcde")
        self.assertEqual(c.update(), 0)

        compat_str = 'compatibility=(release="3.0.0")'
        msg = '/system must be quiescent/'
        self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
            lambda:self.conn.reconfigure(compat_str), msg)
Example #17
0
    def test_modify_delete(self):
        ds = SimpleDataSet(self,
            self.uri, 20, key_format=self.keyfmt, value_format='u')
        ds.populate()

        c = self.session.open_cursor(self.uri, None)
        c.set_key(ds.key(10))
        self.assertEqual(c.remove(), 0)

        mods = []
        mod = wiredtiger.Modify('ABCD', 3, 3)
        mods.append(mod)

        c.set_key(ds.key(10))
        self.assertEqual(c.modify(mods), wiredtiger.WT_NOTFOUND)
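Note (not from the test suite): wiredtiger.Modify(data, offset, size) describes a byte-range patch in which `size` bytes of the stored value at `offset` are replaced by `data`. A pure-Python sketch of those semantics (helper name is hypothetical, no WiredTiger required):

    def apply_modify(value, data, offset, size):
        # Replace `size` bytes at `offset` with `data`.
        return value[:offset] + data + value[offset + size:]

    # Modify('ABCD', 3, 3) applied to 'abcdefghij' yields 'abcABCDghij'.
    assert apply_modify('abcdefghij', 'ABCD', 3, 3) == 'abcABCDghij'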
Example #18
0
 def test_checkpoint_cursor_update(self):
     ds = SimpleDataSet(self, self.uri, 100, key_format=self.fmt)
     ds.populate()
     self.session.checkpoint("name=ckpt")
     cursor = self.session.open_cursor(self.uri, None, "checkpoint=ckpt")
     cursor.set_key(ds.key(10))
     cursor.set_value("XXX")
     msg = "/Unsupported cursor/"
     self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
         lambda: cursor.insert(), msg)
     self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
         lambda: cursor.remove(), msg)
     self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
         lambda: cursor.update(), msg)
     cursor.close()
Example #19
0
    def test_insert_over_capacity(self):
        msg = '/WT_CACHE_FULL.*/'
        ds = SimpleDataSet(self, self.uri, 10000000, key_format=self.keyfmt,
            value_format=self.valuefmt, config=self.table_config)
        self.assertRaisesHavingMessage(wiredtiger.WiredTigerError,
            ds.populate, msg)

        # Figure out the last key we successfully inserted, and check all
        # previous inserts are still there.
        cursor = self.session.open_cursor(self.uri, None)
        cursor.prev()
        last_key = int(cursor.get_key())
        ds = SimpleDataSet(self, self.uri, last_key, key_format=self.keyfmt,
            value_format=self.valuefmt, config=self.table_config)
        ds.check()
Example #20
0
    def test_insert_over_allowed(self):

        # Create a new table that is allowed to exceed the cache size; do this
        # before filling the cache so that the create succeeds.
        self.session.create(self.uri + "_over", "ignore_in_memory_cache_size=true")

        # Populate a table with enough data to fill the cache.
        msg = "/WT_CACHE_FULL.*/"
        ds = SimpleDataSet(self, self.uri, 10000000, config=self.table_config)
        self.assertRaisesHavingMessage(wiredtiger.WiredTigerError, lambda: ds.populate(), msg)

        # Add some content to the new table
        cursor = self.session.open_cursor(self.uri + "_over", None)
        for i in range(1, 1000):
            cursor[str("%015d" % i)] = str(i) + ": abcdefghijklmnopqrstuvwxyz"
        cursor.close()
Example #21
0
    def test_hazard(self):
        uri = "table:hazard"
        ds = SimpleDataSet(self, uri, 1000)
        ds.populate()

        # Open 10,000 cursors and pin a page to set a hazard pointer.
        cursors = []
        for i in range(0, 10000):
            c = self.session.open_cursor(uri, None)
            c.set_key(ds.key(10))
            c.search()
            cursors.append(c)

        # Close the cursors, clearing the hazard pointer.
        for c in cursors:
            c.close()
Example #22
0
    def test_modify_abort(self):
        ds = SimpleDataSet(self,
            self.uri, 20, key_format=self.keyfmt, value_format=self.valuefmt)
        ds.populate()

        # Start a transaction.
        self.session.begin_transaction()

        # Insert a new record.
        c = self.session.open_cursor(self.uri, None)
        c.set_key(ds.key(30))
        c.set_value(ds.value(30))
        self.assertEqual(c.insert(), 0)

        # Test that we can successfully modify our own record.
        mods = []
        mod = wiredtiger.Modify('ABCD', 3, 3)
        mods.append(mod)
        c.set_key(ds.key(30))
        self.assertEqual(c.modify(mods), 0)

        # Test that another transaction cannot modify our uncommitted record.
        xs = self.conn.open_session()
        xc = xs.open_cursor(self.uri, None)
        xs.begin_transaction()
        xc.set_key(ds.key(30))
        xc.set_value(ds.value(30))
        mods = []
        mod = wiredtiger.Modify('ABCD', 3, 3)
        mods.append(mod)
        xc.set_key(ds.key(30))
        self.assertEqual(xc.modify(mods), wiredtiger.WT_NOTFOUND)
        xs.rollback_transaction()

        # Rollback our transaction.
        self.session.rollback_transaction()

        # Test that we can't modify our aborted insert.
        self.session.begin_transaction()
        mods = []
        mod = wiredtiger.Modify('ABCD', 3, 3)
        mods.append(mod)
        c.set_key(ds.key(30))
        self.assertEqual(c.modify(mods), wiredtiger.WT_NOTFOUND)
        self.session.rollback_transaction()
Example #23
0
    def test_las(self):
        # Create a small table.
        uri = "table:test_las"
        nrows = 100
        ds = SimpleDataSet(self, uri, nrows, key_format="S")
        ds.populate()

        # Take a snapshot.
        self.session.snapshot("name=xxx")

        # Insert a large number of records, we'll hang if the lookaside table
        # isn't doing its thing.
        c = self.session.open_cursor(uri)
        bigvalue = "abcde" * 100
        for i in range(1, 1000000):
            c.set_key(ds.key(nrows + i))
            c.set_value(bigvalue)
            self.assertEqual(c.insert(), 0)
Example #24
0
 def test_smoke(self):
     ds = SimpleDataSet(self, self.uri, self.nentries,
         config=self.config, key_format=self.keyfmt)
     ds.populate()
     self.reopen_conn()
     c = self.session.open_cursor(self.uri, None)
     c.set_key(ds.key(100))
     self.assertEqual(c.search(), 0)
     self.assertEqual(c.get_value(), ds.value(100))
     c.set_key(ds.key(101))
     self.assertEqual(c.search(), 0)
     self.assertEqual(c.get_value(), ds.value(101))
     c.set_key(ds.key(9999))
     self.assertEqual(c.search(), 0)
     self.assertEqual(c.get_value(), ds.value(9999))
Example #25
0
    def test_cursor_random_reasonable_distribution(self):
        uri = self.type
        num_entries = self.records
        if uri == 'table:random':
            config = 'leaf_page_max=100MB'
        else:
            config = ''

        # Set the leaf-page-max value, otherwise the page might split.
        ds = SimpleDataSet(self, uri, num_entries, config=config)
        ds.populate()
        # Set up an array to track which keys are seen.
        visitedKeys = [0] * (num_entries + 1)
        # Set up a counter to track how often we find a sequential key.
        sequentialKeys = 0

        cursor = self.session.open_cursor(uri, None, 'next_random=true')
        lastKey = None
        for i in range(0, num_entries):
            self.assertEqual(cursor.next(), 0)
            current = cursor.get_key()
            current = int(current)
            visitedKeys[current] = visitedKeys[current] + 1
            if lastKey is not None and current == (lastKey + 1):
                sequentialKeys += 1
            lastKey = current

        differentKeys = sum(x > 0 for x in visitedKeys)

        # Debugging output (disabled):
        # print(visitedKeys)
        # print(differentKeys)
        # self.tty('differentKeys: ' + str(differentKeys) + ' of ' +
        #     str(num_entries) + ', ' +
        #     str((differentKeys * 100) // num_entries) + '%')
        # Can't test for non-sequential data when there is 1 item in the table
        if num_entries > 1:
            self.assertGreater(num_entries - 1, sequentialKeys,
                'cursor is returning sequential data')
        self.assertGreater(differentKeys, num_entries // 4,
            'next_random random distribution not adequate')
Example #26
0
    def test_checkpoint_las_reads(self):
        if not wiredtiger.timestamp_build():
            self.skipTest('requires a timestamp build')

        # Create a small table.
        uri = "table:test_las03"
        nrows = 100
        ds = SimpleDataSet(self, uri, nrows, key_format="S", value_format='u')
        ds.populate()
        bigvalue = "aaaaa" * 100

        # Initially load a large amount of data.
        cursor = self.session.open_cursor(uri)
        for i in range(1, 10000):
            cursor[ds.key(nrows + i)] = bigvalue
        cursor.close()
        self.session.checkpoint()

        # Check that lookaside works with an old timestamp.
        bigvalue2 = "ddddd" * 100
        self.conn.set_timestamp('stable_timestamp=' + timestamp_str(1))
        las_writes_start = self.get_stat(stat.conn.cache_write_lookaside)
        self.large_updates(self.session, uri, bigvalue2, ds, nrows, 10000)

        # If the test sizing is correct, the history will overflow the cache
        self.session.checkpoint()
        las_writes = self.get_stat(stat.conn.cache_write_lookaside) - las_writes_start
        self.assertGreaterEqual(las_writes, 0)

        for ts in range(2, 4):
            self.conn.set_timestamp('stable_timestamp=' + timestamp_str(ts))

            # Now just update one record and checkpoint again
            self.large_updates(self.session, uri, bigvalue2, ds, nrows, 1)

            las_reads_start = self.get_stat(stat.conn.cache_read_lookaside)
            self.session.checkpoint()
            las_reads = self.get_stat(stat.conn.cache_read_lookaside) - las_reads_start

            # Since we're dealing with eviction concurrent with checkpoints
            # and skewing is controlled by a heuristic, we can't put too tight
            # a bound on this.
            self.assertLessEqual(las_reads, 100)
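Note (not from the test suite): the get_stat helper called above is not shown. A plausible sketch, assuming the usual statistics-cursor layout where element [2] of an entry holds the value:

    def get_stat(self, stat_key):
        # Read a single connection-level statistic.
        stat_cursor = self.session.open_cursor('statistics:', None, None)
        value = stat_cursor[stat_key][2]
        stat_cursor.close()
        return value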
Example #27
0
    def test_modify_many(self):
        ds = SimpleDataSet(self,
            self.uri, 20, key_format=self.keyfmt, value_format='u')
        ds.populate()

        c = self.session.open_cursor(self.uri, None)
        c.set_key(ds.key(10))
        orig = 'abcdefghijklmnopqrstuvwxyz'
        c.set_value(orig)
        self.assertEqual(c.update(), 0)
        for i in range(0, 50000):
            new = "".join([random.choice(string.digits) for _ in range(5)])
            orig = orig[:10] + new + orig[15:]
            mods = []
            mod = wiredtiger.Modify(new, 10, 5)
            mods.append(mod)
            self.assertEqual(c.modify(mods), 0)

        c.set_key(ds.key(10))
        self.assertEqual(c.search(), 0)
        self.assertEqual(c.get_value(), orig)
Example #28
0
    def test_search_duplicate(self):
        if self.colvar == 0:
            return

        # Populate the tree.
        ds = SimpleDataSet(self, self.uri, 105, key_format=self.key_format,
                           value_format=self.value_format)
        ds.populate()

        # Set up deleted records before and after a set of duplicate records,
        # and make sure search/search-near returns the correct record.
        cursor = self.session.open_cursor(self.uri, None)
        for i in range(20, 100):
            cursor[ds.key(i)] = '=== IDENTICAL VALUE ==='
        for i in range(15, 25):
            cursor.set_key(ds.key(i))
            self.assertEqual(cursor.remove(), 0)
        for i in range(95, 106):
            cursor.set_key(ds.key(i))
            self.assertEqual(cursor.remove(), 0)
        cursor.close()

        # Reopen the connection, forcing it to disk and moving the records to
        # an on-page format.
        self.reopen_conn()

        # Open a cursor.
        cursor = self.session.open_cursor(self.uri, None)

        # Search-near for a record in the deleted set before the duplicate set,
        # which should succeed, returning the first record in the duplicate set.
        cursor.set_key(ds.key(18))
        self.assertEqual(cursor.search_near(), 1)
        self.assertEqual(cursor.get_key(), ds.key(25))

        # Search-near for a record in the deleted set after the duplicate set,
        # which should succeed, returning the last record in the duplicate set.
        cursor.set_key(ds.key(98))
        self.assertEqual(cursor.search_near(), -1)
        self.assertEqual(cursor.get_key(), ds.key(94))
Example #29
0
 def test_checkpoint_illegal_name(self):
     ds = SimpleDataSet(self, "file:checkpoint", 100, key_format='S')
     ds.populate()
     msg = '/the checkpoint name.*is reserved/'
     for conf in (
         'name=WiredTigerCheckpoint',
         'name=WiredTigerCheckpoint.',
         'name=WiredTigerCheckpointX',
         'drop=(from=WiredTigerCheckpoint)',
         'drop=(from=WiredTigerCheckpoint.)',
         'drop=(from=WiredTigerCheckpointX)',
         'drop=(to=WiredTigerCheckpoint)',
         'drop=(to=WiredTigerCheckpoint.)',
         'drop=(to=WiredTigerCheckpointX)'):
             self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
                 lambda: self.session.checkpoint(conf), msg)
     msg = '/WiredTiger objects should not include grouping/'
     for conf in (
         'name=check{point',
         'name=check\\point'):
             self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
                 lambda: self.session.checkpoint(conf), msg)
Example #30
0
    def test_checkpoint_last(self):
        # Create an object, change one record to an easily recognizable string,
        # then checkpoint it and open a cursor, confirming we see the correct
        # value. Repeat this action; we want to be sure the engine gets the
        # latest checkpoint information each time.
        uri = self.uri
        ds = SimpleDataSet(self, uri, 100, key_format=self.fmt)
        ds.populate()

        for value in ('FIRST', 'SECOND', 'THIRD', 'FOURTH', 'FIFTH'):
            # Update the object.
            cursor = self.session.open_cursor(uri, None, "overwrite")
            cursor[ds.key(10)] = value
            cursor.close()

            # Checkpoint the object.
            self.session.checkpoint()

            # Verify the "last" checkpoint sees the correct value.
            cursor = self.session.open_cursor(
                uri, None, "checkpoint=WiredTigerCheckpoint")
            self.assertEqual(cursor[ds.key(10)], value)
Example #31
0
    def test_checkpoint_snapshot(self):
        self.moresetup()

        ds = SimpleDataSet(self, self.uri, 0, \
                key_format=self.key_format, value_format=self.value_format, \
                config='log=(enabled=false)'+self.extraconfig)
        ds.populate()

        cursor = self.session.open_cursor(self.uri, None, "bulk")
        for i in range(1, self.nrows + 1):
            if self.value_format == '8t':
                cursor[i] = self.valuea
            else:
                cursor[i] = self.valuea + str(i)
        cursor.close()

        self.check(self.valuea, self.uri, self.nrows)

        session1 = self.conn.open_session()
        session1.begin_transaction()
        cursor1 = session1.open_cursor(self.uri)

        for i in range(1, self.nrows + 1):
            cursor1.set_key(ds.key(i))
            if self.value_format == '8t':
                cursor1.set_value(self.valueb)
            else:
                cursor1.set_value(self.valueb + str(i))
            self.assertEqual(cursor1.update(), 0)

        # Create a checkpoint thread
        done = threading.Event()
        ckpt = checkpoint_thread(self.conn, done)
        try:
            ckpt.start()

            # Wait for checkpoint to start before committing.
            ckpt_started = 0
            while not ckpt_started:
                stat_cursor = self.session.open_cursor('statistics:', None,
                                                       None)
                ckpt_started = stat_cursor[stat.conn.txn_checkpoint_running][2]
                stat_cursor.close()
                time.sleep(1)

            session1.commit_transaction()
            self.evict(self.uri, ds, self.nrows)
        finally:
            done.set()
            ckpt.join()

        # Take a backup and restore it.
        self.take_full_backup(".", self.backup_dir)
        self.reopen_conn(self.backup_dir)

        # Check the table contains the last checkpointed value.
        self.check(self.valuea, self.uri, self.nrows)

        stat_cursor = self.session.open_cursor('statistics:', None, None)
        inconsistent_ckpt = stat_cursor[stat.conn.txn_rts_inconsistent_ckpt][2]
        keys_removed = stat_cursor[stat.conn.txn_rts_keys_removed][2]
        stat_cursor.close()

        self.assertGreater(inconsistent_ckpt, 0)
        self.assertEqual(keys_removed, 0)
Example #32
0
    def test_rollback_to_stable(self):
        nrows = 1500

        # Prepared transactions are not yet supported for column-store tables.
        if self.prepare and self.key_format == 'r':
            self.skipTest('prepared transactions are not yet supported for column-store tables')

        # Create a table without logging.
        self.pr("create/populate table")
        uri = "table:rollback_to_stable14"
        ds = SimpleDataSet(
            self, uri, 0, key_format=self.key_format, value_format="S", config='log=(enabled=false)')
        ds.populate()

        # Pin oldest and stable to timestamp 10.
        self.conn.set_timestamp('oldest_timestamp=' + timestamp_str(10) +
            ',stable_timestamp=' + timestamp_str(10))

        value_a = "aaaaa" * 100

        value_modQ = mod_val(value_a, 'Q', 0)
        value_modR = mod_val(value_modQ, 'R', 1)
        value_modS = mod_val(value_modR, 'S', 2)
        value_modT = mod_val(value_modS, 'T', 3)

        # Perform a combination of modifies and updates.
        self.pr("large updates and modifies")
        self.large_updates(uri, value_a, ds, nrows, self.prepare, 20)
        self.large_modifies(uri, 'Q', ds, 0, 1, nrows, self.prepare, 30)
        self.large_modifies(uri, 'R', ds, 1, 1, nrows, self.prepare, 40)
        self.large_modifies(uri, 'S', ds, 2, 1, nrows, self.prepare, 50)
        self.large_modifies(uri, 'T', ds, 3, 1, nrows, self.prepare, 60)

        # Verify data is visible and correct.
        self.check(value_a, uri, nrows, 20)
        self.check(value_modQ, uri, nrows, 30)
        self.check(value_modR, uri, nrows, 40)
        self.check(value_modS, uri, nrows, 50)
        self.check(value_modT, uri, nrows, 60)

        # Pin stable to timestamp 60 if prepare, otherwise 50.
        if self.prepare:
            self.conn.set_timestamp('stable_timestamp=' + timestamp_str(60))
        else:
            self.conn.set_timestamp('stable_timestamp=' + timestamp_str(50))

        # Create a checkpoint thread
        done = threading.Event()
        ckpt = checkpoint_thread(self.conn, done)
        try:
            self.pr("start checkpoint")
            ckpt.start()

            # Perform several modifies in parallel with checkpoint.
            # Rollbacks may occur when checkpoint is running, so retry as needed.
            self.pr("modifies")
            retry_rollback(self, 'modify ds1, W', None,
                           lambda: self.large_modifies(uri, 'W', ds, 4, 1, nrows, self.prepare, 70))
            retry_rollback(self, 'modify ds1, X', None,
                           lambda: self.large_modifies(uri, 'X', ds, 5, 1, nrows, self.prepare, 80))
            retry_rollback(self, 'modify ds1, Y', None,
                           lambda: self.large_modifies(uri, 'Y', ds, 6, 1, nrows, self.prepare, 90))
            retry_rollback(self, 'modify ds1, Z', None,
                           lambda: self.large_modifies(uri, 'Z', ds, 7, 1, nrows, self.prepare, 100))
        finally:
            done.set()
            ckpt.join()

        # Simulate a server crash and restart.
        self.pr("restart")
        simulate_crash_restart(self, ".", "RESTART")
        self.pr("restart complete")

        stat_cursor = self.session.open_cursor('statistics:', None, None)
        calls = stat_cursor[stat.conn.txn_rts][2]
        hs_removed = stat_cursor[stat.conn.txn_rts_hs_removed][2]
        hs_sweep = stat_cursor[stat.conn.txn_rts_sweep_hs_keys][2]
        hs_restore_updates = stat_cursor[stat.conn.txn_rts_hs_restore_updates][2]
        keys_removed = stat_cursor[stat.conn.txn_rts_keys_removed][2]
        keys_restored = stat_cursor[stat.conn.txn_rts_keys_restored][2]
        pages_visited = stat_cursor[stat.conn.txn_rts_pages_visited][2]
        upd_aborted = stat_cursor[stat.conn.txn_rts_upd_aborted][2]
        stat_cursor.close()

        self.assertEqual(calls, 0)
        self.assertEqual(keys_removed, 0)
        self.assertEqual(hs_restore_updates, nrows)
        self.assertEqual(keys_restored, 0)
        if self.prepare:
            self.assertGreaterEqual(upd_aborted, 0)
        else:
            self.assertEqual(upd_aborted, 0)
        self.assertGreater(pages_visited, 0)
        self.assertGreaterEqual(hs_removed, nrows)
        self.assertGreaterEqual(hs_sweep, 0)

        # Check that the correct data is seen at and after the stable timestamp.
        self.check(value_a, uri, nrows, 20)
        self.check(value_modQ, uri, nrows, 30)
        self.check(value_modR, uri, nrows, 40)
        self.check(value_modS, uri, nrows, 50)

        # The test may output the following message in eviction under cache pressure. Ignore that.
        self.ignoreStdoutPatternIfExists("oldest pinned transaction ID rolled back for eviction")
Example #33
0
    def test_rollback_to_stable(self):
        nrows = 1000000

        # Create a table.
        uri = "table:rollback_to_stable12"
        ds = SimpleDataSet(self,
                           uri,
                           0,
                           key_format=self.key_format,
                           value_format=self.value_format,
                           config='split_pct=50')
        ds.populate()

        if self.value_format == '8t':
            value_a = 97
            value_b = 98
        else:
            value_a = "aaaaa" * 100
            value_b = "bbbbb" * 100

        # Pin oldest and stable to timestamp 10.
        self.conn.set_timestamp('oldest_timestamp=' + self.timestamp_str(10) +
                                ',stable_timestamp=' + self.timestamp_str(10))

        # Perform several updates.
        self.large_updates(uri, value_a, ds, nrows, self.prepare, 20)

        # Verify data is visible and correct.
        self.check(value_a, uri, nrows, None, 20)

        # Pin stable to timestamp 30 if prepare, otherwise 20.
        if self.prepare:
            self.conn.set_timestamp('stable_timestamp=' +
                                    self.timestamp_str(30))
        else:
            self.conn.set_timestamp('stable_timestamp=' +
                                    self.timestamp_str(20))

        # Load a single row modification to be removed.
        commit_ts = 30
        cursor = self.session.open_cursor(uri)
        self.session.begin_transaction()
        cursor[ds.key(1)] = value_b
        if self.prepare:
            self.session.prepare_transaction('prepare_timestamp=' +
                                             self.timestamp_str(commit_ts - 1))
            self.session.timestamp_transaction('commit_timestamp=' +
                                               self.timestamp_str(commit_ts))
            self.session.timestamp_transaction('durable_timestamp=' +
                                               self.timestamp_str(commit_ts +
                                                                  1))
            self.session.commit_transaction()
        else:
            self.session.commit_transaction('commit_timestamp=' +
                                            self.timestamp_str(commit_ts))
        cursor.close()

        self.session.checkpoint()

        # Simulate a server crash and restart.
        simulate_crash_restart(self, ".", "RESTART")

        # Check that the correct data is seen at and after the stable timestamp.
        self.check(value_a, uri, nrows, None, 30)

        stat_cursor = self.session.open_cursor('statistics:', None, None)
        calls = stat_cursor[stat.conn.txn_rts][2]
        hs_removed = stat_cursor[stat.conn.txn_rts_hs_removed][2]
        hs_sweep = stat_cursor[stat.conn.txn_rts_sweep_hs_keys][2]
        keys_removed = stat_cursor[stat.conn.txn_rts_keys_removed][2]
        keys_restored = stat_cursor[stat.conn.txn_rts_keys_restored][2]
        pages_visited = stat_cursor[stat.conn.txn_rts_pages_visited][2]
        pages_walk_skipped = stat_cursor[
            stat.conn.txn_rts_tree_walk_skip_pages][2]
        upd_aborted = stat_cursor[stat.conn.txn_rts_upd_aborted][2]
        stat_cursor.close()

        self.assertEqual(calls, 0)
        self.assertEqual(keys_removed, 0)
        self.assertEqual(keys_restored, 0)
        self.assertGreaterEqual(upd_aborted, 0)
        self.assertGreater(pages_visited, 0)
        self.assertGreaterEqual(hs_removed, 0)
        self.assertEqual(hs_sweep, 0)
        self.assertGreaterEqual(pages_walk_skipped, 0)
Example #34
0
    def test_timestamp(self):
        uri = "table:test_timestamp19"
        create_params = 'value_format=S,key_format=i'
        self.session.create(uri, create_params)

        ds = SimpleDataSet(self,
                           uri,
                           0,
                           key_format="i",
                           value_format="S",
                           config='log=(enabled=false)')
        ds.populate()

        nrows = 1000
        value_x = 'x' * 1000
        value_y = 'y' * 1000
        value_z = 'z' * 1000

        # Set the oldest and stable timestamps to 10.
        self.conn.set_timestamp('oldest_timestamp=' + timestamp_str(10) +
                                ', stable_timestamp=' + timestamp_str(10))

        # Insert values with varying timestamps.
        self.updates(uri, value_x, ds, nrows, 20)
        self.updates(uri, value_y, ds, nrows, 30)
        self.updates(uri, value_z, ds, nrows, 40)

        # Perform a checkpoint.
        self.session.checkpoint('use_timestamp=true')

        # Move the oldest and stable timestamps to 40.
        self.conn.set_timestamp('oldest_timestamp=' + timestamp_str(40) +
                                ', stable_timestamp=' + timestamp_str(40))

        # Update values.
        self.updates(uri, value_z, ds, nrows, 50)
        self.updates(uri, value_x, ds, nrows, 60)
        self.updates(uri, value_y, ds, nrows, 70)

        # Perform a checkpoint.
        self.session.checkpoint('use_timestamp=true')

        # Close and reopen the connection.
        self.close_conn()
        self.conn = self.setUpConnectionOpen('.')
        self.session = self.setUpSessionOpen(self.conn)

        # The oldest timestamp on recovery is 40. Trying to set it earlier is a no-op.
        self.conn.set_timestamp('oldest_timestamp=' + timestamp_str(10))
        self.assertTimestampsEqual(self.conn.query_timestamp('get=oldest'),
                                   timestamp_str(40))

        # Trying to set an earlier stable timestamp is an error.
        self.assertRaisesWithMessage(
            wiredtiger.WiredTigerError, lambda: self.conn.set_timestamp(
                'stable_timestamp=' + timestamp_str(10)),
            '/oldest timestamp \(0, 40\) must not be later than stable timestamp \(0, 10\)/'
        )
        self.assertTimestampsEqual(self.conn.query_timestamp('get=stable'),
                                   timestamp_str(40))

        # Move the oldest and stable timestamps to 70.
        self.conn.set_timestamp('oldest_timestamp=' + timestamp_str(70) +
                                ', stable_timestamp=' + timestamp_str(70))
        self.assertTimestampsEqual(self.conn.query_timestamp('get=oldest'),
                                   timestamp_str(70))
        self.assertTimestampsEqual(self.conn.query_timestamp('get=stable'),
                                   timestamp_str(70))
Example #35
0
    def test_readonly(self):
        create_params = 'key_format=i,value_format=i'
        entries = 10
        # Create a database and a table.
        SimpleDataSet(self,
                      self.uri,
                      entries,
                      key_format='i',
                      value_format='i').populate()

        #
        # Now close and reopen.  Note that the connection function
        # above will reopen it readonly.
        self.reopen_conn()
        msg = '/Unsupported/'
        c = self.session.open_cursor(self.uri, None, None)
        for op in self.cursor_ops:
            c.set_key(1)
            c.set_value(1)
            if op == 'insert':
                self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
                                             lambda: c.insert(), msg)
            elif op == 'remove':
                self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
                                             lambda: c.remove(), msg)
            elif op == 'update':
                self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
                                             lambda: c.update(), msg)
            else:
                self.fail('Unknown cursor operation: ' + op)
        c.close()
        for op in self.session_ops:
            if op == 'alter':
                self.assertRaisesWithMessage(
                    wiredtiger.WiredTigerError,
                    lambda: self.session.alter(self.uri, None), msg)
            elif op == 'create':
                self.assertRaisesWithMessage(
                    wiredtiger.WiredTigerError,
                    lambda: self.session.create(self.uri2, create_params), msg)
            elif op == 'compact':
                self.assertRaisesWithMessage(
                    wiredtiger.WiredTigerError,
                    lambda: self.session.compact(self.uri, None), msg)
            elif op == 'drop':
                self.assertRaisesWithMessage(
                    wiredtiger.WiredTigerError,
                    lambda: self.session.drop(self.uri, None), msg)
            elif op == 'log_flush':
                self.assertRaisesWithMessage(
                    wiredtiger.WiredTigerError,
                    lambda: self.session.log_flush(None), msg)
            elif op == 'log_printf':
                self.assertRaisesWithMessage(
                    wiredtiger.WiredTigerError,
                    lambda: self.session.log_printf("test"), msg)
            elif op == 'rebalance':
                self.assertRaisesWithMessage(
                    wiredtiger.WiredTigerError,
                    lambda: self.session.rebalance(self.uri, None), msg)
            elif op == 'rename':
                self.assertRaisesWithMessage(
                    wiredtiger.WiredTigerError,
                    lambda: self.session.rename(self.uri, self.uri2, None),
                    msg)
            elif op == 'salvage':
                self.assertRaisesWithMessage(
                    wiredtiger.WiredTigerError,
                    lambda: self.session.salvage(self.uri, None), msg)
            elif op == 'truncate':
                self.assertRaisesWithMessage(
                    wiredtiger.WiredTigerError,
                    lambda: self.session.truncate(self.uri, None, None, None),
                    msg)
            elif op == 'upgrade':
                self.assertRaisesWithMessage(
                    wiredtiger.WiredTigerError,
                    lambda: self.session.upgrade(self.uri, None), msg)
            else:
                self.fail('Unknown session method: ' + op)
Example #36
0
    def test_checkpoint_target(self):
        # Create 3 objects, change one record to an easily recognizable string.
        uri = self.uri + '1'
        ds1 = SimpleDataSet(self, uri, 100, key_format=self.fmt)
        ds1.populate()
        self.update(uri, ds1, 'ORIGINAL')

        uri = self.uri + '2'
        ds2 = SimpleDataSet(self, uri, 100, key_format=self.fmt)
        ds2.populate()
        self.update(uri, ds2, 'ORIGINAL')

        uri = self.uri + '3'
        ds3 = SimpleDataSet(self, uri, 100, key_format=self.fmt)
        ds3.populate()
        self.update(uri, ds3, 'ORIGINAL')

        # Checkpoint all three objects.
        self.session.checkpoint("name=checkpoint-1")

        # Update all 3 objects, then checkpoint two of the objects with the
        # same checkpoint name.
        self.update(self.uri + '1', ds1, 'UPDATE')
        self.update(self.uri + '2', ds2, 'UPDATE')
        self.update(self.uri + '3', ds3, 'UPDATE')
        target = 'target=("' + self.uri + '1"' + ',"' + self.uri + '2")'
        self.session.checkpoint("name=checkpoint-1," + target)

        # Confirm the checkpoint has the old value in objects that weren't
        # checkpointed, and the new value in objects that were checkpointed.
        self.check(self.uri + '1', ds1, 'UPDATE')
        self.check(self.uri + '2', ds2, 'UPDATE')
        self.check(self.uri + '3', ds3, 'ORIGINAL')
Example #37
0
    def test_modify_abort(self):
        ds = SimpleDataSet(self,
                           self.uri,
                           20,
                           key_format=self.keyfmt,
                           value_format=self.valuefmt)
        ds.populate()

        # Start a transaction.
        self.session.begin_transaction()

        # Insert a new record.
        c = self.session.open_cursor(self.uri, None)
        c.set_key(ds.key(30))
        c.set_value(ds.value(30))
        self.assertEqual(c.insert(), 0)

        # Test that we can successfully modify our own record.
        mods = []
        mod = wiredtiger.Modify('ABCD', 3, 3)
        mods.append(mod)
        c.set_key(ds.key(30))
        self.assertEqual(c.modify(mods), 0)

        # Test that another transaction cannot modify our uncommitted record.
        xs = self.conn.open_session()
        xc = xs.open_cursor(self.uri, None)
        xs.begin_transaction()
        xc.set_key(ds.key(30))
        xc.set_value(ds.value(30))
        mods = []
        mod = wiredtiger.Modify('ABCD', 3, 3)
        mods.append(mod)
        xc.set_key(ds.key(30))
        self.assertEqual(xc.modify(mods), wiredtiger.WT_NOTFOUND)
        xs.rollback_transaction()

        # Rollback our transaction.
        self.session.rollback_transaction()

        # Test that we can't modify our aborted insert.
        self.session.begin_transaction()
        mods = []
        mod = wiredtiger.Modify('ABCD', 3, 3)
        mods.append(mod)
        c.set_key(ds.key(30))
        self.assertEqual(c.modify(mods), wiredtiger.WT_NOTFOUND)
        self.session.rollback_transaction()
Example #38
0
    def test_truncate07(self):
        nrows = 10000

        uri = "table:truncate07"
        ds = SimpleDataSet(self,
                           uri,
                           0,
                           key_format=self.key_format,
                           value_format=self.value_format,
                           config=self.extraconfig)
        ds.populate()

        if self.value_format == '8t':
            value_a = 97
            value_b = 98
        else:
            value_a = "aaaaa" * 100
            value_b = "bbbbb" * 100

        # Pin oldest and stable timestamps to 1.
        self.conn.set_timestamp('oldest_timestamp=' + self.timestamp_str(1) +
                                ',stable_timestamp=' + self.timestamp_str(1))

        # Write a bunch of data at time 10.
        cursor = self.session.open_cursor(ds.uri)
        self.session.begin_transaction()
        for i in range(1, nrows + 1):
            cursor[ds.key(i)] = value_a
        self.session.commit_transaction('commit_timestamp=' +
                                        self.timestamp_str(10))

        # This data can be stable; move the stable timestamp forward.
        self.conn.set_timestamp('stable_timestamp=' + self.timestamp_str(10))

        # Munge some of it at time 20. Touch every other even-numbered key in the middle third of
        # the data. (This allows using the odd keys to evict.)
        #
        # Use a separate session because we're going to prepare the transaction and we want to
        # be able to do other things afterward.
        session2 = self.conn.open_session()
        cursor2 = session2.open_cursor(ds.uri)
        session2.begin_transaction()
        start = nrows // 3
        if start % 2 == 1:
            start += 1
        for i in range(start, 2 * nrows // 3, 2):
            cursor2.set_key(ds.key(i))
            if self.munge_with_update:
                cursor2.set_value(value_b)
                self.assertEqual(cursor2.update(), 0)
            else:
                self.assertEqual(cursor2.remove(), 0)
        session2.prepare_transaction('prepare_timestamp=' +
                                     self.timestamp_str(20))
        cursor2.close()

        # Evict the lot so that we can fast-truncate.
        # For now, evict every 4th key explicitly; FUTURE: improve this to evict each page only
        # once when we have a good way to do that.
        if self.do_evict:
            for i in range(1, nrows + 1, 4):
                self.evict(ds.uri, ds.key(i), value_a)

        if self.do_checkpoint:
            self.session.checkpoint()

        # Truncate the data, including what we prepared.
        self.session.begin_transaction()
        err = self.truncate(ds.uri, ds.key, nrows // 4, nrows - nrows // 4)
        self.assertEqual(err, WT_ROLLBACK)
        self.session.rollback_transaction()

        # Move the stable timestamp forward before exiting so we don't waste time rolling
        # back the rest of the changes during shutdown.
        self.conn.set_timestamp('stable_timestamp=' + self.timestamp_str(50))
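Note (not from the test suite): as asserted above, truncating a range that overlaps a prepared update fails with WT_ROLLBACK. A hedged sketch of a retry wrapper a caller might use once the preparing transaction resolves (helper name and retry policy are hypothetical):

    import time
    from wiredtiger import WiredTigerError, wiredtiger_strerror, WT_ROLLBACK

    def truncate_with_retry(session, uri, start_key, stop_key, retries=5):
        for _ in range(retries):
            c1 = session.open_cursor(uri, None)
            c1.set_key(start_key)
            c2 = session.open_cursor(uri, None)
            c2.set_key(stop_key)
            try:
                session.truncate(None, c1, c2, None)
                return True
            except WiredTigerError as e:
                # Retry only on WT_ROLLBACK; re-raise anything else.
                if wiredtiger_strerror(WT_ROLLBACK) not in str(e):
                    raise
                time.sleep(0.1)
            finally:
                c1.close()
                c2.close()
        return False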
Example #39
0
    def test_rollback_to_stable(self):
        nrows = 1000

        # Prepared transactions are not yet supported for column-store tables.
        if self.key_format == 'r':
            self.skipTest(
                'prepared transactions are not yet supported for column-store tables')

        # Create a table without logging.
        uri = "table:rollback_to_stable21"
        ds = SimpleDataSet(self,
                           uri,
                           0,
                           key_format=self.key_format,
                           value_format="S",
                           config='log=(enabled=false)')
        ds.populate()

        # Pin oldest and stable timestamps to 10.
        self.conn.set_timestamp('oldest_timestamp=' + timestamp_str(10) +
                                ',stable_timestamp=' + timestamp_str(10))

        valuea = 'a' * 400
        valueb = 'b' * 400

        cursor = self.session.open_cursor(uri)
        self.session.begin_transaction()
        for i in range(1, nrows + 1):
            cursor[i] = valuea

        self.session.commit_transaction('commit_timestamp=' +
                                        timestamp_str(30))

        self.session.begin_transaction()
        for i in range(1, nrows + 1):
            cursor[i] = valueb

        cursor.reset()
        cursor.close()
        self.session.prepare_transaction('prepare_timestamp=' +
                                         timestamp_str(20))

        s = self.conn.open_session()
        s.begin_transaction('ignore_prepare = true')
        # Configure debug behavior on the cursor so that resetting it evicts
        # the page the cursor is positioned on.
        evict_cursor = s.open_cursor(uri, None, "debug=(release_evict)")

        for i in range(1, nrows + 1):
            evict_cursor.set_key(i)
            self.assertEqual(evict_cursor.search(), 0)
            self.assertEqual(evict_cursor.get_value(), valuea)
            evict_cursor.reset()

        s.rollback_transaction()
        self.conn.set_timestamp('stable_timestamp=' + timestamp_str(40))
        s.checkpoint()

        # Roll back the prepared transaction.
        self.session.rollback_transaction()

        # Simulate a server crash and restart.
        self.pr("restart")
        simulate_crash_restart(self, ".", "RESTART")
        self.pr("restart complete")

        self.check(valuea, uri, nrows, 40)

        stat_cursor = self.session.open_cursor('statistics:', None, None)
        hs_removed = stat_cursor[stat.conn.txn_rts_hs_removed][2]
        stat_cursor.close()

        self.assertGreater(hs_removed, 0)
Example #40
0
    def test_timestamp_randomizer(self):
        # Local function to generate a random timestamp, or return -1
        def maybe_ts(do_gen, iternum):
            if do_gen:
                return self.gen_ts(iternum)
            else:
                return -1

        if wttest.islongtest():
            iterations = 100000
        else:
            iterations = 1000

        create_params = 'key_format={},value_format={}'.format(self.key_format, self.value_format)
        self.session.create(self.uri, create_params)

        self.set_global_timestamps(1, 1, -1)

        # Create a table with no entries.
        ds = SimpleDataSet(
            self, self.uri, 0, key_format=self.key_format, value_format=self.value_format)

        # We do a bunch of iterations of transactions, prepares, and global timestamp
        # calls, with timestamps that are sometimes valid and sometimes not. We use the
        # iteration number as an "approximate timestamp" and generate timestamps for our
        # calls that are near that number (within 10). Thus, as the test runs, the
        # timestamps generally get larger.
        # We always know the state of global timestamps, so we can predict the success/failure
        # on each call.
        self.commit_value = '<NOT_SET>'
        for iternum in range(1, iterations):
            self.pr('\n===== ITERATION ' + str(iternum) + '/' + str(iterations))
            self.pr('RANDOM: ({0},{1})'.format(self.rand.seedw,self.rand.seedz))
            if self.rand.rand32() % 10 != 0:
                commit_ts = self.gen_ts(iternum)
                durable_ts = self.gen_ts(iternum)
                do_prepare = (self.rand.rand32() % 20 == 0)
                if self.rand.rand32() % 2 == 0:
                    read_ts = self.gen_ts(iternum)
                else:
                    read_ts = -1   # no read_timestamp used in txn

                # Out-of-order timestamps do not work with prepared updates, so the
                # commit timestamp should always be greater than the last durable timestamp.
                if commit_ts <= self.last_durable:
                    commit_ts = self.last_durable + 1

                if do_prepare:
                    # If we are doing a prepare, we must abide by some additional
                    # rules; if we don't, WiredTiger will panic immediately.
                    if commit_ts < self.oldest_ts:
                        commit_ts = self.oldest_ts
                    if durable_ts < commit_ts:
                        durable_ts = commit_ts
                    if durable_ts <= self.stable_ts:
                        durable_ts = self.stable_ts + 1
                value = self.gen_value(iternum, commit_ts)
                self.updates(value, ds, do_prepare, commit_ts, durable_ts, read_ts)

            if self.rand.rand32() % 2 == 0:
                # Set some combination of the global timestamps
                r = self.rand.rand32() % 16
                oldest = maybe_ts((r & 0x1) != 0, iternum)
                stable = maybe_ts((r & 0x2) != 0, iternum)
                commit = maybe_ts((r & 0x4) != 0, iternum)
                durable = maybe_ts((r & 0x8) != 0, iternum)
                self.set_global_timestamps(oldest, stable, durable)

        # Make sure the resulting rows are what we expect.
        cursor = self.session.open_cursor(self.uri)
        expect_key = 1
        expect_value = self.commit_value
        for k,v in cursor:
            self.assertEqual(k, expect_key)
            self.assertEqual(v, expect_value)
            expect_key += 1

        # Although it's theoretically possible to never successfully update a single row,
        # with a large number of iterations that should never happen.  I'd rather catch
        # a test code error where we mistakenly don't update any rows.
        self.assertGreater(expect_key, 1)
        cursor.close()
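Note (not from the test suite): gen_ts is not shown. Based solely on the comment above ("timestamps ... near that number (within 10)"), a purely illustrative reconstruction might be the following; the real helper may differ:

    def gen_ts(self, iternum):
        # Hypothetical: a timestamp near the iteration number, within 10 either way.
        return max(1, iternum + self.rand.rand32() % 21 - 10)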
Example #41
0
    def test_search_duplicate(self):
        if self.colvar == 0:
            return

        # Populate the tree.
        ds = SimpleDataSet(self,
                           self.uri,
                           105,
                           key_format=self.key_format,
                           value_format=self.value_format)
        ds.populate()

        # Set up deleted records before and after a set of duplicate records,
        # and make sure search/search-near returns the correct record.
        cursor = self.session.open_cursor(self.uri, None)
        for i in range(20, 100):
            cursor[ds.key(i)] = '=== IDENTICAL VALUE ==='
        for i in range(15, 25):
            cursor.set_key(ds.key(i))
            self.assertEqual(cursor.remove(), 0)
        for i in range(95, 106):
            cursor.set_key(ds.key(i))
            self.assertEqual(cursor.remove(), 0)
        cursor.close()

        # Reopen the connection, forcing it to disk and moving the records to
        # an on-page format.
        self.reopen_conn()

        # Open a cursor.
        cursor = self.session.open_cursor(self.uri, None)

        # Search-near for a record in the deleted set before the duplicate set,
        # which should succeed, returning the first record in the duplicate set.
        cursor.set_key(ds.key(18))
        self.assertEqual(cursor.search_near(), 1)
        self.assertEqual(cursor.get_key(), ds.key(25))

        # Search-near for a record in the deleted set after the duplicate set,
        # which should succeed, returning the last record in the duplicate set.
        cursor.set_key(ds.key(98))
        self.assertEqual(cursor.search_near(), -1)
        self.assertEqual(cursor.get_key(), ds.key(94))
Example #42
0
    def test_checkpoint_snapshot(self):

        ds = SimpleDataSet(self, self.uri, 0,
                key_format=self.key_format, value_format=self.value_format,
                config='leaf_page_max=4k')
        ds.populate()

        if self.value_format == '8t':
            valuea = 97
            valueb = 98
            valuec = 99
        else:
            valuea = "aaaaa" * 100
            valueb = "bbbbb" * 100
            valuec = "ccccc" * 100

        session1 = self.conn.open_session()
        session1.begin_transaction()
        cursor1 = session1.open_cursor(self.uri)
        for i in range(self.nrows + 1, self.nrows + 2):
            cursor1.set_key(ds.key(i))
            cursor1.set_value(valueb)
            self.assertEqual(cursor1.insert(), 0)

        self.large_updates(self.uri, valuea, ds, self.nrows)
        self.check(valuea, self.uri, self.nrows)

        self.session.checkpoint()
        session1.rollback_transaction()
        self.reopen_conn()

        # Check the table contains the last checkpointed value.
        self.session.breakpoint()
        self.check(valuea, self.uri, self.nrows)

        session1 = self.conn.open_session()
        session1.begin_transaction()
        cursor1 = session1.open_cursor(self.uri)
        for i in range(self.nrows + 1, self.nrows + 2):
            cursor1.set_key(ds.key(i))
            cursor1.set_value(valueb)
            self.assertEqual(cursor1.insert(), 0)

        self.session.begin_transaction()
        cursor = self.session.open_cursor(self.uri)
        for i in range(1, 2):
            cursor.set_key(ds.key(i))
            cursor.set_value(valuec)
            self.assertEqual(cursor.update(), 0)
        self.session.commit_transaction()

        self.session.checkpoint()
        session1.rollback_transaction()

        # Simulate a server crash and restart.
        simulate_crash_restart(self, ".", "RESTART")

        stat_cursor = self.session.open_cursor('statistics:', None, None)
        inconsistent_ckpt = stat_cursor[stat.conn.txn_rts_inconsistent_ckpt][2]
        keys_removed = stat_cursor[stat.conn.txn_rts_keys_removed][2]
        keys_restored = stat_cursor[stat.conn.txn_rts_keys_restored][2]
        pages_skipped = stat_cursor[stat.conn.txn_rts_tree_walk_skip_pages][2]
        upd_aborted = stat_cursor[stat.conn.txn_rts_upd_aborted][2]
        stat_cursor.close()

        self.assertGreater(inconsistent_ckpt, 0)
        self.assertEqual(upd_aborted, 0)
        self.assertGreaterEqual(keys_removed, 0)
        self.assertEqual(keys_restored, 0)
        self.assertGreater(pages_skipped, 0)
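    # large_updates() and check() are helpers defined outside this excerpt. A
    # minimal sketch of both, assuming untimestamped per-row transactions and a
    # simple value/row-count verification (illustrative only):
    def large_updates_sketch(self, uri, value, ds, nrows):
        cursor = self.session.open_cursor(uri)
        for i in range(1, nrows + 1):
            self.session.begin_transaction()
            cursor[ds.key(i)] = value
            self.session.commit_transaction()
        cursor.close()

    def check_sketch(self, check_value, uri, nrows):
        cursor = self.session.open_cursor(uri)
        count = 0
        for k, v in cursor:
            self.assertEqual(v, check_value)
            count += 1
        cursor.close()
        self.assertEqual(count, nrows)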
Example #43
0
    def test_search_invisible_one(self):
        # Populate the tree.
        ds = SimpleDataSet(self,
                           self.uri,
                           100,
                           key_format=self.key_format,
                           value_format=self.value_format)
        ds.populate()

        # Delete a range of records.
        for i in range(5, 10):
            cursor = self.session.open_cursor(self.uri, None)
            cursor.set_key(ds.key(i))
            self.assertEqual(cursor.remove(), 0)

        # Reopen the connection, forcing it to disk and moving the records to
        # an on-page format.
        self.reopen_conn()

        # Add updates to the existing records (in both the deleted and undeleted
        # range), as well as some new records after the end. Put the updates in
        # a separate transaction so they're invisible to another cursor.
        self.session.begin_transaction()
        cursor = self.session.open_cursor(self.uri, None)
        for i in range(5, 10):
            cursor[ds.key(i)] = ds.value(i + 1000)
        for i in range(30, 40):
            cursor[ds.key(i)] = ds.value(i + 1000)
        for i in range(100, 140):
            cursor[ds.key(i)] = ds.value(i + 1000)

        # Open a separate session and cursor.
        s = self.conn.open_session()
        cursor = s.open_cursor(self.uri, None)

        # Search for an existing record in the deleted range, should not find
        # it.
        for i in range(5, 10):
            cursor.set_key(ds.key(i))
            if self.empty:
                # Fixed-length column-store rows always exist.
                self.assertEqual(cursor.search(), 0)
                self.assertEqual(cursor.get_key(), i)
                self.assertEqual(cursor.get_value(), 0)
            else:
                self.assertEqual(cursor.search(), wiredtiger.WT_NOTFOUND)

        # Search for an existing record in the updated range, should see the
        # original value.
        for i in range(30, 40):
            cursor.set_key(ds.key(i))
            self.assertEqual(cursor.search(), 0)
            self.assertEqual(cursor.get_key(), ds.key(i))

        # Search for an added record, should not find it.
        for i in range(120, 130):
            cursor.set_key(ds.key(i))
            if self.empty:
                # Invisible updates to fixed-length column-store objects are
                # invisible to the reader, but the fact that they exist past
                # the end of the initial records causes the instantiation of
                # empty records: confirm successful return of an empty row.
                self.assertEqual(cursor.search(), 0)
                self.assertEqual(cursor.get_key(), i)
                self.assertEqual(cursor.get_value(), 0)
            else:
                # Otherwise, we should not find any matching records.
                self.assertEqual(cursor.search(), wiredtiger.WT_NOTFOUND)

        # Search-near for an existing record in the deleted range, should find
        # the next largest record. (This depends on the implementation behavior
        # which currently includes a bias to prefix search.)
        for i in range(5, 10):
            cursor.set_key(ds.key(i))
            if self.empty:
                # Fixed-length column-store rows always exist.
                self.assertEqual(cursor.search_near(), 0)
                self.assertEqual(cursor.get_key(), i)
                self.assertEqual(cursor.get_value(), 0)
            else:
                self.assertEqual(cursor.search_near(), 1)
                self.assertEqual(cursor.get_key(), ds.key(10))

        # Search-near for an existing record in the updated range, should see
        # the original value.
        for i in range(30, 40):
            cursor.set_key(ds.key(i))
            self.assertEqual(cursor.search_near(), 0)
            self.assertEqual(cursor.get_key(), ds.key(i))

        # Search-near for an added record, should find the previous largest
        # record.
        for i in range(120, 130):
            cursor.set_key(ds.key(i))
            if self.empty:
                # Invisible updates to fixed-length column-store objects are
                # invisible to the reader, but the fact that they exist past
                # the end of the initial records causes the instantiation of
                # empty records: confirm successful return of an empty row.
                self.assertEqual(cursor.search_near(), 0)
                self.assertEqual(cursor.get_key(), i)
                self.assertEqual(cursor.get_value(), 0)
            else:
                self.assertEqual(cursor.search_near(), -1)
                self.assertEqual(cursor.get_key(), ds.key(100))
Example #44
0
    def test_update_restore_evict_recovery(self):
        uri = 'table:test_debug_mode10'
        nrows = 10000

        # Create our table.
        ds = SimpleDataSet(self, uri, 0, key_format='i', value_format='S',
                           config='log=(enabled=false)')
        ds.populate()

        value_a = 'a' * 500
        value_b = 'b' * 500
        value_c = 'c' * 500
        value_d = 'd' * 500

        # Perform several updates.
        self.large_updates(uri, value_a, ds, nrows, False, 20)
        self.large_updates(uri, value_b, ds, nrows, False, 30)
        self.large_updates(uri, value_c, ds, nrows, False, 40)
        # Verify data is visible and correct.
        self.check(value_a, uri, nrows, 20)
        self.check(value_b, uri, nrows, 30)
        self.check(value_c, uri, nrows, 40)

        # Pin the stable timestamp to 40. We will be validating the state of the data post-stable timestamp
        # after we perform a recovery.
        self.conn.set_timestamp('stable_timestamp=' + self.timestamp_str(40))

        # Perform additional updates post-stable timestamp.
        self.large_updates(uri, value_d, ds, nrows, False, 50)
        self.large_updates(uri, value_a, ds, nrows, False, 60)
        self.large_updates(uri, value_b, ds, nrows, False, 70)

        # Verify additional updated data is visible and correct.
        self.check(value_d, uri, nrows, 50)
        self.check(value_a, uri, nrows, 60)
        self.check(value_b, uri, nrows, 70)

        # Checkpoint to ensure the data is flushed to disk.
        self.session.checkpoint()

        # Extract the most recent checkpoint's write gen & run write gen. As we are
        # still on a new DB connection, the run write gen should be 1 at this point,
        # equal to the connection-wide base write gen. Since we checkpointed after a
        # series of large writes/updates, the write gen of the pages should definitely
        # be greater than 1.
        checkpoint_write_gen, checkpoint_run_write_gen = self.parse_write_gen("file:test_debug_mode10.wt")
        self.assertEqual(checkpoint_run_write_gen, 1)
        self.assertGreater(checkpoint_write_gen, checkpoint_run_write_gen)

        # Simulate a crash/restart, opening our new DB in recovery. As we open in
        # recovery, we want to additionally use the 'update_restore_evict' debug option
        # to trigger update restore eviction.
        self.conn_config = self.conn_config + self.conn_recon
        simulate_crash_restart(self, ".", "RESTART")

        # As we've created a new DB connection post-shutdown, the connection-wide base
        # write gen should be initialised from the previous checkpoint's 'write_gen'
        # during the recovery process ('write_gen' + 1). This should be reflected in
        # the 'run_write_gen' field of the newest checkpoint post-recovery. As the
        # recovery/RTS process updates our pages, we'd also expect the latest
        # checkpoint's 'write_gen' to again be greater than its 'run_write_gen'.
        recovery_write_gen, recovery_run_write_gen = self.parse_write_gen("file:test_debug_mode10.wt")
        self.assertGreater(recovery_run_write_gen, checkpoint_write_gen)
        self.assertGreater(recovery_write_gen, recovery_run_write_gen)

        # Read the statistics of pages that have been update restored (to check the mechanism was used).
        stat_cursor = self.session.open_cursor('statistics:')
        pages_update_restored = stat_cursor[stat.conn.cache_write_restore][2]
        stat_cursor.close()
        self.assertGreater(pages_update_restored, 0)

        # Check that after recovery, we see the correct data with respect to our previous stable timestamp (40).
        self.check(value_c, uri, nrows, 40)
        self.check(value_c, uri, nrows, 50)
        self.check(value_c, uri, nrows, 60)
        self.check(value_c, uri, nrows, 70)
        self.check(value_b, uri, nrows, 30)
        self.check(value_a, uri, nrows, 20)
        # Passing 0 results in opening a transaction with no read timestamp.
        self.check(value_c, uri, nrows, 0)
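    # parse_write_gen() is defined outside this excerpt. A minimal sketch,
    # assuming both write generations appear in the file's metadata checkpoint
    # string; the regexes and the name are illustrative, not the test's actual
    # code.
    def parse_write_gen_sketch(self, uri):
        import re
        meta_cursor = self.session.open_cursor('metadata:')
        config = meta_cursor[uri]
        meta_cursor.close()
        # The checkpoint metadata contains e.g. '...,write_gen=13,...,run_write_gen=1,...'.
        write_gen = int(re.search(r'\bwrite_gen=(\d+)', config).group(1))
        run_write_gen = int(re.search(r'run_write_gen=(\d+)', config).group(1))
        return write_gen, run_write_gen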
Example #45
0
    def test_prepare_rollback_retrieve_time_window(self):
        # Create a small table.
        uri = "table:test_prepare10"
        nrows = 1000
        ds = SimpleDataSet(self, uri, 0, key_format="S", value_format='u')
        ds.populate()

        value_a = b"aaaaa" * 100
        value_b = b"bbbbb" * 100
        value_c = b"ccccc" * 100

        # Commit some updates along with a prepared update, which is not resolved.
        self.conn.set_timestamp('oldest_timestamp=' + timestamp_str(10))
        self.conn.set_timestamp('stable_timestamp=' + timestamp_str(10))

        # Load the initial data.
        self.updates(ds, uri, nrows, value_a, 20)
        # Add some more updates
        self.updates(ds, uri, nrows, value_b, 30)

        # Checkpoint
        self.session.checkpoint()

        # Validate that we do see the correct value.
        session2 = self.setUpSessionOpen(self.conn)
        cursor2 = session2.open_cursor(uri)
        session2.begin_transaction()
        for i in range(1, nrows):
            cursor2.set_key(ds.key(i))
            self.assertEquals(cursor2.search(), 0)
            self.assertEquals(cursor2.get_value(), value_b)
        session2.commit_transaction()

        # Reset the cursor.
        cursor2.reset()
        session2.begin_transaction()

        # Remove all keys
        self.removes(ds, uri, nrows, 40)

        # Validate that we do see the correct value.
        session3 = self.setUpSessionOpen(self.conn)
        cursor3 = session3.open_cursor(uri)
        session3.begin_transaction()
        for i in range(1, nrows):
            cursor3.set_key(ds.key(i))
            self.assertEquals(cursor3.search(), wiredtiger.WT_NOTFOUND)
        session3.commit_transaction()

        # Reset the cursor.
        cursor3.reset()
        session3.begin_transaction()

        # Insert the updates from a prepare session and keep it open.
        session_p = self.conn.open_session()
        cursor_p = session_p.open_cursor(uri)
        session_p.begin_transaction()
        for i in range(1, nrows):
            cursor_p.set_key(ds.key(i))
            cursor_p.set_value(value_c)
            self.assertEquals(cursor_p.insert(), 0)
        session_p.prepare_transaction('prepare_timestamp=' + timestamp_str(50))

        self.check(ds, uri, nrows, value_a, 20)
        self.check(ds, uri, nrows, value_b, 35)
        self.check_not_found(ds, uri, nrows, 60)

        # Rollback the prepared session.
        session_p.rollback_transaction()

        self.check(ds, uri, nrows, value_a, 20)
        self.check(ds, uri, nrows, value_b, 35)
        self.check_not_found(ds, uri, nrows, 60)

        # session2 can still see value_b.
        for i in range(1, nrows):
            cursor2.set_key(ds.key(i))
            self.assertEquals(cursor2.search(), 0)
            self.assertEquals(cursor2.get_value(), value_b)
        session2.commit_transaction()

        # session3 still can't see a value
        for i in range(1, nrows):
            cursor3.set_key(ds.key(i))
            self.assertEquals(cursor3.search(), wiredtiger.WT_NOTFOUND)
        session3.commit_transaction()

        # Close sessions.
        cursor_p.close()
        session_p.close()
        cursor2.close()
        session2.close()
        cursor3.close()
        session3.close()
        self.session.close()
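    # updates(), removes(), check() and check_not_found() are helpers defined
    # outside this excerpt. A minimal sketch of check_not_found, assuming a read
    # at the given timestamp (illustrative only):
    def check_not_found_sketch(self, ds, uri, nrows, read_ts):
        cursor = self.session.open_cursor(uri)
        self.session.begin_transaction('read_timestamp=' + timestamp_str(read_ts))
        for i in range(1, nrows):
            cursor.set_key(ds.key(i))
            self.assertEquals(cursor.search(), wiredtiger.WT_NOTFOUND)
        self.session.rollback_transaction()
        cursor.close()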
Example #46
0
    def test_gc(self):
        nrows = 10000

        # Create a table without logging.
        uri = "table:gc01"
        ds = SimpleDataSet(self,
                           uri,
                           0,
                           key_format="i",
                           value_format="S",
                           config='log=(enabled=false)')
        ds.populate()

        # Pin oldest and stable to timestamp 1.
        self.conn.set_timestamp('oldest_timestamp=' + timestamp_str(1) +
                                ',stable_timestamp=' + timestamp_str(1))

        bigvalue = "aaaaa" * 100
        bigvalue2 = "ddddd" * 100
        self.large_updates(uri, bigvalue, ds, nrows, 10)

        # Check that all updates are seen.
        self.check(bigvalue, uri, nrows, 10)

        self.large_updates(uri, bigvalue2, ds, nrows, 100)

        # Check that the new updates are only seen after the update timestamp.
        self.check(bigvalue2, uri, nrows, 100)

        # Check that old updates are seen.
        self.check(bigvalue, uri, nrows, 10)

        # Pin oldest and stable to timestamp 100.
        self.conn.set_timestamp('oldest_timestamp=' + timestamp_str(100) +
                                ',stable_timestamp=' + timestamp_str(100))

        # Checkpoint to ensure that the history store is cleaned.
        self.session.checkpoint()
        self.check_gc_stats()

        # Check that the new updates are only seen after the update timestamp.
        self.check(bigvalue2, uri, nrows, 100)

        # Load a slight modification with a later timestamp.
        self.large_modifies(uri, 'A', ds, 10, 1, nrows, 110)
        self.large_modifies(uri, 'B', ds, 20, 1, nrows, 120)
        self.large_modifies(uri, 'C', ds, 30, 1, nrows, 130)

        # Second set of update operations with increased timestamp.
        self.large_updates(uri, bigvalue, ds, nrows, 200)

        # Check that the new updates are only seen after the update timestamp.
        self.check(bigvalue, uri, nrows, 200)

        # Check that the modifies are seen.
        bigvalue_modA = bigvalue2[0:10] + 'A' + bigvalue2[11:]
        bigvalue_modB = bigvalue_modA[0:20] + 'B' + bigvalue_modA[21:]
        bigvalue_modC = bigvalue_modB[0:30] + 'C' + bigvalue_modB[31:]
        self.check(bigvalue_modA, uri, nrows, 110)
        self.check(bigvalue_modB, uri, nrows, 120)
        self.check(bigvalue_modC, uri, nrows, 130)

        # Check that old updates are seen.
        self.check(bigvalue2, uri, nrows, 100)

        # Pin oldest and stable to timestamp 200.
        self.conn.set_timestamp('oldest_timestamp=' + timestamp_str(200) +
                                ',stable_timestamp=' + timestamp_str(200))

        # Checkpoint to ensure that the history store is cleaned.
        self.session.checkpoint()
        self.check_gc_stats()

        # Check that the new updates are only seen after the update timestamp.
        self.check(bigvalue, uri, nrows, 200)

        # Load a slight modification with a later timestamp.
        self.large_modifies(uri, 'A', ds, 10, 1, nrows, 210)
        self.large_modifies(uri, 'B', ds, 20, 1, nrows, 220)
        self.large_modifies(uri, 'C', ds, 30, 1, nrows, 230)

        # Third set of update operations with increased timestamp.
        self.large_updates(uri, bigvalue2, ds, nrows, 300)

        # Check that the new updates are only seen after the update timestamp.
        self.check(bigvalue2, uri, nrows, 300)

        # Check that the modifies are seen.
        bigvalue_modA = bigvalue[0:10] + 'A' + bigvalue[11:]
        bigvalue_modB = bigvalue_modA[0:20] + 'B' + bigvalue_modA[21:]
        bigvalue_modC = bigvalue_modB[0:30] + 'C' + bigvalue_modB[31:]
        self.check(bigvalue_modA, uri, nrows, 210)
        self.check(bigvalue_modB, uri, nrows, 220)
        self.check(bigvalue_modC, uri, nrows, 230)

        # Check that old updates are seen.
        self.check(bigvalue, uri, nrows, 200)

        # Pin oldest and stable to timestamp 300.
        self.conn.set_timestamp('oldest_timestamp=' + timestamp_str(300) +
                                ',stable_timestamp=' + timestamp_str(300))

        # Checkpoint to ensure that the history store is cleaned.
        self.session.checkpoint()
        self.check_gc_stats()

        # Check that the new updates are only seen after the update timestamp.
        self.check(bigvalue2, uri, nrows, 300)
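    # large_modifies() is defined outside this excerpt. A minimal sketch,
    # assuming it applies a wiredtiger.Modify of 'nbytes' bytes at byte offset
    # 'location' to every row at the given commit timestamp (illustrative only):
    def large_modifies_sketch(self, uri, value, ds, location, nbytes, nrows, commit_ts):
        cursor = self.session.open_cursor(uri)
        for i in range(1, nrows + 1):
            self.session.begin_transaction()
            cursor.set_key(ds.key(i))
            mods = [wiredtiger.Modify(value, location, nbytes)]
            self.assertEqual(cursor.modify(mods), 0)
            self.session.commit_transaction('commit_timestamp=' + timestamp_str(commit_ts))
        cursor.close()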
Example #47
0
    def test_rollback_to_stable(self):
        # RTS will fail if there are uncommitted prepared transactions, so skip tests
        # that combine prepare with a runtime call to RTS; that combination doesn't add
        # useful testing scenarios.
        if self.prepare and not self.crash:
            return

        nrows = 10000

        # Create a table without logging.
        uri = "table:rollback_to_stable33"
        ds = SimpleDataSet(
            self, uri, 0, key_format=self.key_format, value_format=self.value_format,
            config='log=(enabled=false)' + self.extraconfig)
        ds.populate()

        if self.value_format == '8t':
            valuea = 97
            valueb = 98
        else:
            valuea = "aaaaa" * 100
            valueb = "bbbbb" * 100

        # Pin oldest and stable timestamps to 10.
        self.conn.set_timestamp('oldest_timestamp=' + self.timestamp_str(10) +
            ',stable_timestamp=' + self.timestamp_str(10))

        cursor = self.session.open_cursor(uri)

        # Write some baseline data out at time 20.
        self.session.begin_transaction()
        for i in range(1, nrows + 1):
            cursor[ds.key(i)] = self.mkdata(valuea, i)
            # Make a new transaction every 97 keys so the transactions don't get huge.
            if i % 97 == 0:
                self.session.commit_transaction('commit_timestamp=' + self.timestamp_str(20))
                self.session.begin_transaction()
        self.session.commit_transaction('commit_timestamp=' + self.timestamp_str(20))

        # Write some more data out at time 30.
        self.session.begin_transaction()
        for i in range(1, nrows + 1):
            cursor[ds.key(i)] = self.mkdata(valueb, i)
            # Make a new transaction every 97 keys so the transactions don't get huge.
            if i % 97 == 0:
                self.session.commit_transaction('commit_timestamp=' + self.timestamp_str(30))
                self.session.begin_transaction()
        self.session.commit_transaction('commit_timestamp=' + self.timestamp_str(30))

        cursor.close()

        # Evict the lot.
        self.evict(ds, 1, nrows + 1, valueb)

        # Move stable to 25 (after the baseline data).
        self.conn.set_timestamp('stable_timestamp=' + self.timestamp_str(25))

        # Checkpoint.
        self.session.checkpoint()

        # Now fast-delete the lot at time 35.
        # Use a separate session for this so that if we leave the truncate prepared it
        # doesn't obstruct the rest of the test.
        session2 = self.conn.open_session()
        session2.begin_transaction()
        lo_cursor = session2.open_cursor(uri)
        lo_cursor.set_key(ds.key(nrows // 2 + 1))
        hi_cursor = session2.open_cursor(uri)
        hi_cursor.set_key(ds.key(nrows + 1))
        session2.truncate(None, lo_cursor, hi_cursor, None)
        if self.prepare:
            session2.prepare_transaction('prepare_timestamp=' + self.timestamp_str(35))
        else:
            session2.commit_transaction('commit_timestamp=' + self.timestamp_str(35))
        hi_cursor.close()
        lo_cursor.close()

        # Check stats to make sure we fast-deleted at least one page. Since VLCS and
        # FLCS do not (yet) support fast-delete, assert instead that we didn't.
        stat_cursor = self.session.open_cursor('statistics:', None, None)
        fastdelete_pages = stat_cursor[stat.conn.rec_page_delete_fast][2]
        if self.key_format == 'r':
            self.assertEqual(fastdelete_pages, 0)
        else:
            self.assertGreater(fastdelete_pages, 0)

        if self.second_checkpoint:
            # Checkpoint again with the deletion.
            self.session.checkpoint()

        # Roll back, either via crashing or by explicit RTS.
        if self.crash:
            simulate_crash_restart(self, ".", "RESTART")
        else:
            self.conn.rollback_to_stable()

        # We should see the original data at read-ts 20 and 30.
        self.checkx(ds, nrows, 20, valuea)
        self.checkx(ds, nrows, 30, valuea)
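    # evict(), mkdata() and checkx() are defined outside this excerpt. A minimal
    # sketch of evict, assuming the usual debug release_evict cursor technique
    # (illustrative only):
    def evict_sketch(self, ds, lo, hi, value):
        evict_cursor = self.session.open_cursor(ds.uri, None, "debug=(release_evict)")
        self.session.begin_transaction()
        for i in range(lo, hi):
            # Reading a page through a release_evict cursor forces it out of
            # the cache when the cursor is reset.
            evict_cursor.set_key(ds.key(i))
            self.assertEqual(evict_cursor.search(), 0)
            self.assertEqual(evict_cursor.get_value(), self.mkdata(value, i))
            evict_cursor.reset()
        self.session.rollback_transaction()
        evict_cursor.close()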
Example #48
0
    def test_gc(self):
        uri = "table:gc05"
        create_params = 'value_format=S,key_format=i'
        self.session.create(uri, create_params)

        nrows = 10000
        value_x = "xxxxx" * 100
        value_y = "yyyyy" * 100
        value_z = "zzzzz" * 100
        ds = SimpleDataSet(self,
                           uri,
                           0,
                           key_format="i",
                           value_format="S",
                           config='log=(enabled=false)')
        ds.populate()

        # Set the oldest and stable timestamps to 10.
        self.conn.set_timestamp('oldest_timestamp=' + timestamp_str(10) +
                                ',stable_timestamp=' + timestamp_str(10))

        # Insert values with varying timestamps.
        self.large_updates(uri, value_x, ds, nrows, 20)
        self.large_updates(uri, value_y, ds, nrows, 30)
        self.large_updates(uri, value_z, ds, nrows, 40)

        # Perform a checkpoint.
        self.session.checkpoint("name=checkpoint_one")

        # Check statistics.
        self.check_gc_stats()

        # Open a cursor to the checkpoint just performed.
        ckpt_cursor = self.session.open_cursor(uri, None,
                                               "checkpoint=checkpoint_one")

        # Move the oldest and stable timestamps to 40.
        self.conn.set_timestamp('oldest_timestamp=' + timestamp_str(40) +
                                ',stable_timestamp=' + timestamp_str(40))

        # Insert values with varying timestamps.
        self.large_updates(uri, value_z, ds, nrows, 50)
        self.large_updates(uri, value_y, ds, nrows, 60)
        self.large_updates(uri, value_x, ds, nrows, 70)

        # Move the oldest and stable timestamps to 70.
        self.conn.set_timestamp('oldest_timestamp=' + timestamp_str(70) +
                                ',stable_timestamp=' + timestamp_str(70))

        # Perform a checkpoint.
        self.session.checkpoint()
        self.check_gc_stats()

        # Verify checkpoint_one still exists and contains the expected values.
        for i in range(0, nrows):
            ckpt_cursor.set_key(i)
            ckpt_cursor.search()
            self.assertEqual(value_z, ckpt_cursor.get_value())

        # Close checkpoint cursor.
        ckpt_cursor.close()
Example #49
0
    def test_hs(self):
        active_files = []
        value1 = 'a' * 500
        value2 = 'd' * 500

        # Set up 'numfiles' with 'numrows' entries. We want to create a number of files that
        # contain active history (content newer than the oldest timestamp).
        for f in range(self.numfiles):
            table_uri = 'table:%s.%d' % (self.file_name, f)
            file_uri = 'file:%s.%d.wt' % (self.file_name, f)
            # Create a small table.
            ds = SimpleDataSet(
                self, table_uri, 0, key_format='S', value_format='S', config='log=(enabled=false)')
            ds.populate()
            # Checkpoint to ensure we write the file's metadata checkpoint value.
            self.session.checkpoint()
            # Get the base write gen of the file so we can compare after the handles get closed.
            base_write_gen = self.parse_run_write_gen(file_uri)
            active_files.append((base_write_gen, ds))

        # Pin oldest and stable to timestamp 1.
        self.conn.set_timestamp('oldest_timestamp=' + timestamp_str(1) +
            ',stable_timestamp=' + timestamp_str(1))

        # Perform a series of updates over our files at timestamp 2. This gives us
        # data we can later assert on to ensure the history store is working as
        # intended.
        for (_, ds) in active_files:
            # Load data at timestamp 2.
            self.large_updates(ds.uri, value1, ds, self.nrows // 2, 2)

        # We want to create a long-running read transaction in a separate session,
        # which we will persist over the closing and re-opening of handles. We want to
        # ensure the correct data gets read throughout this time period.
        session_read = self.conn.open_session()
        session_read.begin_transaction('read_timestamp=' + timestamp_str(2))
        # Check our initial set of updates are seen at the read timestamp.
        for (_, ds) in active_files:
            # Check that all updates at timestamp 2 are seen.
            self.check(session_read, value1, ds.uri, self.nrows // 2)

        # Perform a series of updates over our files at a later timestamp, checking
        # that the history store data is consistent with old and new timestamps.
        for (_, ds) in active_files:
            # Load more data with a later timestamp.
            self.large_updates(ds.uri, value2, ds, self.nrows, 100)
            # Check that the new updates are only seen after the update timestamp.
            self.check(self.session, value1, ds.uri, self.nrows // 2, 2)
            self.check(self.session, value2, ds.uri, self.nrows, 100)

        # Our sweep scan interval is every 1 second and the amount of idle time needed
        # for a handle to be closed is 2 seconds. It should take roughly 3 seconds for
        # the sweep server to close our file handles. Let's wait at least double that
        # to be safe.
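        # For reference, sweep timing like this is normally set at connection open; a
        # plausible configuration (assumed here, not shown in this excerpt) would be:
        #   file_manager=(close_scan_interval=1,close_idle_time=2,close_handle_minimum=0)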
        max = 6
        sleep = 0
        # After waiting for the sweep server to remove our idle handles, the only
        # handles that should remain open are the metadata file, the history store
        # file and the lock file.
        final_numfiles = 3
        # Open the stats cursor to collect the dhandle sweep status.
        stat_cursor = self.session.open_cursor('statistics:', None, None)
        while sleep < max:
            # We continue doing checkpoints, which as a side effect run the session
            # handle sweep, encouraging the idle handles to get removed. Note that
            # although checkpointing blocks sweeping, the checkpoints should be fast
            # and not add too much extra time to the overall test.
            self.session.checkpoint()
            sleep += 0.5
            time.sleep(0.5)
            stat_cursor.reset()
            curr_files_open = stat_cursor[stat.conn.file_open][2]
            curr_dhandles_removed = stat_cursor[stat.conn.dh_sweep_remove][2]
            curr_dhandle_sweep_closes = stat_cursor[stat.conn.dh_sweep_close][2]

            self.printVerbose(3, "==== loop " + str(sleep))
            self.printVerbose(3, "Number of files open: " + str(curr_files_open))
            self.printVerbose(3, "Number of connection sweep dhandles closed: " + str(curr_dhandle_sweep_closes))
            self.printVerbose(3, "Number of connection sweep dhandles removed from hashlist: " + str(curr_dhandles_removed))

            # We've swept all the handles we can if we are left with the number of
            # final dhandles that we expect to always be open.
            if curr_files_open == final_numfiles and curr_dhandle_sweep_closes >= self.numfiles:
                break

        stat_cursor.reset()
        final_dhandle_sweep_closes = stat_cursor[stat.conn.dh_sweep_close][2]
        stat_cursor.close()
        # We want to assert our active history files have all been closed.
        self.assertGreaterEqual(final_dhandle_sweep_closes, self.numfiles)

        # Using our long-running read transaction, we now want to check that the
        # correct data can still be read after the handles have been closed.
        for (_, ds) in active_files:
            # Check that all updates at timestamp 2 are seen.
            self.check(session_read, value1, ds.uri, self.nrows // 2)
        session_read.rollback_transaction()

        # Perform a series of checks over our files to ensure that our transactions
        # have been written before the dhandles were closed/swept. Also, even though
        # the dhandles are re-opened, we don't expect the base write generation to
        # have changed, since we haven't actually restarted the system.
        for idx, (initial_base_write_gen, ds) in enumerate(active_files):
            # Check that the most recent transaction has the correct data.
            self.check(self.session, value2, ds.uri, self.nrows, 100)
            file_uri = 'file:%s.%d.wt' % (self.file_name, idx)
            # Get the current base_write_gen and ensure it hasn't changed since being
            # closed.
            base_write_gen = self.parse_run_write_gen(file_uri)
            self.assertEqual(initial_base_write_gen, base_write_gen)
Example #50
0
    def test_rollback_to_stable(self):
        nrows = 10000

        # Prepare transactions for column store table is not yet supported.
        if self.prepare and self.key_format == 'r':
            self.skipTest(
                'Prepare transactions for column store table is not yet supported'
            )

        # Create a table without logging.
        uri = "table:rollback_to_stable02"
        ds = SimpleDataSet(self,
                           uri,
                           0,
                           key_format=self.key_format,
                           value_format="S",
                           config='log=(enabled=false)')
        ds.populate()

        # Pin oldest and stable to timestamp 1.
        self.conn.set_timestamp('oldest_timestamp=' + timestamp_str(1) +
                                ',stable_timestamp=' + timestamp_str(1))

        valuea = "aaaaa" * 100
        valueb = "bbbbb" * 100
        valuec = "ccccc" * 100
        valued = "ddddd" * 100
        self.large_updates(uri, valuea, ds, nrows, self.prepare, 10)
        # Check that all updates are seen.
        self.check(valuea, uri, nrows, 10)

        self.large_updates(uri, valueb, ds, nrows, self.prepare, 20)
        # Check that the new updates are only seen after the update timestamp.
        self.check(valueb, uri, nrows, 20)

        self.large_updates(uri, valuec, ds, nrows, self.prepare, 30)
        # Check that the new updates are only seen after the update timestamp.
        self.check(valuec, uri, nrows, 30)

        self.large_updates(uri, valued, ds, nrows, self.prepare, 40)
        # Check that the new updates are only seen after the update timestamp.
        self.check(valued, uri, nrows, 40)

        # Pin stable to timestamp 30 if prepare otherwise 20.
        if self.prepare:
            self.conn.set_timestamp('stable_timestamp=' + timestamp_str(30))
        else:
            self.conn.set_timestamp('stable_timestamp=' + timestamp_str(20))
        # Checkpoint to ensure that all the data is flushed.
        if not self.in_memory:
            self.session.checkpoint()

        self.conn.rollback_to_stable()
        # Check that the new updates are only seen after the update timestamp.
        self.check(valueb, uri, nrows, 40)
        self.check(valueb, uri, nrows, 20)
        self.check(valuea, uri, nrows, 10)

        stat_cursor = self.session.open_cursor('statistics:', None, None)
        calls = stat_cursor[stat.conn.txn_rts][2]
        upd_aborted = (stat_cursor[stat.conn.txn_rts_upd_aborted][2] +
                       stat_cursor[stat.conn.txn_rts_hs_removed][2])
        keys_removed = stat_cursor[stat.conn.txn_rts_keys_removed][2]
        keys_restored = stat_cursor[stat.conn.txn_rts_keys_restored][2]
        pages_visited = stat_cursor[stat.conn.txn_rts_pages_visited][2]
        stat_cursor.close()

        self.assertEqual(calls, 1)
        self.assertEqual(keys_removed, 0)
        self.assertEqual(keys_restored, 0)
        self.assertGreater(pages_visited, 0)
        self.assertGreaterEqual(upd_aborted, nrows * 2)
Example #51
0
    def test_sharing(self):
        ds = SimpleDataSet(self, self.uri, 10)
        ds.populate()
        ds.check()
        self.session.checkpoint()
        ds.check()

        # Create a secondary database
        dir2 = os.path.join(self.home, 'SECONDARY')
        os.mkdir(dir2)
        conn2 = self.setUpConnectionOpen(dir2)
        session2 = conn2.open_session()

        # Reference the tree from the secondary:
        metac = self.session.open_cursor('metadata:')
        metac2 = session2.open_cursor('metadata:', None, 'readonly=0')
        uri2 = self.uri[:5] + '../' + self.uri[5:]
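        # uri2 refers to the primary's underlying file through a relative path (e.g.
        # 'file:../<name>'), resolved from the secondary's home directory.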
        metac2[uri2] = metac[self.uri] + ",readonly=1"

        cursor2 = session2.open_cursor(uri2)
        ds.check_cursor(cursor2)
        cursor2.close()

        newds = SimpleDataSet(self, self.uri, 10000)
        newds.populate()
        newds.check()
        self.session.checkpoint()
        newds.check()

        # Check we can still read from the last checkpoint
        cursor2 = session2.open_cursor(uri2)
        ds.check_cursor(cursor2)
        cursor2.close()

        # Bump to new checkpoint
        origmeta = metac[self.uri]
        checkpoint = re.search(r',checkpoint=\(.+?\)\)', origmeta).group(0)[1:]
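        # The extracted fragment looks roughly like the following (illustrative; the
        # actual address, order and time values vary):
        #   checkpoint=(WiredTigerCheckpoint.2=(addr="...",order=2,time=...,size=...))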
        self.pr('Orig checkpoint: ' + checkpoint)
        session2.alter(uri2, checkpoint)
        self.pr('New metadata on secondary: ' + metac2[uri2])

        # Check that we can see the new data
        cursor2 = session2.open_cursor(uri2)
        newds.check_cursor(cursor2)
Example #52
0
    def test_rollback_to_stable(self):
        nrows = 1000

        # Create a table without logging.
        uri = "table:rollback_to_stable07"
        ds = SimpleDataSet(
            self, uri, 0, key_format="i", value_format="S", config='log=(enabled=false)')
        ds.populate()

        # Pin oldest and stable to timestamp 10.
        self.conn.set_timestamp('oldest_timestamp=' + timestamp_str(10) +
            ',stable_timestamp=' + timestamp_str(10))

        value_a = "aaaaa" * 100
        value_b = "bbbbb" * 100
        value_c = "ccccc" * 100
        value_d = "ddddd" * 100

        # Perform several updates.
        self.large_updates(uri, value_d, ds, nrows, 20)
        self.large_updates(uri, value_c, ds, nrows, 30)
        self.large_updates(uri, value_b, ds, nrows, 40)
        self.large_updates(uri, value_a, ds, nrows, 50)

        # Verify data is visible and correct.
        self.check(value_d, uri, nrows, 20)
        self.check(value_c, uri, nrows, 30)
        self.check(value_b, uri, nrows, 40)
        self.check(value_a, uri, nrows, 50)

        # Pin stable to timestamp 50 if prepare otherwise 40.
        if self.prepare:
            self.conn.set_timestamp('stable_timestamp=' + timestamp_str(50))
        else:
            self.conn.set_timestamp('stable_timestamp=' + timestamp_str(40))

        # Perform additional updates.
        self.large_updates(uri, value_b, ds, nrows, 60)
        self.large_updates(uri, value_c, ds, nrows, 70)
        self.large_updates(uri, value_d, ds, nrows, 80)

        # Checkpoint to ensure the data is flushed to disk.
        self.session.checkpoint()

        # Verify additional update data is visible and correct.
        self.check(value_b, uri, nrows, 60)
        self.check(value_c, uri, nrows, 70)
        self.check(value_d, uri, nrows, 80)

        # Simulate a server crash and restart.
        simulate_crash_restart(self, ".", "RESTART")

        # Check that the correct data is seen at and after the stable timestamp.
        self.check(value_b, uri, nrows, 40)
        self.check(value_b, uri, nrows, 80)
        self.check(value_c, uri, nrows, 30)
        self.check(value_d, uri, nrows, 20)

        stat_cursor = self.session.open_cursor('statistics:', None, None)
        calls = stat_cursor[stat.conn.txn_rts][2]
        hs_removed = stat_cursor[stat.conn.txn_rts_hs_removed][2]
        keys_removed = stat_cursor[stat.conn.txn_rts_keys_removed][2]
        keys_restored = stat_cursor[stat.conn.txn_rts_keys_restored][2]
        pages_visited = stat_cursor[stat.conn.txn_rts_pages_visited][2]
        upd_aborted = stat_cursor[stat.conn.txn_rts_upd_aborted][2]
        stat_cursor.close()

        self.assertEqual(calls, 0)
        self.assertEqual(keys_removed, 0)
        self.assertEqual(keys_restored, 0)
        self.assertGreaterEqual(upd_aborted, 0)
        self.assertGreater(pages_visited, 0)
        self.assertGreaterEqual(hs_removed, nrows * 4)

        # Simulate another server crash and restart.
        simulate_crash_restart(self, "RESTART", "RESTART2")

        # Check that the correct data is seen at and after the stable timestamp.
        self.check(value_b, uri, nrows, 40)
        self.check(value_b, uri, nrows, 80)
        self.check(value_c, uri, nrows, 30)
        self.check(value_d, uri, nrows, 20)

        stat_cursor = self.session.open_cursor('statistics:', None, None)
        calls = stat_cursor[stat.conn.txn_rts][2]
        hs_removed = stat_cursor[stat.conn.txn_rts_hs_removed][2]
        keys_removed = stat_cursor[stat.conn.txn_rts_keys_removed][2]
        keys_restored = stat_cursor[stat.conn.txn_rts_keys_restored][2]
        pages_visited = stat_cursor[stat.conn.txn_rts_pages_visited][2]
        upd_aborted = stat_cursor[stat.conn.txn_rts_upd_aborted][2]
        stat_cursor.close()

        self.assertEqual(calls, 0)
        self.assertEqual(keys_removed, 0)
        self.assertEqual(keys_restored, 0)
        self.assertGreaterEqual(pages_visited, 0)
        self.assertEqual(upd_aborted, 0)
        self.assertEqual(hs_removed, 0)
Example #53
0
    def test_rollback_to_stable(self):
        nrows = 1000

        # Create a table.
        uri = "table:rollback_to_stable13"
        ds = SimpleDataSet(self,
                           uri,
                           0,
                           key_format=self.key_format,
                           value_format=self.value_format,
                           config='split_pct=50')
        ds.populate()

        if self.value_format == '8t':
            value_a = 97
            value_b = 98
        else:
            value_a = "aaaaa" * 100
            value_b = "bbbbb" * 100

        # Pin oldest and stable to timestamp 10.
        self.conn.set_timestamp('oldest_timestamp=' + self.timestamp_str(10) +
                                ',stable_timestamp=' + self.timestamp_str(10))

        # Perform several updates.
        self.large_updates(uri, value_a, ds, nrows, self.prepare, 20)

        # Perform several removes.
        self.large_removes(uri, ds, nrows, self.prepare, 30)

        # Perform several updates.
        self.large_updates(uri, value_b, ds, nrows, self.prepare, 60)

        # Verify data is visible and correct.
        # (In FLCS, the removed rows should read back as zero.)
        self.check(value_a, uri, nrows, None, 20)
        self.check(None, uri, 0, nrows, 30)
        self.check(value_b, uri, nrows, None, 60)

        # Pin stable to timestamp 50 if prepare otherwise 40.
        if self.prepare:
            self.conn.set_timestamp('stable_timestamp=' +
                                    self.timestamp_str(50))
        else:
            self.conn.set_timestamp('stable_timestamp=' +
                                    self.timestamp_str(40))

        self.session.checkpoint()
        # Simulate a server crash and restart.
        simulate_crash_restart(self, ".", "RESTART")

        # Check that the correct data is seen at and after the stable timestamp.
        self.check(None, uri, 0, nrows, 50)

        # Check that we restore the correct value from the history store.
        self.check(value_a, uri, nrows, None, 20)

        stat_cursor = self.session.open_cursor('statistics:', None, None)
        restored_tombstones = stat_cursor[
            stat.conn.txn_rts_hs_restore_tombstones][2]
        self.assertEqual(restored_tombstones, nrows)
Example #54
0
    def test_rollback_to_stable(self):
        nrows = 1000

        # Prepare transactions for column store table is not yet supported.
        if self.prepare and self.key_format == 'r':
            self.skipTest(
                'Prepare transactions for column store table is not yet supported'
            )

        # Create a table without logging.
        self.pr("create/populate tables")
        uri_1 = "table:rollback_to_stable10_1"
        ds_1 = SimpleDataSet(self,
                             uri_1,
                             0,
                             key_format=self.key_format,
                             value_format="S",
                             config='log=(enabled=false)')
        ds_1.populate()

        # Create another table without logging.
        uri_2 = "table:rollback_to_stable10_2"
        ds_2 = SimpleDataSet(self,
                             uri_2,
                             0,
                             key_format="i",
                             value_format="S",
                             config='log=(enabled=false)')
        ds_2.populate()

        # Pin oldest and stable to timestamp 10.
        self.conn.set_timestamp('oldest_timestamp=' + timestamp_str(10) +
                                ',stable_timestamp=' + timestamp_str(10))

        value_a = "aaaaa" * 100
        value_b = "bbbbb" * 100
        value_c = "ccccc" * 100
        value_d = "ddddd" * 100
        value_e = "eeeee" * 100
        value_f = "fffff" * 100

        # Perform several updates.
        self.pr("large updates")
        self.large_updates(uri_1, value_d, ds_1, nrows, self.prepare, 20)
        self.large_updates(uri_1, value_c, ds_1, nrows, self.prepare, 30)
        self.large_updates(uri_1, value_b, ds_1, nrows, self.prepare, 40)
        self.large_updates(uri_1, value_a, ds_1, nrows, self.prepare, 50)

        self.large_updates(uri_2, value_d, ds_2, nrows, self.prepare, 20)
        self.large_updates(uri_2, value_c, ds_2, nrows, self.prepare, 30)
        self.large_updates(uri_2, value_b, ds_2, nrows, self.prepare, 40)
        self.large_updates(uri_2, value_a, ds_2, nrows, self.prepare, 50)

        # Verify data is visible and correct.
        self.check(value_d, uri_1, nrows, 20)
        self.check(value_c, uri_1, nrows, 30)
        self.check(value_b, uri_1, nrows, 40)
        self.check(value_a, uri_1, nrows, 50)

        self.check(value_d, uri_2, nrows, 20)
        self.check(value_c, uri_2, nrows, 30)
        self.check(value_b, uri_2, nrows, 40)
        self.check(value_a, uri_2, nrows, 50)

        # Pin stable to timestamp 60 if prepare otherwise 50.
        if self.prepare:
            self.conn.set_timestamp('stable_timestamp=' + timestamp_str(60))
        else:
            self.conn.set_timestamp('stable_timestamp=' + timestamp_str(50))

        # Create a checkpoint thread
        done = threading.Event()
        ckpt = checkpoint_thread(self.conn, done)
        try:
            self.pr("start checkpoint")
            ckpt.start()

            # Perform several updates in parallel with checkpoint.
            # Rollbacks may occur when checkpoint is running, so retry as needed.
            self.pr("updates")
            retry_rollback(
                self, 'update ds1, e', None, lambda: self.large_updates(
                    uri_1, value_e, ds_1, nrows, self.prepare, 70))
            retry_rollback(
                self, 'update ds2, e', None, lambda: self.large_updates(
                    uri_2, value_e, ds_2, nrows, self.prepare, 70))
            retry_rollback(
                self, 'update ds1, f', None, lambda: self.large_updates(
                    uri_1, value_f, ds_1, nrows, self.prepare, 80))
            retry_rollback(
                self, 'update ds2, f', None, lambda: self.large_updates(
                    uri_2, value_f, ds_2, nrows, self.prepare, 80))
        finally:
            done.set()
            ckpt.join()

        # Simulate a server crash and restart.
        self.pr("restart")
        simulate_crash_restart(self, ".", "RESTART")
        self.pr("restart complete")

        # Check that the correct data is seen at and after the stable timestamp.
        self.check(value_a, uri_1, nrows, 50)
        self.check(value_a, uri_1, nrows, 80)
        self.check(value_b, uri_1, nrows, 40)
        self.check(value_c, uri_1, nrows, 30)
        self.check(value_d, uri_1, nrows, 20)

        # Check that the correct data is seen at and after the stable timestamp.
        self.check(value_c, uri_2, nrows, 30)
        self.check(value_a, uri_2, nrows, 50)
        self.check(value_a, uri_2, nrows, 80)
        self.check(value_b, uri_2, nrows, 40)
        self.check(value_d, uri_2, nrows, 20)

        stat_cursor = self.session.open_cursor('statistics:', None, None)
        calls = stat_cursor[stat.conn.txn_rts][2]
        hs_removed = stat_cursor[stat.conn.txn_rts_hs_removed][2]
        hs_sweep = stat_cursor[stat.conn.txn_rts_sweep_hs_keys][2]
        keys_removed = stat_cursor[stat.conn.txn_rts_keys_removed][2]
        keys_restored = stat_cursor[stat.conn.txn_rts_keys_restored][2]
        pages_visited = stat_cursor[stat.conn.txn_rts_pages_visited][2]
        upd_aborted = stat_cursor[stat.conn.txn_rts_upd_aborted][2]
        stat_cursor.close()

        self.assertEqual(calls, 0)
        self.assertEqual(keys_removed, 0)
        self.assertEqual(keys_restored, 0)
        self.assertGreaterEqual(upd_aborted, 0)
        self.assertGreater(pages_visited, 0)
        self.assertGreaterEqual(hs_removed, 0)
        self.assertGreater(hs_sweep, 0)
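    # retry_rollback() is defined outside this excerpt. A minimal sketch, assuming
    # it retries an operation that may get rolled back while a checkpoint is
    # running; the retried operation is assumed to re-establish any transaction
    # state it needs (illustrative only):
    def retry_rollback_sketch(self, name, txn_session, code):
        retry_limit = 10
        for _ in range(retry_limit):
            try:
                code()
                return
            except wiredtiger.WiredTigerError as e:
                # Only retry on rollback; anything else is a real failure.
                self.assertTrue('WT_ROLLBACK' in str(e))
                if txn_session:
                    txn_session.rollback_transaction()
                time.sleep(0.1)
        self.fail('retry_rollback: ' + name + ' exhausted retries')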
Example #55
0
    def test_rollback_to_stable(self):
        nrows = 1000

        # Create a table.
        uri = "table:rollback_to_stable06"
        ds_config = ',log=(enabled=false)' if self.in_memory else ''
        ds = SimpleDataSet(self, uri, 0,
            key_format=self.key_format, value_format=self.value_format, config=ds_config)
        ds.populate()

        if self.value_format == '8t':
            value_a = 97
            value_b = 98
            value_c = 99
            value_d = 100
        else:
            value_a = "aaaaa" * 100
            value_b = "bbbbb" * 100
            value_c = "ccccc" * 100
            value_d = "ddddd" * 100

        # Pin oldest and stable to timestamp 10.
        self.conn.set_timestamp('oldest_timestamp=' + self.timestamp_str(10) +
            ',stable_timestamp=' + self.timestamp_str(10))

        # Perform several updates.
        self.large_updates(uri, value_a, ds, nrows, self.prepare, 20)
        self.large_updates(uri, value_b, ds, nrows, self.prepare, 30)
        self.large_updates(uri, value_c, ds, nrows, self.prepare, 40)
        self.large_updates(uri, value_d, ds, nrows, self.prepare, 50)

        # Verify data is visible and correct.
        self.check(value_a, uri, nrows, None, 20)
        self.check(value_b, uri, nrows, None, 30)
        self.check(value_c, uri, nrows, None, 40)
        self.check(value_d, uri, nrows, None, 50)

        # Checkpoint to ensure the data is flushed, then rollback to the stable timestamp.
        if not self.in_memory:
            self.session.checkpoint()
        self.conn.rollback_to_stable()

        # Check that all keys are removed.
        # (For FLCS, at least for now, they will read back as 0, meaning deleted, rather
        # than disappear.)
        self.check(value_a, uri, 0, nrows, 20)
        self.check(value_b, uri, 0, nrows, 30)
        self.check(value_c, uri, 0, nrows, 40)
        self.check(value_d, uri, 0, nrows, 50)

        stat_cursor = self.session.open_cursor('statistics:', None, None)
        calls = stat_cursor[stat.conn.txn_rts][2]
        hs_removed = stat_cursor[stat.conn.txn_rts_hs_removed][2]
        keys_removed = stat_cursor[stat.conn.txn_rts_keys_removed][2]
        keys_restored = stat_cursor[stat.conn.txn_rts_keys_restored][2]
        pages_visited = stat_cursor[stat.conn.txn_rts_pages_visited][2]
        upd_aborted = stat_cursor[stat.conn.txn_rts_upd_aborted][2]
        stat_cursor.close()

        self.assertEqual(calls, 1)
        self.assertEqual(keys_restored, 0)
        self.assertGreater(pages_visited, 0)
        self.assertGreaterEqual(keys_removed, 0)
        if self.in_memory:
            self.assertEqual(upd_aborted, nrows * 4)
            self.assertEqual(hs_removed, 0)
        else:
            self.assertGreaterEqual(upd_aborted + hs_removed + keys_removed, nrows * 4)
Example #56
0
    def test_rollback_to_stable_prepare(self):
        nrows = 1000

        # Create a table without logging.
        self.pr("create/populate tables")
        uri_1 = "table:rollback_to_stable10_1"
        ds_1 = SimpleDataSet(self,
                             uri_1,
                             0,
                             key_format="i",
                             value_format="S",
                             config='log=(enabled=false)')
        ds_1.populate()

        # Create another table without logging.
        uri_2 = "table:rollback_to_stable10_2"
        ds_2 = SimpleDataSet(self,
                             uri_2,
                             0,
                             key_format="i",
                             value_format="S",
                             config='log=(enabled=false)')
        ds_2.populate()

        # Pin oldest and stable to timestamp 10.
        self.conn.set_timestamp('oldest_timestamp=' + timestamp_str(10) +
                                ',stable_timestamp=' + timestamp_str(10))

        value_a = "aaaaa" * 100
        value_b = "bbbbb" * 100
        value_c = "ccccc" * 100
        value_d = "ddddd" * 100
        value_e = "eeeee" * 100
        value_f = "fffff" * 100

        # Perform several updates.
        self.pr("large updates")
        self.large_updates(uri_1, value_d, ds_1, nrows, self.prepare, 20)
        self.large_updates(uri_1, value_c, ds_1, nrows, self.prepare, 30)
        self.large_updates(uri_1, value_b, ds_1, nrows, self.prepare, 40)
        self.large_updates(uri_1, value_a, ds_1, nrows, self.prepare, 50)

        self.large_updates(uri_2, value_d, ds_2, nrows, self.prepare, 20)
        self.large_updates(uri_2, value_c, ds_2, nrows, self.prepare, 30)
        self.large_updates(uri_2, value_b, ds_2, nrows, self.prepare, 40)
        self.large_updates(uri_2, value_a, ds_2, nrows, self.prepare, 50)

        # Verify data is visible and correct.
        self.check(value_d, uri_1, nrows, 20)
        self.check(value_c, uri_1, nrows, 30)
        self.session.breakpoint()
        self.check(value_b, uri_1, nrows, 40)
        self.check(value_a, uri_1, nrows, 50)

        self.check(value_d, uri_2, nrows, 20)
        self.check(value_c, uri_2, nrows, 30)
        self.session.breakpoint()
        self.check(value_b, uri_2, nrows, 40)
        self.check(value_a, uri_2, nrows, 50)

        # Pin stable to timestamp 60 if prepare otherwise 50.
        if self.prepare:
            self.conn.set_timestamp('stable_timestamp=' + timestamp_str(60))
        else:
            self.conn.set_timestamp('stable_timestamp=' + timestamp_str(50))

        # Here are the update operations we'll perform, encapsulated so we can easily
        # retry them if we get a rollback. Rollbacks may occur when checkpoint is
        # running.
        def prepare_range_updates(session, cursor, ds, value, nrows,
                                  prepare_config):
            self.pr("updates")
            for i in range(1, nrows):
                key = ds.key(i)
                cursor.set_key(key)
                cursor.set_value(value)
                self.assertEquals(cursor.update(), 0)
            self.pr("prepare")
            session.prepare_transaction(prepare_config)

        # Create a checkpoint thread
        done = threading.Event()
        ckpt = checkpoint_thread(self.conn, done)
        try:
            self.pr("start checkpoint")
            ckpt.start()

            # Perform several updates in parallel with checkpoint.
            session_p1 = self.conn.open_session()
            cursor_p1 = session_p1.open_cursor(uri_1)
            session_p1.begin_transaction('isolation=snapshot')
            retry_rollback(
                self, 'update ds1', session_p1, lambda: prepare_range_updates(
                    session_p1, cursor_p1, ds_1, value_e, nrows,
                    'prepare_timestamp=' + timestamp_str(69)))

            # Perform several updates in parallel with checkpoint.
            session_p2 = self.conn.open_session()
            cursor_p2 = session_p2.open_cursor(uri_2)
            session_p2.begin_transaction('isolation=snapshot')
            retry_rollback(
                self, 'update ds2', session_p2, lambda: prepare_range_updates(
                    session_p2, cursor_p2, ds_2, value_e, nrows,
                    'prepare_timestamp=' + timestamp_str(69)))
        finally:
            done.set()
            ckpt.join()

        # Check that the history store file has been used and has non-zero size before the simulated
        # crash.
        stat_cursor = self.session.open_cursor('statistics:', None, None)
        cache_hs_ondisk = stat_cursor[stat.conn.cache_hs_ondisk][2]
        stat_cursor.close()
        self.assertGreater(cache_hs_ondisk, 0)

        # Simulate a crash by copying to a new directory (RESTART).
        copy_wiredtiger_home(self, ".", "RESTART")

        # Commit the prepared transactions.
        session_p1.commit_transaction('commit_timestamp=' + timestamp_str(70) +
                                      ',durable_timestamp=' +
                                      timestamp_str(71))
        session_p2.commit_transaction('commit_timestamp=' + timestamp_str(70) +
                                      ',durable_timestamp=' +
                                      timestamp_str(71))
        session_p1.close()
        session_p2.close()

        # Open the new directory.
        self.pr("restart")
        self.conn = self.setUpConnectionOpen("RESTART")
        self.session = self.setUpSessionOpen(self.conn)
        self.pr("restart complete")

        # The history store file size should be greater than zero after the restart.
        stat_cursor = self.session.open_cursor('statistics:', None, None)
        cache_hs_ondisk = stat_cursor[stat.conn.cache_hs_ondisk][2]
        stat_cursor.close()
        self.assertGreater(cache_hs_ondisk, 0)

        # Check that the correct data is seen at and after the stable
        # timestamp, and that the history before stable is still readable.
        self.check(value_a, uri_1, nrows, 50)
        self.check(value_a, uri_1, nrows, 80)
        self.check(value_b, uri_1, nrows, 40)
        self.check(value_c, uri_1, nrows, 30)
        self.check(value_d, uri_1, nrows, 20)

        # Check that the correct data is seen at and after the stable
        # timestamp, and that the history before stable is still readable.
        self.check(value_a, uri_2, nrows, 50)
        self.check(value_a, uri_2, nrows, 80)
        self.check(value_b, uri_2, nrows, 40)
        self.check(value_c, uri_2, nrows, 30)
        self.check(value_d, uri_2, nrows, 20)

        stat_cursor = self.session.open_cursor('statistics:', None, None)
        calls = stat_cursor[stat.conn.txn_rts][2]
        hs_removed = stat_cursor[stat.conn.txn_rts_hs_removed][2]
        hs_sweep = stat_cursor[stat.conn.txn_rts_sweep_hs_keys][2]
        keys_removed = stat_cursor[stat.conn.txn_rts_keys_removed][2]
        keys_restored = stat_cursor[stat.conn.txn_rts_keys_restored][2]
        pages_visited = stat_cursor[stat.conn.txn_rts_pages_visited][2]
        upd_aborted = stat_cursor[stat.conn.txn_rts_upd_aborted][2]
        stat_cursor.close()

        self.assertEqual(calls, 0)
        self.assertEqual(keys_removed, 0)
        self.assertEqual(keys_restored, 0)
        self.assertGreaterEqual(upd_aborted, 0)
        self.assertGreater(pages_visited, 0)
        self.assertGreaterEqual(hs_removed, 0)
        self.assertGreater(hs_sweep, 0)

        # Eviction may emit the following message under cache pressure; ignore it.
        self.ignoreStdoutPatternIfExists(
            "oldest pinned transaction ID rolled back for eviction")
Example #57
0
    def test_tiered(self):
        args = 'key_format=S,block_allocation=log-structured'
        self.verbose(
            3, 'Test log-structured allocation with config: ' + args +
            ' count: ' + str(self.nrecs))
        #ds = SimpleDataSet(self, self.uri, self.nrecs, config=args)
        ds = SimpleDataSet(self, self.uri, 10, config=args)
        ds.populate()
        self.session.checkpoint()
        ds = SimpleDataSet(self, self.uri, 10000, config=args)
        ds.populate()

        self.reopen_conn()
        ds = SimpleDataSet(self, self.uri, 1000, config=args)
        ds.populate()
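Each populate() call above rewrites the object with a different record count; a plain cursor scan is one way to confirm what survived the reopen. A minimal sketch, where check_count() is a hypothetical helper rather than part of the test framework:

# A sketch only; check_count() is a hypothetical helper.
def check_count(session, uri, expected):
    cursor = session.open_cursor(uri, None)
    count = 0
    # A full scan visits every visible record once.
    while cursor.next() == 0:
        count += 1
    cursor.close()
    assert count == expected, 'expected %d rows, saw %d' % (expected, count)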
Example #58
0
    def test_checkpoint_snapshot_with_txnid_and_timestamp(self):
        self.moresetup()

        ds = SimpleDataSet(self, self.uri, 0,
                key_format=self.key_format, value_format=self.value_format,
                config='log=(enabled=false)' + self.extraconfig)
        ds.populate()

        # Pin oldest and stable timestamps to 10.
        self.conn.set_timestamp('oldest_timestamp=' + self.timestamp_str(10) +
                                ',stable_timestamp=' + self.timestamp_str(10))

        session1 = self.conn.open_session()
        session1.begin_transaction()

        self.large_updates(self.uri, self.valuea, ds, self.nrows, 20)
        self.check(self.valuea, self.uri, self.nrows, 20, False)

        session2 = self.conn.open_session()
        session2.begin_transaction()
        cursor2 = session2.open_cursor(self.uri)

        for i in range((self.nrows + 1), (self.nrows * 2) + 1):
            cursor2.set_key(ds.key(i))
            cursor2.set_value(self.valuea)
            self.assertEqual(cursor2.insert(), 0)
        session1.timestamp_transaction('commit_timestamp=' +
                                       self.timestamp_str(30))

        # Set the stable timestamp to 40.
        self.conn.set_timestamp('stable_timestamp=' + self.timestamp_str(40))

        # Create a checkpoint thread.
        done = threading.Event()
        ckpt = checkpoint_thread(self.conn, done)
        try:
            ckpt.start()
            # Sleep for some time so that the checkpoint starts before the
            # last transaction commits.
            time.sleep(2)
            session2.commit_transaction()

        finally:
            done.set()
            ckpt.join()

        session1.rollback_transaction()

        self.perform_backup_or_crash_restart(".", self.backup_dir)

        # Check the table contains the last checkpointed value.
        self.check(self.valuea, self.uri, self.nrows, 30, True)

        stat_cursor = self.session.open_cursor('statistics:', None, None)
        inconsistent_ckpt = stat_cursor[stat.conn.txn_rts_inconsistent_ckpt][2]
        keys_removed = stat_cursor[stat.conn.txn_rts_keys_removed][2]
        stat_cursor.close()

        self.assertGreater(inconsistent_ckpt, 0)
        self.assertGreaterEqual(keys_removed, 0)

        self.perform_backup_or_crash_restart(self.backup_dir, self.backup_dir2)

        # Check the table contains the last checkpointed value.
        self.check(self.valuea, self.uri, self.nrows, 30, True)

        stat_cursor = self.session.open_cursor('statistics:', None, None)
        inconsistent_ckpt = stat_cursor[stat.conn.txn_rts_inconsistent_ckpt][2]
        keys_removed = stat_cursor[stat.conn.txn_rts_keys_removed][2]
        stat_cursor.close()

        self.assertGreaterEqual(inconsistent_ckpt, 0)
        self.assertEqual(keys_removed, 0)
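The three-line statistics pattern above recurs throughout these tests and folds naturally into a helper. A minimal sketch, where get_stat() is a hypothetical name; statistics-cursor entries are (description, value string, numeric value) triples, hence the [2] index:

from wiredtiger import stat

# A sketch only; get_stat() is a hypothetical helper. Index [2] selects
# the numeric value from the statistics-cursor triple.
def get_stat(session, stat_key):
    stat_cursor = session.open_cursor('statistics:', None, None)
    value = stat_cursor[stat_key][2]
    stat_cursor.close()
    return value

# Usage, mirroring the checks above:
#   inconsistent_ckpt = get_stat(self.session, stat.conn.txn_rts_inconsistent_ckpt)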
Example #59
0
    def test_search_eot(self):
        # Populate the tree and reopen the connection, forcing it to disk
        # and moving the records to an on-page format.
        ds = SimpleDataSet(self,
                           self.uri,
                           100,
                           key_format=self.key_format,
                           value_format=self.value_format)
        ds.populate()
        self.reopen_conn()

        # Open a cursor.
        cursor = self.session.open_cursor(self.uri, None)

        # Search for a record at the end of the table, which should succeed.
        cursor.set_key(ds.key(100))
        self.assertEqual(cursor.search(), 0)
        self.assertEqual(cursor.get_key(), ds.key(100))
        self.assertEqual(cursor.get_value(), ds.value(100))

        # Search-near for a record at the end of the table, which should
        # succeed, returning the last record.
        cursor.set_key(ds.key(100))
        self.assertEqual(cursor.search_near(), 0)
        self.assertEqual(cursor.get_key(), ds.key(100))
        self.assertEqual(cursor.get_value(), ds.value(100))

        # Search for a record past the end of the table, which should fail.
        cursor.set_key(ds.key(200))
        self.assertEqual(cursor.search(), wiredtiger.WT_NOTFOUND)

        # Search-near for a record past the end of the table, which should
        # succeed, returning the last record.
        cursor.set_key(ds.key(200))
        self.assertEqual(cursor.search_near(), -1)
        self.assertEqual(cursor.get_key(), ds.key(100))
        self.assertEqual(cursor.get_value(), ds.value(100))
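search_near() encodes where the cursor landed relative to the requested key: 0 for an exact match, -1 for a smaller key (the past-the-end case above), 1 for a larger key. A minimal sketch of an exact-or-nearest lookup built on that contract; find_nearest() is a hypothetical helper:

import wiredtiger

# A sketch only; find_nearest() is a hypothetical helper. search_near()
# returns WT_NOTFOUND only when no visible record exists at all.
def find_nearest(cursor, key):
    cursor.set_key(key)
    ret = cursor.search_near()
    if ret == wiredtiger.WT_NOTFOUND:
        return None
    # The cursor is now positioned on the matched or nearest record.
    return ret, cursor.get_key(), cursor.get_value()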
Example #60
0
    def test_search_invisible_two(self):
        # Populate the tree and reopen the connection, forcing it to disk
        # and moving the records to an on-page format.
        ds = SimpleDataSet(self,
                           self.uri,
                           100,
                           key_format=self.key_format,
                           value_format=self.value_format)
        ds.populate()
        self.reopen_conn()

        # Add some additional visible records.
        cursor = self.session.open_cursor(self.uri, None)
        for i in range(100, 120):
            cursor[ds.key(i)] = ds.value(i)
        cursor.close()

        # Begin a transaction, and add some additional records.
        self.session.begin_transaction()
        cursor = self.session.open_cursor(self.uri, None)
        for i in range(120, 140):
            cursor[ds.key(i)] = ds.value(i)

        # Open a separate session and cursor.
        s = self.conn.open_session()
        cursor = s.open_cursor(self.uri, None)

        # Search for an invisible record.
        cursor.set_key(ds.key(130))
        if self.empty:
            # Invisible updates to fixed-length column-store objects are
            # invisible to the reader, but the fact that they exist past
            # the end of the initial records causes the instantiation of
            # empty records: confirm successful return of an empty record.
            self.assertEqual(cursor.search(), 0)
            self.assertEqual(cursor.get_key(), 130)
            self.assertEqual(cursor.get_value(), 0)
        else:
            # Otherwise, we should not find any matching records.
            self.assertEqual(cursor.search(), wiredtiger.WT_NOTFOUND)

        # Search-near for an invisible record, which should succeed, returning
        # either an instantiated empty record or the last visible record.
        cursor.set_key(ds.key(130))
        ret = cursor.search_near()
        if self.empty:
            # Invisible updates to fixed-length column-store objects are
            # invisible to the reader, but the fact that they exist past
            # the end of the initial records causes the instantiation of
            # empty records: confirm an exact match on an empty record.
            self.assertEqual(ret, 0)
            self.assertEqual(cursor.get_key(), 130)
            self.assertEqual(cursor.get_value(), 0)
        else:
            # Otherwise, we should find the closest record for which we can see
            # the value.
            self.assertEqual(cursor.get_key(), ds.key(119))
            self.assertEqual(cursor.get_value(), ds.value(119))
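The empty branch above is specific to fixed-length column stores: writing past the current end of the table implicitly materializes the intervening record numbers as zero-valued records, so a reader that cannot see the transactional values still finds a key. A minimal standalone sketch of that behavior; the home directory and table URI are illustrative:

import wiredtiger

# A sketch only; the home directory and URI are illustrative. A
# fixed-length column store (key_format=r, value_format=8t) implicitly
# creates zero-valued records below the highest record written.
conn = wiredtiger.wiredtiger_open('.', 'create')
session = conn.open_session()
session.create('table:flcs_demo', 'key_format=r,value_format=8t')

cursor = session.open_cursor('table:flcs_demo', None)
cursor[10] = 1                    # Writing record 10 materializes records 1-9.
cursor.set_key(5)
assert cursor.search() == 0       # Record 5 exists...
assert cursor.get_value() == 0    # ...and reads back as zero.
cursor.close()
conn.close()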