Example #1
class test_overwrite(wttest.WiredTigerTestCase):
    name = 'overwrite'
    scenarios = make_scenarios([
        ('file-r', dict(type='file:', keyfmt='r')),
        ('file-S', dict(type='file:', keyfmt='S')),
        ('lsm-S', dict(type='lsm:', keyfmt='S')),
        ('table-r', dict(type='table:', keyfmt='r')),
        ('table-S', dict(type='table:', keyfmt='S')),
    ])

    # Confirm a cursor configured with/without overwrite correctly handles
    # non-existent records during insert, remove and update operations.
    def test_overwrite_insert(self):
        uri = self.type + self.name
        simple_populate(self, uri, 'key_format=' + self.keyfmt, 100)

        # Insert of an existing record with overwrite off fails.
        cursor = self.session.open_cursor(uri, None, "overwrite=false")
        cursor.set_key(key_populate(cursor, 5))
        cursor.set_value('XXXXXXXXXX')
        self.assertRaises(wiredtiger.WiredTigerError, lambda: cursor.insert())

        # One additional test for the insert method: duplicate the cursor with
        # overwrite configured and then the insert should succeed.  This test
        # is only for the insert method because the remove and update method
        # failure modes are for non-existent records, and you cannot duplicate
        # a cursor pointing to non-existent records.
        cursor = self.session.open_cursor(uri, None, "overwrite=false")
        cursor.set_key(key_populate(cursor, 5))
        dupc = self.session.open_cursor(None, cursor, "overwrite=true")
        dupc.set_value('XXXXXXXXXX')
        self.assertEquals(dupc.insert(), 0)

        # Insert of an existing record with overwrite on succeeds.
        cursor = self.session.open_cursor(uri, None)
        cursor.set_key(key_populate(cursor, 6))
        cursor.set_value('XXXXXXXXXX')
        self.assertEquals(cursor.insert(), 0)

        # Insert of a non-existent record with overwrite off succeeds.
        cursor = self.session.open_cursor(uri, None, "overwrite=false")
        cursor.set_key(key_populate(cursor, 200))
        cursor.set_value('XXXXXXXXXX')
        self.assertEquals(cursor.insert(), 0)

        # Insert of a non-existent record with overwrite on succeeds.
        cursor = self.session.open_cursor(uri, None)
        cursor.set_key(key_populate(cursor, 201))
        cursor.set_value('XXXXXXXXXX')
        self.assertEquals(cursor.insert(), 0)

    def test_overwrite_remove(self):
        uri = self.type + self.name
        simple_populate(self, uri, 'key_format=' + self.keyfmt, 100)

        # Remove of an existing record with overwrite off succeeds.
        cursor = self.session.open_cursor(uri, None, "overwrite=false")
        cursor.set_key(key_populate(cursor, 5))
        self.assertEquals(cursor.remove(), 0)

        # Remove of an existing record with overwrite on succeeds.
        cursor = self.session.open_cursor(uri, None)
        cursor.set_key(key_populate(cursor, 6))
        self.assertEquals(cursor.remove(), 0)

        # Remove of a non-existent record with overwrite off fails.
        cursor = self.session.open_cursor(uri, None, "overwrite=false")
        cursor.set_key(key_populate(cursor, 200))
        self.assertEquals(cursor.remove(), wiredtiger.WT_NOTFOUND)

        # Remove of a non-existent record with overwrite on succeeds.
        cursor = self.session.open_cursor(uri, None)
        cursor.set_key(key_populate(cursor, 201))
        self.assertEquals(cursor.remove(), 0)

    def test_overwrite_update(self):
        uri = self.type + self.name
        simple_populate(self, uri, 'key_format=' + self.keyfmt, 100)

        # Update of an existing record with overwrite off succeeds.
        cursor = self.session.open_cursor(uri, None, "overwrite=false")
        cursor.set_key(key_populate(cursor, 5))
        cursor.set_value('XXXXXXXXXX')
        self.assertEquals(cursor.update(), 0)

        # Update of an existing record with overwrite on succeeds.
        cursor = self.session.open_cursor(uri, None)
        cursor.set_key(key_populate(cursor, 6))
        cursor.set_value('XXXXXXXXXX')
        self.assertEquals(cursor.update(), 0)

        # Update of a non-existent record with overwrite off fails.
        cursor = self.session.open_cursor(uri, None, "overwrite=false")
        cursor.set_key(key_populate(cursor, 200))
        cursor.set_value('XXXXXXXXXX')
        self.assertEquals(cursor.update(), wiredtiger.WT_NOTFOUND)

        # Update of a non-existent record with overwrite on succeeds.
        cursor = self.session.open_cursor(uri, None)
        cursor.set_key(key_populate(cursor, 201))
        cursor.set_value('XXXXXXXXXX')
        self.assertEquals(cursor.update(), 0)
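
The behavior being tested reduces to WiredTiger's overwrite cursor configuration: with the default overwrite=true, insert, update and remove ignore whether the record already exists; with overwrite=false, insert fails on an existing record and update/remove fail on a missing one. A minimal standalone sketch of the insert case, assuming a local wiredtiger Python build and an existing empty WT_HOME directory (directory and table names here are illustrative):

import wiredtiger

conn = wiredtiger.wiredtiger_open('WT_HOME', 'create')
session = conn.open_session()
session.create('table:demo', 'key_format=S,value_format=S')

# Default cursor (overwrite=true): insert silently replaces existing records.
c = session.open_cursor('table:demo', None)
c['key1'] = 'value1'
c['key1'] = 'value2'

# overwrite=false: inserting an existing key raises WiredTigerError.
c = session.open_cursor('table:demo', None, 'overwrite=false')
c.set_key('key1')
c.set_value('value3')
try:
    c.insert()
except wiredtiger.WiredTigerError as e:
    print('insert of existing key failed as expected:', e)

conn.close()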
Example #2
class test_cursor_comparison(wttest.WiredTigerTestCase):
    name = 'test_compare'

    types = [('file', dict(type='file:', lsm=False, dataset=SimpleDataSet)),
             ('lsm', dict(type='table:', lsm=True, dataset=ComplexLSMDataSet)),
             ('table', dict(type='table:', lsm=False, dataset=ComplexDataSet))]
    keyfmt = [('integer', dict(keyfmt='i', valfmt='S')),
              ('recno', dict(keyfmt='r', valfmt='S')),
              ('recno-fix', dict(keyfmt='r', valfmt='8t')),
              ('string', dict(keyfmt='S', valfmt='S'))]

    # Discard invalid or unhelpful scenario combinations.
    def keep(name, d):
        if d['keyfmt'] == 'r':
            # Skip record number keys with LSM.
            if d['lsm']:
                return False
            # Skip complex data sets with FLCS.
            if d['valfmt'] == '8t' and d['dataset'] != SimpleDataSet:
                return False
        else:
            # Skip byte data with row-store.
            if d['valfmt'] == '8t':
                return False
        return True

    scenarios = make_scenarios(types, keyfmt, include=keep)

    def test_cursor_comparison(self):
        uri = self.type + 'compare'
        uriX = self.type + 'compareX'

        # Build the object.
        ds = self.dataset(self, uri, 100, key_format=self.keyfmt)
        dsX = self.dataset(self, uriX, 100, key_format=self.keyfmt)
        ds.populate()
        dsX.populate()
        if self.type == 'file:':
            ix0_0 = None
            ix0_1 = None
            ix1_0 = None
            ixX_0 = None
        else:
            ix0_0 = self.session.open_cursor(ds.index_name(0), None)
            ix0_1 = self.session.open_cursor(ds.index_name(0), None)
            ix1_0 = self.session.open_cursor(ds.index_name(1), None)
            ixX_0 = self.session.open_cursor(dsX.index_name(0), None)
            ix0_0.next()
            ix0_1.next()
            ix1_0.next()
            ixX_0.next()

        c1 = self.session.open_cursor(uri, None)
        c2 = self.session.open_cursor(uri, None)

        # Confirm failure unless the keys are set.
        msg = '/requires key be set/'
        self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
                                     lambda: c1.compare(c2), msg)
        self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
                                     lambda: c2.compare(c1), msg)

        # Test cursors before they're positioned.
        c1.set_key(ds.key(10))
        c2.set_key(ds.key(20))
        self.assertGreater(c2.compare(c1), 0)
        self.assertLess(c1.compare(c2), 0)
        c2.set_key(ds.key(10))
        self.assertEqual(c1.compare(c2), 0)
        self.assertEqual(c2.compare(c1), 0)

        # Confirm failure for different objects.
        cX = self.session.open_cursor(uriX, None)
        cX.set_key(dsX.key(10))
        msg = '/must reference the same object/'
        self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
                                     lambda: cX.compare(c1), msg)
        msg = '/wt_cursor.* is None/'
        self.assertRaisesHavingMessage(RuntimeError, lambda: cX.compare(None),
                                       msg)
        if ix0_0 != None:
            self.assertEqual(ix0_0.compare(ix0_1), 0)
            ix0_1.reset()
            ix0_1.prev()
            self.assertLess(ix0_0.compare(ix0_1), 0)
            self.assertGreater(ix0_1.compare(ix0_0), 0)
            # Main table vs. index not allowed
            msg = '/must reference the same object/'
            self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
                                         lambda: c1.compare(ix0_0), msg)
            # Two unrelated indices not allowed
            self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
                                         lambda: ixX_0.compare(ix0_0), msg)
            # Two different indices from same table not allowed
            self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
                                         lambda: ix0_0.compare(ix1_0), msg)

        # Test cursors after they're positioned (shouldn't matter for compare).
        c1.set_key(ds.key(10))
        self.assertEqual(c1.search(), 0)
        c2.set_key(ds.key(20))
        self.assertEqual(c2.search(), 0)
        self.assertGreater(c2.compare(c1), 0)
        self.assertLess(c1.compare(c2), 0)
        c2.set_key(ds.key(10))
        self.assertEqual(c2.search(), 0)
        self.assertEqual(c1.compare(c2), 0)
        self.assertEqual(c2.compare(c1), 0)

        # Confirm failure for different objects.
        cX = self.session.open_cursor(uriX, None)
        cX.set_key(dsX.key(10))
        self.assertEqual(cX.search(), 0)
        msg = '/must reference the same object/'
        self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
                                     lambda: cX.compare(c1), msg)

    def test_cursor_equality(self):
        uri = self.type + 'equality'
        uriX = self.type + 'compareX'

        # Build the object.
        ds = self.dataset(self, uri, 100, key_format=self.keyfmt)
        dsX = self.dataset(self, uriX, 100, key_format=self.keyfmt)
        ds.populate()
        dsX.populate()
        if self.type == 'file:':
            ix0_0 = None
            ix0_1 = None
            ix1_0 = None
            ixX_0 = None
        else:
            ix0_0 = self.session.open_cursor(ds.index_name(0), None)
            ix0_1 = self.session.open_cursor(ds.index_name(0), None)
            ix1_0 = self.session.open_cursor(ds.index_name(1), None)
            ixX_0 = self.session.open_cursor(dsX.index_name(0), None)
            ix0_0.next()
            ix0_1.next()
            ix1_0.next()
            ixX_0.next()

        c1 = self.session.open_cursor(uri, None)
        c2 = self.session.open_cursor(uri, None)

        # Confirm failure unless the keys are set.
        msg = '/requires key be set/'
        self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
                                     lambda: c1.equals(c2), msg)
        self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
                                     lambda: c2.equals(c1), msg)

        # Test cursors before they're positioned.
        c1.set_key(ds.key(10))
        c2.set_key(ds.key(20))
        self.assertFalse(c1.equals(c2))
        self.assertFalse(c2.equals(c1))
        c2.set_key(ds.key(10))
        self.assertTrue(c1.equals(c2))
        self.assertTrue(c2.equals(c1))

        # Confirm failure for different objects.
        cX = self.session.open_cursor(uriX, None)
        cX.set_key(dsX.key(10))
        msg = '/must reference the same object/'
        self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
                                     lambda: cX.equals(c1), msg)
        msg = '/wt_cursor.* is None/'
        self.assertRaisesHavingMessage(RuntimeError, lambda: cX.equals(None),
                                       msg)
        if ix0_0 != None:
            self.assertTrue(ix0_0.equals(ix0_1))
            ix0_1.reset()
            ix0_1.prev()
            self.assertFalse(ix0_0.equals(ix0_1))
            # Main table vs. index not allowed
            msg = '/must reference the same object/'
            self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
                                         lambda: c1.equals(ix0_0), msg)
            # Two unrelated indices not allowed
            self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
                                         lambda: ixX_0.equals(ix0_0), msg)
            # Two different indices from same table not allowed
            self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
                                         lambda: ix0_0.equals(ix1_0), msg)

        # Test cursors after they're positioned (internally, it's a different
        # search path if keys are positioned in the tree).
        c1.set_key(ds.key(10))
        self.assertEqual(c1.search(), 0)
        c2.set_key(ds.key(20))
        self.assertEqual(c2.search(), 0)
        self.assertFalse(c1.equals(c2))
        self.assertFalse(c2.equals(c1))
        c2.set_key(ds.key(10))
        self.assertEqual(c2.search(), 0)
        self.assertTrue(c1.equals(c2))
        self.assertTrue(c2.equals(c1))

        # Confirm failure for different objects.
        cX = self.session.open_cursor(uriX, None)
        cX.set_key(dsX.key(10))
        self.assertEqual(cX.search(), 0)
        msg = '/must reference the same object/'
        self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
                                     lambda: cX.equals(c1), msg)
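
Stripped of the scenario machinery, compare() orders two cursors over the same object by key (negative, zero, or positive result) and equals() returns a boolean; both only require keys to be set, not positioned, and both fail when the cursors reference different objects. A hedged standalone sketch under the same assumptions as before:

import wiredtiger

conn = wiredtiger.wiredtiger_open('WT_HOME', 'create')
session = conn.open_session()
session.create('table:cmp', 'key_format=S,value_format=S')

c1 = session.open_cursor('table:cmp', None)
c2 = session.open_cursor('table:cmp', None)
c1['a'] = '1'
c1['b'] = '2'

# Keys set but cursors unpositioned: compare() and equals() still work.
c1.set_key('a')
c2.set_key('b')
print(c1.compare(c2))   # negative: 'a' sorts before 'b'
print(c1.equals(c2))    # False
c2.set_key('a')
print(c1.equals(c2))    # True

conn.close()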
Example #3
class test_tiered14(wttest.WiredTigerTestCase):
    uri = "table:test_tiered14-{}"  # format for subtests

    auth_token = "test_token"
    bucket = "mybucket"
    cachedir = "mybucket-cache"
    bucket_prefix = "pfx_"
    extension_name = "local_store"

    # FIXME-WT-7833: enable the commented scenarios and run the
    # test with the --long option.

    # The multiplier makes the size of keys and values progressively larger.
    # A multiplier of 0 makes the keys and values a single length.
    multiplier = [
        ('0', dict(multiplier=0)),
        ('S', dict(multiplier=1)),
        ('M', dict(multiplier=10)),
        #('L', dict(multiplier=100, long_only=True)),
        #('XL', dict(multiplier=1000, long_only=True)),
    ]
    keyfmt = [
        ('integer', dict(keyfmt='i')),
        ('string', dict(keyfmt='S')),
    ]
    dataset = [
        ('simple', dict(dataset='simple')),
        #('complex', dict(dataset='complex', long_only=True)),
    ]
    scenarios = wtscenario.make_scenarios(multiplier, keyfmt, dataset)

    def conn_config(self):
        if not os.path.exists(self.bucket):
            os.mkdir(self.bucket)
        return \
          'tiered_storage=(auth_token=%s,' % self.auth_token + \
          'bucket=%s,' % self.bucket + \
          'bucket_prefix=%s,' % self.bucket_prefix + \
          'cache_directory=%s,' % self.cachedir + \
          'name=%s),tiered_manager=(wait=0)' % self.extension_name

    # Load the local store extension.
    def conn_extensions(self, extlist):
        # Windows doesn't support dynamically loaded extension libraries.
        if os.name == 'nt':
            extlist.skip_if_missing = True
        extlist.extension('storage_sources', self.extension_name)

    def progress(self, s):
        outstr = "testnum {}, position {}: {}".format(self.testnum,
                                                      self.position, s)
        self.verbose(3, outstr)
        self.pr(outstr)

    # Run a sequence of operations, indicated by a string.
    #  a = add some number of keys
    #  u = update some number of keys
    #  c = checkpoint
    #  r = reopen
    #  f = flush_tier
    #  . = check to make sure all expected values are present
    #
    # We require a unique test number so we can generate a different uri from
    # previous runs.  A different approach is to drop the uri, but then we need to
    # remove the bucket and cache, which is specific to the storage source extension.
    def playback(self, testnum, ops):
        self.testnum = testnum
        self.position = -1

        uri = self.uri.format(testnum)
        self.progress('Running ops: {} using uri {}'.format(ops, uri))
        if self.dataset == 'simple':
            ds = TrackedSimpleDataSet(self,
                                      uri,
                                      self.multiplier,
                                      key_format=self.keyfmt)
        elif self.dataset == 'complex':
            ds = TrackedComplexDataSet(self,
                                       uri,
                                       self.multiplier,
                                       key_format=self.keyfmt)

        # Populating a tracked data set is needed to create the uri.
        ds.populate()
        inserted = 0

        # At the end of the sequence of operations, do a final check ('.').
        for op in ops + '.':
            self.position += 1
            try:
                if op == 'f':
                    self.progress('flush_tier')
                    self.session.flush_tier(None)
                elif op == 'c':
                    self.progress('checkpoint')
                    self.session.checkpoint()
                elif op == 'r':
                    self.progress('reopen')
                    self.reopen_conn()
                elif op == 'a':
                    self.progress('add')
                    n = random.randrange(1, 101)  # 1 <= n <= 100
                    ds.store_range(inserted, n)
                    inserted += n
                elif op == 'u':
                    self.progress('update')
                    # only update the elements if enough have already been added.
                    n = random.randrange(1, 101)  # 1 <= n <= 100
                    if n < inserted:
                        pos = random.randrange(0, inserted - n)
                        ds.store_range(pos, n)
                elif op == '.':
                    self.progress('check')
                    ds.check()
            except Exception as e:
                self.progress('Failed at position {} in {}: {}'.format(
                    self.position, ops, str(e)))
                raise

    # Test tiered storage with checkpoints and flush_tier calls.
    def test_tiered(self):
        random.seed(0)

        # Get started with a fixed sequence of basic operations.
        # There's no particular reason to start with this sequence.
        testnum = 0
        self.playback(testnum,
                      "aaaaacaaa.uucrauaf.aauaac.auu.aacrauafa.uruua.")

        for i in range(0, 10):
            testnum += 1
            # Generate a sequence of 100 operations that is heavy on additions and updates.
            s = ''.join(random.choices('aaaaauuuuufcr.', k=100))
            self.playback(testnum, s)

        for i in range(0, 10):
            testnum += 1
            # Generate a sequence of 100 operations that has a greater mix of 'operational' functions.
            s = ''.join(random.choices('aufcr.', k=100))
            self.playback(testnum, s)
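
The skew in the generated sequences comes purely from character repetition: random.choices samples uniformly over the string, so 'aaaaauuuuufcr.' makes adds and updates each five times as likely as any single operational function. A quick standalone check of that distribution:

import random
from collections import Counter

random.seed(0)
s = ''.join(random.choices('aaaaauuuuufcr.', k=100))
print(Counter(s))   # 'a' and 'u' dominate; 'f', 'c', 'r' and '.' are rare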
Example #4
class test_cursor07(wttest.WiredTigerTestCase, suite_subprocess):
    logmax = "100K"
    tablename1 = 'test_cursor07_log'
    tablename2 = 'test_cursor07_nolog'
    tablename3 = 'test_cursor07_nologtxn'
    uri1 = 'table:' + tablename1
    uri2 = 'table:' + tablename2
    uri3 = 'table:' + tablename3
    # A large number of keys will force a log file change, which
    # exercises that scenario for log cursors.
    nkeys = 7000

    scenarios = make_scenarios([('regular', dict(reopen=False)),
                                ('reopen', dict(reopen=True))])

    # Enable logging for this test.
    def conn_config(self):
        return 'log=(enabled,file_max=%s,remove=false),' % self.logmax + \
            'transaction_sync="(method=dsync,enabled)"'

    def test_log_cursor(self):
        # print "Creating %s with config '%s'" % (self.uri, self.create_params)
        create_params = 'key_format=i,value_format=u'
        create_nolog_params = 'key_format=i,value_format=u,log=(enabled=false)'
        self.session.create(self.uri1, create_params)
        c1 = self.session.open_cursor(self.uri1, None)
        self.session.create(self.uri2, create_nolog_params)
        c2 = self.session.open_cursor(self.uri2, None)
        self.session.create(self.uri3, create_nolog_params)
        c3 = self.session.open_cursor(self.uri3, None)

        # A binary value.
        value = b'\x01\x02abcd\x03\x04'
        value_nolog = b'\x01\x02dcba\x03\x04'

        # We want to test both adding data to a table that is not logged
        # that is part of the same transaction as a table that is logged
        # as well as in its own transaction.
        self.session.begin_transaction()
        for k in range(self.nkeys):
            c1[k] = value
            c3[k] = value_nolog
        self.session.commit_transaction()
        c1.close()
        c3.close()

        self.session.begin_transaction()
        for k in range(self.nkeys):
            c2[k] = value_nolog
        self.session.commit_transaction()
        c2.close()

        if self.reopen:
            self.reopen_conn()

        # Check for these values via a log cursor
        c = self.session.open_cursor("log:", None)
        count = 0
        while c.next() == 0:
            # lsn.file, lsn.offset, opcount
            keys = c.get_key()
            # txnid, rectype, optype, fileid, logrec_key, logrec_value
            values = c.get_value()
            # We are only looking for log records that have a key/value
            # pair.
            if values[4] != b'':
                if value in values[5]:  # logrec_value
                    count += 1
        c.close()
        self.assertEqual(count, self.nkeys)
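
As the inline comments note, a log cursor's key is the (lsn.file, lsn.offset, opcount) triple and its value is (txnid, rectype, optype, fileid, logrec_key, logrec_value). A minimal standalone scan, assuming a connection opened with logging enabled:

import wiredtiger

conn = wiredtiger.wiredtiger_open('WT_HOME', 'create,log=(enabled)')
session = conn.open_session()

logc = session.open_cursor('log:', None)
while logc.next() == 0:
    lsn_file, lsn_offset, opcount = logc.get_key()
    txnid, rectype, optype, fileid, logrec_key, logrec_value = logc.get_value()
    # Only operations on user tables carry a key/value pair.
    if logrec_key != b'':
        print(lsn_file, lsn_offset, txnid, logrec_value)
logc.close()
conn.close()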
Example #5
class test_cursor_random(wttest.WiredTigerTestCase):
    types = [('file', dict(type='file:random', dataset=SimpleDataSet)),
             ('table', dict(type='table:random', dataset=ComplexDataSet))]
    config = [('sample',
               dict(config='next_random=true,next_random_sample_size=35')),
              ('not-sample', dict(config='next_random=true'))]
    scenarios = make_scenarios(types, config)

    # Check that opening a random cursor on a row-store returns not-supported
    # for methods other than next, reconfigure and reset, and next returns
    # not-found.
    def test_cursor_random(self):
        uri = self.type
        self.session.create(uri, 'key_format=S,value_format=S')
        cursor = self.session.open_cursor(uri, None, self.config)
        msg = "/Unsupported cursor/"
        self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
                                     lambda: cursor.compare(cursor), msg)
        self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
                                     lambda: cursor.insert(), msg)
        self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
                                     lambda: cursor.prev(), msg)
        self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
                                     lambda: cursor.remove(), msg)
        self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
                                     lambda: cursor.search(), msg)
        self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
                                     lambda: cursor.search_near(), msg)
        self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
                                     lambda: cursor.update(), msg)

        self.assertEqual(cursor.next(), wiredtiger.WT_NOTFOUND)
        self.assertEquals(cursor.reconfigure(), 0)
        self.assertEquals(cursor.reset(), 0)
        cursor.close()

    # Check that next_random works with a single value, repeatedly.
    def test_cursor_random_single_record(self):
        uri = self.type
        self.session.create(uri, 'key_format=S,value_format=S')
        cursor = self.session.open_cursor(uri, None)
        cursor['AAA'] = 'BBB'
        cursor.close()
        cursor = self.session.open_cursor(uri, None, self.config)
        for i in range(1, 5):
            self.assertEquals(cursor.next(), 0)
            self.assertEquals(cursor.get_key(), 'AAA')
        cursor.close()

    # Check that next_random works in the presence of a larger set of values,
    # where the values are in an insert list.
    def test_cursor_random_multiple_insert_records(self):
        uri = self.type
        ds = self.dataset(self,
                          uri,
                          100,
                          config='allocation_size=512,leaf_page_max=512')
        ds.populate()

        # In an insert list, next_random always selects the middle key/value
        # pair, all we can do is confirm cursor.next works.
        cursor = self.session.open_cursor(uri, None, self.config)
        self.assertEqual(cursor.next(), 0)

    # Check that next_random works in the presence of a larger set of values,
    # where the values are in a disk format page.
    def cursor_random_multiple_page_records(self, reopen):
        uri = self.type
        ds = self.dataset(self,
                          uri,
                          10000,
                          config='allocation_size=512,leaf_page_max=512')
        ds.populate()

        # Optionally close the connection so everything is forced to disk,
        # insert lists are an entirely different path in the code.
        if reopen:
            self.reopen_conn()

        cursor = self.session.open_cursor(uri, None, self.config)
        last = ''
        match = 0
        for i in range(1, 10):
            self.assertEqual(cursor.next(), 0)
            current = cursor.get_key()
            if current == last:
                match += 1
            last = current
        self.assertLess(
            match, 5,
            'next_random did not return random records, too many matches found'
        )

    def test_cursor_random_multiple_page_records_reopen(self):
        self.cursor_random_multiple_page_records(1)

    def test_cursor_random_multiple_page_records(self):
        self.cursor_random_multiple_page_records(0)
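
next_random is plain cursor configuration, not a special URI; once set, next() jumps to a quasi-random record instead of advancing in key order (and, as the test above allows for, repeated calls may legitimately return the same record now and then). A sketch under the same assumptions as before:

import wiredtiger

conn = wiredtiger.wiredtiger_open('WT_HOME', 'create')
session = conn.open_session()
session.create('table:rand', 'key_format=S,value_format=S')

c = session.open_cursor('table:rand', None)
for i in range(100):
    c['key%03d' % i] = 'value%03d' % i
c.close()

# Each next() positions the cursor on a pseudo-random record.
rc = session.open_cursor('table:rand', None, 'next_random=true')
for _ in range(5):
    rc.next()
    print(rc.get_key())

conn.close()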
Example #6
class test_stat01(wttest.WiredTigerTestCase):
    """
    Test statistics
    """

    config = 'internal_page_max=4K,leaf_page_max=8K'
    nentries = 25

    types = [('file', dict(uri='file:test_stat01.wt')),
             ('table', dict(uri='table:test_stat01.wt'))]
    keyfmt = [
        ('recno', dict(keyfmt='r')),
        ('string', dict(keyfmt='S')),
    ]
    scenarios = make_scenarios(types, keyfmt)

    conn_config = 'statistics=(all)'

    def statstr_to_int(self, s):
        """
        Convert a statistics value string, which may be in either form:
        '12345' or '33M (33604836)'
        """
        parts = s.rpartition('(')
        return int(parts[2].rstrip(')'))

    # Do a quick check of the entries in the stats cursor; the "lookfor"
    # string should appear with a minimum value of at least "min".
    def check_stats(self, statcursor, min, lookfor):
        stringclass = ''.__class__
        intclass = (0).__class__

        # Reset the cursor, we're called multiple times.
        statcursor.reset()

        found = False
        foundval = 0
        for id, desc, valstr, val in statcursor:
            self.assertEqual(type(desc), stringclass)
            self.assertEqual(type(valstr), stringclass)
            self.assertEqual(type(val), intclass)
            self.assertEqual(val, self.statstr_to_int(valstr))
            self.printVerbose(
                2, '  stat: \'' + desc + '\', \'' + valstr + '\', ' + str(val))
            if desc == lookfor:
                found = True
                foundval = val

        self.assertTrue(found, 'in stats, did not see: ' + lookfor)
        self.assertTrue(foundval >= min)

    # Test simple connection statistics.
    def test_basic_conn_stats(self):
        # Build an object and force some writes.
        SimpleDataSet(self,
                      self.uri,
                      1000,
                      config=self.config,
                      key_format=self.keyfmt).populate()
        self.session.checkpoint(None)

        # See that we can get a specific stat value by its key and verify its
        # entry is self-consistent.
        allstat_cursor = self.session.open_cursor('statistics:', None, None)
        self.check_stats(allstat_cursor, 10, 'block-manager: blocks written')

        values = allstat_cursor[stat.conn.block_write]
        self.assertEqual(values[0], 'block-manager: blocks written')
        val = self.statstr_to_int(values[1])
        self.assertEqual(val, values[2])
        allstat_cursor.close()

    # Test simple object statistics.
    def test_basic_data_source_stats(self):
        # Build an object.
        config = self.config + ',key_format=' + self.keyfmt
        self.session.create(self.uri, config)
        cursor = self.session.open_cursor(self.uri, None, None)
        value = ""
        for i in range(1, self.nentries):
            value = value + 1000 * "a"
            cursor[simple_key(cursor, i)] = value
        cursor.close()

        # Force the object to disk, otherwise we can't check the overflow count.
        self.reopen_conn()

        # See that we can get a specific stat value by its key and verify its
        # entry is self-consistent.
        cursor = self.session.open_cursor('statistics:' + self.uri, None, None)
        self.check_stats(cursor, 8192, 'btree: maximum leaf page size')
        self.check_stats(cursor, 4096, 'btree: maximum internal page size')
        self.check_stats(cursor, 10, 'btree: overflow pages')

        values = cursor[stat.dsrc.btree_overflow]
        self.assertEqual(values[0], 'btree: overflow pages')
        val = self.statstr_to_int(values[1])
        self.assertEqual(val, values[2])
        cursor.close()

        cursor = self.session.open_cursor('statistics:' + self.uri, None,
                                          "statistics=(size)")
        values = cursor[stat.dsrc.block_size]
        self.assertNotEqual(values[2], 0)
        cursor.close()

    # Test simple per-checkpoint statistics.
    def test_checkpoint_stats(self):
        ds = SimpleDataSet(self,
                           self.uri,
                           self.nentries,
                           config=self.config,
                           key_format=self.keyfmt)
        for name in ('first', 'second', 'third'):
            ds.populate()
            self.session.checkpoint('name=' + name)
            cursor = self.session.open_cursor('statistics:' + self.uri, None,
                                              'checkpoint=' + name)
            self.assertEqual(cursor[stat.dsrc.btree_entries][2], self.nentries)
            cursor.close()

    def test_missing_file_stats(self):
        self.assertRaises(
            wiredtiger.WiredTigerError,
            lambda: self.session.open_cursor('statistics:file:DoesNotExist'))
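
Statistics cursors can be scanned in full or probed by a key from wiredtiger.stat; either way an entry carries a description, a human-readable value string, and the integer value. A minimal sketch against a connection opened with statistics=(all):

import wiredtiger
from wiredtiger import stat

conn = wiredtiger.wiredtiger_open('WT_HOME', 'create,statistics=(all)')
session = conn.open_session()

sc = session.open_cursor('statistics:', None, None)
desc, valstr, val = sc[stat.conn.block_write]   # probe a single stat by key
print(desc, valstr, val)
sc.reset()
for key, desc, valstr, val in sc:               # or scan every entry
    pass
sc.close()
conn.close()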
Example #7
class test_rollback_to_stable19(test_rollback_to_stable_base):
    session_config = 'isolation=snapshot'

    in_memory_values = [('no_inmem', dict(in_memory=False)),
                        ('inmem', dict(in_memory=True))]

    key_format_values = [
        ('column', dict(key_format='r')),
        ('integer_row', dict(key_format='i')),
    ]

    restart_options = [
        ('shutdown', dict(crash=False)),
        ('crash', dict(crash=True)),
    ]

    scenarios = make_scenarios(in_memory_values, key_format_values,
                               restart_options)

    def conn_config(self):
        config = 'cache_size=50MB,statistics=(all),log=(enabled=false),eviction_dirty_trigger=5,eviction_updates_trigger=5'
        if self.in_memory:
            config += ',in_memory=true'
        else:
            config += ',in_memory=false'
        return config

    def test_rollback_to_stable_no_history(self):
        nrows = 1000

        # Prepare transactions for column store table is not yet supported.
        if self.key_format == 'r':
            self.skipTest(
                'Prepare transactions for column store table is not yet supported'
            )

        # Create a table without logging.
        uri = "table:rollback_to_stable19"
        ds = SimpleDataSet(self,
                           uri,
                           0,
                           key_format=self.key_format,
                           value_format="S",
                           config='log=(enabled=false)')
        ds.populate()

        # Pin oldest and stable timestamps to 10.
        self.conn.set_timestamp('oldest_timestamp=' + timestamp_str(10) +
                                ',stable_timestamp=' + timestamp_str(10))

        valuea = "aaaaa" * 100

        # Perform several updates and removes.
        s = self.conn.open_session()
        cursor = s.open_cursor(uri)
        s.begin_transaction()
        for i in range(1, nrows + 1):
            cursor[ds.key(i)] = valuea
            cursor.set_key(i)
            cursor.remove()
        cursor.close()
        s.prepare_transaction('prepare_timestamp=' + timestamp_str(20))

        # Configure debug behavior on a cursor so that the page it is
        # positioned on is evicted when the cursor is reset.
        evict_cursor = self.session.open_cursor(uri, None,
                                                "debug=(release_evict)")

        # Search for the key so we position our cursor on the page that we want to evict.
        self.session.begin_transaction("ignore_prepare = true")
        evict_cursor.set_key(1)
        evict_cursor.search()
        evict_cursor.reset()
        evict_cursor.close()
        self.session.commit_transaction()

        # Pin stable timestamp to 20.
        self.conn.set_timestamp('stable_timestamp=' + timestamp_str(20))
        if not self.in_memory:
            self.session.checkpoint()

        if not self.in_memory:
            if self.crash:
                simulate_crash_restart(self, ".", "RESTART")
            else:
                # Close and reopen the connection
                self.reopen_conn()
        else:
            self.conn.rollback_to_stable()
            s.rollback_transaction()

        # Verify data is not visible.
        self.check(valuea, uri, 0, 20)
        self.check(valuea, uri, 0, 30)

        stat_cursor = self.session.open_cursor('statistics:', None, None)
        upd_aborted = stat_cursor[stat.conn.txn_rts_upd_aborted][2]
        keys_removed = stat_cursor[stat.conn.txn_rts_keys_removed][2]
        self.assertGreater(upd_aborted, 0)
        self.assertGreater(keys_removed, 0)

    def test_rollback_to_stable_with_history(self):
        nrows = 1000

        # Prepare transactions for column store table is not yet supported.
        if self.key_format == 'r':
            self.skipTest(
                'Prepare transactions for column store table is not yet supported'
            )

        # Create a table without logging.
        uri = "table:rollback_to_stable19"
        ds = SimpleDataSet(self,
                           uri,
                           0,
                           key_format=self.key_format,
                           value_format="S",
                           config='log=(enabled=false)')
        ds.populate()

        # Pin oldest and stable timestamps to 10.
        self.conn.set_timestamp('oldest_timestamp=' + timestamp_str(10) +
                                ',stable_timestamp=' + timestamp_str(10))

        valuea = "aaaaa" * 100
        valueb = "bbbbb" * 100

        # Perform several updates.
        self.large_updates(uri, valuea, ds, nrows, 0, 20)

        # Perform several removes.
        self.large_removes(uri, ds, nrows, 0, 30)

        # Perform several updates and removes.
        s = self.conn.open_session()
        cursor = s.open_cursor(uri)
        s.begin_transaction()
        for i in range(1, nrows + 1):
            cursor[ds.key(i)] = valueb
            cursor.set_key(i)
            cursor.remove()
        cursor.close()
        s.prepare_transaction('prepare_timestamp=' + timestamp_str(40))

        # Configure debug behavior on a cursor so that the page it is
        # positioned on is evicted when the cursor is reset.
        evict_cursor = self.session.open_cursor(uri, None,
                                                "debug=(release_evict)")

        # Search for the key so we position our cursor on the page that we want to evict.
        self.session.begin_transaction("ignore_prepare = true")
        evict_cursor.set_key(1)
        evict_cursor.search()
        evict_cursor.reset()
        evict_cursor.close()
        self.session.commit_transaction()

        # Pin stable timestamp to 40.
        self.conn.set_timestamp('stable_timestamp=' + timestamp_str(40))
        if not self.in_memory:
            self.session.checkpoint()

        if not self.in_memory:
            if self.crash:
                simulate_crash_restart(self, ".", "RESTART")
            else:
                # Close and reopen the connection
                self.reopen_conn()
        else:
            self.conn.rollback_to_stable()
            s.rollback_transaction()

        # Verify data.
        self.check(valuea, uri, nrows, 20)
        self.check(valuea, uri, 0, 30)
        self.check(valuea, uri, 0, 40)

        stat_cursor = self.session.open_cursor('statistics:', None, None)
        upd_aborted = stat_cursor[stat.conn.txn_rts_upd_aborted][2]
        hs_removed = stat_cursor[stat.conn.txn_rts_hs_removed][2]
        self.assertGreater(upd_aborted, 0)
        if not self.in_memory:
            self.assertGreater(hs_removed, 0)
Example #8
class test_config03(test_base03.test_base03):
    K = 1024
    M = 1024 * K
    G = 1024 * M

    cache_size_scenarios = wtscenario.quick_scenarios(
        's_cache_size', [1 * M, 20 * M, 100 * M, 1 * G, None],
        [0.6, 0.6, 0.6, 0.6, 0.6])
    create_scenarios = wtscenario.quick_scenarios('s_create',
                                                  [True, False, None],
                                                  [1.0, 0.2, 0.3])
    error_prefix_scenarios = wtscenario.quick_scenarios(
        's_error_prefix', [None, "errpfx:"], [1.0, 0.2])
    # eviction_target < eviction_trigger -- checked later
    eviction_target_scenarios = wtscenario.quick_scenarios(
        's_eviction_target', [10, 40, 85, 98], None)
    eviction_trigger_scenarios = wtscenario.quick_scenarios(
        's_eviction_trigger', [50, 90, 95, 99], None)
    multiprocess_scenarios = wtscenario.quick_scenarios(
        's_multiprocess', [True, False], [1.0, 1.0])
    session_max_scenarios = wtscenario.quick_scenarios('s_session_max',
                                                       [3, 30, 300], None)
    transactional_scenarios = wtscenario.quick_scenarios(
        's_transactional', [True, False], [0.2, 1.0])

    # Note: we are not using any truly verbose scenarios until we have
    # a way to redirect verbose output to a file in Python.
    #
    #verbose_scenarios = wtscenario.quick_scenarios('s_verbose',
    #    ['block', 'evict,evictserver', 'fileops,hazard,mutex',
    #     'read,readserver,reconcile,salvage','verify,write',''], None)
    verbose_scenarios = wtscenario.quick_scenarios('s_verbose', [None], None)

    config_vars = [
        'cache_size', 'create', 'error_prefix', 'eviction_target',
        'eviction_trigger', 'multiprocess', 'session_max', 'verbose'
    ]

    scenarios = wtscenario.make_scenarios(cache_size_scenarios,
                                          create_scenarios,
                                          error_prefix_scenarios,
                                          eviction_target_scenarios,
                                          eviction_trigger_scenarios,
                                          multiprocess_scenarios,
                                          session_max_scenarios,
                                          transactional_scenarios,
                                          verbose_scenarios,
                                          prune=100,
                                          prunelong=1000)

    #wttest.WiredTigerTestCase.printVerbose(2, 'test_config03: running ' + \
    #                      str(len(scenarios)) + ' of ' + \
    #                      str(len(all_scenarios)) + ' possible scenarios')

    def setUpConnectionOpen(self, dir):
        args = ''
        # add names to args, e.g. args += ',session_max=30'
        for var in self.config_vars:
            value = getattr(self, 's_' + var)
            if value != None:
                if var == 'verbose':
                    value = '[' + str(value) + ']'
                if value == True:
                    value = 'true'
                if value == False:
                    value = 'false'
                args += ',' + var + '=' + str(value)
        args += ','
        self.pr('wiredtiger_open with args: ' + args)

        expect_fail = False
        successargs = args
        if self.s_create == False:
            successargs = successargs.replace(',create=false,', ',create,')
            expect_fail = True
            fail_msg = '/(No such file or directory|The system cannot find the file specified)/'
        elif self.s_create == None:
            successargs = successargs + 'create=true,'
            expect_fail = True
            fail_msg = '/(No such file or directory|The system cannot find the file specified)/'

        if self.s_eviction_target >= self.s_eviction_trigger:
            # construct args that guarantee that target < trigger
            # we know that trigger >= 1
            repfrom = ',eviction_target=' + str(self.s_eviction_target)
            repto = ',eviction_target=' + str(self.s_eviction_trigger - 1)
            successargs = successargs.replace(repfrom, repto)
            if not expect_fail:
                expect_fail = True
                fail_msg = \
                    '/eviction target must be lower than the eviction trigger/'

        if expect_fail:
            self.verbose(3, 'wiredtiger_open (should fail) with args: ' + args)
            self.assertRaisesWithMessage(
                wiredtiger.WiredTigerError,
                lambda: wiredtiger.wiredtiger_open(dir, args), fail_msg)
            args = successargs

        self.verbose(3, 'wiredtiger_open with args: ' + args)
        conn = self.wiredtiger_open(dir, args)
        self.pr(repr(conn))
        return conn
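
setUpConnectionOpen() above is really exercising wiredtiger_open's configuration parser. The same knobs can be passed directly; a minimal sketch covering a few of the variables the scenarios generate (values here are illustrative, not taken from any particular scenario):

import wiredtiger

conn = wiredtiger.wiredtiger_open(
    'WT_HOME',
    'create,cache_size=20M,error_prefix="errpfx:",'
    'eviction_target=40,eviction_trigger=95,session_max=30')
conn.close()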
Example #9
class test_checkpoint(wttest.WiredTigerTestCase):
    conn_config = 'statistics=(all),timing_stress_for_test=[checkpoint_slow]'
    session_config = 'isolation=snapshot'

    format_values = [
        ('column-fix',
         dict(key_format='r',
              value_format='8t',
              extraconfig=',allocation_size=512,leaf_page_max=512')),
        ('column', dict(key_format='r', value_format='S', extraconfig='')),
        ('string_row', dict(key_format='S', value_format='S', extraconfig='')),
    ]
    name_values = [
        ('nn',
         dict(first_checkpoint='first_checkpoint',
              second_checkpoint='second_checkpoint')),
        ('nu', dict(first_checkpoint='first_checkpoint',
                    second_checkpoint=None)),
        # This doesn't work because there's no way to open the first unnamed checkpoint.
        #('un', dict(first_checkpoint=None, second_checkpoint='second_checkpoint')),
    ]
    scenarios = make_scenarios(format_values, name_values)

    def large_updates(self, uri, ds, nrows, value):
        cursor = self.session.open_cursor(uri)
        self.session.begin_transaction()
        for i in range(1, nrows + 1):
            cursor[ds.key(i)] = value
            if i % 101 == 0:
                self.session.commit_transaction()
                self.session.begin_transaction()
        self.session.commit_transaction()
        cursor.close()

    # "expected" is a list of maps from values to counts of values.
    def check(self, ds, ckpt, nrows, value):
        if ckpt is None:
            ckpt = 'WiredTigerCheckpoint'
        cursor = self.session.open_cursor(ds.uri, None, 'checkpoint=' + ckpt)
        #self.session.begin_transaction()
        count = 0
        for k, v in cursor:
            self.assertEqual(v, value)
            count += 1
        self.assertEqual(count, nrows)
        #self.session.rollback_transaction()
        cursor.close()

    def test_checkpoint(self):
        uri = 'table:checkpoint14'
        nrows = 10000

        # Create a table.
        ds = SimpleDataSet(self,
                           uri,
                           0,
                           key_format=self.key_format,
                           value_format=self.value_format,
                           config=self.extraconfig)
        ds.populate()

        if self.value_format == '8t':
            nrows *= 5
            value_a = 97
            value_b = 98
            value_c = 99
        else:
            value_a = "aaaaa" * 100
            value_b = "bbbbb" * 100
            value_c = "ccccc" * 100

        # Write some baseline data.
        self.large_updates(uri, ds, nrows, value_a)
        # Write this data out now so we aren't waiting for it while trying to
        # race with the later data.
        self.session.checkpoint()

        # Write some more data, and hold the transaction open.
        session2 = self.conn.open_session()
        cursor2 = session2.open_cursor(uri)
        session2.begin_transaction()
        for i in range(1, nrows + 1):
            cursor2[ds.key(i)] = value_b

        # Checkpoint in the background.
        done = threading.Event()
        if self.first_checkpoint is None:
            ckpt = checkpoint_thread(self.conn, done)
        else:
            ckpt = named_checkpoint_thread(self.conn, done,
                                           self.first_checkpoint)
        try:
            ckpt.start()

            # Wait for checkpoint to start before committing.
            ckpt_started = 0
            while not ckpt_started:
                stat_cursor = self.session.open_cursor('statistics:', None,
                                                       None)
                ckpt_started = stat_cursor[stat.conn.txn_checkpoint_running][2]
                stat_cursor.close()
                time.sleep(1)

            session2.commit_transaction()
        finally:
            done.set()
            ckpt.join()

        # Rinse and repeat.
        session2.begin_transaction()
        for i in range(1, nrows + 1):
            cursor2[ds.key(i)] = value_c

        # Checkpoint in the background.
        done = threading.Event()
        if self.second_checkpoint is None:
            ckpt = checkpoint_thread(self.conn, done)
        else:
            ckpt = named_checkpoint_thread(self.conn, done,
                                           self.second_checkpoint)
        try:
            ckpt.start()
            # Sleep a bit so that checkpoint starts before committing last transaction.
            time.sleep(2)
            session2.commit_transaction()
        finally:
            done.set()
            ckpt.join()

        # Other tests check for whether the visibility of a partially-written transaction
        # is handled correctly. Here we're interested in whether the visibility mechanism
        # is using the right snapshot for the checkpoint we're reading. So insist that we
        # not see the value_b transaction in the first checkpoint, or the value_c transaction
        # in the second checkpoint. If test machine lag causes either transaction to commit
        # before the checkpoint starts, we'll see value_b in the first checkpoint and/or
        # value_c in the second. But also, if we end up using the second checkpoint's snapshot
        # for the first checkpoint, we'll see value_b. So if this happens more than once in a
        # blue moon we should probably strengthen the test so we can more reliably distinguish
        # the cases, probably by doing a third transaction/checkpoint pair.
        #
        # If we end up using the first checkpoint's snapshot for reading the second checkpoint,
        # we'll most likely see no data at all; that would be a serious failure if it happened.

        # Read the checkpoints.
        self.check(ds, self.first_checkpoint, nrows, value_a)
        self.check(ds, self.second_checkpoint, nrows, value_b)

        # If we haven't died yet, pretend to crash, and run RTS to see if the
        # (second) checkpoint was inconsistent. Unfortunately we can't readily
        # check on both.
        simulate_crash_restart(self, ".", "RESTART")

        # Make sure we did get an inconsistent checkpoint.
        stat_cursor = self.session.open_cursor('statistics:', None, None)
        inconsistent_ckpt = stat_cursor[stat.conn.txn_rts_inconsistent_ckpt][2]
        stat_cursor.close()
        self.assertGreater(inconsistent_ckpt, 0)
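
Reading from a checkpoint, as check() does above, only requires passing checkpoint=<name> when opening the cursor (or checkpoint=WiredTigerCheckpoint for the most recent unnamed one); the cursor then sees the table as of that checkpoint, not the live data. A sketch:

import wiredtiger

conn = wiredtiger.wiredtiger_open('WT_HOME', 'create')
session = conn.open_session()
session.create('table:ckpt', 'key_format=S,value_format=S')

c = session.open_cursor('table:ckpt')
c['k'] = 'old'
session.checkpoint('name=first_checkpoint')
c['k'] = 'new'

old = session.open_cursor('table:ckpt', None, 'checkpoint=first_checkpoint')
print(old['k'])   # 'old': the checkpoint predates the second write
old.close()
conn.close()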
Example #10
class test_rollback_to_stable06(test_rollback_to_stable_base):
    session_config = 'isolation=snapshot'

    in_memory_values = [('no_inmem', dict(in_memory=False)),
                        ('inmem', dict(in_memory=True))]

    prepare_values = [('no_prepare', dict(prepare=False)),
                      ('prepare', dict(prepare=True))]

    scenarios = make_scenarios(in_memory_values, prepare_values)

    def conn_config(self):
        config = 'cache_size=50MB,statistics=(all)'
        if self.in_memory:
            config += ',in_memory=true'
        else:
            config += ',log=(enabled),in_memory=false'
        return config

    def test_rollback_to_stable(self):
        nrows = 1000

        # Create a table without logging.
        uri = "table:rollback_to_stable06"
        ds = SimpleDataSet(self,
                           uri,
                           0,
                           key_format="i",
                           value_format="S",
                           config='log=(enabled=false)')
        ds.populate()

        # Pin oldest and stable to timestamp 10.
        self.conn.set_timestamp('oldest_timestamp=' + timestamp_str(10) +
                                ',stable_timestamp=' + timestamp_str(10))

        value_a = "aaaaa" * 100
        value_b = "bbbbb" * 100
        value_c = "ccccc" * 100
        value_d = "ddddd" * 100

        # Perform several updates.
        self.large_updates(uri, value_a, ds, nrows, 20)
        self.large_updates(uri, value_b, ds, nrows, 30)
        self.large_updates(uri, value_c, ds, nrows, 40)
        self.large_updates(uri, value_d, ds, nrows, 50)

        # Verify data is visible and correct.
        self.check(value_a, uri, nrows, 20)
        self.check(value_b, uri, nrows, 30)
        self.check(value_c, uri, nrows, 40)
        self.check(value_d, uri, nrows, 50)

        # Checkpoint to ensure the data is flushed, then rollback to the stable timestamp.
        if not self.in_memory:
            self.session.checkpoint()
        self.conn.rollback_to_stable()

        # Check that all keys are removed.
        self.check(value_a, uri, 0, 20)
        self.check(value_b, uri, 0, 30)
        self.check(value_c, uri, 0, 40)
        self.check(value_d, uri, 0, 50)

        stat_cursor = self.session.open_cursor('statistics:', None, None)
        calls = stat_cursor[stat.conn.txn_rts][2]
        hs_removed = stat_cursor[stat.conn.txn_rts_hs_removed][2]
        keys_removed = stat_cursor[stat.conn.txn_rts_keys_removed][2]
        keys_restored = stat_cursor[stat.conn.txn_rts_keys_restored][2]
        pages_visited = stat_cursor[stat.conn.txn_rts_pages_visited][2]
        upd_aborted = stat_cursor[stat.conn.txn_rts_upd_aborted][2]
        stat_cursor.close()

        self.assertEqual(calls, 1)
        self.assertEqual(keys_restored, 0)
        self.assertGreater(pages_visited, 0)
        self.assertGreaterEqual(keys_removed, 0)
        if self.in_memory:
            self.assertEqual(upd_aborted, nrows * 4)
            self.assertEqual(hs_removed, 0)
        else:
            self.assertGreaterEqual(upd_aborted, 0)
            self.assertGreaterEqual(hs_removed, nrows * 3)
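
Condensed, the rollback_to_stable sequence in this test is: pin oldest/stable, commit at later timestamps, then call rollback_to_stable() to discard everything newer than stable. Timestamps are hexadecimal strings. A sketch:

import wiredtiger

conn = wiredtiger.wiredtiger_open('WT_HOME', 'create')
session = conn.open_session()
session.create('table:rts', 'key_format=i,value_format=S')
conn.set_timestamp('oldest_timestamp=%x,stable_timestamp=%x' % (10, 10))

c = session.open_cursor('table:rts')
session.begin_transaction()
c[1] = 'committed-at-20'
session.commit_transaction('commit_timestamp=%x' % 20)
c.close()

# Stable is still 10, so the update committed at 20 is discarded.
session.checkpoint()
conn.rollback_to_stable()
c = session.open_cursor('table:rts')
c.set_key(1)
print(c.search())   # wiredtiger.WT_NOTFOUND
conn.close()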
Example #11
class test_timestamp07(wttest.WiredTigerTestCase, suite_subprocess):
    tablename = 'ts07_ts_nologged'
    tablename2 = 'ts07_nots_logged'
    tablename3 = 'ts07_ts_logged'

    format_values = [
        ('string-row', dict(key_format='i', value_format='S')),
        ('column', dict(key_format='r', value_format='S')),
        ('column-fix', dict(key_format='r', value_format='8t')),
    ]

    types = [
        ('file', dict(uri='file:', use_cg=False, use_index=False)),
        ('table-cg', dict(uri='table:', use_cg=True, use_index=False)),
    ]

    conncfg = [
        ('nolog', dict(conn_config='create,cache_size=2M', using_log=False)),
        ('log', dict(conn_config='create,log=(file_max=1M,archive=false,enabled),cache_size=2M', using_log=True)),
    ]

    nkeys = [
        ('100keys', dict(nkeys=100)),
        ('500keys', dict(nkeys=500)),
        ('1000keys', dict(nkeys=1000)),
    ]

    scenarios = make_scenarios(format_values, types, conncfg, nkeys)

    # Binary values.
    def moreinit(self):
        if self.value_format == '8t':
            self.value = 2
            self.value2 = 4
            self.value3 = 6
        else:
            self.value = u'\u0001\u0002abcd\u0007\u0004'
            self.value2 = u'\u0001\u0002dcba\u0007\u0004'
            self.value3 = u'\u0001\u0002cdef\u0007\u0004'

    # Check that a cursor (optionally started in a new transaction) sees the
    # expected value for a key.
    def check(self, session, txn_config, k, expected, flcs_expected):
        # In FLCS the table extends under uncommitted writes and we expect to
        # see zero rather than NOTFOUND.
        if self.value_format == '8t' and flcs_expected is not None:
            expected = flcs_expected
        if txn_config:
            session.begin_transaction(txn_config)
        c = session.open_cursor(self.uri + self.tablename, None)
        if expected is None:
            c.set_key(k)
            self.assertEqual(c.search(), wiredtiger.WT_NOTFOUND)
        else:
            self.assertEqual(c[k], expected)
        c.close()
        if txn_config:
            session.commit_transaction()

    # Check reads of all tables at a timestamp
    def check_reads(self, session, txn_config, check_value, valcnt, valcnt2, valcnt3):
        if txn_config:
            session.begin_transaction(txn_config)
        c = session.open_cursor(self.uri + self.tablename, None)
        c2 = session.open_cursor(self.uri + self.tablename2, None)
        c3 = session.open_cursor(self.uri + self.tablename3, None)

        # In FLCS the values are bytes, which are numbers, but the tests below are via
        # string inclusion rather than just equality of values. Not sure why that is, but
        # I'm going to assume there's a reason for it and not change things. Compensate.
        if self.value_format == '8t':
            check_value = str(check_value)

        count = 0
        for k, v in c:
            if check_value in str(v):
                count += 1
        c.close()
        count2 = 0
        for k, v in c2:
            if check_value in str(v):
                count2 += 1
        c2.close()
        count3 = 0
        for k, v in c3:
            if check_value in str(v):
                count3 += 1
        c3.close()
        if txn_config:
            session.commit_transaction()
        self.assertEqual(count, valcnt)
        self.assertEqual(count2, valcnt2)
        self.assertEqual(count3, valcnt3)

    #
    # Take a backup of the database and verify that the value we want to
    # check exists in the tables the expected number of times.
    #
    def backup_check(self, check_value, valcnt, valcnt2, valcnt3):
        newdir = "BACKUP"
        copy_wiredtiger_home(self, '.', newdir, True)

        conn = self.setUpConnectionOpen(newdir)
        session = self.setUpSessionOpen(conn)
        c = session.open_cursor(self.uri + self.tablename, None)
        c2 = session.open_cursor(self.uri + self.tablename2, None)
        c3 = session.open_cursor(self.uri + self.tablename3, None)

        # In FLCS the values are bytes, which are numbers, but the tests below are via
        # string inclusion rather than just equality of values. Not sure why that is, but
        # I'm going to assume there's a reason for it and not change things. Compensate.
        if self.value_format == '8t':
            check_value = str(check_value)

        # Count how many times the second value is present
        count = 0
        for k, v in c:
            if check_value in str(v):
                # print "check_value found in key " + str(k)
                count += 1
        c.close()
        # Count how many times the second value is present in the
        # non-timestamp table.
        count2 = 0
        for k, v in c2:
            if check_value in str(v):
                # print "check_value found in key " + str(k)
                count2 += 1
        c2.close()
        # Count how many times the second value is present in the
        # logged timestamp table.
        count3 = 0
        for k, v in c3:
            if check_value in str(v):
                count3 += 1
        c3.close()
        conn.close()
        # print "CHECK BACKUP: Count " + str(count) + " Count2 " + str(count2) + " Count3 " + str(count3)
        # print "CHECK BACKUP: Expect value2 count " + str(valcnt)
        # print "CHECK BACKUP: 2nd table Expect value2 count " + str(valcnt2)
        # print "CHECK BACKUP: 3rd table Expect value2 count " + str(valcnt3)
        self.assertEqual(count, valcnt)
        self.assertEqual(count2, valcnt2)
        self.assertEqual(count3, valcnt3)

    # Check that a cursor sees the expected values after a checkpoint.
    def ckpt_backup(self, check_value, valcnt, valcnt2, valcnt3):

        # Take a checkpoint.  Make a copy of the database.  Open the
        # copy and verify whether or not the expected data is in there.
        ckptcfg = 'use_timestamp=true'
        self.session.checkpoint(ckptcfg)
        self.backup_check(check_value, valcnt, valcnt2, valcnt3)

    def check_stable(self, check_value, valcnt, valcnt2, valcnt3):
        self.ckpt_backup(check_value, valcnt, valcnt2, valcnt3)

        # When reading as-of a timestamp, tables 1 and 3 should match (both
        # use timestamps and we're not running recovery, so logging behavior
        # should be irrelevant).
        self.check_reads(self.session, 'read_timestamp=' + self.stablets,
            check_value, valcnt, valcnt2, valcnt)

    def test_timestamp07(self):
        uri = self.uri + self.tablename
        uri2 = self.uri + self.tablename2
        uri3 = self.uri + self.tablename3
        self.moreinit()
        #
        # Open three tables:
        # 1. Table is not logged and uses timestamps.
        # 2. Table is logged and does not use timestamps.
        # 3. Table is logged and uses timestamps.
        #
        format = 'key_format={},value_format={}'.format(self.key_format, self.value_format)
        self.session.create(uri, format + ',log=(enabled=false)')
        c = self.session.open_cursor(uri)
        self.session.create(uri2, format)
        c2 = self.session.open_cursor(uri2)
        self.session.create(uri3, format)
        c3 = self.session.open_cursor(uri3)
        # print "tables created"

        # Insert keys 1..nkeys each with timestamp=key, in some order.
        orig_keys = list(range(1, self.nkeys+1))
        keys = orig_keys[:]
        random.shuffle(keys)

        for k in keys:
            c2[k] = self.value
            self.session.begin_transaction()
            c[k] = self.value
            c3[k] = self.value
            self.session.commit_transaction('commit_timestamp=' + self.timestamp_str(k))

        # print "value inserted in all tables, reading..."

        # Now check that we see the expected state when reading at each
        # timestamp.
        for k in orig_keys:
            self.check(self.session, 'read_timestamp=' + self.timestamp_str(k),
                k, self.value, None)
            self.check(self.session, 'read_timestamp=' + self.timestamp_str(k),
                k + 1, None, None if k == self.nkeys else 0)

        # print "all values read, updating timestamps"

        # Bump the oldest timestamp; we're not going back...
        self.assertTimestampsEqual(self.conn.query_timestamp(), self.timestamp_str(self.nkeys))
        self.oldts = self.stablets = self.timestamp_str(self.nkeys)
        self.conn.set_timestamp('oldest_timestamp=' + self.oldts)
        self.conn.set_timestamp('stable_timestamp=' + self.stablets)
        # print "Oldest " + self.oldts

        # print "inserting value2 in all tables"

        # Update them and retry.
        random.shuffle(keys)
        count = 0
        for k in keys:
            # Make sure a timestamp cursor is the last one to update.  This
            # tests the scenario for a bug we found where recovery replayed
            # the last record written into the log.
            #
            # print "Key " + str(k) + " to value2"
            c2[k] = self.value2
            self.session.begin_transaction()
            c[k] = self.value2
            c3[k] = self.value2
            ts = self.timestamp_str(k + self.nkeys)
            self.session.commit_transaction('commit_timestamp=' + ts)
            # print "Commit key " + str(k) + " ts " + ts
            count += 1

        # print "Updated " + str(count) + " keys to value2"

        # Take a checkpoint using the given configuration.  Then verify
        # whether value2 appears in a copy of that data or not.
        # print "check_stable 1"
        self.check_stable(self.value2, 0, self.nkeys, self.nkeys if self.using_log else 0)

        # Update the stable timestamp to the latest, but not the oldest
        # timestamp and make sure we can see the data.  Once the stable
        # timestamp is moved we should see all keys with value2.
        self.stablets = self.timestamp_str(self.nkeys*2)
        self.conn.set_timestamp('stable_timestamp=' + self.stablets)
        # print "check_stable 2"
        self.check_stable(self.value2, self.nkeys, self.nkeys, self.nkeys)

        # If we're not using the log we're done.
        if not self.using_log:
            return

        # Update the key and retry.  This time take a backup and recover.
        random.shuffle(keys)
        count = 0
        for k in keys:
            # Make sure a timestamp cursor is the last one to update.  This
            # tests the scenario for a bug we found where recovery replayed
            # the last record written into the log.
            #
            # print "Key " + str(k) + " to value3"
            c2[k] = self.value3
            self.session.begin_transaction()
            c[k] = self.value3
            c3[k] = self.value3
            ts = self.timestamp_str(k + self.nkeys*2)
            self.session.commit_transaction('commit_timestamp=' + ts)
            # print "Commit key " + str(k) + " ts " + ts
            count += 1

        # print "Updated " + str(count) + " keys to value3"

        # Flush the log but don't checkpoint
        self.session.log_flush('sync=on')

        # Take a backup and then verify whether value3 appears in a copy
        # of that data or not.  Both tables that are logged should see
        # all the data regardless of timestamps.  The table that is not
        # logged should not see any of it.
        # print "check_stable 3"
        self.check_stable(self.value3, 0, self.nkeys, self.nkeys)
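
# A minimal sketch of the timestamp round-trip the test above exercises:
# commit each write at an explicit timestamp, then read "as of" an earlier
# timestamp. The home directory and table name are illustrative, not part of
# the test suite; this assumes an existing empty 'WT_HOME_TS' directory.
import wiredtiger

def ts(n):
    # WiredTiger timestamp configuration strings are hexadecimal.
    return '%x' % n

conn = wiredtiger.wiredtiger_open('WT_HOME_TS', 'create')
session = conn.open_session()
session.create('table:ts_demo', 'key_format=i,value_format=S')
c = session.open_cursor('table:ts_demo')

session.begin_transaction()
c[1] = 'old'
session.commit_transaction('commit_timestamp=' + ts(10))
session.begin_transaction()
c[1] = 'new'
session.commit_transaction('commit_timestamp=' + ts(20))

# A reader positioned at timestamp 10 sees the first value, not the second.
session.begin_transaction('read_timestamp=' + ts(10))
assert c[1] == 'old'
session.rollback_transaction()
conn.close()
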
Example No. 12
class test_readonly01(wttest.WiredTigerTestCase, suite_subprocess):
    tablename = 'test_readonly01'
    create = True
    entries = 10000

    #
    # We want a list of directory modes: writable or readonly.
    #
    basecfg_list = [
        ('basecfg', dict(basecfg='config_base=true,')),
        ('no_basecfg', dict(basecfg='config_base=false,')),
    ]
    dir_list = [
        ('write', dict(dirchmod=False)),
        ('readonly', dict(dirchmod=True)),
    ]
    log_list = [
        ('logging', dict(logcfg='log=(archive=false,enabled,file_max=100K),')),
        ('no_logging', dict(logcfg='log=(enabled=false),')),
    ]

    types = [
        ('lsm',
         dict(tabletype='lsm',
              uri='lsm',
              create_params='key_format=i,value_format=i')),
        ('file-row',
         dict(tabletype='row',
              uri='file',
              create_params='key_format=i,value_format=i')),
        ('file-var',
         dict(tabletype='var',
              uri='file',
              create_params='key_format=r,value_format=i')),
        ('file-fix',
         dict(tabletype='fix',
              uri='file',
              create_params='key_format=r,value_format=8t')),
        ('table-row',
         dict(tabletype='row',
              uri='table',
              create_params='key_format=i,value_format=i')),
        ('table-var',
         dict(tabletype='var',
              uri='table',
              create_params='key_format=r,value_format=i')),
        ('table-fix',
         dict(tabletype='fix',
              uri='table',
              create_params='key_format=r,value_format=8t')),
    ]

    scenarios = make_scenarios(basecfg_list, dir_list, log_list, types)

    def conn_config(self):
        params = \
            'error_prefix="%s",' % self.shortid() + \
            '%s' % self.logcfg + \
            '%s' % self.basecfg
        if self.create:
            conn_params = 'create,' + params
        else:
            conn_params = 'readonly=true,' + params
        return conn_params

    def close_reopen(self):
        ''' Close the connection and reopen readonly'''
        #
        # close the original connection.  If needed, chmod the
        # database directory to readonly mode.  Then reopen the
        # connection with readonly.
        #
        self.close_conn()
        #
        # The chmod command is not fully portable to windows.
        #
        if self.dirchmod and os.name == 'posix':
            for f in os.listdir(self.home):
                if os.path.isfile(f):
                    os.chmod(f, 0o444)
            os.chmod(self.home, 0o555)
        self.conn = self.setUpConnectionOpen(self.home)
        self.session = self.setUpSessionOpen(self.conn)

    def readonly(self):
        # Here's the strategy:
        #    - Create a table.
        #    - Insert data into table.
        #    - Close connection.
        #    - Possibly chmod to readonly
        #    - Open connection readonly
        #    - Confirm we can read the data.
        #
        tablearg = self.uri + ':' + self.tablename
        self.session.create(tablearg, self.create_params)
        c = self.session.open_cursor(tablearg, None, None)
        for i in range(self.entries):
            c[i + 1] = i % 255
        # Close the connection.  Reopen readonly
        self.create = False
        self.close_reopen()
        c = self.session.open_cursor(tablearg, None, None)
        i = 0
        for key, value in c:
            self.assertEqual(i + 1, key)
            self.assertEqual(i % 255, value)
            i += 1
        self.assertEqual(i, self.entries)
        self.pr('Read %d entries' % i)
        c.close()
        self.create = True

    def test_readonly(self):
        if self.dirchmod and os.name == 'posix':
            with self.expectedStderrPattern('Permission'):
                self.readonly()
        else:
            self.readonly()
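
# A minimal sketch of the write-then-reopen-readonly pattern tested above.
# Home and table names are illustrative; this assumes an existing empty
# 'WT_HOME_RO' directory. Closing a connection checkpoints by default, so
# the data is durable before the readonly open.
import wiredtiger

conn = wiredtiger.wiredtiger_open('WT_HOME_RO', 'create')
session = conn.open_session()
session.create('table:ro_demo', 'key_format=i,value_format=i')
c = session.open_cursor('table:ro_demo')
for i in range(100):
    c[i + 1] = i
conn.close()

conn = wiredtiger.wiredtiger_open('WT_HOME_RO', 'readonly=true')
session = conn.open_session()
c = session.open_cursor('table:ro_demo')
assert c[50] == 49   # reads succeed; any write would return an error
conn.close()
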
Example No. 13
class test_alter04(wttest.WiredTigerTestCase):
    name = "alter04"
    entries = 100
    cache_alter = ('1M', '100K')
    # Settings for os_cache[_dirty]_max.
    types = [
        ('file', dict(uri='file:', use_cg=False, use_index=False)),
        ('lsm', dict(uri='lsm:', use_cg=False, use_index=False)),
        ('table-cg', dict(uri='table:', use_cg=True, use_index=False)),
        ('table-index', dict(uri='table:', use_cg=False, use_index=True)),
        ('table-simple', dict(uri='table:', use_cg=False, use_index=False)),
    ]
    sizes = [
        ('default', dict(ocreate='')),
        ('1M', dict(ocreate='1M')),
        ('200K', dict(ocreate='200K')),
    ]
    reopen = [
        ('no-reopen', dict(reopen=False)),
        ('reopen', dict(reopen=True)),
    ]
    settings = [
        ('cache', dict(setting='os_cache_max')),
        ('cache_dirty', dict(setting='os_cache_dirty_max')),
    ]
    scenarios = make_scenarios(types, sizes, reopen, settings)

    def verify_metadata(self, metastr):
        if metastr == '':
            return
        cursor = self.session.open_cursor('metadata:', None, None)
        #
        # Walk through all the metadata looking for the entries that are
        # the file URIs for components of the table.
        #
        found = False
        while True:
            ret = cursor.next()
            if ret != 0:
                break
            key = cursor.get_key()
            check_meta = ((key.find("lsm:") != -1 or key.find("file:") != -1) \
                and key.find(self.name) != -1)
            if check_meta:
                value = cursor[key]
                found = True
                self.assertTrue(value.find(metastr) != -1)
        cursor.close()
        self.assertTrue(found)

    # Alter: Change the setting after creation
    def test_alter04_cache(self):
        uri = self.uri + self.name
        create_params = 'key_format=i,value_format=i,'
        complex_params = ''
        #
        # If we're not explicitly setting the parameter, then don't
        # modify create_params to test using the default.
        #
        if self.ocreate != '':
            new_param = '%s=%s' % (self.setting, self.ocreate)
            create_params += '%s,' % new_param
            complex_params += '%s,' % new_param
        else:
            # NOTE: This is hard-coding the default value.  If the default
            # changes then this will fail and need to be fixed.
            new_param = '%s=0' % self.setting

        cgparam = ''
        if self.use_cg or self.use_index:
            cgparam = 'columns=(k,v),'
        if self.use_cg:
            cgparam += 'colgroups=(g0),'

        self.session.create(uri, create_params + cgparam)
        # Add in column group or index settings.
        if self.use_cg:
            cgparam = 'columns=(v),'
            suburi = 'colgroup:' + self.name + ':g0'
            self.session.create(suburi, complex_params + cgparam)
        if self.use_index:
            suburi = 'index:' + self.name + ':i0'
            self.session.create(suburi, complex_params + cgparam)

        # Put some data in table.
        c = self.session.open_cursor(uri, None)
        for k in range(self.entries):
            c[k + 1] = 1
        c.close()

        # Verify the string in the metadata
        self.verify_metadata(new_param)

        # Run through all combinations of the alter commands
        # for all allowed settings.
        for a in self.cache_alter:
            alter_param = '%s=%s' % (self.setting, a)
            self.session.alter(uri, alter_param)
            if self.reopen:
                self.reopen_conn()
            special = self.use_cg or self.use_index
            if not special:
                self.verify_metadata(alter_param)
            else:
                self.session.alter(suburi, alter_param)
                self.verify_metadata(alter_param)
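
# A minimal sketch of the alter-then-verify flow from the test above: change
# a setting on an existing table and read it back through the 'metadata:'
# cursor. Names are illustrative; this assumes an existing empty
# 'WT_HOME_ALTER' directory and the default file naming for simple tables
# (table 'alter_demo' is backed by 'file:alter_demo.wt').
import wiredtiger

conn = wiredtiger.wiredtiger_open('WT_HOME_ALTER', 'create')
session = conn.open_session()
session.create('table:alter_demo', 'key_format=i,value_format=i')
session.alter('table:alter_demo', 'os_cache_max=1M')

# The altered setting is recorded on the underlying file's metadata entry.
meta = session.open_cursor('metadata:', None, None)
meta.set_key('file:alter_demo.wt')
assert meta.search() == 0
assert 'os_cache_max=1M' in meta.get_value()
meta.close()
conn.close()
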
Example No. 14
class test_rollback_to_stable14(test_rollback_to_stable_base):
    session_config = 'isolation=snapshot'

    key_format_values = [
        ('column', dict(key_format='r')),
        ('integer_row', dict(key_format='i')),
    ]
    value_format='S'

    prepare_values = [
        ('no_prepare', dict(prepare=False)),
        ('prepare', dict(prepare=True))
    ]

    scenarios = make_scenarios(key_format_values, prepare_values)

    def conn_config(self):
        config = 'cache_size=25MB,statistics=(all),statistics_log=(json,on_close,wait=1),log=(enabled=true),timing_stress_for_test=[history_store_checkpoint_delay]'
        return config

    def test_rollback_to_stable(self):
        nrows = 100

        # Create a table without logging.
        self.pr("create/populate table")
        uri = "table:rollback_to_stable14"
        ds = SimpleDataSet(
            self, uri, 0, key_format=self.key_format, value_format=self.value_format,
            config='log=(enabled=false)')
        ds.populate()

        # Pin oldest and stable to timestamp 10.
        self.conn.set_timestamp('oldest_timestamp=' + self.timestamp_str(10) +
            ',stable_timestamp=' + self.timestamp_str(10))

        value_a = "aaaaa" * 100

        value_modQ = mod_val(value_a, 'Q', 0)
        value_modR = mod_val(value_modQ, 'R', 1)
        value_modS = mod_val(value_modR, 'S', 2)
        value_modT = mod_val(value_modS, 'T', 3)
        value_modW = mod_val(value_modT, 'W', 4)
        value_modX = mod_val(value_modW, 'X', 5)
        value_modY = mod_val(value_modX, 'Y', 6)
        value_modZ = mod_val(value_modY, 'Z', 7)

        # Perform a combination of modifies and updates.
        self.pr("large updates and modifies")
        self.large_updates(uri, value_a, ds, nrows, self.prepare, 20)
        self.large_modifies(uri, 'Q', ds, 0, 1, nrows, self.prepare, 30)
        self.large_modifies(uri, 'R', ds, 1, 1, nrows, self.prepare, 40)
        self.large_modifies(uri, 'S', ds, 2, 1, nrows, self.prepare, 50)
        self.large_modifies(uri, 'T', ds, 3, 1, nrows, self.prepare, 60)

        # Verify data is visible and correct.
        self.check(value_a, uri, nrows, None, 20)
        self.check(value_modQ, uri, nrows, None, 30)
        self.check(value_modR, uri, nrows, None, 40)
        self.check(value_modS, uri, nrows, None, 50)
        self.check(value_modT, uri, nrows, None, 60)

        # Pin stable to timestamp 60 if prepare otherwise 50.
        if self.prepare:
            self.conn.set_timestamp('stable_timestamp=' + self.timestamp_str(60))
        else:
            self.conn.set_timestamp('stable_timestamp=' + self.timestamp_str(50))

        # Create a checkpoint thread
        done = threading.Event()
        ckpt = checkpoint_thread(self.conn, done)
        try:
            self.pr("start checkpoint")
            ckpt.start()
            # Sleep for some time so that the checkpoint starts.
            time.sleep(2)

            # Perform several modifies in parallel with checkpoint.
            # Rollbacks may occur when checkpoint is running, so retry as needed.
            self.pr("modifies")
            self.retry_rollback('modify ds1, W', None,
                           lambda: self.large_modifies(uri, 'W', ds, 4, 1, nrows, self.prepare, 70))
            self.evict_cursor(uri, nrows, value_modW)
            self.retry_rollback('modify ds1, X', None,
                           lambda: self.large_modifies(uri, 'X', ds, 5, 1, nrows, self.prepare, 80))
            self.evict_cursor(uri, nrows, value_modX)
            self.retry_rollback('modify ds1, Y', None,
                           lambda: self.large_modifies(uri, 'Y', ds, 6, 1, nrows, self.prepare, 90))
            self.evict_cursor(uri, nrows, value_modY)
            self.retry_rollback('modify ds1, Z', None,
                           lambda: self.large_modifies(uri, 'Z', ds, 7, 1, nrows, self.prepare, 100))
            self.evict_cursor(uri, nrows, value_modZ)
        finally:
            done.set()
            ckpt.join()

        # Simulate a server crash and restart.
        self.pr("restart")
        simulate_crash_restart(self, ".", "RESTART")
        self.pr("restart complete")

        stat_cursor = self.session.open_cursor('statistics:', None, None)
        calls = stat_cursor[stat.conn.txn_rts][2]
        hs_removed = stat_cursor[stat.conn.txn_rts_hs_removed][2]
        hs_sweep = stat_cursor[stat.conn.txn_rts_sweep_hs_keys][2]
        hs_restore_updates = stat_cursor[stat.conn.txn_rts_hs_restore_updates][2]
        keys_removed = stat_cursor[stat.conn.txn_rts_keys_removed][2]
        keys_restored = stat_cursor[stat.conn.txn_rts_keys_restored][2]
        pages_visited = stat_cursor[stat.conn.txn_rts_pages_visited][2]
        upd_aborted = stat_cursor[stat.conn.txn_rts_upd_aborted][2]
        stat_cursor.close()

        self.assertEqual(calls, 0)
        self.assertEqual(keys_removed, 0)
        self.assertEqual(hs_restore_updates, nrows)
        self.assertEqual(keys_restored, 0)
        if self.prepare:
            self.assertGreaterEqual(upd_aborted, 0)
        else:
            self.assertEqual(upd_aborted, 0)
        self.assertGreater(pages_visited, 0)
        self.assertGreaterEqual(hs_removed, nrows)
        self.assertGreaterEqual(hs_sweep, 0)

        # Check that the correct data is seen at and after the stable timestamp.
        self.check(value_a, uri, nrows, None, 20)
        self.check(value_modQ, uri, nrows, None, 30)
        self.check(value_modR, uri, nrows, None, 40)
        self.check(value_modS, uri, nrows, None, 50)

        # The test may output the following message in eviction under cache pressure. Ignore that.
        self.ignoreStdoutPatternIfExists("oldest pinned transaction ID rolled back for eviction")

    def test_rollback_to_stable_same_ts(self):
        nrows = 100

        # Create a table without logging.
        self.pr("create/populate table")
        uri = "table:rollback_to_stable14"
        ds = SimpleDataSet(
            self, uri, 0, key_format=self.key_format, value_format=self.value_format,
            config='log=(enabled=false)')
        ds.populate()

        # Pin oldest and stable to timestamp 10.
        self.conn.set_timestamp('oldest_timestamp=' + self.timestamp_str(10) +
            ',stable_timestamp=' + self.timestamp_str(10))

        value_a = "aaaaa" * 100

        value_modQ = mod_val(value_a, 'Q', 0)
        value_modR = mod_val(value_modQ, 'R', 1)
        value_modS = mod_val(value_modR, 'S', 2)
        value_modT = mod_val(value_modS, 'T', 3)
        value_modW = mod_val(value_modT, 'W', 4)
        value_modX = mod_val(value_modW, 'X', 5)
        value_modY = mod_val(value_modX, 'Y', 6)
        value_modZ = mod_val(value_modY, 'Z', 7)

        # Perform a combination of modifies and updates.
        self.pr("large updates and modifies")
        self.large_updates(uri, value_a, ds, nrows, self.prepare, 20)
        self.large_modifies(uri, 'Q', ds, 0, 1, nrows, self.prepare, 30)
        # Prepare cannot always use the same timestamp, so use different timestamps that will be aborted.
        if self.prepare:
            self.large_modifies(uri, 'R', ds, 1, 1, nrows, self.prepare, 51)
            self.large_modifies(uri, 'S', ds, 2, 1, nrows, self.prepare, 55)
            self.large_modifies(uri, 'T', ds, 3, 1, nrows, self.prepare, 60)
        else:
            self.large_modifies(uri, 'R', ds, 1, 1, nrows, self.prepare, 60)
            self.large_modifies(uri, 'S', ds, 2, 1, nrows, self.prepare, 60)
            self.large_modifies(uri, 'T', ds, 3, 1, nrows, self.prepare, 60)

        # Verify data is visible and correct.
        self.check(value_a, uri, nrows, None, 20)
        self.check(value_modQ, uri, nrows, None, 30)
        self.check(value_modT, uri, nrows, None, 60)

        self.conn.set_timestamp('stable_timestamp=' + self.timestamp_str(50))

        # Create a checkpoint thread
        done = threading.Event()
        ckpt = checkpoint_thread(self.conn, done)
        try:
            self.pr("start checkpoint")
            ckpt.start()
            # Sleep for some time so that the checkpoint starts.
            time.sleep(2)

            # Perform several modifies in parallel with checkpoint.
            # Rollbacks may occur when checkpoint is running, so retry as needed.
            self.pr("modifies")
            self.retry_rollback('modify ds1, W', None,
                           lambda: self.large_modifies(uri, 'W', ds, 4, 1, nrows, self.prepare, 70))
            self.evict_cursor(uri, nrows, value_modW)
            self.retry_rollback('modify ds1, X', None,
                           lambda: self.large_modifies(uri, 'X', ds, 5, 1, nrows, self.prepare, 80))
            self.evict_cursor(uri, nrows, value_modX)
            self.retry_rollback('modify ds1, Y', None,
                           lambda: self.large_modifies(uri, 'Y', ds, 6, 1, nrows, self.prepare, 90))
            self.evict_cursor(uri, nrows, value_modY)
            self.retry_rollback('modify ds1, Z', None,
                           lambda: self.large_modifies(uri, 'Z', ds, 7, 1, nrows, self.prepare, 100))
            self.evict_cursor(uri, nrows, value_modZ)
        finally:
            done.set()
            ckpt.join()

        # Simulate a server crash and restart.
        self.pr("restart")
        simulate_crash_restart(self, ".", "RESTART")
        self.pr("restart complete")

        stat_cursor = self.session.open_cursor('statistics:', None, None)
        calls = stat_cursor[stat.conn.txn_rts][2]
        hs_removed = stat_cursor[stat.conn.txn_rts_hs_removed][2]
        hs_restore_updates = stat_cursor[stat.conn.txn_rts_hs_restore_updates][2]
        hs_sweep = stat_cursor[stat.conn.txn_rts_sweep_hs_keys][2]
        keys_removed = stat_cursor[stat.conn.txn_rts_keys_removed][2]
        keys_restored = stat_cursor[stat.conn.txn_rts_keys_restored][2]
        pages_visited = stat_cursor[stat.conn.txn_rts_pages_visited][2]
        upd_aborted = stat_cursor[stat.conn.txn_rts_upd_aborted][2]
        stat_cursor.close()

        self.assertEqual(calls, 0)
        self.assertEqual(keys_removed, 0)
        self.assertEqual(hs_restore_updates, nrows)
        self.assertEqual(keys_restored, 0)
        if self.prepare:
            self.assertGreaterEqual(upd_aborted, 0)
        else:
            self.assertEqual(upd_aborted, 0)
        self.assertGreater(pages_visited, 0)
        self.assertGreaterEqual(hs_removed, nrows * 3)
        self.assertGreaterEqual(hs_sweep, 0)

        # Check that the correct data is seen at and after the stable timestamp.
        self.check(value_a, uri, nrows, None, 20)
        self.check(value_modQ, uri, nrows, None, 30)

        # The test may output the following message in eviction under cache pressure. Ignore that.
        self.ignoreStdoutPatternIfExists("oldest pinned transaction ID rolled back for eviction")

    def test_rollback_to_stable_same_ts_append(self):
        nrows = 100

        # Create a table without logging.
        self.pr("create/populate table")
        uri = "table:rollback_to_stable14"
        ds = SimpleDataSet(
            self, uri, 0, key_format=self.key_format, value_format=self.value_format,
            config='log=(enabled=false)')
        ds.populate()

        # Pin oldest and stable to timestamp 10.
        self.conn.set_timestamp('oldest_timestamp=' + self.timestamp_str(10) +
            ',stable_timestamp=' + self.timestamp_str(10))

        value_a = "aaaaa" * 100

        value_modQ = append_val(value_a, 'Q')
        value_modR = append_val(value_modQ, 'R')
        value_modS = append_val(value_modR, 'S')
        value_modT = append_val(value_modS, 'T')
        value_modW = append_val(value_modT, 'W')
        value_modX = append_val(value_modW, 'X')
        value_modY = append_val(value_modX, 'Y')
        value_modZ = append_val(value_modY, 'Z')

        # Perform a combination of modifies and updates.
        self.pr("large updates and modifies")
        self.large_updates(uri, value_a, ds, nrows, self.prepare, 20)
        self.large_modifies(uri, 'Q', ds, len(value_a), 1, nrows, self.prepare, 30)
        # Prepare cannot always use the same timestamp, so use different timestamps that will be aborted.
        if self.prepare:
            self.large_modifies(uri, 'R', ds, len(value_modQ), 1, nrows, self.prepare, 51)
            self.large_modifies(uri, 'S', ds, len(value_modR), 1, nrows, self.prepare, 55)
            self.large_modifies(uri, 'T', ds, len(value_modS), 1, nrows, self.prepare, 60)
        else:
            self.large_modifies(uri, 'R', ds, len(value_modQ), 1, nrows, self.prepare, 60)
            self.large_modifies(uri, 'S', ds, len(value_modR), 1, nrows, self.prepare, 60)
            self.large_modifies(uri, 'T', ds, len(value_modS), 1, nrows, self.prepare, 60)

        # Verify data is visible and correct.
        self.check(value_a, uri, nrows, None, 20)
        self.check(value_modQ, uri, nrows, None, 30)
        self.check(value_modT, uri, nrows, None, 60)

        self.conn.set_timestamp('stable_timestamp=' + self.timestamp_str(50))

        # Create a checkpoint thread
        done = threading.Event()
        ckpt = checkpoint_thread(self.conn, done)
        try:
            self.pr("start checkpoint")
            ckpt.start()
            # Sleep for some time so that the checkpoint starts.
            time.sleep(2)

            # Perform several modifies in parallel with checkpoint.
            # Rollbacks may occur when checkpoint is running, so retry as needed.
            self.pr("modifies")
            self.retry_rollback('modify ds1, W', None,
                           lambda: self.large_modifies(uri, 'W', ds, len(value_modT), 1, nrows, self.prepare, 70))
            self.retry_rollback('modify ds1, X', None,
                           lambda: self.large_modifies(uri, 'X', ds, len(value_modW), 1, nrows, self.prepare, 80))
            self.evict_cursor(uri, nrows, value_modX)
            self.retry_rollback('modify ds1, Y', None,
                           lambda: self.large_modifies(uri, 'Y', ds, len(value_modX), 1, nrows, self.prepare, 90))
            self.retry_rollback('modify ds1, Z', None,
                           lambda: self.large_modifies(uri, 'Z', ds, len(value_modY), 1, nrows, self.prepare, 100))
            self.evict_cursor(uri, nrows, value_modZ)
        finally:
            done.set()
            ckpt.join()

        # Simulate a server crash and restart.
        self.pr("restart")
        simulate_crash_restart(self, ".", "RESTART")
        self.pr("restart complete")

        stat_cursor = self.session.open_cursor('statistics:', None, None)
        calls = stat_cursor[stat.conn.txn_rts][2]
        hs_removed = stat_cursor[stat.conn.txn_rts_hs_removed][2]
        hs_restore_updates = stat_cursor[stat.conn.txn_rts_hs_restore_updates][2]
        hs_sweep = stat_cursor[stat.conn.txn_rts_sweep_hs_keys][2]
        keys_removed = stat_cursor[stat.conn.txn_rts_keys_removed][2]
        keys_restored = stat_cursor[stat.conn.txn_rts_keys_restored][2]
        pages_visited = stat_cursor[stat.conn.txn_rts_pages_visited][2]
        upd_aborted = stat_cursor[stat.conn.txn_rts_upd_aborted][2]
        stat_cursor.close()

        self.assertEqual(calls, 0)
        self.assertEqual(keys_removed, 0)
        self.assertEqual(hs_restore_updates, nrows)
        self.assertEqual(keys_restored, 0)
        if self.prepare:
            self.assertGreaterEqual(upd_aborted, 0)
        else:
            self.assertEqual(upd_aborted, 0)
        self.assertGreater(pages_visited, 0)
        self.assertGreaterEqual(hs_removed, nrows * 3)
        self.assertGreaterEqual(hs_sweep, 0)

        # Check that the correct data is seen at and after the stable timestamp.
        self.check(value_a, uri, nrows, None, 20)
        self.check(value_modQ, uri, nrows, None, 30)

        # The test may output the following message in eviction under cache pressure. Ignore that.
        self.ignoreStdoutPatternIfExists("oldest pinned transaction ID rolled back for eviction")
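
# A minimal sketch of what rollback-to-stable does with the timestamps used
# above: updates whose timestamp is newer than the stable timestamp are
# discarded. Names are illustrative; this assumes an existing empty
# 'WT_HOME_RTS' directory.
import wiredtiger

def ts(n):
    return '%x' % n

conn = wiredtiger.wiredtiger_open('WT_HOME_RTS', 'create')
session = conn.open_session()
session.create('table:rts_demo', 'key_format=i,value_format=S,log=(enabled=false)')
c = session.open_cursor('table:rts_demo')

session.begin_transaction()
c[1] = 'stable'
session.commit_transaction('commit_timestamp=' + ts(10))
conn.set_timestamp('stable_timestamp=' + ts(10))

session.begin_transaction()
c[1] = 'unstable'
session.commit_transaction('commit_timestamp=' + ts(20))

c.close()                      # rollback_to_stable needs quiescent cursors
conn.rollback_to_stable()
c = session.open_cursor('table:rts_demo')
assert c[1] == 'stable'        # the update at timestamp 20 was rolled back
conn.close()
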
Example No. 15
class test_rollback_to_stable06(test_rollback_to_stable_base):

    format_values = [
        ('column', dict(key_format='r', value_format='S')),
        ('column_fix', dict(key_format='r', value_format='8t')),
        ('row_integer', dict(key_format='i', value_format='S')),
    ]

    in_memory_values = [('no_inmem', dict(in_memory=False)),
                        ('inmem', dict(in_memory=True))]

    prepare_values = [('no_prepare', dict(prepare=False)),
                      ('prepare', dict(prepare=True))]

    scenarios = make_scenarios(format_values, in_memory_values, prepare_values)

    def conn_config(self):
        config = 'cache_size=50MB,statistics=(all)'
        if self.in_memory:
            config += ',in_memory=true'
        else:
            config += ',log=(enabled),in_memory=false'
        return config

    def test_rollback_to_stable(self):
        nrows = 1000

        # Create a table without logging.
        uri = "table:rollback_to_stable06"
        ds = SimpleDataSet(self,
                           uri,
                           0,
                           key_format=self.key_format,
                           value_format=self.value_format,
                           config='log=(enabled=false)')
        ds.populate()

        if self.value_format == '8t':
            value_a = 97
            value_b = 98
            value_c = 99
            value_d = 100
        else:
            value_a = "aaaaa" * 100
            value_b = "bbbbb" * 100
            value_c = "ccccc" * 100
            value_d = "ddddd" * 100

        # Pin oldest and stable to timestamp 10.
        self.conn.set_timestamp('oldest_timestamp=' + self.timestamp_str(10) +
                                ',stable_timestamp=' + self.timestamp_str(10))

        # Perform several updates.
        self.large_updates(uri, value_a, ds, nrows, self.prepare, 20)
        self.large_updates(uri, value_b, ds, nrows, self.prepare, 30)
        self.large_updates(uri, value_c, ds, nrows, self.prepare, 40)
        self.large_updates(uri, value_d, ds, nrows, self.prepare, 50)

        # Verify data is visible and correct.
        self.check(value_a, uri, nrows, None, 20)
        self.check(value_b, uri, nrows, None, 30)
        self.check(value_c, uri, nrows, None, 40)
        self.check(value_d, uri, nrows, None, 50)

        # Checkpoint to ensure the data is flushed, then rollback to the stable timestamp.
        if not self.in_memory:
            self.session.checkpoint()
        self.conn.rollback_to_stable()

        # Check that all keys are removed.
        # (For FLCS, at least for now, they will read back as 0, meaning deleted, rather
        # than disappear.)
        self.check(value_a, uri, 0, nrows, 20)
        self.check(value_b, uri, 0, nrows, 30)
        self.check(value_c, uri, 0, nrows, 40)
        self.check(value_d, uri, 0, nrows, 50)

        stat_cursor = self.session.open_cursor('statistics:', None, None)
        calls = stat_cursor[stat.conn.txn_rts][2]
        hs_removed = stat_cursor[stat.conn.txn_rts_hs_removed][2]
        keys_removed = stat_cursor[stat.conn.txn_rts_keys_removed][2]
        keys_restored = stat_cursor[stat.conn.txn_rts_keys_restored][2]
        pages_visited = stat_cursor[stat.conn.txn_rts_pages_visited][2]
        upd_aborted = stat_cursor[stat.conn.txn_rts_upd_aborted][2]
        stat_cursor.close()

        self.assertEqual(calls, 1)
        self.assertEqual(keys_restored, 0)
        self.assertGreater(pages_visited, 0)
        self.assertGreaterEqual(keys_removed, 0)
        if self.in_memory:
            self.assertEqual(upd_aborted, nrows * 4)
            self.assertEqual(hs_removed, 0)
        else:
            self.assertGreaterEqual(upd_aborted + hs_removed + keys_removed,
                                    nrows * 4)
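
# A minimal sketch of the statistics pattern behind the assertions above:
# a 'statistics:' cursor maps a statistic key to a (description, printable
# value, numeric value) triple, hence the trailing [2]. This assumes a
# connection opened with 'statistics=(all)' on an existing empty
# 'WT_HOME_STAT' directory (an illustrative name).
import wiredtiger
from wiredtiger import stat

conn = wiredtiger.wiredtiger_open('WT_HOME_STAT', 'create,statistics=(all)')
session = conn.open_session()
stat_cursor = session.open_cursor('statistics:', None, None)
rts_calls = stat_cursor[stat.conn.txn_rts][2]
stat_cursor.close()
assert rts_calls == 0   # rollback-to-stable has not run on this database
conn.close()
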
Example No. 16
class test_compact02(wttest.WiredTigerTestCase):

    types = [
        ('table', dict(uri='table:test_compact02')),
    ]
    cacheSize = [
        ('default', dict(cacheSize='')),
        ('1mb', dict(cacheSize='cache_size=1MB')),
        ('10gb', dict(cacheSize='cache_size=10GB')),
    ]

    # There's a balance between the pages we create and the size of the records
    # being stored: compaction doesn't work on tables with many overflow items
    # because we don't rewrite them. Experimentally, 8KB is as small as the test
    # can go. Additionally, we can't set the maximum page size too large because
    # there won't be enough pages to rewrite. Experimentally, 128KB works.
    fileConfig = [
        ('default', dict(fileConfig='')),
        ('8KB', dict(fileConfig='leaf_page_max=8kb')),
        ('64KB', dict(fileConfig='leaf_page_max=64KB')),
        ('128KB', dict(fileConfig='leaf_page_max=128KB')),
    ]
    scenarios = make_scenarios(types, cacheSize, fileConfig)

    # We want about 22K records that total about 130MB.  That is an average
    # of 6196 bytes per record.  Half the records should be smaller, about
    # 2700 bytes (about 30MB), and the other half should be larger, 9666 bytes
    # per record (about 100MB).
    #
    # Test flow is as follows.
    #
    # 1. Create a table with the data, alternating record size.
    # 2. Checkpoint and get stats on the table to confirm the size.
    # 3. Delete the half of the records with the larger record size.
    # 4. Checkpoint so compact finds something to work with.
    # 5. Call compact.
    # 6. Get stats on compacted table.
    #
    nrecords = 22000
    bigvalue = "abcdefghi" * 1074  # 9*1074 == 9666
    smallvalue = "ihgfedcba" * 303  # 9*303 == 2727

    fullsize = nrecords // 2 * len(bigvalue) + nrecords // 2 * len(smallvalue)

    # Return the size of the file
    def getSize(self):
        # To allow this to work on systems without ftruncate,
        # get the portion of the file allocated, via 'statistics=(all)',
        # not the physical file size, via 'statistics=(size)'.
        cstat = self.session.open_cursor('statistics:' + self.uri, None,
                                         'statistics=(all)')
        sz = cstat[stat.dsrc.block_size][2]
        cstat.close()
        return sz

    # This test varies the cache size and so needs to set up its own connection.
    # Override the standard methods.
    def setUpConnectionOpen(self, dir):
        return None

    def setUpSessionOpen(self, conn):
        return None

    def ConnectionOpen(self, cacheSize):
        self.home = '.'
        conn_params = 'create,' + \
            cacheSize + ',error_prefix="%s: ",' % self.shortid() + \
            'statistics=(all),' + \
            'eviction_dirty_target=99,eviction_dirty_trigger=99'
        try:
            self.conn = wiredtiger.wiredtiger_open(self.home, conn_params)
        except wiredtiger.WiredTigerError as e:
            print "Failed conn at '%s' with config '%s'" % (dir, conn_params)
        self.session = self.conn.open_session(None)

    # Create a table, add keys with both big and small values.
    def test_compact02(self):
        self.ConnectionOpen(self.cacheSize)

        mb = 1024 * 1024
        params = 'key_format=i,value_format=S,' + self.fileConfig

        # 1. Create a table with the data, alternating record size.
        self.session.create(self.uri, params)
        c = self.session.open_cursor(self.uri, None)
        for i in range(self.nrecords):
            if i % 2 == 0:
                c[i] = str(i) + self.bigvalue
            else:
                c[i] = str(i) + self.smallvalue
        c.close()

        # 2. Checkpoint and get stats on the table to confirm the size.
        self.session.checkpoint()
        sz = self.getSize()
        self.pr('After populate ' + str(sz // mb) + 'MB')
        self.assertGreater(sz, self.fullsize)

        # 3. Delete the half of the records with the larger record size.
        c = self.session.open_cursor(self.uri, None)
        count = 0
        for i in range(self.nrecords):
            if i % 2 == 0:
                count += 1
                c.set_key(i)
                c.remove()
        c.close()
        self.pr('Removed total ' + str((count * 9666) // mb) + 'MB')

        # 4. Checkpoint
        self.session.checkpoint()

        # 5. Call compact.
        self.session.compact(self.uri, None)

        # 6. Get stats on compacted table.
        sz = self.getSize()
        self.pr('After compact ' + str(sz // mb) + 'MB')

        # After compact, the file size should be less than half the full size.
        self.assertLess(sz, self.fullsize / 2)
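
# A minimal sketch of the delete/checkpoint/compact sequence the test drives:
# removing rows alone frees nothing on disk; a checkpoint makes the space
# obsolete so compact can reclaim it. Names are illustrative; this assumes an
# existing empty 'WT_HOME_COMPACT' directory.
import wiredtiger

conn = wiredtiger.wiredtiger_open('WT_HOME_COMPACT', 'create,statistics=(all)')
session = conn.open_session()
session.create('table:compact_demo', 'key_format=i,value_format=S')
c = session.open_cursor('table:compact_demo')
for i in range(10000):
    c[i] = 'x' * 1000
for i in range(0, 10000, 2):   # delete half of the rows
    c.set_key(i)
    c.remove()
c.close()

session.checkpoint()           # give compact obsolete space to work with
session.compact('table:compact_demo', None)
conn.close()
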
Example No. 17
class test_prepare_lookaside02(wttest.WiredTigerTestCase, suite_subprocess):
    tablename = 'test_prepare_cursor'
    uri = 'table:' + tablename
    txn_config = 'isolation=snapshot'

    types = [
        ('col',
         dict(s_config='value_format=i,log=(enabled=false),key_format=r')),
        ('row',
         dict(s_config='key_format=i,value_format=i,log=(enabled=false)')),
        ('lsm',
         dict(s_config=
              'key_format=i, value_format=i,log=(enabled=false),type=lsm')),
    ]

    # Transaction end types
    txn_end = [
        ('txn_commit', dict(txn_commit=True)),
        ('txn_rollback', dict(txn_commit=False)),
    ]

    scenarios = make_scenarios(types, txn_end)

    def test_prepare_conflict(self):
        self.session.create(self.uri, self.s_config)
        c = self.session.open_cursor(self.uri)

        # Insert keys 1..100 each with timestamp=key, in some order
        orig_keys = list(range(1, 101))
        keys = orig_keys[:]
        random.shuffle(keys)

        # Scenario: 1
        # Check insert operation
        self.session.begin_transaction(self.txn_config)
        c[1] = 1
        # update the value within this transaction
        self.session.prepare_transaction('prepare_timestamp=' +
                                         timestamp_str(100))
        if self.txn_commit:
            self.session.commit_transaction('commit_timestamp=' +
                                            timestamp_str(101) +
                                            ',durable_timestamp=' +
                                            timestamp_str(101))
        else:
            self.session.rollback_transaction()

        # Trigger a checkpoint, which could trigger reconciliation
        self.conn.set_timestamp('stable_timestamp=' + timestamp_str(150))
        self.conn.set_timestamp('oldest_timestamp=' + timestamp_str(150))
        self.session.checkpoint()

        # Scenario: 2
        # Check update operation
        #   update an existing key.
        #   update a newly inserted key within this transaction
        self.session.begin_transaction(self.txn_config)
        # update a committed value, key 1 is inserted above.
        c[1] = 2
        # update an uncommitted value, insert and update a key.
        c[2] = 1
        c[2] = 2
        self.session.prepare_transaction('prepare_timestamp=' +
                                         timestamp_str(200))
        if self.txn_commit:
            self.session.commit_transaction('commit_timestamp=' +
                                            timestamp_str(201) +
                                            ',durable_timestamp=' +
                                            timestamp_str(201))
        else:
            self.session.rollback_transaction()

        # Trigger a checkpoint, which could trigger reconciliation
        self.conn.set_timestamp('stable_timestamp=' + timestamp_str(250))
        self.conn.set_timestamp('oldest_timestamp=' + timestamp_str(250))
        self.session.checkpoint()

        # Scenario: 3
        # Check remove operation
        #   remove an existing key.
        #   remove a previously updated key.
        #   remove a newly inserted and updated key.
        self.session.begin_transaction(self.txn_config)
        # update a committed value, key 1 is inserted above.
        c.set_key(1)
        c.remove()
        c.set_key(2)
        c.remove()
        c[3] = 1
        c[3] = 2
        c.set_key(3)
        c.remove()
        self.session.prepare_transaction('prepare_timestamp=' +
                                         timestamp_str(300))
        if self.txn_commit:
            self.session.commit_transaction('commit_timestamp=' +
                                            timestamp_str(301) +
                                            ',durable_timestamp=' +
                                            timestamp_str(301))
        else:
            self.session.rollback_transaction()

        # commit some keys, to generate the update chain subsequently.
        self.session.begin_transaction(self.txn_config)
        c[1] = 1
        c[2] = 1
        c[3] = 1
        self.session.commit_transaction('commit_timestamp=' +
                                        timestamp_str(301))

        # Trigger a checkpoint, which could trigger reconciliation
        self.conn.set_timestamp('stable_timestamp=' + timestamp_str(350))
        self.conn.set_timestamp('oldest_timestamp=' + timestamp_str(350))
        self.session.checkpoint()

        # Scenario: 4
        # Check update operation on a checkpointed key. Re-opening facilitates
        # creating a modify update chain for the key instead of an insert
        # update chain.
        self.reopen_conn()
        self.conn.set_timestamp('stable_timestamp=' + timestamp_str(350))
        self.conn.set_timestamp('oldest_timestamp=' + timestamp_str(350))

        self.session.create(self.uri, self.s_config)
        cur = self.session.open_cursor(self.uri)
        self.session.begin_transaction(self.txn_config)
        cur[1] = 2
        cur[2] = 2
        cur[3] = 2
        # Update a key twice
        cur[2] = 3
        # Remove an updated key
        cur.set_key(3)
        cur.remove()
        self.session.prepare_transaction('prepare_timestamp=' +
                                         timestamp_str(400))
        if self.txn_commit:
            self.session.commit_transaction('commit_timestamp=' +
                                            timestamp_str(401) +
                                            ',durable_timestamp=' +
                                            timestamp_str(401))
        else:
            self.session.rollback_transaction()

        # Trigger a checkpoint, which could trigger reconciliation
        self.conn.set_timestamp('stable_timestamp=' + timestamp_str(450))
        self.conn.set_timestamp('oldest_timestamp=' + timestamp_str(450))
        self.session.checkpoint()

        cur.close()
        self.session.close()
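
# A minimal sketch of the prepared-transaction lifecycle driven above: after
# prepare_transaction() the transaction must be resolved by either a commit
# (with commit and durable timestamps at or after the prepare timestamp) or
# a rollback. Names are illustrative; this assumes an existing empty
# 'WT_HOME_PREPARE' directory.
import wiredtiger

def ts(n):
    return '%x' % n

conn = wiredtiger.wiredtiger_open('WT_HOME_PREPARE', 'create')
session = conn.open_session('isolation=snapshot')
session.create('table:prepare_demo', 'key_format=i,value_format=i')
c = session.open_cursor('table:prepare_demo')

session.begin_transaction()
c[1] = 1
session.prepare_transaction('prepare_timestamp=' + ts(100))
session.commit_transaction('commit_timestamp=' + ts(101) +
                           ',durable_timestamp=' + ts(101))
conn.close()
Example No. 18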
class test_durable_rollback_to_stable(wttest.WiredTigerTestCase,
                                      suite_subprocess):
    session_config = 'isolation=snapshot'

    keyfmt = [
        ('row-string', dict(keyfmt='S')),
        ('row-int', dict(keyfmt='i')),
        # The commented-out columnar test needs to be enabled once rollback to stable for columnar is fixed (WT-5548).
        #    ('column-store', dict(keyfmt='r')),
    ]
    types = [
        ('file', dict(uri='file', ds=SimpleDataSet)),
        ('lsm', dict(uri='lsm', ds=SimpleDataSet)),
        ('table-simple', dict(uri='table', ds=SimpleDataSet)),
    ]

    iso_types = [('isolation_read_committed',
                  dict(isolation='read-committed')),
                 ('isolation_default', dict(isolation='')),
                 ('isolation_snapshot', dict(isolation='snapshot'))]
    scenarios = make_scenarios(types, keyfmt, iso_types)

    def skip(self):
        return self.keyfmt == 'r' and \
            (self.ds.is_lsm() or self.uri == 'lsm')

    # Test durable timestamp.
    def test_durable_rollback_to_stable(self):
        if self.skip():
            return

        # Build an object.
        uri = self.uri + ':test_durable_rollback_to_stable'
        ds = self.ds(self, uri, 50, key_format=self.keyfmt)
        ds.populate()

        session = self.conn.open_session(self.session_config)
        cursor = session.open_cursor(uri, None)

        # Set stable timestamp to checkpoint initial data set.
        self.conn.set_timestamp('stable_timestamp=' + timestamp_str(100))
        self.session.checkpoint()

        # Update all values with value 111, i.e. the first update value.
        session.begin_transaction()
        self.assertEquals(cursor.next(), 0)
        for i in range(1, 50):
            cursor.set_value(ds.value(111))
            self.assertEquals(cursor.update(), 0)
            self.assertEquals(cursor.next(), 0)

        session.prepare_transaction('prepare_timestamp=' + timestamp_str(150))
        session.timestamp_transaction('commit_timestamp=' + timestamp_str(200))
        session.timestamp_transaction('durable_timestamp=' +
                                      timestamp_str(220))
        session.commit_transaction()

        # Check the values read are correct with different timestamps.
        # Read the initial dataset.
        self.assertEquals(cursor.reset(), 0)
        session.begin_transaction('read_timestamp=' + timestamp_str(150))
        self.assertEquals(cursor.next(), 0)
        for i in range(1, 50):
            self.assertEquals(cursor.get_value(), ds.value(i))
            self.assertEquals(cursor.next(), 0)
        session.commit_transaction()

        # Read the first update value with timestamp.
        self.assertEquals(cursor.reset(), 0)
        session.begin_transaction('read_timestamp=' + timestamp_str(200))
        self.assertEquals(cursor.next(), 0)
        for i in range(1, 50):
            self.assertEquals(cursor.get_value(), ds.value(111))
            self.assertEquals(cursor.next(), 0)
        session.commit_transaction()

        # Check that the latest value is the same as the first update value.
        self.assertEquals(cursor.reset(), 0)
        session.begin_transaction()
        self.assertEquals(cursor.next(), 0)
        for i in range(1, 50):
            self.assertEquals(cursor.get_value(), ds.value(111))
            self.assertEquals(cursor.next(), 0)
        session.commit_transaction()

        # Set a stable timestamp so that first update value is durable.
        self.conn.set_timestamp('stable_timestamp=' + timestamp_str(250))

        # Update all values with value 222, i.e. the second update value.
        self.assertEquals(cursor.reset(), 0)
        session.begin_transaction()
        self.assertEquals(cursor.next(), 0)
        for i in range(1, 50):
            cursor.set_value(ds.value(222))
            self.assertEquals(cursor.update(), 0)
            self.assertEquals(cursor.next(), 0)

        session.prepare_transaction('prepare_timestamp=' + timestamp_str(200))

        # The commit timestamp is earlier than the stable timestamp, but the
        # durable timestamp is later than the stable timestamp. Hence the
        # second update value is not durable.
        session.timestamp_transaction('commit_timestamp=' + timestamp_str(240))
        session.timestamp_transaction('durable_timestamp=' +
                                      timestamp_str(300))
        session.commit_transaction()

        # Checkpoint so that the first update value will be visible and
        # durable, but the second update value will be visible yet not durable.
        self.session.checkpoint()

        # Check that second update value is visible.
        self.assertEquals(cursor.reset(), 0)
        self.assertEquals(cursor.next(), 0)
        for i in range(1, 50):
            self.assertEquals(cursor.get_value(), ds.value(222))
            self.assertEquals(cursor.next(), 0)

        cursor.close()
        session.close()

        # Check that the second update value was not durable by rolling back to stable.
        self.conn.rollback_to_stable()
        session = self.conn.open_session(self.session_config)
        cursor = session.open_cursor(uri, None)
        self.conn.set_timestamp('stable_timestamp=' + timestamp_str(250))
        self.conn.set_timestamp('oldest_timestamp=' + timestamp_str(250))
        self.assertEquals(cursor.next(), 0)
        for i in range(1, 50):
            self.assertEquals(cursor.get_value(), ds.value(111))
            self.assertEquals(cursor.next(), 0)

        # Use the verify utility to confirm the table is clean after the
        # second update's values were rolled back.
        errfilename = "verifyrollbackerr.out"
        self.runWt(["verify", "-s", uri],
                   errfilename=errfilename,
                   failure=False)
        self.check_empty_file(errfilename)
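
# A minimal sketch of the commit/durable split the test verifies: a prepared
# transaction can commit at one timestamp yet only become durable at a later
# one, so an update can be visible but still not survive rollback-to-stable.
# Names are illustrative; this assumes an existing empty 'WT_HOME_DURABLE'
# directory.
import wiredtiger

def ts(n):
    return '%x' % n

conn = wiredtiger.wiredtiger_open('WT_HOME_DURABLE', 'create')
session = conn.open_session('isolation=snapshot')
session.create('table:durable_demo', 'key_format=i,value_format=i')
c = session.open_cursor('table:durable_demo')

session.begin_transaction()
c[1] = 1
session.prepare_transaction('prepare_timestamp=' + ts(100))
session.timestamp_transaction('commit_timestamp=' + ts(110))
session.timestamp_transaction('durable_timestamp=' + ts(200))
session.commit_transaction()

# Stable sits between the commit and durable timestamps: the update is
# visible now, but rollback-to-stable discards it.
conn.set_timestamp('stable_timestamp=' + ts(150))
c.close()
conn.rollback_to_stable()
c = session.open_cursor('table:durable_demo')
c.set_key(1)
assert c.search() == wiredtiger.WT_NOTFOUND
conn.close()
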
Example No. 19
class test_compat04(wttest.WiredTigerTestCase, suite_subprocess):
    # Add enough entries and use a small log size to generate more than
    # one log file.
    entries = 2000
    logmax = "100K"
    tablename = 'test_compat04'
    uri = 'table:' + tablename
    # Declare the log versions that do and do not have prevlsn.
    # Log version 1 does not have the prevlsn record.
    # Log version 2 introduced that record.
    # Log versions 3 and higher continue to have that record.
    min_logv = 2

    # The outline of this test: create the database at the create release
    # value, reconfigure the release to the reconfig release value, then
    # close and reopen the database with a release and compatibility
    # maximum of that release value. This should succeed in all directions.
    #
    create_release = [
        ('def_rel', dict(create_rel='none', log_crrel=5)),
        ('100_rel', dict(create_rel="10.0", log_crrel=5)),
        ('33_rel', dict(create_rel="3.3", log_crrel=4)),
        ('32_rel', dict(create_rel="3.2", log_crrel=3)),
        ('31_rel', dict(create_rel="3.1", log_crrel=3)),
        ('30_rel', dict(create_rel="3.0", log_crrel=2)),
        ('26_rel', dict(create_rel="2.6", log_crrel=1)),
    ]
    reconfig_release = [
        ('100_rel', dict(rel="10.0", log_rel=5)),
        ('33_rel', dict(rel="3.3", log_rel=4)),
        ('32_rel', dict(rel="3.2", log_rel=3)),
        ('31_rel', dict(rel="3.1", log_rel=3)),
        ('30_rel', dict(rel="3.0", log_rel=2)),
        ('300_rel', dict(rel="3.0.0", log_rel=2)),
        ('26_rel', dict(rel="2.6", log_rel=1)),
    ]
    base_config = [
        ('basecfg_true', dict(basecfg='true')),
        ('basecfg_false', dict(basecfg='false')),
    ]
    scenarios = make_scenarios(create_release, reconfig_release, base_config)

    # This test creates scenarios that lead to errors. This is different
    # from compat02 because it tests errors (or success) using the
    # compatibility settings on the initial database creation.
    def conn_config(self):
        config_str = 'create,config_base=%s,' % self.basecfg
        log_str = 'log=(enabled,file_max=%s,remove=false),' % self.logmax
        compat_str = ''
        if (self.create_rel != 'none'):
            compat_str += 'compatibility=(release="%s"),' % self.create_rel
        config_str += log_str + compat_str
        self.pr("Conn config:" + config_str)
        return config_str

    def test_compat04(self):
        #
        # Create initial database at the compatibility level requested
        # and a table with some data.
        #
        self.session.create(self.uri, 'key_format=i,value_format=i')
        c = self.session.open_cursor(self.uri, None)
        #
        # Add some entries to generate log files.
        #
        for i in range(self.entries):
            c[i] = i + 1
        c.close()

        # Reconfigure and close the connection. Then reopen with that release.
        # We expect success.
        config_str = 'compatibility=(release=%s)' % self.rel
        self.conn.reconfigure(config_str)
        self.conn.close()

        config_str = 'compatibility=(release=%s,require_max=%s)' % (self.rel,
                                                                    self.rel)
        conn = self.wiredtiger_open('.', config_str)
        conn.close()
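
# A minimal sketch of the compatibility round-trip above: create at one
# release level, reconfigure to another, then reopen demanding that level as
# a maximum. The home directory name is illustrative; this assumes an
# existing empty 'WT_HOME_COMPAT' directory.
import wiredtiger

conn = wiredtiger.wiredtiger_open(
    'WT_HOME_COMPAT',
    'create,log=(enabled,file_max=100K,remove=false),' +
    'compatibility=(release="3.3")')
conn.reconfigure('compatibility=(release="3.2")')
conn.close()

conn = wiredtiger.wiredtiger_open(
    'WT_HOME_COMPAT', 'compatibility=(release="3.2",require_max="3.2")')
conn.close()
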
Example No. 20
class test_prepare10(wttest.WiredTigerTestCase):
    # Force a small cache.
    conn_config = 'cache_size=10MB,eviction_dirty_trigger=80,eviction_updates_trigger=80'
    session_config = 'isolation=snapshot'

    key_format_values = [
        ('column', dict(key_format='r')),
        ('string-row', dict(key_format='S')),
    ]

    scenarios = make_scenarios(key_format_values)

    def updates(self, ds, uri, nrows, value, ts):
        cursor = self.session.open_cursor(uri)
        self.session.begin_transaction()
        for i in range(1, nrows):
            cursor.set_key(ds.key(i))
            cursor.set_value(value)
            self.assertEquals(cursor.insert(), 0)
        self.session.commit_transaction('commit_timestamp=' + self.timestamp_str(ts))
        cursor.close()

    def removes(self, ds, uri, nrows, ts):
        cursor = self.session.open_cursor(uri)
        self.session.begin_transaction()
        for i in range(1, nrows):
            cursor.set_key(ds.key(i))
            self.assertEquals(cursor.remove(), 0)
        self.session.commit_transaction('commit_timestamp=' + self.timestamp_str(ts))
        cursor.close()

    def check(self, ds, uri, nrows, value, ts):
        cursor = self.session.open_cursor(uri)
        self.session.begin_transaction('ignore_prepare=true,read_timestamp=' + self.timestamp_str(ts))
        for i in range(1, nrows):
            cursor.set_key(ds.key(i))
            self.assertEquals(cursor.search(), 0)
            self.assertEquals(cursor.get_value(), value)
        self.session.commit_transaction()
        cursor.close()

    def check_not_found(self, ds, uri, nrows, ts):
        cursor = self.session.open_cursor(uri)
        self.session.begin_transaction('ignore_prepare=true,read_timestamp=' + self.timestamp_str(ts))
        for i in range(1, nrows):
            cursor.set_key(ds.key(i))
            self.assertEquals(cursor.search(), wiredtiger.WT_NOTFOUND)
        self.session.commit_transaction()
        cursor.close()

    def test_prepare_rollback_retrieve_time_window(self):
        # Create a small table.
        uri = "table:test_prepare10"
        nrows = 1000
        ds = SimpleDataSet(self, uri, 0, key_format=self.key_format, value_format='u')
        ds.populate()

        value_a = b"aaaaa" * 100
        value_b = b"bbbbb" * 100
        value_c = b"ccccc" * 100

        # Commit some updates along with a prepared update, which is not resolved.
        self.conn.set_timestamp('oldest_timestamp=' + self.timestamp_str(10))
        self.conn.set_timestamp('stable_timestamp=' + self.timestamp_str(10))

        # Initially load a large amount of data.
        self.updates(ds, uri, nrows, value_a, 20)
        # Add some more updates
        self.updates(ds, uri, nrows, value_b, 30)

        # Checkpoint
        self.session.checkpoint()

        # Validate that we do see the correct value.
        session2 = self.setUpSessionOpen(self.conn)
        cursor2 = session2.open_cursor(uri)
        session2.begin_transaction()
        for i in range(1, nrows):
            cursor2.set_key(ds.key(i))
            self.assertEquals(cursor2.search(), 0)
            self.assertEquals(cursor2.get_value(), value_b)
        session2.commit_transaction()

        # Reset the cursor.
        cursor2.reset()
        session2.begin_transaction()

        # Remove all keys
        self.removes(ds, uri, nrows, 40)

        # Validate that the keys are no longer visible.
        session3 = self.setUpSessionOpen(self.conn)
        cursor3 = session3.open_cursor(uri)
        session3.begin_transaction()
        for i in range(1, nrows):
            cursor3.set_key(ds.key(i))
            self.assertEquals(cursor3.search(), wiredtiger.WT_NOTFOUND)
        session3.commit_transaction()

        # Reset the cursor.
        cursor3.reset()
        session3.begin_transaction()

        # Insert the updates from a prepare session and keep it open.
        session_p = self.conn.open_session()
        cursor_p = session_p.open_cursor(uri)
        session_p.begin_transaction()
        for i in range(1, nrows):
            cursor_p.set_key(ds.key(i))
            cursor_p.set_value(value_c)
            self.assertEquals(cursor_p.insert(), 0)
        session_p.prepare_transaction('prepare_timestamp=' + self.timestamp_str(50))

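        # The prepared transaction stays unresolved while the checks below run;
        # because they read with ignore_prepare=true, they see around the
        # prepared updates at each timestamp.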
        self.check(ds, uri, nrows, value_a, 20)
        self.check(ds, uri, nrows, value_b, 35)
        self.check_not_found(ds, uri, nrows, 60)

        # Roll back the prepared session.
        session_p.rollback_transaction()

        self.check(ds, uri, nrows, value_a, 20)
        self.check(ds, uri, nrows, value_b, 35)
        self.check_not_found(ds, uri, nrows, 60)

        # session2 can still see value_b.
        for i in range(1, nrows):
            cursor2.set_key(ds.key(i))
            self.assertEquals(cursor2.search(), 0)
            self.assertEquals(cursor2.get_value(), value_b)
        session2.commit_transaction()

        # session3 still can't see any value.
        for i in range(1, nrows):
            cursor3.set_key(ds.key(i))
            self.assertEquals(cursor3.search(), wiredtiger.WT_NOTFOUND)
        session3.commit_transaction()

        # close sessions.
        cursor_p.close()
        session_p.close()
        cursor2.close()
        session2.close()
        cursor3.close()
        session3.close()
        self.session.close()
Example #21
class test_schema04(wttest.WiredTigerTestCase):
    """
    Test indices with duplicates.
    Our set of rows looks like a multiplication table:
      row 0:  [ 0, 0, 0, 0, 0, 0 ]
      row 1:  [ 0, 1, 2, 3, 4, 5 ]
      row 2:  [ 0, 2, 4, 6, 8, 10 ]
    with the twist that entries are mod 100.  So, looking further:
      row 31:  [ 0, 31, 62, 93, 24, 55 ]

    Each column is placed into its own index.  The mod twist,
    as well as the 0th column, guarantees we'll have some duplicates.
    """
    nentries = 100

    scenarios = make_scenarios([
        ('index-before', { 'create_index' : 0 }),
        ('index-during', { 'create_index' : 1 }),
        ('index-after', { 'create_index' : 2 }),
    ])
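    # The scenarios create the indices before, between, or after the two
    # population phases; an index created on a non-empty table has to be
    # populated from the existing rows, so all three orderings should yield
    # the same entries.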

    def create_indices(self):
        # Create 6 index files, each with a column from the main table
        for i in range(0, 6):
            self.session.create("index:schema04:x" + str(i),
                                "key_format=i,columns=(v" + str(i) + "),")

    # We split the population into two phases so that indices can be
    # created before, between, or after the population steps, as driven
    # by the create_index scenarios.
    def populate(self, phase):
        cursor = self.session.open_cursor('table:schema04', None, None)
        if phase == 0:
            range_from = 0
            range_to = self.nentries // 2
        else:
            range_from = self.nentries // 2
            range_to = self.nentries

        for i in range(range_from, range_to):
            # e.g. element 31 is '0,31,62,93,24,55'
            cursor.set_key(i)
            cursor.set_value(
                (i*0)%100, (i*1)%100, (i*2)%100,
                (i*3)%100, (i*4)%100, (i*5)%100)
            cursor.insert()
        cursor.close()

    def check_entries(self):
        cursor = self.session.open_cursor('table:schema04', None, None)
        icursor = []
        for i in range(0, 6):
            icursor.append(self.session.open_cursor('index:schema04:x' + str(i),
                                                    None, None))
        i = 0
        for kv in cursor:
            # Check main table
            expect = [(i*j)%100 for j in range(0, 6)]
            primkey = kv.pop(0)
            self.assertEqual(i, primkey)
            self.assertEqual(kv, expect)
            for j in range(0, 6):
                self.assertEqual((i*j)%100, kv[j])
            for idx in range(0, 6):
                c = icursor[idx]
                indexkey = (i*idx)%100
                c.set_key(indexkey)
                self.assertEqual(c.search(), 0)
                value = c.get_value()
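                # The index permits duplicate keys, so step through the entries
                # sharing this index key until we reach the one whose full value
                # matches this row.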
                while value != expect and value[idx] == expect[idx]:
                    c.next()
                    value = c.get_value()
                self.assertEqual(value, expect)
            i += 1
        self.assertEqual(self.nentries, i)

    def test_index(self):
        self.session.create("table:schema04",
                            "key_format=i,value_format=iiiiii,"
                            "columns=(primarykey,v0,v1,v2,v3,v4,v5)")
        if self.create_index == 0:
            self.create_indices()
        self.populate(0)
        if self.create_index == 1:
            self.create_indices()
        self.populate(1)
        if self.create_index == 2:
            self.create_indices()
        self.check_entries()
Example #22
class test_dump(wttest.WiredTigerTestCase, suite_subprocess):
    dir = 'dump.dir'  # Backup directory name

    name = 'test_dump'
    name2 = 'test_dumpb'
    nentries = 2500

    dumpfmt = [('hex', dict(hex=1)), ('txt', dict(hex=0))]
    keyfmt = [('integer', dict(keyfmt='i')), ('recno', dict(keyfmt='r')),
              ('string', dict(keyfmt='S'))]
    types = [
        ('file', dict(uri='file:', dataset=SimpleDataSet)),
        ('lsm', dict(uri='lsm:', dataset=SimpleDataSet)),
        ('table-simple', dict(uri='table:', dataset=SimpleDataSet)),
        ('table-index', dict(uri='table:', dataset=SimpleIndexDataSet)),
        ('table-simple-lsm', dict(uri='table:', dataset=SimpleLSMDataSet)),
        ('table-complex', dict(uri='table:', dataset=ComplexDataSet)),
        ('table-complex-lsm', dict(uri='table:', dataset=ComplexLSMDataSet)),
        ('table-simple-proj',
         dict(uri='table:', dataset=ProjectionDataSet, projection=True)),
        ('table-index-proj',
         dict(uri='table:', dataset=ProjectionIndexDataSet, projection=True)),
    ]
    scenarios = make_scenarios(types, keyfmt, dumpfmt)

    def skip(self):
        return (self.dataset.is_lsm() or self.uri == 'lsm:') and \
            self.keyfmt == 'r'

    # Extract the values lines from the dump output.
    def value_lines(self, fname):
        # mode:
        #   0 == we are in the header
        #   1 == next line is key
        #   2 == next line is value
        mode = 0
        lines = []
        for line in open(fname).readlines():
            if mode == 0:
                if line == 'Data\n':
                    mode = 1
            elif mode == 1:
                mode = 2
            else:
                # This is a value line, keep it.
                lines.append(line)
                mode = 1
        return sorted(lines)

    def compare_dump_values(self, f1, f2):
        l1 = self.value_lines(f1)
        l2 = self.value_lines(f2)
        self.assertEqual(l1, l2)

    # Dump, re-load and do a content comparison.
    def test_dump(self):
        # LSM and column-store aren't a valid combination.
        if self.skip():
            return

        # Create the object.
        uri = self.uri + self.name
        uri2 = self.uri + self.name2
        pop = self.dataset(self, uri, self.nentries, key_format=self.keyfmt)
        pop.populate()

        # Dump the object.
        os.mkdir(self.dir)
        if self.hex == 1:
            self.runWt(['dump', '-x', uri], outfilename='dump.out')
        else:
            self.runWt(['dump', uri], outfilename='dump.out')

        # Re-load the object.
        self.runWt(['-h', self.dir, 'load', '-f', 'dump.out'])

        # Check the database contents
        self.runWt(['list'], outfilename='list.out')
        self.runWt(['-h', self.dir, 'list'], outfilename='list.out.new')
        s1 = set(open('list.out').read().split())
        s2 = set(open('list.out.new').read().split())
        self.assertEqual(s1, s2)

        # Check the object's contents
        self.reopen_conn(self.dir)
        pop.check()

        # Re-load the object again in the original directory.
        self.reopen_conn('.')
        self.runWt(['-h', self.dir, 'load', '-f', 'dump.out'])

        # Check the contents, they shouldn't have changed.
        pop.check()

        # Re-load the object again, but confirm -n (no overwrite) fails.
        self.runWt(['-h', self.dir, 'load', '-n', '-f', 'dump.out'],
                   errfilename='errfile.out',
                   failure=True)
        self.check_non_empty_file('errfile.out')

        # If there are indices, dump one of them and check the output.
        if self.dataset == ComplexDataSet:
            indexuri = 'index:' + self.name + ':indx1'
            hexopt = ['-x'] if self.hex == 1 else []
            self.runWt(['-h', self.dir, 'dump'] + hexopt + [indexuri],
                       outfilename='dumpidx.out')
            self.check_non_empty_file('dumpidx.out')
            self.compare_dump_values('dump.out', 'dumpidx.out')

        # Re-load the object into a different table uri
        shutil.rmtree(self.dir)
        os.mkdir(self.dir)
        self.runWt(
            ['-h', self.dir, 'load', '-r', self.name2, '-f', 'dump.out'])

        # Check the contents in the new table.
        self.reopen_conn(self.dir)
        pop = self.dataset(self, uri2, self.nentries, key_format=self.keyfmt)
        pop.check()
Example #23
class test_cursor_random_invisible(wttest.WiredTigerTestCase):
    types = [('file', dict(type='file:random')),
             ('table', dict(type='table:random'))]
    config = [('sample',
               dict(config='next_random=true,next_random_sample_size=35')),
              ('not-sample', dict(config='next_random=true'))]
    scenarios = make_scenarios(types, config)
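    # A next_random cursor returns a record chosen at random on each next()
    # call; next_random_sample_size roughly divides the object into that many
    # pieces and samples from them, trading strict uniformity for speed.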

    def test_cursor_random_invisible_all(self):
        uri = self.type
        self.session.create(uri, 'key_format=S,value_format=S')
        cursor = self.session.open_cursor(uri, None)

        # Start a transaction.
        self.session.begin_transaction()
        for i in range(1, 100):
            cursor[simple_key(cursor, i)] = simple_value(cursor, i)

        # Open another session; the updates won't yet be visible there, so we
        # shouldn't find anything at all.
        s = self.conn.open_session()
        cursor = s.open_cursor(uri, None, self.config)
        self.assertEqual(cursor.next(), wiredtiger.WT_NOTFOUND)

    def test_cursor_random_invisible_after(self):
        uri = self.type
        self.session.create(uri, 'key_format=S,value_format=S')
        cursor = self.session.open_cursor(uri, None)

        # Insert a single leading record.
        cursor[simple_key(cursor, 1)] = simple_value(cursor, 1)

        # Start a transaction.
        self.session.begin_transaction()
        for i in range(2, 100):
            cursor[simple_key(cursor, i)] = simple_value(cursor, i)

        # Open another session; the updates won't yet be visible there, so we
        # should return the only possible record.
        s = self.conn.open_session()
        cursor = s.open_cursor(uri, None, self.config)
        self.assertEquals(cursor.next(), 0)
        self.assertEqual(cursor.get_key(), simple_key(cursor, 1))

    def test_cursor_random_invisible_before(self):
        uri = self.type
        self.session.create(uri, 'key_format=S,value_format=S')
        cursor = self.session.open_cursor(uri, None)

        # Insert a single trailing record.
        cursor[simple_key(cursor, 99)] = simple_value(cursor, 99)

        # Start a transaction.
        self.session.begin_transaction()
        for i in range(2, 100):
            cursor[simple_key(cursor, i)] = simple_value(cursor, i)

        # Open another session; the updates won't yet be visible there, so we
        # should return the only possible record.
        s = self.conn.open_session()
        cursor = s.open_cursor(uri, None, self.config)
        self.assertEquals(cursor.next(), 0)
        self.assertEqual(cursor.get_key(), simple_key(cursor, 99))
Example #24
class test_compat01(wttest.WiredTigerTestCase, suite_subprocess):
    # Add enough entries and use a small log size to generate more than
    # one log file.
    entries = 2000
    logmax = "100K"
    tablename = 'test_compat01'
    uri = 'table:' + tablename
    sync_list = [
        '(method=fsync,enabled)',
        '(method=none,enabled)',
    ]

    # The API uses only the major and minor numbers but accepts version
    # strings with and without the patch number.  Test both.
    start_compat = [
        ('def', dict(compat1='none', current1=True)),
        ('current', dict(compat1="3.0", current1=True)),
        ('current_patch', dict(compat1="3.0.0", current1=True)),
        ('minor_only', dict(compat1="2.6", current1=False)),
        ('minor_patch', dict(compat1="2.6.1", current1=False)),
        ('old', dict(compat1="1.8", current1=False)),
        ('old_patch', dict(compat1="1.8.1", current1=False)),
    ]
    restart_compat = [
        ('def2', dict(compat2='none', current2=True)),
        ('current2', dict(compat2="3.0", current2=True)),
        ('current_patch2', dict(compat2="3.0.0", current2=True)),
        ('minor_only2', dict(compat2="2.6", current2=False)),
        ('minor_patch2', dict(compat2="2.6.1", current2=False)),
        ('old2', dict(compat2="1.8", current2=False)),
        ('old_patch2', dict(compat2="1.8.1", current2=False)),
    ]
    scenarios = make_scenarios(restart_compat, start_compat)

    def make_compat_str(self, create):
        compat_str = ''
        if create and self.compat1 != 'none':
            compat_str = 'compatibility=(release="%s"),' % self.compat1
        elif not create and self.compat2 != 'none':
            compat_str = 'compatibility=(release="%s"),' % self.compat2
        return compat_str

    def conn_config(self):
        # Cycle through the different transaction_sync values in a
        # deterministic manner.
        txn_sync = self.sync_list[self.scenario_number % len(self.sync_list)]
        # Set archive false on the home directory.
        log_str = 'log=(archive=false,enabled,file_max=%s),' % self.logmax + \
            'transaction_sync="%s",' % txn_sync
        compat_str = self.make_compat_str(True)
        self.pr("Conn config:" + log_str + compat_str)
        return log_str + compat_str

    def check_prev_lsn(self, conn_close, prev_lsn_count):
        #
        # Run printlog and look for the prev_lsn log record.  Verify its
        # existence with the passed in expected result.  We don't use
        # check_file_contains because that only looks in the first 100K and
        # we don't know how big our text-based log output is.  Look through
        # the entire file if needed and set a boolean for comparison.
        #
        self.runWt(['printlog'],
                   outfilename='printlog.out',
                   closeconn=conn_close)
        contains = 0
        with open('printlog.out') as logfile:
            for line in logfile:
                if 'prev_lsn' in line:
                    contains += 1
        self.assertEqual(prev_lsn_count, contains)

    def check_log(self, reconfig):
        orig_logs = fnmatch.filter(os.listdir('.'), "*gerLog*")
        compat_str = self.make_compat_str(False)
        if self.current1:
            prev_lsn_logs = len(orig_logs)
        else:
            prev_lsn_logs = 0

        if not reconfig:
            #
            # Close and open the connection to force recovery and reset the
            # compatibility string on startup.
            #
            self.conn.close()
            log_str = 'log=(enabled,file_max=%s,archive=false),' % self.logmax
            restart_config = log_str + compat_str
            self.pr("Restart conn " + restart_config)
            #
            # Open a connection to force it to run recovery.
            #
            conn = self.wiredtiger_open('.', restart_config)
            conn.close()
            check_close = False
            #
            # If the version was upgraded we'll see a new log file containing
            # the new log record no matter what the original setting was.
            #
            if self.current2:
                prev_lsn_logs += 1
        else:
            self.pr("Reconfigure: " + compat_str)
            self.conn.reconfigure(compat_str)
            check_close = True
            #
            # If we're reconfiguring, we'll only see another new log file
            # when upgrading.  Staying at the same version has no effect.
            #
            if self.current2 and not self.current1:
                prev_lsn_logs += 1

        # Run printlog and verify the new record does or does not exist.
        # Need to check count of log files that should and should not have
        # the prev_lsn record based on the count of log files that exist
        # before and after.  Pass that into this function and check the
        # number of prev_lsn records we see.
        self.check_prev_lsn(check_close, prev_lsn_logs)

    def run_test(self, reconfig):
        # If reconfiguring with the empty string there is nothing to do.
        if reconfig and self.compat2 == 'none':
            return
        self.session.create(self.uri, 'key_format=i,value_format=i')
        c = self.session.open_cursor(self.uri, None)
        #
        # Add some entries to generate log files.
        #
        for i in range(self.entries):
            c[i] = i + 1
        c.close()

        # Check the log state after the entire op completes
        # and run recovery with the restart compatibility mode.
        self.check_log(reconfig)

    # Run the same test but reset the compatibility either via reconfigure
    # or by changing it when reopening the connection.
    def test_reconfig(self):
        self.run_test(True)

    def test_restart(self):
        self.run_test(False)
Example #25
class test_txn07(wttest.WiredTigerTestCase, suite_subprocess):
    logmax = "100K"
    tablename = 'test_txn07'
    uri = 'table:' + tablename
    remove_list = ['true', 'false']
    sync_list = [
        '(method=dsync,enabled)',
        '(method=fsync,enabled)',
        '(method=none,enabled)',
        '(enabled=false)'
    ]

    types = [
        ('row', dict(tabletype='row', create_params = 'key_format=i,value_format=S')),
        ('var', dict(tabletype='var', create_params = 'key_format=r,value_format=S')),
        ('fix', dict(tabletype='fix', create_params = 'key_format=r,value_format=8t')),
    ]
    op1s = [
        ('trunc-all', dict(op1=('all', 0))),
        ('trunc-both', dict(op1=('both', 2))),
        ('trunc-start', dict(op1=('start', 2))),
        ('trunc-stop', dict(op1=('stop', 2))),
    ]
    txn1s = [('t1c', dict(txn1='commit')), ('t1r', dict(txn1='rollback'))]
    compress = [
        ('nop', dict(compress='nop')),
        ('snappy', dict(compress='snappy')),
        ('zlib', dict(compress='zlib')),
        ('none', dict(compress='')),
    ]

    scenarios = make_scenarios(types, op1s, txn1s, compress,
                               prune=30, prunelong=1000)

    def conn_config(self):
        return 'log=(enabled,file_max=%s,' % self.logmax + \
            'compressor=%s,remove=false)' % self.compress + \
            ',create,error_prefix="%s",' % self.shortid() + \
            'statistics=(fast),' + \
            'transaction_sync="%s",' % \
            self.sync_list[self.scenario_number % len(self.sync_list)]

    def conn_extensions(self, extlist):
        extlist.skip_if_missing = True
        extlist.extension('compressors', self.compress)

    # Check that a cursor (optionally started in a new transaction) sees the
    # expected values.
    def check(self, session, txn_config, expected):
        if txn_config:
            session.begin_transaction(txn_config)
        c = session.open_cursor(self.uri, None)
        actual = dict((k, v) for k, v in c if v != 0)
        # Search for the expected items as well as iterating
        for k, v in expected.items():
            self.assertEqual(c[k], v)
        c.close()
        if txn_config:
            session.commit_transaction()
        self.assertEqual(actual, expected)

    # Check the state of the system with respect to the current cursor and
    # different isolation levels.
    def check_all(self, current, committed):
        # Transactions see their own changes.
        # Read-uncommitted transactions see all changes.
        # Snapshot and read-committed transactions should not see changes.
        self.check(self.session, None, current)
        self.check(self.session2, "isolation=snapshot", committed)
        self.check(self.session2, "isolation=read-committed", committed)
        self.check(self.session2, "isolation=read-uncommitted", current)

        # Opening a clone of the database home directory should run
        # recovery and see the committed results.  Flush the log because
        # the backup may not get all the log records if we are running
        # without a sync option.  Use sync=off to force a write to the OS.
        self.session.log_flush('sync=off')
        self.backup(self.backup_dir)
        backup_conn_params = 'log=(enabled,file_max=%s,' % self.logmax + \
                'compressor=%s)' % self.compress + \
                self.extensionsConfig()
        backup_conn = self.wiredtiger_open(self.backup_dir, backup_conn_params)
        try:
            self.check(backup_conn.open_session(), None, committed)
        finally:
            backup_conn.close()

    def test_ops(self):
        self.backup_dir = os.path.join(self.home, "WT_BACKUP")
        self.session2 = self.conn.open_session()

        # print "Creating %s with config '%s'" % (self.uri, self.create_params)
        self.session.create(self.uri, self.create_params)
        # Set up the table with entries for 1-5.
        # We then truncate starting or ending in various places.
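        # session.truncate takes either a URI or a pair of cursors marking the
        # range; passing None for the start or stop cursor extends the truncate
        # to the first or last key in the table.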
        c = self.session.open_cursor(self.uri, None)
        if self.tabletype == 'fix':
            value = 1
        else:
            # Choose large compressible values for the string cases.
            value = 'abc' * 1000000
        current = {1:value, 2:value, 3:value, 4:value, 5:value}
        for k in current:
            c[k] = value
        committed = current.copy()

        ops = (self.op1, )
        txns = (self.txn1, )
        for i, ot in enumerate(zip(ops, txns)):
            self.session.begin_transaction()
            ok, txn = ot
            # print '%d: %s(%d)[%s]' % (i, ok[0], ok[1], txn)
            op, k = ok

            if op == 'stop':
                c.set_key(k)
                self.session.truncate(None, None, c, None)
                kstart = 1
                kstop = k
            elif op == 'start':
                c.set_key(k)
                self.session.truncate(None, c, None, None)
                kstart = k
                kstop = len(current)
            elif op == 'both':
                c2 = self.session.open_cursor(self.uri, None)
                # For both, the key given is the start key.  Add 2
                # for the stop key.
                kstart = k
                kstop = k + 2
                c.set_key(kstart)
                c2.set_key(kstop)
                self.session.truncate(None, c, c2, None)
                c2.close()
            elif op == 'all':
                c2 = self.session.open_cursor(self.uri, None)
                kstart = 1
                kstop = len(current)
                c.set_key(kstart)
                c2.set_key(kstop)
                self.session.truncate(None, c, c2, None)
                c2.close()

            while kstart <= kstop:
                del current[kstart]
                kstart += 1

            # print current
            # Check the state after each operation.
            self.check_all(current, committed)

            if txn == 'commit':
                committed = current.copy()
                self.session.commit_transaction()
            elif txn == 'rollback':
                current = committed.copy()
                self.session.rollback_transaction()

            # Check the state after each commit/rollback.
            self.check_all(current, committed)

        # Gather statistics - this needs to be done before the connection is
        # closed or the statistics will be reset.
        stat_cursor = self.session.open_cursor('statistics:', None, None)
        clen = stat_cursor[stat.conn.log_compress_len][2]
        cmem = stat_cursor[stat.conn.log_compress_mem][2]
        cwrites = stat_cursor[stat.conn.log_compress_writes][2]
        cfails = stat_cursor[stat.conn.log_compress_write_fails][2]
        csmall = stat_cursor[stat.conn.log_compress_small][2]
        stat_cursor.close()

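        # Without a compressor the raw and in-memory lengths match and nothing
        # is written compressed. The nop compressor never shrinks a record, so
        # its writes land in the failed/too-small buckets, while a real
        # compressor should successfully compress at least some records.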
        if self.compress == '':
            self.assertEqual(clen, cmem)
            self.assertEqual(cwrites, 0)
            self.assertEqual(cfails, 0)
        elif self.compress == 'nop':
            self.assertEqual(clen, cmem)
            self.assertEqual(cwrites, 0)
            self.assertTrue(cfails > 0 or csmall > 0)
        else:
            self.assertLess(clen, cmem)
            self.assertGreater(cwrites, 0)
            self.assertTrue(cfails > 0 or csmall > 0)

        #
        # Run printlog on the backup directory and make sure it exits with
        # zero status.
        #
        self.runWt(['-h', self.backup_dir, 'printlog'], outfilename='printlog.out')
Example #26
class test_backup26(backup_base):
    dir = 'backup.dir'  # Backup directory name
    uri = "table_backup"
    ntables = 10000 if wttest.islongtest() else 500

    # Reverse the backup restore list; WiredTiger should still succeed in this case.
    reverse = [
        ["reverse_target_list", dict(reverse=True)],
        ["target_list", dict(reverse=False)],
    ]

    # Percentage of tables to not copy over in selective backup.
    percentage = [
        ('hundred_percent', dict(percentage=1)),
        ('ninety_percent', dict(percentage=0.9)),
        ('fifty_percent', dict(percentage=0.5)),
        ('ten_percent', dict(percentage=0.1)),
        ('zero_percent', dict(percentage=0)),
    ]
    scenarios = make_scenarios(percentage, reverse)

    def test_backup26(self):
        selective_remove_uri_file_list = []
        selective_remove_uri_list = []
        selective_uri_list = []

        for i in range(0, self.ntables):
            uri = "table:{0}".format(self.uri + str(i))
            dataset = SimpleDataSet(self, uri, 100, key_format="S")
            dataset.populate()
            # Append the table URI to the selective-backup remove list until we
            # reach the configured percentage. These tables will not be copied
            # over by the selective backup.
            if (i <= int(self.ntables * self.percentage)):
                selective_remove_uri_list.append(uri)
                selective_remove_uri_file_list.append(
                    "{0}.wt".format(self.uri + str(i)))
            else:
                selective_uri_list.append(uri)
        self.session.checkpoint()

        os.mkdir(self.dir)

        # Now copy the files using a full backup. This should not include the tables in the remove list.
        all_files = self.take_selective_backup(self.dir,
                                               selective_remove_uri_file_list)

        target_uris = None
        if self.reverse:
            target_uris = str(selective_uri_list[::-1]).replace("\'", "\"")
        else:
            target_uris = str(selective_uri_list).replace("\'", "\"")
        starttime = time.time()
        # After the full backup, open and recover the backup database.
        backup_conn = self.wiredtiger_open(
            self.dir, "backup_restore_target={0}".format(target_uris))
        elapsed = time.time() - starttime
        self.pr("%s partial backup has taken %.2f seconds." %
                (str(self), elapsed))

        bkup_session = backup_conn.open_session()
        # Open cursors on the URIs that were not part of the selective backup and
        # expect failure, since their files don't exist.
        for remove_uri in selective_remove_uri_list:
            self.assertRaisesException(
                wiredtiger.WiredTigerError,
                lambda: bkup_session.open_cursor(remove_uri, None, None))

        # Open cursors on the tables that were copied over to the backup directory.
        # They should still recover properly.
        for uri in selective_uri_list:
            c = bkup_session.open_cursor(uri, None, None)
            ds = SimpleDataSet(self, uri, 100, key_format="S")
            ds.check_cursor(c)
            c.close()
        backup_conn.close()
Example #27
class test_backup_target(wttest.WiredTigerTestCase, suite_subprocess):
    dir = 'backup.dir'  # Backup directory name

    # This test is written to test LSM hot backups: we test a simple LSM object
    # and a complex LSM object, but we can't test them both at the same time
    # because we need the load to run fast enough that the merge threads can't
    # catch up, and so we test the real database, not what the database might
    # look like after the merging settles down.
    #
    # The way it works is we create 4 objects, only one of which is large, then
    # we do a hot backup of one or more of the objects and compare the original
    # to the backup to confirm the backup is correct.
    pfx = 'test_backup'
    objs = [  # Objects
        ('table:' + pfx + '.1', SimpleDataSet, 0),
        ('lsm:' + pfx + '.2', SimpleDataSet, 1),
        ('table:' + pfx + '.3', ComplexDataSet, 2),
        ('table:' + pfx + '.4', ComplexLSMDataSet, 3),
    ]
    list = [
        ('backup_1', dict(big=0, list=[0])),  # Target objects individually
        ('backup_2', dict(big=1, list=[1])),
        ('backup_3', dict(big=2, list=[2])),
        ('backup_4', dict(big=3, list=[3])),
        ('backup_5a', dict(big=0, list=[0, 2])),  # Target groups of objects
        ('backup_5b', dict(big=2, list=[0, 2])),
        ('backup_6a', dict(big=1, list=[1, 3])),
        ('backup_6b', dict(big=3, list=[1, 3])),
        ('backup_7a', dict(big=0, list=[0, 1, 2])),
        ('backup_7b', dict(big=1, list=[0, 1, 2])),
        ('backup_7c', dict(big=2, list=[0, 1, 2])),
        ('backup_8a', dict(big=0, list=[0, 1, 2, 3])),
        ('backup_8b', dict(big=1, list=[0, 1, 2, 3])),
        ('backup_8c', dict(big=2, list=[0, 1, 2, 3])),
        ('backup_8d', dict(big=3, list=[0, 1, 2, 3])),
        ('backup_9', dict(big=3, list=[])),  # Backup everything
    ]

    scenarios = make_scenarios(list, prune=3, prunelong=1000)
    # Create a large cache, otherwise this test runs quite slowly.
    conn_config = 'cache_size=1G'

    # Populate a set of objects.
    def populate(self):
        for i in self.objs:
            if self.big == i[2]:
                rows = 200000  # Big object
            else:
                rows = 1000  # Small object
            i[1](self, i[0], rows).populate()
        # Backup needs a checkpoint
        self.session.checkpoint(None)

    # Compare the original and backed-up files using the wt dump command.
    def compare(self, uri):
        self.runWt(['dump', uri], outfilename='orig')
        self.runWt(['-h', self.dir, 'dump', uri], outfilename='backup')
        self.assertEqual(True, compare_files(self, 'orig', 'backup'))

    # Check that a URI doesn't exist, in both the metadata and the file names.
    def confirmPathDoesNotExist(self, uri):
        conn = self.wiredtiger_open(self.dir)
        session = conn.open_session()
        self.assertRaises(wiredtiger.WiredTigerError,
                          lambda: session.open_cursor(uri, None, None))
        conn.close()

        self.assertEqual(
            glob.glob(self.dir + '*' + uri.split(":")[1] + '*'), [],
            'confirmPathDoesNotExist: URI exists, file name matching \"' +
            uri.split(":")[1] + '\" found')

    # Backup a set of target tables using a backup cursor.
    def backup_table_cursor(self, l):
        # Create the backup directory.
        os.mkdir(self.dir)

        # Build the target list.
        config = ""
        if l:
            config = 'target=('
            for i in range(0, len(self.objs)):
                if i in l:
                    config += '"' + self.objs[i][0] + '",'
            config += ')'

        # Open up the backup cursor, and copy the files.
        cursor = self.session.open_cursor('backup:', None, config)
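        # Each key returned by the backup cursor names a file that must be
        # copied while the cursor remains open, which keeps the underlying
        # checkpoint from being discarded.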
        while True:
            ret = cursor.next()
            if ret != 0:
                break
            #print 'Copy from: ' + cursor.get_key() + ' to ' + self.dir
            shutil.copy(cursor.get_key(), self.dir)
        self.assertEqual(ret, wiredtiger.WT_NOTFOUND)
        cursor.close()

        # Confirm the objects we backed up exist, with correct contents.
        for i in range(0, len(self.objs)):
            if not l or i in l:
                self.compare(self.objs[i][0])

        # Confirm the other objects don't exist.
        if l:
            for i in range(0, len(self.objs)):
                if i not in l:
                    self.confirmPathDoesNotExist(self.objs[i][0])

    # Test backup with targets.
    def test_backup_target(self):
        self.populate()
        self.backup_table_cursor(self.list)
Example #28
class test_assert01(wttest.WiredTigerTestCase, suite_subprocess):
    base = 'assert01'
    base_uri = 'file:' + base
    uri_always = base_uri + '.always.wt'
    uri_def = base_uri + '.def.wt'
    uri_never = base_uri + '.never.wt'
    uri_none = base_uri + '.none.wt'
    cfg_always = 'verbose=[write_timestamp],write_timestamp_usage=always,assert=(write_timestamp=on)'
    cfg_def = ''
    cfg_never = 'verbose=(write_timestamp=true),write_timestamp_usage=never,assert=(write_timestamp=on)'
    cfg_none = 'assert=(write_timestamp=off)'
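    # write_timestamp_usage declares whether commits to the table are expected
    # always or never to carry a timestamp; assert=(write_timestamp=on) makes
    # WiredTiger enforce that declaration at commit time.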

    key_format_values = [('column', dict(key_format='r', usestrings=False)),
                         ('string-row', dict(key_format='S', usestrings=True))]
    scenarios = make_scenarios(key_format_values)

    count = 1

    #
    # Commit a k/v pair, making sure an error is detected when required, both
    # with and without a commit timestamp.
    #
    def insert_check(self, uri, use_ts):
        c = self.session.open_cursor(uri)
        key = 'key' + str(self.count) if self.usestrings else self.count
        val = 'value' + str(self.count)

        # Commit with a timestamp
        self.session.begin_transaction()
        self.session.timestamp_transaction('commit_timestamp=' +
                                           self.timestamp_str(self.count))
        c[key] = val
        # All settings other than never should commit successfully
        if (use_ts != 'never'):
            self.session.commit_transaction()
        else:
            msg = "/timestamp set on this transaction/"
            self.assertRaisesWithMessage(
                wiredtiger.WiredTigerError, lambda: self.assertEquals(
                    self.session.commit_transaction(), 0), msg)
        c.close()
        self.count += 1

        # Commit without a timestamp
        key = 'key' + str(self.count) if self.usestrings else self.count
        val = 'value' + str(self.count)
        c = self.session.open_cursor(uri)
        self.session.begin_transaction()
        c[key] = val
        # All settings other than always should commit successfully
        if (use_ts != 'always'):
            self.session.commit_transaction()
        else:
            msg = "/none set on this transaction/"
            self.assertRaisesWithMessage(
                wiredtiger.WiredTigerError, lambda: self.assertEquals(
                    self.session.commit_transaction(), 0), msg)
        self.count += 1
        c.close()

    def test_commit_timestamp(self):
        cfg = 'key_format={},value_format=S,'.format(self.key_format)

        # Create the tables, each with a different write-timestamp setting.
        self.session.create(self.uri_always, cfg + self.cfg_always)
        self.session.create(self.uri_def, cfg + self.cfg_def)
        self.session.create(self.uri_never, cfg + self.cfg_never)
        self.session.create(self.uri_none, cfg + self.cfg_none)

        # Check inserting into each table
        self.insert_check(self.uri_always, 'always')
        self.insert_check(self.uri_def, 'none')
        self.insert_check(self.uri_never, 'never')
        self.insert_check(self.uri_none, 'none')
Example #29
class test_rollback_to_stable26(test_rollback_to_stable_base):

    format_values = [
        ('column', dict(key_format='r', value_format='S')),
        ('column_fix', dict(key_format='r', value_format='8t')),
        ('row_integer', dict(key_format='i', value_format='S')),
    ]

    hs_remove_values = [
        ('no_hs_remove', dict(hs_remove=False)),
        ('hs_remove', dict(hs_remove=True))
    ]

    prepare_remove_values = [
        ('no_prepare_remove', dict(prepare_remove=False)),
        ('prepare_remove', dict(prepare_remove=True))
    ]

    scenarios = make_scenarios(format_values, hs_remove_values, prepare_remove_values)

    def conn_config(self):
        config = 'cache_size=10MB,statistics=(all),timing_stress_for_test=[history_store_checkpoint_delay]'
        return config

    def evict_cursor(self, uri, nrows):
        # Configure debug behavior on a cursor to evict the page positioned on when the reset API is used.
        evict_cursor = self.session.open_cursor(uri, None, "debug=(release_evict)")
        self.session.begin_transaction("ignore_prepare=true")
        for i in range (1, nrows + 1):
            evict_cursor.set_key(i)
            evict_cursor.search()
            evict_cursor.reset()
        evict_cursor.close()
        self.session.rollback_transaction()

    def test_rollback_to_stable(self):
        nrows = 10

        # Create a table without logging.
        uri = "table:rollback_to_stable26"
        ds = SimpleDataSet(
            self, uri, 0, key_format=self.key_format, value_format=self.value_format,
            config='log=(enabled=false)')
        ds.populate()

        if self.value_format == '8t':
            value_a = 97
            value_b = 98
            value_c = 99
            value_d = 100
            value_e = 101
        else:
            value_a = "aaaaa" * 100
            value_b = "bbbbb" * 100
            value_c = "ccccc" * 100
            value_d = "ddddd" * 100
            value_e = "eeeee" * 100

        # Pin oldest and stable to timestamp 10.
        self.conn.set_timestamp('oldest_timestamp=' + self.timestamp_str(10) +
            ',stable_timestamp=' + self.timestamp_str(10))

        self.large_updates(uri, value_a, ds, nrows, False, 20)
        self.large_updates(uri, value_b, ds, nrows, False, 30)

        if self.hs_remove:
            self.large_removes(uri, ds, nrows, False, 40)

        prepare_session = self.conn.open_session()
        prepare_session.begin_transaction()
        cursor = prepare_session.open_cursor(uri)
        for i in range (1, nrows + 1):
            cursor[i] = value_c
            if self.prepare_remove:
                cursor.set_key(i)
                self.assertEqual(cursor.remove(), 0)
        cursor.close()
        prepare_session.prepare_transaction('prepare_timestamp=' + self.timestamp_str(50))

        # Verify data is visible and correct.
        self.check(value_a, uri, nrows, None, 20)
        self.check(value_b, uri, nrows, None, 30)

        self.evict_cursor(uri, nrows)

        # Pin stable to timestamp 40.
        self.conn.set_timestamp('stable_timestamp=' + self.timestamp_str(40))

        # Create a checkpoint thread
        done = threading.Event()
        ckpt = checkpoint_thread(self.conn, done)
        try:
            ckpt.start()
            # Sleep for a while so that the checkpoint starts before the last transaction commits.
            time.sleep(5)
            prepare_session.rollback_transaction()
        finally:
            done.set()
            ckpt.join()

        self.large_updates(uri, value_d, ds, nrows, False, 60)

        # Check that the data is correct.
        self.check(value_a, uri, nrows, None, 20)
        self.check(value_b, uri, nrows, None, 30)
        self.check(value_d, uri, nrows, None, 60)

        # Simulate a server crash and restart.
        simulate_crash_restart(self, ".", "RESTART")

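        # Restart recovery runs rollback-to-stable: updates beyond the stable
        # timestamp (40) are discarded and the stable values are restored from
        # the history store, which the statistics below are expected to show.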
        stat_cursor = self.session.open_cursor('statistics:', None, None)
        hs_removed = stat_cursor[stat.conn.txn_rts_hs_removed][2]
        hs_restore_updates = stat_cursor[stat.conn.txn_rts_hs_restore_updates][2]
        keys_removed = stat_cursor[stat.conn.txn_rts_keys_removed][2]
        stat_cursor.close()

        self.assertEqual(keys_removed, 0)
        self.assertEqual(hs_restore_updates, nrows)
        self.assertEqual(hs_removed, nrows)

        # Check that the data is correct.
        self.check(value_a, uri, nrows, None, 20)
        self.check(value_b, uri, nrows, None, 30)

        self.large_updates(uri, value_e, ds, nrows, False, 60)

        self.evict_cursor(uri, nrows)

        # Check that the correct data.
        self.check(value_a, uri, nrows, None, 20)
        self.check(value_b, uri, nrows, None, 30)
        self.check(value_e, uri, nrows, None, 60)
Example #30
class test_compat01(wttest.WiredTigerTestCase, suite_subprocess):
    # Add enough entries and use a small log size to generate more than
    # one log file.
    entries = 2000
    logmax = "100K"
    tablename = 'test_compat01'
    uri = 'table:' + tablename
    # Declare the log versions that do and do not have prevlsn.
    # Log version 1 does not have the prevlsn record.
    # Log version 2 introduced that record.
    # Log version 3 continues to have that record.
    min_logv = 2
    latest_logv = 3

    # The API uses only the major and minor numbers but accepts version
    # strings with and without the patch number.  Test both.
    start_compat = [
        ('def', dict(compat1='none', logv1=3)),
        ('31', dict(compat1="3.1", logv1=3)),
        ('30', dict(compat1="3.0", logv1=2)),
        ('30_patch', dict(compat1="3.0.0", logv1=2)),
        ('26', dict(compat1="2.6", logv1=1)),
        ('old', dict(compat1="1.8", logv1=1)),
        ('old_patch', dict(compat1="1.8.1", logv1=1)),
    ]
    restart_compat = [
        ('def2', dict(compat2='none', logv2=3)),
        ('31_2', dict(compat2="3.1", logv2=3)),
        ('30_2', dict(compat2="3.0", logv2=2)),
        ('30_patch2', dict(compat2="3.0.0", logv2=2)),
        ('26_2', dict(compat2="2.6", logv2=1)),
        ('old2', dict(compat2="1.8", logv2=1)),
        ('old_patch2', dict(compat2="1.8.1", logv2=1)),
    ]
    scenarios = make_scenarios(restart_compat, start_compat)

    def make_compat_str(self, create):
        compat_str = ''
        if create and self.compat1 != 'none':
            compat_str = 'compatibility=(release="%s"),' % self.compat1
        elif not create and self.compat2 != 'none':
            compat_str = 'compatibility=(release="%s"),' % self.compat2
        return compat_str

    def conn_config(self):
        # Set archive false on the home directory.
        log_str = 'log=(archive=false,enabled,file_max=%s),' % self.logmax
        compat_str = self.make_compat_str(True)
        self.pr("Conn config:" + log_str + compat_str)
        return log_str + compat_str

    def check_prev_lsn(self, exists, conn_close):
        #
        # Run printlog and look for the prev_lsn log record.  Verify its
        # existence with the passed in expected result.  We don't use
        # check_file_contains because that only looks in the first 100K and
        # we don't know how big our text-based log output is.  Look through
        # the entire file if needed and set a boolean for comparison.
        #
        self.runWt(['printlog'],
                   outfilename='printlog.out',
                   closeconn=conn_close)
        contains = False
        with open('printlog.out') as logfile:
            for line in logfile:
                if 'optype' in line and 'prev_lsn' in line:
                    contains = True
                    break
        self.assertEqual(exists, contains)

    def check_log(self, reconfig):
        orig_logs = fnmatch.filter(os.listdir('.'), "*gerLog*")
        compat_str = self.make_compat_str(False)

        if not reconfig:
            #
            # Close and open the connection to force recovery and log archiving
            # even if archive is turned off (in some circumstances). If we are
            # downgrading we must archive newer logs. Verify the log files
            # have or have not been archived.
            #
            exist = True
            if self.logv1 < self.min_logv:
                exist = False
            self.check_prev_lsn(exist, True)

            self.conn.close()
            log_str = 'log=(enabled,file_max=%s,archive=false),' % self.logmax
            restart_config = log_str + compat_str
            self.pr("Restart conn " + restart_config)
            #
            # Open a connection to force it to run recovery.
            #
            conn = self.wiredtiger_open('.', restart_config)
            conn.close()
            check_close = False
        else:
            self.pr("Reconfigure: " + compat_str)
            self.conn.reconfigure(compat_str)
            check_close = True

            #
            # Archiving is turned off explicitly.
            #
            # Check logs. The original logs should have been archived only if
            # we downgraded.  In all other cases the original logs should be there.
            # Downgrade means not running the latest possible log version, not
            # the difference between original and current.
            cur_logs = fnmatch.filter(os.listdir('.'), "*Log*")
            log_present = True
            if self.logv1 != self.logv2 and self.logv2 != self.latest_logv:
                log_present = False
            for o in orig_logs:
                self.assertEqual(log_present, o in cur_logs)

        # Run printlog and verify the new record does or does not exist.
        exist = True
        if self.logv2 < self.min_logv:
            exist = False
        self.check_prev_lsn(exist, check_close)

    def run_test(self, reconfig):
        # If reconfiguring with the empty string there is nothing to do.
        if reconfig and self.compat2 == 'none':
            return
        self.session.create(self.uri, 'key_format=i,value_format=i')
        c = self.session.open_cursor(self.uri, None)
        #
        # Add some entries to generate log files.
        #
        for i in range(self.entries):
            c[i] = i + 1
        c.close()

        # Check the log state after the entire op completes
        # and run recovery with the restart compatibility mode.
        self.check_log(reconfig)

    # Run the same test but reset the compatibility either via reconfigure
    # or by changing it when reopening the connection.
    def test_reconfig(self):
        self.run_test(True)

    def test_restart(self):
        self.run_test(False)