예제 #1
0
    def test_modify_smoke_recover(self):
        # Verify modify operations survive crash recovery: apply modifies
        # with logging enabled, copy the live home directory (simulating a
        # crash), then recover from the copy and confirm the values.

        # Close the original database.
        self.conn.close()

        # Open a new database with logging configured.
        # dsync forces log records to stable storage at commit, so the copied
        # directory contains everything recovery needs.
        self.conn_config = \
            'log=(enabled=true),transaction_sync=(method=dsync,enabled)'
        self.conn = self.setUpConnectionOpen(".")
        self.session = self.setUpSessionOpen(self.conn)

        # Populate a database, and checkpoint it so it exists after recovery.
        ds = SimpleDataSet(self,
            self.uri, 100, key_format=self.keyfmt, value_format=self.valuefmt)
        ds.populate()
        self.session.checkpoint()
        self.modify_load(ds, False)

        # Crash and recover in a new directory.
        # The copy happens while the connection is still open, so the new
        # directory looks like a crashed database and triggers recovery.
        newdir = 'RESTART'
        copy_wiredtiger_home('.', newdir)
        self.conn.close()
        self.conn = self.setUpConnectionOpen(newdir)
        self.session = self.setUpSessionOpen(self.conn)
        self.session.verify(self.uri)

        # The modified values must still be visible after recovery.
        self.modify_confirm(ds, False)
예제 #2
0
    def test_prepare_lookaside(self):
        """Exercise lookaside eviction in the presence of prepared transactions."""
        # Prepared transactions require a timestamp-enabled build.
        if not wiredtiger.timestamp_build():
            self.skipTest('requires a timestamp build')

        # Create a small table.
        uri = "table:test_prepare_lookaside01"
        nrows = 100
        ds = SimpleDataSet(self, uri, nrows, key_format="S", value_format='u')
        ds.populate()
        bigvalue = "aaaaa" * 100

        # Initially load huge data so eviction has pressure to work against.
        cursor = self.session.open_cursor(uri)
        for i in range(1, 10000):
            cursor.set_key(ds.key(nrows + i))
            cursor.set_value(bigvalue)
            # assertEquals is a deprecated alias; use assertEqual.
            self.assertEqual(cursor.insert(), 0)
        cursor.close()
        self.session.checkpoint()

        # Check if lookaside is working properly with prepare transactions.
        # We put prepared updates in multiple sessions so that we do not hang
        # because of cache being full with uncommitted updates.
        nsessions = 3
        nkeys = 4000
        self.prepare_updates(uri, ds, nrows, nsessions, nkeys)
예제 #3
0
    def test_truncate_cursor_order(self):
        """Truncate a cursor range and verify both cursors close cleanly.

        The identical populate/truncate/close sequence ran twice (simple and
        complex data sets); it is factored into a local helper.
        """
        uri = self.type + self.name

        def truncate_range(dataset):
            # Populate, truncate between two positioned cursors, verify the
            # cursors close successfully, then drop the object.
            dataset.populate()
            c1 = self.session.open_cursor(uri, None)
            c1.set_key(dataset.key(1000))
            c2 = self.session.open_cursor(uri, None)
            c2.set_key(dataset.key(2000))
            self.session.truncate(None, c1, c2, None)
            self.assertEqual(c1.close(), 0)
            self.assertEqual(c2.close(), 0)
            self.session.drop(uri)

        # A simple, one-file file or table object.
        truncate_range(SimpleDataSet(self, uri, 100, key_format=self.keyfmt))

        # A complex, multi-file table object (tables only).
        if self.type == "table:":
            truncate_range(
                ComplexDataSet(self, uri, 100, key_format=self.keyfmt))
예제 #4
0
File: test_bug008.py — Project: DINKIN/mongo
    def test_search_eot(self):
        # Verify search and search_near behavior at and past the end of the
        # table: search past EOT fails, search_near past EOT returns the
        # last record with a "before the search key" (-1) indication.

        # Populate the tree and reopen the connection, forcing it to disk
        # and moving the records to an on-page format.
        ds = SimpleDataSet(self, self.uri, 100, key_format=self.key_format,
                           value_format=self.value_format)
        ds.populate()
        self.reopen_conn()

        # Open a cursor.
        cursor = self.session.open_cursor(self.uri, None)

        # Search for a record at the end of the table, which should succeed.
        cursor.set_key(ds.key(100))
        self.assertEqual(cursor.search(), 0)
        self.assertEqual(cursor.get_key(), ds.key(100))
        self.assertEqual(cursor.get_value(), ds.value(100))

        # Search-near for a record at the end of the table, which should
        # succeed, returning the last record.
        cursor.set_key(ds.key(100))
        self.assertEqual(cursor.search_near(), 0)
        self.assertEqual(cursor.get_key(), ds.key(100))
        self.assertEqual(cursor.get_value(), ds.value(100))

        # Search for a record past the end of the table, which should fail.
        cursor.set_key(ds.key(200))
        self.assertEqual(cursor.search(), wiredtiger.WT_NOTFOUND)

        # Search-near for a record past the end of the table, which should
        # succeed, returning the last record.
        cursor.set_key(ds.key(200))
        self.assertEqual(cursor.search_near(), -1)
        self.assertEqual(cursor.get_key(), ds.key(100))
        self.assertEqual(cursor.get_value(), ds.value(100))
예제 #5
0
    def test_drop(self):
        # Build a large LSM object, cycle the connection so everything is
        # on disk, then drop the object.
        uri = 'lsm:' + self.name
        dataset = SimpleDataSet(self, uri, 100000)
        dataset.populate()
        self.reopen_conn()
        self.session.drop(uri, None)
예제 #6
0
 def test_basic(self):
     # Load the object, reopen the connection to push data to disk, then
     # iterate the whole object forward and backward.
     dataset = SimpleDataSet(self, self.uri, self.nentries,
         config=self.config, key_format=self.keyfmt)
     dataset.populate()
     self.reopen_conn()
     cursor = self.session.open_cursor(self.uri, None)
     self.forward(cursor, dataset, self.nentries, [])
     self.backward(cursor, dataset, self.nentries, [])
예제 #7
0
    def test_modify_smoke_single(self):
        # Smoke-test modify operations against a freshly populated table.
        # The boolean flag's meaning is defined by the modify_load helper
        # (not visible here) — presumably a single-op/verify mode; confirm
        # against the helper's definition.
        if self.skip():
            return

        ds = SimpleDataSet(self,
            self.uri, 100, key_format=self.keyfmt, value_format='u')
        ds.populate()
        self.modify_load(ds, True)
예제 #8
0
    def test_modify_smoke_reopen(self):
        # Apply modifies, force the tree through reconciliation by reopening
        # the connection, then confirm the modified values are intact.
        ds = SimpleDataSet(self,
            self.uri, 100, key_format=self.keyfmt, value_format=self.valuefmt)
        ds.populate()
        self.modify_load(ds, False)

        # Flush to disk, forcing reconciliation.
        self.reopen_conn()

        self.modify_confirm(ds, False)
예제 #9
0
File: test_bug008.py — Project: DINKIN/mongo
    def test_search_invisible_two(self):
        # Verify search/search_near behavior when another session's
        # uncommitted (invisible) updates exist past the visible records.
        # self.empty distinguishes fixed-length column stores, where
        # invisible appends still instantiate empty records.

        # Populate the tree and reopen the connection, forcing it to disk
        # and moving the records to an on-page format.
        ds = SimpleDataSet(self, self.uri, 100, key_format=self.key_format,
                           value_format=self.value_format)
        ds.populate()
        self.reopen_conn()

        # Add some additional visible records.
        cursor = self.session.open_cursor(self.uri, None)
        for i in range(100, 120):
            cursor[ds.key(i)] = ds.value(i)
        cursor.close()

        # Begin a transaction, and add some additional records.
        # Deliberately left uncommitted so the records stay invisible to
        # other sessions.
        self.session.begin_transaction()
        cursor = self.session.open_cursor(self.uri, None)
        for i in range(120, 140):
            cursor[ds.key(i)] = ds.value(i)

        # Open a separate session and cursor.
        s = self.conn.open_session()
        cursor = s.open_cursor(self.uri, None)

        # Search for an invisible record.
        cursor.set_key(ds.key(130))
        if self.empty:
            # Invisible updates to fixed-length column-store objects are
            # invisible to the reader, but the fact that they exist past
            # the end of the initial records causes the instantiation of
            # empty records: confirm successful return of an empty row.
            cursor.search()
            self.assertEqual(cursor.get_key(), 130)
            self.assertEqual(cursor.get_value(), 0)
        else:
            # Otherwise, we should not find any matching records.
            self.assertEqual(cursor.search(), wiredtiger.WT_NOTFOUND)

        # Search-near for an invisible record, which should succeed, returning
        # the last visible record.
        cursor.set_key(ds.key(130))
        cursor.search_near()
        if self.empty:
            # Invisible updates to fixed-length column-store objects are
            # invisible to the reader, but the fact that they exist past
            # the end of the initial records causes the instantiation of
            # empty records: confirm successful return of an empty row.
            cursor.search()
            self.assertEqual(cursor.get_key(), 130)
            self.assertEqual(cursor.get_value(), 0)
        else:
            # Otherwise, we should find the closest record for which we can see
            # the value: key 119 is the last visible (committed) record.
            self.assertEqual(cursor.get_key(), ds.key(119))
            self.assertEqual(cursor.get_value(), ds.value(119))
예제 #10
0
 def test_checkpoint_stats(self):
     # Take several named checkpoints and confirm each checkpoint's
     # statistics cursor reports the expected btree entry count.
     dataset = SimpleDataSet(self, self.uri, self.nentries,
         config=self.config, key_format=self.keyfmt)
     for ckpt_name in ('first', 'second', 'third'):
         dataset.populate()
         self.session.checkpoint('name=' + ckpt_name)
         stat_cursor = self.session.open_cursor(
             'statistics:' + self.uri, None, 'checkpoint=' + ckpt_name)
         self.assertEqual(
             stat_cursor[stat.dsrc.btree_entries][2], self.nentries)
         stat_cursor.close()
예제 #11
0
    def test_truncate_cursor_order(self):
        # A truncate whose start cursor is positioned after its stop cursor
        # must fail; after repositioning the stop cursor it must succeed.
        uri = self.type + self.name
        dataset = SimpleDataSet(self, uri, 100, key_format=self.keyfmt)
        dataset.populate()
        start = self.session.open_cursor(uri, None)
        stop = self.session.open_cursor(uri, None)

        start.set_key(dataset.key(20))
        stop.set_key(dataset.key(10))
        msg = "/the start cursor position is after the stop cursor position/"
        self.assertRaisesWithMessage(
            wiredtiger.WiredTigerError,
            lambda: self.session.truncate(None, start, stop, None), msg)
        stop.set_key(dataset.key(20))
        self.session.truncate(None, start, stop, None)
예제 #12
0
    def test_las(self):
        """Exercise lookaside eviction across updates, timestamps and truncate.

        Fix: row counts used '/' (true division), which yields floats under
        Python 3 and breaks ds.key()/check(); '//' keeps integer semantics
        and is identical for ints under Python 2.
        """
        if not wiredtiger.timestamp_build():
            self.skipTest('requires a timestamp build')

        nrows = 10000

        # Create a table without logging to ensure we get "skew_newest" lookaside eviction behavior.
        uri = "table:las02_main"
        ds = SimpleDataSet(
            self, uri, 0, key_format="S", value_format="S", config='log=(enabled=false)')
        ds.populate()

        uri2 = "table:las02_extra"
        ds2 = SimpleDataSet(self, uri2, 0, key_format="S", value_format="S")
        ds2.populate()

        # Pin oldest and stable to timestamp 1.
        self.conn.set_timestamp('oldest_timestamp=' + timestamp_str(1) +
            ',stable_timestamp=' + timestamp_str(1))

        bigvalue = "aaaaa" * 100
        self.large_updates(uri, bigvalue, ds, nrows // 3, 1)

        # Check that all updates are seen
        self.check(bigvalue, uri, nrows // 3, 1)

        # Check to see lookaside working with old timestamp
        bigvalue2 = "ddddd" * 100
        self.large_updates(uri, bigvalue2, ds, nrows, 100)

        # Check that the new updates are only seen after the update timestamp
        self.check(bigvalue, uri, nrows // 3, 1)
        self.check(bigvalue2, uri, nrows, 100)

        # Force out most of the pages by updating a different tree
        self.large_updates(uri2, bigvalue, ds2, nrows, 100)

        # Now truncate half of the records
        self.session.begin_transaction()
        end = self.session.open_cursor(uri)
        end.set_key(ds.key(nrows // 2))
        self.session.truncate(None, None, end)
        end.close()
        self.session.commit_transaction('commit_timestamp=' + timestamp_str(200))

        # Check that the truncate is visible after commit
        self.check(bigvalue2, uri, nrows // 2, 200)

        # Repeat earlier checks
        self.check(bigvalue, uri, nrows // 3, 1)
        self.check(bigvalue2, uri, nrows, 100)
예제 #13
0
    def test_truncate_cursor_notset(self):
        # Truncate requires both cursor arguments to have a key set; try an
        # unset cursor in each argument position and expect an error.
        uri = self.type + self.name
        msg = "/requires key be set/"

        dataset = SimpleDataSet(self, uri, 100)
        dataset.populate()

        unset = self.session.open_cursor(uri, None)
        keyed = self.session.open_cursor(uri, None)
        keyed.set_key(dataset.key(10))
        for start, stop in ((unset, keyed), (keyed, unset)):
            self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
                lambda: self.session.truncate(None, start, stop, None), msg)
        unset.close()
        keyed.close()
예제 #14
0
    def test_las(self):
        """Check lookaside (LAS) eviction against named snapshots, old
        readers and old timestamps."""
        # Create a small table.
        uri = "table:test_las"
        nrows = 100
        ds = SimpleDataSet(self, uri, nrows, key_format="S")
        ds.populate()
        bigvalue = "aaaaa" * 100

        # Initially load huge data
        cursor = self.session.open_cursor(uri)
        for i in range(1, 10000):
            cursor.set_key(ds.key(nrows + i))
            cursor.set_value(bigvalue)
            # assertEquals is a deprecated alias; use assertEqual.
            self.assertEqual(cursor.insert(), 0)
        cursor.close()
        self.session.checkpoint()

        # Scenario: 1
        # Check to see LAS working with old snapshot
        bigvalue1 = "bbbbb" * 100
        self.session.snapshot("name=xxx")
        # Update the values in different session after snapshot
        self.large_updates(self.session, uri, bigvalue1, ds, nrows)
        # Check to see the value after recovery
        self.durable_check(bigvalue1, uri, ds, nrows)
        self.session.snapshot("drop=(all)")

        # Scenario: 2
        # Check to see LAS working with old reader
        bigvalue2 = "ccccc" * 100
        session2 = self.conn.open_session()
        session2.begin_transaction('isolation=snapshot')
        self.large_updates(self.session, uri, bigvalue2, ds, nrows)
        # Check to see the value after recovery
        self.durable_check(bigvalue2, uri, ds, nrows)
        session2.rollback_transaction()
        session2.close()

        # Scenario: 3
        # Check to see LAS working with old timestamp
        bigvalue3 = "ddddd" * 100
        self.conn.set_timestamp('stable_timestamp=' + timestamp_str(1))
        self.large_updates(self.session, uri, bigvalue3, ds, nrows, timestamp=True)
        # Check to see data can be see only till the stable_timestamp
        self.durable_check(bigvalue2, uri, ds, nrows)

        # NOTE: 'i' is the final index of the load loop above (9999), so
        # this moves the stable timestamp past every timestamped update.
        self.conn.set_timestamp('stable_timestamp=' + timestamp_str(i + 1))
        # Check to see latest data can be seen
        self.durable_check(bigvalue3, uri, ds, nrows)
예제 #15
0
 def test_checkpoint_cursor_update(self):
     # Checkpoint cursors are read-only: insert, remove and update must
     # all fail with an "Unsupported cursor" error.
     dataset = SimpleDataSet(self, self.uri, 100, key_format=self.fmt)
     dataset.populate()
     self.session.checkpoint("name=ckpt")
     cursor = self.session.open_cursor(self.uri, None, "checkpoint=ckpt")
     cursor.set_key(dataset.key(10))
     cursor.set_value("XXX")
     msg = "/Unsupported cursor/"
     for forbidden_op in (cursor.insert, cursor.remove, cursor.update):
         self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
             lambda: forbidden_op(), msg)
     cursor.close()
예제 #16
0
 def test_smoke(self):
     # Populate, cycle the connection so data is on disk, then spot-check
     # a few keys by exact search.
     dataset = SimpleDataSet(self, self.uri, self.nentries,
         config=self.config, key_format=self.keyfmt)
     dataset.populate()
     self.reopen_conn()
     cursor = self.session.open_cursor(self.uri, None)
     for keyno in (100, 101, 9999):
         cursor.set_key(dataset.key(keyno))
         self.assertEqual(cursor.search(), 0)
         self.assertEqual(cursor.get_value(), dataset.value(keyno))
예제 #17
0
    def test_modify_delete(self):
        """A modify applied to a removed row must fail with WT_NOTFOUND."""
        ds = SimpleDataSet(self,
            self.uri, 20, key_format=self.keyfmt, value_format='u')
        ds.populate()

        # Remove a row.
        c = self.session.open_cursor(self.uri, None)
        c.set_key(ds.key(10))
        # assertEquals is a deprecated alias; use assertEqual.
        self.assertEqual(c.remove(), 0)

        # Build the modify list directly instead of append-ing to an empty
        # list.
        mods = [wiredtiger.Modify('ABCD', 3, 3)]

        # Modifying the removed row must report the row as missing.
        c.set_key(ds.key(10))
        self.assertEqual(c.modify(mods), wiredtiger.WT_NOTFOUND)
예제 #18
0
File: test_dupc.py — Project: Machyne/mongo
    def test_duplicate_cursor(self):
        uri = self.uri + self.name

        # Exercise duplicate cursors against a simple, one-file object.
        dataset = SimpleDataSet(self, uri, self.nentries, key_format=self.fmt)
        dataset.populate()
        self.iterate(uri, dataset)
        self.session.drop(uri, None)

        # Repeat against a complex, multi-file object (tables only).
        if self.uri == "table:":
            dataset = ComplexDataSet(self, uri, self.nentries,
                key_format=self.fmt)
            dataset.populate()
            self.iterate(uri, dataset)
            self.session.drop(uri, None)
예제 #19
0
    def test_reconfig_fail(self):
        """Reconfiguring compatibility must fail while a transaction is active."""
        uri = 'table:reconfig_fail'
        ds = SimpleDataSet(self, uri, 100, key_format='S')
        ds.populate()

        # Leave a transaction open with an uncommitted update so the system
        # is not quiescent.
        self.session.begin_transaction("isolation=snapshot")
        c = self.session.open_cursor(uri, None)
        c.set_key(ds.key(20))
        c.set_value("abcde")
        # assertEquals is a deprecated alias; use assertEqual.
        self.assertEqual(c.update(), 0)

        # A compatibility downgrade requires a quiescent system.
        compat_str = 'compatibility=(release="3.0.0")'
        msg = '/system must be quiescent/'
        self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
            lambda:self.conn.reconfigure(compat_str), msg)
예제 #20
0
    def test_insert_over_delete_replace(self):
        """Fill the cache until full, remove records, then verify inserts
        eventually succeed once eviction reclaims space.

        Fix: the remove loop used range(1, last_key / 4, 1), which raises
        TypeError under Python 3 ('/' is true division); use '//' and drop
        the redundant step argument.
        """
        msg = '/WT_CACHE_FULL.*/'
        ds = SimpleDataSet(self, self.uri, 10000000, key_format=self.keyfmt,
            value_format=self.valuefmt, config=self.table_config)
        # Populating 10M rows is expected to fail with a cache-full error.
        self.assertRaisesHavingMessage(wiredtiger.WiredTigerError,
            lambda:ds.populate(), msg)

        # Find the last key that actually made it into the table.
        cursor = self.session.open_cursor(self.uri, None)
        cursor.prev()
        last_key = int(cursor.get_key())

        # Now that the database contains as much data as will fit into
        # the configured cache, verify removes succeed.
        cursor = self.session.open_cursor(self.uri, None)
        for i in range(1, last_key // 4):
            cursor.set_key(ds.key(i))
            cursor.remove()

        cursor.reset()
        # Spin inserting to give eviction a chance to reclaim space
        inserted = False
        for i in range(1, 1000):
            try:
                cursor[ds.key(1)] = ds.value(1)
            except wiredtiger.WiredTigerError:
                cursor.reset()
                sleep(1)
                continue
            inserted = True
            break
        self.assertTrue(inserted)
예제 #21
0
    def test_hazard(self):
        # Stress hazard-pointer handling: pin the same page from a very
        # large number of cursors, then release them all.
        uri = "table:hazard"
        dataset = SimpleDataSet(self, uri, 1000)
        dataset.populate()

        # Open 10,000 cursors and pin a page to set a hazard pointer.
        pinned = []
        for _ in range(10000):
            cursor = self.session.open_cursor(uri, None)
            cursor.set_key(dataset.key(10))
            cursor.search()
            pinned.append(cursor)

        # Close the cursors, clearing the hazard pointer.
        while pinned:
            pinned.pop().close()
예제 #22
0
    def test_modify_smoke_reopen(self):
        # Apply modifies, force reconciliation to disk via a connection
        # reopen, then confirm the modified values are intact.
        if self.skip():
            return

        ds = SimpleDataSet(self,
                           self.uri,
                           100,
                           key_format=self.keyfmt,
                           value_format='u')
        ds.populate()
        self.modify_load(ds, False)

        # Flush to disk, forcing reconciliation.
        self.reopen_conn()

        self.modify_confirm(ds, False)
    def test_truncate_cursor_notset(self):
        # A truncating cursor must have its key set: pass the unset cursor
        # in each argument position and expect an error both times.
        uri = self.type + self.name
        msg = '/requires key be set/'

        dataset = SimpleDataSet(self, uri, 100)
        dataset.populate()

        no_key = self.session.open_cursor(uri, None)
        with_key = self.session.open_cursor(uri, None)
        with_key.set_key(dataset.key(10))
        self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
            lambda: self.session.truncate(None, no_key, with_key, None), msg)
        self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
            lambda: self.session.truncate(None, with_key, no_key, None), msg)
        no_key.close()
        with_key.close()
예제 #24
0
    def test_bug024(self):
        # Reproduce WT-6526: a stray 'WiredTiger.turtle.set' file must not
        # prevent a subsequent readonly open of the database.
        nrows = 10
        ds = SimpleDataSet(self,
                           self.uri,
                           nrows,
                           key_format="S",
                           value_format='u')
        ds.populate()

        self.conn.close()
        # Copying the file manually to recreate the issue described in WT-6526.
        shutil.copy('WiredTiger.turtle', 'WiredTiger.turtle.set')

        # Reopen the same database home in readonly mode; this must succeed
        # despite the copied turtle file.
        conn = self.wiredtiger_open(self.home, "readonly")
        conn.close()
예제 #25
0
    def test_truncate_cursor_order(self):
        # Truncation requires start <= stop: verify the out-of-order error,
        # then reposition both cursors and truncate successfully.
        uri = self.type + self.name
        dataset = SimpleDataSet(self, uri, 100, key_format=self.keyfmt)
        dataset.populate()
        lo = self.session.open_cursor(uri, None)
        hi = self.session.open_cursor(uri, None)

        lo.set_key(dataset.key(20))
        hi.set_key(dataset.key(10))
        msg = '/the start cursor position is after the stop cursor position/'
        self.assertRaisesWithMessage(
            wiredtiger.WiredTigerError,
            lambda: self.session.truncate(None, lo, hi, None), msg)
        lo.set_key(dataset.key(10))
        hi.set_key(dataset.key(20))
        self.session.truncate(None, lo, hi, None)
예제 #26
0
    def test_hazard(self):
        # Pin the same page from many cursors at once (each pin sets a
        # hazard pointer), then release them all.
        uri = "table:hazard"
        dataset = SimpleDataSet(self, uri, 1000)
        dataset.populate()

        # Open 10,000 cursors positioned on the same key.
        def pin_cursor():
            cursor = self.session.open_cursor(uri, None)
            cursor.set_key(dataset.key(10))
            cursor.search()
            return cursor
        cursors = [pin_cursor() for _ in range(10000)]

        # Close the cursors, clearing the hazard pointers.
        for cursor in cursors:
            cursor.close()
예제 #27
0
    def test_reconfig_fail(self):
        """Reconfiguring compatibility must fail with an active transaction."""
        uri = 'table:reconfig_fail'
        ds = SimpleDataSet(self, uri, 100, key_format='S')
        ds.populate()

        # Keep a transaction open so the system is not quiescent.
        self.session.begin_transaction("isolation=snapshot")
        c = self.session.open_cursor(uri, None)
        c.set_key(ds.key(20))
        c.set_value("abcde")
        # assertEquals is a deprecated alias; use assertEqual.
        self.assertEqual(c.update(), 0)

        # A compatibility downgrade requires a quiescent system.
        compat_str = 'compatibility=(release="3.0.0")'
        msg = '/system must be quiescent/'
        self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
                                     lambda: self.conn.reconfigure(compat_str),
                                     msg)
예제 #28
0
    def test_sharing(self):
        """Share a checkpointed tree read-only from a secondary database.

        Fix: corrected the typo 'secondaery' in the debug message.
        """
        # FIXME: WT-8235 Enable the test once file containing transaction ids is supported.
        self.skipTest('Sharing the checkpoint file containing transaction ids is not supported')

        ds = SimpleDataSet(self, self.uri, 10)
        ds.populate()
        ds.check()
        self.session.checkpoint()
        ds.check()

        # Create a secondary database
        dir2 = os.path.join(self.home, 'SECONDARY')
        os.mkdir(dir2)
        conn2 = self.setUpConnectionOpen(dir2)
        session2 = conn2.open_session()

        # Reference the tree from the secondary: copy the primary's metadata
        # entry (pointing back into the primary's directory), read-only.
        metac = self.session.open_cursor('metadata:')
        metac2 = session2.open_cursor('metadata:', None, 'readonly=0')
        uri2 = self.uri[:5] + '../' + self.uri[5:]
        metac2[uri2] = metac[self.uri] + ",readonly=1"

        cursor2 = session2.open_cursor(uri2)
        ds.check_cursor(cursor2)
        cursor2.close()

        newds = SimpleDataSet(self, self.uri, 10000)
        newds.populate()
        newds.check()
        self.session.checkpoint()
        newds.check()

        # Check we can still read from the last checkpoint
        cursor2 = session2.open_cursor(uri2)
        ds.check_cursor(cursor2)
        cursor2.close()

        # Bump to new checkpoint
        origmeta = metac[self.uri]
        checkpoint = re.search(r',checkpoint=\(.+?\)\)', origmeta).group(0)[1:]
        self.pr('Orig checkpoint: ' + checkpoint)
        session2.alter(uri2, checkpoint)
        self.pr('New metadata on secondary: ' + metac2[uri2])

        # Check that we can see the new data
        cursor2 = session2.open_cursor(uri2)
        newds.check_cursor(cursor2)
예제 #29
0
 def test_rollback_to_stable_with_stable_remove(self):
     # Verify rollback-to-stable restores tombstones from the history
     # store when the stable timestamp sits at/after a remove.
     nrows = 1000
     # Prepare transactions for column store table is not yet supported.
     if self.prepare and self.key_format == 'r':
         self.skipTest('Prepare transactions for column store table is not yet supported')
     # Create a table without logging.
     uri = "table:rollback_to_stable13"
     ds = SimpleDataSet(
         self, uri, 0, key_format=self.key_format, value_format="S", config='split_pct=50,log=(enabled=false)')
     ds.populate()
     # Pin oldest and stable to timestamp 10.
     self.conn.set_timestamp('oldest_timestamp=' + timestamp_str(10) +
         ',stable_timestamp=' + timestamp_str(10))
     value_a = "aaaaa" * 100
     value_b = "bbbbb" * 100
     value_c = "ccccc" * 100
     # Perform several updates.
     self.large_updates(uri, value_a, ds, nrows, self.prepare, 20)
     # Perform several updates.
     self.large_updates(uri, value_b, ds, nrows, self.prepare, 30)
     # Perform several removes.
     self.large_removes(uri, ds, nrows, self.prepare, 40)
     # Pin stable to timestamp 50 if prepare otherwise 40.
     # (Prepared commits need extra headroom beyond the remove timestamp.)
     if self.prepare:
         self.conn.set_timestamp('stable_timestamp=' + timestamp_str(50))
     else:
         self.conn.set_timestamp('stable_timestamp=' + timestamp_str(40))
     # Perform several updates and checkpoint.
     self.large_updates(uri, value_c, ds, nrows, self.prepare, 60)
     self.session.checkpoint()
     # Verify data is visible and correct.
     self.check(value_a, uri, nrows, 20)
     self.check(None, uri, 0, 40)
     self.check(value_c, uri, nrows, 60)
     self.conn.rollback_to_stable()
     # Perform several updates and checkpoint.
     self.large_updates(uri, value_c, ds, nrows, self.prepare, 60)
     self.session.checkpoint()
     # Simulate a server crash and restart.
     simulate_crash_restart(self, ".", "RESTART")
     # Check that the correct data is seen at and after the stable timestamp.
     self.check(None, uri, 0, 50)
     # Check that we restore the correct value from the history store.
     self.check(value_a, uri, nrows, 20)
     # Every removed row should have had its tombstone restored by RTS.
     stat_cursor = self.session.open_cursor('statistics:', None, None)
     restored_tombstones = stat_cursor[stat.conn.txn_rts_hs_restore_tombstones][2]
     self.assertEqual(restored_tombstones, nrows)
예제 #30
0
    def test_sharing(self):
        """Share a log-structured tree read-only from a secondary database.

        Fix: corrected the typo 'secondaery' in the debug message.
        """
        args = 'block_allocation=log-structured'
        self.verbose(3,
            'Test log-structured allocation with config: ' + args + ' count: ' + str(self.nrecs))
        ds = SimpleDataSet(self, self.uri, 10, config=args)
        ds.populate()
        ds.check()
        self.session.checkpoint()
        ds.check()

        # Create a secondary database
        dir2 = os.path.join(self.home, 'SECONDARY')
        os.mkdir(dir2)
        conn2 = self.setUpConnectionOpen(dir2)
        session2 = conn2.open_session()

        # Reference the tree from the secondary: copy the primary's metadata
        # entry (pointing back into the primary's directory), read-only.
        metac = self.session.open_cursor('metadata:')
        metac2 = session2.open_cursor('metadata:', None, 'readonly=0')
        uri2 = self.uri[:5] + '../' + self.uri[5:]
        metac2[uri2] = metac[self.uri] + ",readonly=1"

        cursor2 = session2.open_cursor(uri2)
        ds.check_cursor(cursor2)
        cursor2.close()

        newds = SimpleDataSet(self, self.uri, 10000, config=args)
        newds.populate()
        newds.check()
        self.session.checkpoint()
        newds.check()

        # Check we can still read from the last checkpoint
        cursor2 = session2.open_cursor(uri2)
        ds.check_cursor(cursor2)
        cursor2.close()

        # Bump to new checkpoint
        origmeta = metac[self.uri]
        checkpoint = re.search(r',checkpoint=\(.+?\)\)', origmeta).group(0)[1:]
        self.pr('Orig checkpoint: ' + checkpoint)
        session2.alter(uri2, checkpoint)
        self.pr('New metadata on secondary: ' + metac2[uri2])

        # Check that we can see the new data
        cursor2 = session2.open_cursor(uri2)
        newds.check_cursor(cursor2)
예제 #31
0
    def test_bug(self):
        # Verify uncommitted updates from many hanging transactions are not
        # made visible by a checkpoint, either live or after crash recovery.
        uri = "table:bug026"
        nrows = 1000
        ntxns = 500

        ds = SimpleDataSet(self,
                           uri,
                           0,
                           key_format=self.key_format,
                           value_format=self.value_format,
                           config='log=(enabled=false)')
        ds.populate()

        value_a = "aaaaa" * 100
        value_b = "bbbbb" * 100
        value_c = "ccccc" * 100

        # Write some data.
        cursor = self.session.open_cursor(uri)
        self.session.begin_transaction()
        for i in range(1, nrows + 1):
            cursor[ds.key(i)] = value_a
        self.session.commit_transaction()
        self.session.checkpoint()

        # Create a bunch of transactions and leave all but one hanging.
        # Keep sessions/cursors alive in dicts so the transactions stay open.
        sessions = {}
        cursors = {}
        for i in range(1, ntxns + 1):
            sessions[i] = self.conn.open_session()
            cursors[i] = sessions[i].open_cursor(uri)
            sessions[i].begin_transaction()
            cursors[i][ds.key(i)] = value_b
        self.session.begin_transaction()
        cursor[ds.key(nrows)] = value_c
        self.session.commit_transaction()
        self.session.checkpoint()

        # Should not see value_b.
        self.check(ds, nrows, value_a, value_c)

        # Now crash.
        simulate_crash_restart(self, ".", "RESTART")

        # Should still not see value_b.
        self.check(ds, nrows, value_a, value_c)
예제 #32
0
    def test_checkpoint(self):
        # Verify fast-deleted (truncated) pages are handled correctly by
        # checkpoints, including across a connection reopen.
        uri = 'table:checkpoint24'
        nrows = 10000

        # Create a table.
        ds = SimpleDataSet(self,
                           uri,
                           0,
                           key_format=self.key_format,
                           value_format=self.value_format,
                           config=self.extraconfig)
        ds.populate()

        # FLCS ('8t') stores bytes; other formats use string values.
        if self.value_format == '8t':
            value_a = 97
        else:
            value_a = "aaaaa" * 100

        # Write some data at time 10.
        self.large_updates(uri, ds, nrows, value_a)

        # Reopen the connection (which checkpoints it) so it's all on disk and not in memory.
        self.reopen_conn()

        # Truncate half of it.
        self.do_truncate(ds, nrows // 4 + 1, nrows // 4 + nrows // 2)

        # Check stats to make sure we fast-deleted at least one page.
        # (Except for FLCS, where it's not supported and we should fast-delete zero pages.)
        stat_cursor = self.session.open_cursor('statistics:', None, None)
        fastdelete_pages = stat_cursor[stat.conn.rec_page_delete_fast][2]
        if self.value_format == '8t':
            self.assertEqual(fastdelete_pages, 0)
        else:
            self.assertGreater(fastdelete_pages, 0)

        # Take a checkpoint.
        self.do_checkpoint(self.first_checkpoint)

        if self.do_reopen:
            self.reopen_conn()

        # Read the checkpoint.
        nonzeros = nrows // 2
        zeros = nrows - nonzeros
        self.check(ds, self.first_checkpoint, nonzeros, zeros, value_a)
예제 #33
0
    def test_duplicate_cursor(self):
        # Run the duplicate-cursor iteration against a simple object and,
        # where supported, against a complex multi-file table object.
        uri = self.uri + self.name

        dataset = SimpleDataSet(self, uri, self.nentries,
            key_format=self.keyfmt, value_format=self.valfmt)
        dataset.populate()
        self.iterate(uri, dataset)
        self.dropUntilSuccess(self.session, uri)

        # Complex data sets apply only to tables and not to FLCS values.
        if self.uri == "table:" and self.valfmt != '8t':
            dataset = ComplexDataSet(self, uri, self.nentries,
                key_format=self.keyfmt, value_format=self.valfmt)
            dataset.populate()
            self.iterate(uri, dataset)
            self.dropUntilSuccess(self.session, uri)
예제 #34
0
    def test_checkpoint(self):
        # Verify that reading a checkpoint cursor fails cleanly while a
        # prepared transaction is outstanding on the same table.
        uri = 'table:checkpoint12'
        nrows = 1000

        # Create a table.
        ds = SimpleDataSet(
            self, uri, 0, key_format=self.key_format, value_format=self.value_format)
        ds.populate()

        # FLCS ('8t') stores bytes; other formats use string values.
        if self.value_format == '8t':
            value_a = 97
            value_b = 98
            value_c = 99
        else:
            value_a = "aaaaa" * 100
            value_b = "bbbbb" * 100
            value_c = "ccccc" * 100

        # Pin oldest and stable timestamps to 5.
        self.conn.set_timestamp('oldest_timestamp=' + self.timestamp_str(5) +
            ',stable_timestamp=' + self.timestamp_str(5))

        # Write some data at time 10.
        self.large_updates(uri, ds, nrows, value_a, 10)

        # Make a checkpoint.
        self.session.checkpoint()

        # Write some more data at time 20.
        self.large_updates(uri, ds, nrows, value_a, 20)

        # Open the checkpoint.
        ckpt_cursor = self.session.open_cursor(uri, None, 'checkpoint=WiredTigerCheckpoint')
        ckpt_cursor.set_key(ds.key(1))

        # Write some further data, and prepare it at time 30.
        cursor = self.session.open_cursor(uri)
        self.session.begin_transaction()
        for i in range(1, nrows // 2):
            cursor[ds.key(i)] = value_b
        self.session.prepare_transaction('prepare_timestamp=' + self.timestamp_str(30))

        # Now try reading the checkpoint: it must fail with EINVAL while the
        # prepared transaction exists.
        msg = '/Invalid argument/'
        self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
                lambda: self.operate(ckpt_cursor), msg)
예제 #35
0
    def test_checkpoint_snapshot(self):
        """Run a checkpoint concurrently with a committing transaction, then
        simulate a crash: recovery should flag the checkpoint as inconsistent
        and the table should hold only the checkpointed values."""
        self.moresetup()

        ds = SimpleDataSet(self, self.uri, 0, \
                key_format=self.key_format, value_format=self.value_format, \
                config='log=(enabled=false)'+self.extraconfig)
        ds.populate()

        # Write and verify the initial rows.
        self.large_updates(self.uri, self.valuea, ds, self.nrows, 0)
        self.check(self.valuea, self.uri, self.nrows, 0, False)

        # Insert a second batch of rows in a separate session, leaving the
        # transaction uncommitted for now.
        session1 = self.conn.open_session()
        session1.begin_transaction()
        cursor1 = session1.open_cursor(self.uri)

        for i in range(self.nrows+1, (self.nrows*2)+1):
            cursor1.set_key(ds.key(i))
            cursor1.set_value(self.valuea)
            self.assertEqual(cursor1.insert(), 0)

        # Create a checkpoint thread
        done = threading.Event()
        ckpt = checkpoint_thread(self.conn, done)
        try:
            ckpt.start()
            # Sleep for sometime so that checkpoint starts before committing last transaction.
            time.sleep(2)
            session1.commit_transaction()

        finally:
            done.set()
            ckpt.join()

        # Simulate a crash by copying to a new directory (RESTART).
        simulate_crash_restart(self, ".", "RESTART")

        # Check the table contains the last checkpointed value.
        self.check(self.valuea, self.uri, self.nrows, 0, True)

        # Recovery should have recorded an inconsistent checkpoint and may
        # have removed keys from the racing transaction.
        stat_cursor = self.session.open_cursor('statistics:', None, None)
        inconsistent_ckpt = stat_cursor[stat.conn.txn_rts_inconsistent_ckpt][2]
        keys_removed = stat_cursor[stat.conn.txn_rts_keys_removed][2]
        stat_cursor.close()

        self.assertGreater(inconsistent_ckpt, 0)
        self.assertGreaterEqual(keys_removed, 0)
예제 #36
0
    def test_wedge(self):
        """Try to really wedge the cache full, then verify all successfully
        inserted records are intact."""
        ds = SimpleDataSet(self,
                           self.uri,
                           0,
                           key_format=self.keyfmt,
                           value_format=self.valuefmt,
                           config=self.table_config)
        ds.populate()
        cursor = self.session.open_cursor(self.uri, None)

        # Keep filling until an iteration makes fewer than 100 keys of
        # progress; each fill is expected to die with WT_CACHE_FULL.
        run = 0
        start, last_key = -1000, 0
        while last_key - start > 100:
            msg = '/WT_CACHE_FULL.*/'
            start = last_key
            self.assertRaisesHavingMessage(
                wiredtiger.WiredTigerError,
                lambda: self.fill(cursor, ds, start, 10000000), msg)
            cursor.reset()
            # Give eviction a moment to make progress.
            sleep(1)

            # Figure out the last key we successfully inserted, and check all
            # previous inserts are still there.
            cursor.prev()
            last_key = int(cursor.get_key())
            run += 1
            self.pr('Finished iteration ' + str(run) + ', last_key = ' +
                    str(last_key))

        # Re-describe the dataset at its final size so ds.check() knows how
        # many rows to expect.
        self.pr('Checking ' + str(last_key) + ' keys')
        ds = SimpleDataSet(self,
                           self.uri,
                           last_key,
                           key_format=self.keyfmt,
                           value_format=self.valuefmt,
                           config=self.table_config)

        # This test is *much* slower for fixed-length column stores: we fit
        # many more records into the cache, so don't do as many passes through
        # the data.
        checks = 10 if self.valuefmt.endswith('t') else 100
        for run in range(checks):
            ds.check()
            self.pr('Finished check ' + str(run))
            sleep(1)
예제 #37
0
    def test_basic_conn_stats(self):
        """Force some block-manager writes and sanity-check the statistics
        cursor: a stat can be fetched by key and its entry is consistent."""
        # Populate an object and checkpoint it so blocks get written.
        dataset = SimpleDataSet(self, self.uri, 1000,
                                config=self.config, key_format=self.keyfmt,
                                value_format=self.valfmt)
        dataset.populate()
        self.session.checkpoint(None)

        # Fetch one statistic by key and verify the entry is self-consistent:
        # the description matches and the printable string equals the value.
        cursor = self.session.open_cursor('statistics:', None, None)
        self.check_stats(cursor, 10, 'block-manager: blocks written')
        entry = cursor[stat.conn.block_write]
        self.assertEqual(entry[0], 'block-manager: blocks written')
        self.assertEqual(self.statstr_to_int(entry[1]), entry[2])
        cursor.close()
예제 #38
0
    def test_checkpoint(self):
        """Checkpoint several generations of history, add disjoint stable
        data, re-checkpoint, and verify the history is still readable."""
        uri = 'table:checkpoint17'
        nrows = 1000

        # Create a table.
        ds = SimpleDataSet(self,
                           uri,
                           0,
                           key_format=self.key_format,
                           value_format=self.value_format,
                           config=self.extraconfig)
        ds.populate()

        # FLCS ('8t') stores small integers; other formats use 500-byte strings.
        if self.value_format == '8t':
            value_a, value_b, value_c, value_d = 97, 98, 99, 100
        else:
            value_a, value_b, value_c, value_d = [ch * 500 for ch in 'abcd']

        # Set oldest and stable to 5.
        self.conn.set_timestamp(
            'oldest_timestamp={},stable_timestamp={}'.format(
                self.timestamp_str(5), self.timestamp_str(5)))

        # Write three generations of history and checkpoint them.
        self.large_updates(ds, 1, nrows + 1, value_a, 10)
        self.large_updates(ds, 1, nrows + 1, value_b, 20)
        self.large_updates(ds, 1, nrows + 1, value_c, 30)
        self.conn.set_timestamp('stable_timestamp=' + self.timestamp_str(30))
        self.session.checkpoint()

        # Write disjoint data (all-new keys) that should not generate more history.
        self.large_updates(ds, nrows + 1, 2 * nrows + 1, value_d, 40)

        # Mark this data stable and checkpoint it under the configured name.
        self.conn.set_timestamp('stable_timestamp=' + self.timestamp_str(40))
        self.do_checkpoint(self.second_checkpoint)

        # Every generation of history must still be readable.
        for value, ts in ((value_a, 10), (value_b, 20), (value_c, 30)):
            self.check(ds, self.second_checkpoint, nrows, value, nrows, ts)
예제 #39
0
    def test_checkpoint(self):
        """Checkpoint two tables, update only the second, re-checkpoint, and
        verify the untouched table reads correctly from the new checkpoint."""
        uri1 = 'table:checkpoint16a'
        uri2 = 'table:checkpoint16b'
        nrows = 1000

        # Create two identically-configured tables.
        def make_table(uri):
            # One-line helper: build and populate a dataset for 'uri'.
            ds = SimpleDataSet(self,
                               uri,
                               0,
                               key_format=self.key_format,
                               value_format=self.value_format,
                               config=self.extraconfig)
            ds.populate()
            return ds

        ds1 = make_table(uri1)
        ds2 = make_table(uri2)

        # FLCS ('8t') stores small integers; other formats use 500-byte strings.
        if self.value_format == '8t':
            value_a, value_b = 97, 98
        else:
            value_a, value_b = "a" * 500, "b" * 500

        # Set oldest and stable to 5.
        self.conn.set_timestamp(
            'oldest_timestamp={},stable_timestamp={}'.format(
                self.timestamp_str(5), self.timestamp_str(5)))

        # Write some data to both tables and checkpoint it.
        self.large_updates(ds1, nrows, value_a)
        self.large_updates(ds2, nrows, value_a)
        self.session.checkpoint()

        # Write more data, but only to the second table.
        self.large_updates(ds2, nrows, value_b)

        # Checkpoint again under the configured name.
        self.do_checkpoint(self.second_checkpoint)

        # Table 1 must still be readable from the second checkpoint.
        self.check(ds1, self.second_checkpoint, nrows, value_a)
예제 #40
0
    def test_modify_abort(self):
        """Modify must not see uncommitted or aborted inserts.

        Insert a record inside a transaction; verify the owning transaction
        can modify it, that a concurrent transaction gets WT_NOTFOUND, and
        that after rollback the aborted insert cannot be modified at all.
        """
        ds = SimpleDataSet(self,
            self.uri, 20, key_format=self.keyfmt, value_format=self.valuefmt)
        ds.populate()

        # Start a transaction.
        self.session.begin_transaction()

        # Insert a new record.
        c = self.session.open_cursor(self.uri, None)
        c.set_key(ds.key(30))
        c.set_value(ds.value(30))
        # assertEquals is a deprecated unittest alias (removed in Python
        # 3.12); use assertEqual consistently with the rest of the test.
        self.assertEqual(c.insert(), 0)

        # Test that we can successfully modify our own record.
        mods = []
        mod = wiredtiger.Modify('ABCD', 3, 3)
        mods.append(mod)
        c.set_key(ds.key(30))
        self.assertEqual(c.modify(mods), 0)

        # Test that another transaction cannot modify our uncommitted record.
        xs = self.conn.open_session()
        xc = xs.open_cursor(self.uri, None)
        xs.begin_transaction()
        xc.set_key(ds.key(30))
        xc.set_value(ds.value(30))
        mods = []
        mod = wiredtiger.Modify('ABCD', 3, 3)
        mods.append(mod)
        xc.set_key(ds.key(30))
        self.assertEqual(xc.modify(mods), wiredtiger.WT_NOTFOUND)
        xs.rollback_transaction()

        # Rollback our transaction.
        self.session.rollback_transaction()

        # Test that we can't modify our aborted insert.
        self.session.begin_transaction()
        mods = []
        mod = wiredtiger.Modify('ABCD', 3, 3)
        mods.append(mod)
        c.set_key(ds.key(30))
        self.assertEqual(c.modify(mods), wiredtiger.WT_NOTFOUND)
        self.session.rollback_transaction()
예제 #41
0
    def test_checkpoint_hs_reads(self):
        """Overflow the cache so history is pushed to the history store,
        then make small stable updates and bound how many history-store
        reads a checkpoint performs."""
        # Create a small table.
        uri = "table:test_hs03"
        nrows = 100
        ds = SimpleDataSet(self, uri, nrows, key_format=self.key_format, value_format=self.value_format)
        ds.populate()

        # FLCS ('8t') stores small integers; other formats use 500-byte strings.
        if self.value_format == '8t':
            bigvalue = 97
            bigvalue2 = 100
        else:
            bigvalue = b"aaaaa" * 100
            bigvalue2 = b"ddddd" * 100

        # Initially load huge data.
        cursor = self.session.open_cursor(uri)
        for i in range(1, 10000):
            cursor[ds.key(nrows + i)] = bigvalue
        cursor.close()
        self.session.checkpoint()

        # Check to see the history store working with old timestamp.
        self.conn.set_timestamp('stable_timestamp=' + self.timestamp_str(1))
        hs_writes_start = self.get_stat(stat.conn.cache_write_hs)
        self.large_updates(self.session, uri, bigvalue2, ds, nrows, 10000)

        # If the test sizing is correct, the history will overflow the cache.
        self.session.checkpoint()
        hs_writes = self.get_stat(stat.conn.cache_write_hs) - hs_writes_start
        self.assertGreaterEqual(hs_writes, 0)

        for ts in range(2, 4):
            self.conn.set_timestamp('stable_timestamp=' + self.timestamp_str(ts))

            # Now just update one record and checkpoint again.
            self.large_updates(self.session, uri, bigvalue2, ds, nrows, 1)

            hs_reads_start = self.get_stat(stat.conn.cache_hs_read)
            self.session.checkpoint()
            hs_reads = self.get_stat(stat.conn.cache_hs_read) - hs_reads_start

            # Since we're dealing with eviction concurrent with checkpoints
            # and skewing is controlled by a heuristic, we can't put too tight
            # a bound on this.
            self.assertLessEqual(hs_reads, 200)
예제 #42
0
    def test_txn(self):
        """Write four timestamped generations of data to two tables and
        verify each generation reads back correctly at its timestamp."""
        nrows = 2000

        # Create the first table.
        uri_1 = "table:txn23_1"
        ds_1 = SimpleDataSet(
            self, uri_1, 0, key_format=self.key_format, value_format="S")
        ds_1.populate()

        # Create the second table.
        uri_2 = "table:txn23_2"
        ds_2 = SimpleDataSet(
            self, uri_2, 0, key_format=self.key_format, value_format="S")
        ds_2.populate()

        # Pin oldest and stable to timestamp 10.
        self.conn.set_timestamp('oldest_timestamp=' + self.timestamp_str(10) +
            ',stable_timestamp=' + self.timestamp_str(10))

        # One (value, timestamp) pair per generation, oldest timestamp first.
        generations = [("ddddd" * 100, 20),
                       ("ccccc" * 100, 30),
                       ("bbbbb" * 100, 40),
                       ("aaaaa" * 100, 50)]
        tables = ((uri_1, ds_1), (uri_2, ds_2))

        # Perform the updates on both tables.
        for uri, ds in tables:
            for value, ts in generations:
                self.large_updates(uri, value, ds, nrows, ts)

        # Verify every generation is visible and correct.
        for uri, ds in tables:
            for value, ts in generations:
                self.check(value, uri, ds, nrows, ts)
예제 #43
0
    def test_rollback_to_stable(self):
        """Interleave heavy timestamped updates across several tables with
        periodic rollback-to-stable calls, making it likely RTS runs while
        history store eviction is active."""
        nrows = 1000
        nds = 10

        # Create a few tables and populate them with some initial data.
        #
        # Our way of preventing history store operations from interfering with rollback to stable's
        # transaction check is by draining active evictions from each open dhandle.
        #
        # It's important that we have a few different tables to work with so that it's
        # representative of a real situation. But also don't make the number too high relative to
        # the number of updates or we may not have history for any of the tables.
        ds_list = list()
        for i in range(0, nds):
            uri = 'table:rollback_to_stable22_{}'.format(i)
            ds = SimpleDataSet(self,
                               uri,
                               0,
                               key_format='i',
                               value_format='S',
                               config='log=(enabled=false)')
            ds.populate()
            ds_list.append(ds)
        self.assertEqual(len(ds_list), nds)

        # 100 bytes of data are being inserted into 1000 rows.
        # This happens 1000 iterations.
        # Overall, that's 100MB of data which is guaranteed to kick start eviction.
        for i in range(1, 1000):
            # Generate a value, timestamp and table based off the index.
            value = str(i)[0] * 100
            ts = i * 10
            ds = ds_list[i % nds]

            # Perform updates.
            self.large_updates(ds.uri, value, ds, nrows, False, ts)

            # Every hundred updates, let's run rollback to stable. This is likely to happen during
            # a history store eviction at least once.
            if i % 100 == 0:
                # Put the timestamp backwards so we can rollback the updates we just did.
                stable_ts = (i - 1) * 10
                self.conn.set_timestamp('stable_timestamp=' +
                                        self.timestamp_str(stable_ts))
                self.conn.rollback_to_stable()
예제 #44
0
    def address_deleted(self):
        """Build an object whose internal pages carry address-deleted cells,
        walk them into memory, then checkpoint to free the deleted pages.

        Returns the SimpleDataSet describing the object so callers can run
        further checks.
        """
        # Create the object, force it to disk, and verify the object.
        ds = SimpleDataSet(self, self.uri, self.nentries, config=self.config)
        ds.populate()
        self.reopen_conn()
        self.session.verify(self.uri)

        # Create a new session and start a transaction to force the upcoming
        # checkpoint operation to write address-deleted cells to disk.
        tmp_session = self.conn.open_session(None)
        tmp_session.begin_transaction("isolation=snapshot")

        # Truncate a big range of rows; the leaf pages aren't in memory, so
        # leaf page references will be deleted without being read.
        start = self.session.open_cursor(self.uri, None)
        start.set_key(ds.key(10))
        end = self.session.open_cursor(self.uri, None)
        end.set_key(ds.key(self.nentries - 10))
        self.session.truncate(None, start, end, None)
        self.assertEqual(start.close(), 0)
        self.assertEqual(end.close(), 0)

        # Checkpoint, forcing address-deleted cells to be written.
        self.session.checkpoint()

        # Crash/reopen the connection and verify the object.
        self.reopen_conn()
        self.session.verify(self.uri)

        # Open a cursor and update a record (to dirty the tree, else we won't
        # mark pages with address-deleted cells dirty), then walk the tree so
        # we get a good look at all the internal pages and the address-deleted
        # cells.
        cursor = self.session.open_cursor(self.uri, None)
        cursor.set_key(ds.key(5))
        cursor.set_value("changed value")
        self.assertEqual(cursor.update(), 0)
        cursor.reset()
        for key,val in cursor:
            continue
        self.assertEqual(cursor.close(), 0)

        # Checkpoint, freeing the pages.
        self.session.checkpoint()
        return ds
예제 #45
0
    def test_checkpoint_las_reads(self):
        """Overflow the cache so history is pushed to the lookaside (LAS)
        table, then make small stable updates and bound how many lookaside
        reads a checkpoint performs."""
        if not wiredtiger.timestamp_build():
            self.skipTest('requires a timestamp build')

        # Create a small table.
        uri = "table:test_las03"
        nrows = 100
        ds = SimpleDataSet(self, uri, nrows, key_format="S", value_format='u')
        ds.populate()
        bigvalue = "aaaaa" * 100

        # Initially load huge data
        cursor = self.session.open_cursor(uri)
        for i in range(1, 10000):
            cursor[ds.key(nrows + i)] = bigvalue
        cursor.close()
        self.session.checkpoint()

        # Check to see LAS working with old timestamp
        bigvalue2 = "ddddd" * 100
        self.conn.set_timestamp('stable_timestamp=' + timestamp_str(1))
        las_writes_start = self.get_stat(stat.conn.cache_write_lookaside)
        self.large_updates(self.session, uri, bigvalue2, ds, nrows, 10000)

        # If the test sizing is correct, the history will overflow the cache
        self.session.checkpoint()
        las_writes = self.get_stat(
            stat.conn.cache_write_lookaside) - las_writes_start
        self.assertGreaterEqual(las_writes, 0)

        for ts in range(2, 4):
            self.conn.set_timestamp('stable_timestamp=' + timestamp_str(ts))

            # Now just update one record and checkpoint again
            self.large_updates(self.session, uri, bigvalue2, ds, nrows, 1)

            las_reads_start = self.get_stat(stat.conn.cache_read_lookaside)
            self.session.checkpoint()
            las_reads = self.get_stat(
                stat.conn.cache_read_lookaside) - las_reads_start

            # Since we're dealing with eviction concurrent with checkpoints
            # and skewing is controlled by a heuristic, we can't put too tight
            # a bound on this.
            self.assertLessEqual(las_reads, 100)
예제 #46
0
    def test_las(self):
        """Insert far more data than fits in cache while holding a named
        snapshot; the test hangs if the lookaside table isn't working."""
        # Create a small table.
        uri = "table:test_las"
        nrows = 100
        ds = SimpleDataSet(self, uri, nrows, key_format="S")
        ds.populate()

        # Take a snapshot.
        self.session.snapshot("name=xxx")

        # Insert a large number of records, we'll hang if the lookaside table
        # isn't doing its thing.
        c = self.session.open_cursor(uri)
        bigvalue = "abcde" * 100
        for i in range(1, 1000000):
            c.set_key(ds.key(nrows + i))
            c.set_value(bigvalue)
            # assertEquals is a deprecated unittest alias (removed in Python
            # 3.12); use assertEqual.
            self.assertEqual(c.insert(), 0)
예제 #47
0
    def test_modify_delete(self):
        """Modifying a removed record must return WT_NOTFOUND."""
        ds = SimpleDataSet(self,
                           self.uri,
                           20,
                           key_format=self.keyfmt,
                           value_format='u')
        ds.populate()

        # Remove a record.
        c = self.session.open_cursor(self.uri, None)
        c.set_key(ds.key(10))
        # assertEquals is a deprecated unittest alias (removed in Python
        # 3.12); use assertEqual consistently with the check below.
        self.assertEqual(c.remove(), 0)

        mods = []
        mod = wiredtiger.Modify('ABCD', 3, 3)
        mods.append(mod)

        # There is no base value left to patch, so the modify must fail.
        c.set_key(ds.key(10))
        self.assertEqual(c.modify(mods), wiredtiger.WT_NOTFOUND)
예제 #48
0
 def test_smoke(self):
     """Populate an object, reopen the connection to force it to disk, and
     spot-check a few keys through a search cursor."""
     dataset = SimpleDataSet(self,
                             self.uri,
                             self.nentries,
                             config=self.config,
                             key_format=self.keyfmt)
     dataset.populate()
     self.reopen_conn()
     cursor = self.session.open_cursor(self.uri, None)
     # Probe a few keys scattered through the object.
     for keynum in (100, 101, 9999):
         cursor.set_key(dataset.key(keynum))
         self.assertEqual(cursor.search(), 0)
         self.assertEqual(cursor.get_value(), dataset.value(keynum))
예제 #49
0
    def test_modify_delete(self):
        """Modifying a removed record inside a snapshot transaction must
        return WT_NOTFOUND."""
        ds = SimpleDataSet(self,
            self.uri, 20, key_format=self.keyfmt, value_format=self.valuefmt)
        ds.populate()

        # Remove a record.
        c = self.session.open_cursor(self.uri, None)
        c.set_key(ds.key(10))
        # assertEquals is a deprecated unittest alias (removed in Python
        # 3.12); use assertEqual consistently with the check below.
        self.assertEqual(c.remove(), 0)

        self.session.begin_transaction("isolation=snapshot")
        mods = []
        mod = wiredtiger.Modify('ABCD', 3, 3)
        mods.append(mod)
        mods = self.fix_mods(mods)

        # There is no base value left to patch, so the modify must fail.
        c.set_key(ds.key(10))
        self.assertEqual(c.modify(mods), wiredtiger.WT_NOTFOUND)
        self.session.commit_transaction()
예제 #50
0
파일: test_las.py 프로젝트: DINKIN/mongo
    def test_las(self):
        """Insert far more data than fits in cache while holding a named
        snapshot; the test hangs if the lookaside table isn't working."""
        # Create a small table.
        uri = "table:test_las"
        nrows = 100
        ds = SimpleDataSet(self, uri, nrows, key_format="S")
        ds.populate()

        # Take a snapshot.
        self.session.snapshot("name=xxx")

        # Insert a large number of records, we'll hang if the lookaside table
        # isn't doing its thing.
        c = self.session.open_cursor(uri)
        bigvalue = "abcde" * 100
        for i in range(1, 1000000):
            c.set_key(ds.key(nrows + i))
            c.set_value(bigvalue)
            # assertEquals is a deprecated unittest alias (removed in Python
            # 3.12); use assertEqual.
            self.assertEqual(c.insert(), 0)
예제 #51
0
    def test_modify_abort(self):
        """Modify must not see uncommitted or aborted inserts.

        Insert a record inside a transaction; verify the owning transaction
        can modify it, that another session gets WT_NOTFOUND, and that after
        rollback the aborted insert cannot be modified at all.
        """
        ds = SimpleDataSet(self,
                           self.uri,
                           20,
                           key_format=self.keyfmt,
                           value_format='u')
        ds.populate()

        # Start a transaction.
        self.session.begin_transaction()

        # Insert a new record.
        c = self.session.open_cursor(self.uri, None)
        c.set_key(ds.key(30))
        c.set_value(ds.value(30))
        # assertEquals is a deprecated unittest alias (removed in Python
        # 3.12); use assertEqual consistently with the rest of the test.
        self.assertEqual(c.insert(), 0)

        # Test that we can successfully modify our own record.
        mods = []
        mod = wiredtiger.Modify('ABCD', 3, 3)
        mods.append(mod)
        c.set_key(ds.key(30))
        self.assertEqual(c.modify(mods), 0)

        # Test that another transaction cannot modify our uncommitted record.
        # NOTE(review): this session relies on an implicit transaction (no
        # explicit begin_transaction) — confirm that is intentional.
        xs = self.conn.open_session()
        xc = xs.open_cursor(self.uri, None)
        xc.set_key(ds.key(30))
        xc.set_value(ds.value(30))
        mods = []
        mod = wiredtiger.Modify('ABCD', 3, 3)
        mods.append(mod)
        xc.set_key(ds.key(30))
        self.assertEqual(xc.modify(mods), wiredtiger.WT_NOTFOUND)

        # Rollback our transaction.
        self.session.rollback_transaction()

        # Test that we can't modify our aborted insert.
        mods = []
        mod = wiredtiger.Modify('ABCD', 3, 3)
        mods.append(mod)
        c.set_key(ds.key(30))
        self.assertEqual(c.modify(mods), wiredtiger.WT_NOTFOUND)
예제 #52
0
    def test_prepare(self):
        """Verify reads around an unresolved prepared transaction.

        Commit two generations of updates, checkpoint, then remove every row
        in a prepared (unresolved) transaction: other readers must keep
        seeing the committed values both before and after the prepared
        transaction is rolled back.
        """
        # Create a small table.
        uri = "table:test"
        nrows = 1000
        ds = SimpleDataSet(self, uri, 0, key_format="S", value_format='u')
        ds.populate()

        value_a = b"aaaaa" * 100
        value_b = b"bbbbb" * 100

        # Commit some updates along with a prepared update, which is not resolved.
        self.conn.set_timestamp('oldest_timestamp=' + timestamp_str(10))
        self.conn.set_timestamp('stable_timestamp=' + timestamp_str(10))

        # Initially load huge data
        self.updates(ds, uri, nrows, value_a, 20)
        # Add some more updates
        self.updates(ds, uri, nrows, value_b, 30)

        # Checkpoint
        self.session.checkpoint()

        # Remove the updates from a prepare session and keep it open.
        session_p = self.conn.open_session()
        cursor_p = session_p.open_cursor(uri)
        session_p.begin_transaction('isolation=snapshot')
        for i in range(1, nrows):
            cursor_p.set_key(ds.key(i))
            # assertEquals is a deprecated unittest alias (removed in Python
            # 3.12); use assertEqual.
            self.assertEqual(cursor_p.remove(), 0)
        session_p.prepare_transaction('prepare_timestamp=' + timestamp_str(40))

        # Committed values must still be visible to other readers.
        self.check(ds, uri, nrows, value_a, 20)
        self.check(ds, uri, nrows, value_b, 50)

        # Roll back the prepared session.
        session_p.rollback_transaction()

        # Visibility is unchanged after the rollback.
        self.check(ds, uri, nrows, value_a, 20)
        self.check(ds, uri, nrows, value_b, 50)

        # close sessions.
        cursor_p.close()
        session_p.close()
        self.session.close()
예제 #53
0
파일: test_lsm03.py 프로젝트: DINKIN/mongo
    def test_lsm_drop_active(self):
        """Dropping an LSM tree fails while a cursor is open, but succeeds
        while background LSM work units are active."""
        uri = 'lsm:' + self.name
        SimpleDataSet(self, uri, 10000, config=self.config).populate()

        # Force to disk
        self.reopen_conn()

        # An open cursor should cause the drop to fail.
        cursor = self.session.open_cursor(uri, None, None)
        self.assertRaises(wiredtiger.WiredTigerError,
            lambda: self.session.drop(uri, None))
        cursor.close()

        # Add enough records that a merge should be running
        SimpleDataSet(self, uri, 50000, config=self.config).populate()
        # The drop should succeed even when LSM work units are active
        self.session.drop(uri)
예제 #54
0
    def test_checkpoint_las_reads(self):
        """Overflow the cache so history is pushed to the lookaside (LAS)
        table, then make small stable updates and bound how many lookaside
        reads a checkpoint performs."""
        if not wiredtiger.timestamp_build():
            self.skipTest('requires a timestamp build')

        # Create a small table.
        uri = "table:test_las03"
        nrows = 100
        ds = SimpleDataSet(self, uri, nrows, key_format="S", value_format='u')
        ds.populate()
        bigvalue = "aaaaa" * 100

        # Initially load huge data
        cursor = self.session.open_cursor(uri)
        for i in range(1, 10000):
            cursor[ds.key(nrows + i)] = bigvalue
        cursor.close()
        self.session.checkpoint()

        # Check to see LAS working with old timestamp
        bigvalue2 = "ddddd" * 100
        self.conn.set_timestamp('stable_timestamp=' + timestamp_str(1))
        las_writes_start = self.get_stat(stat.conn.cache_write_lookaside)
        self.large_updates(self.session, uri, bigvalue2, ds, nrows, 10000)

        # If the test sizing is correct, the history will overflow the cache
        self.session.checkpoint()
        las_writes = self.get_stat(stat.conn.cache_write_lookaside) - las_writes_start
        self.assertGreaterEqual(las_writes, 0)

        for ts in range(2, 4):
            self.conn.set_timestamp('stable_timestamp=' + timestamp_str(ts))

            # Now just update one record and checkpoint again
            self.large_updates(self.session, uri, bigvalue2, ds, nrows, 1)

            las_reads_start = self.get_stat(stat.conn.cache_read_lookaside)
            self.session.checkpoint()
            las_reads = self.get_stat(stat.conn.cache_read_lookaside) - las_reads_start

            # Since we're dealing with eviction concurrent with checkpoints
            # and skewing is controlled by a heuristic, we can't put too tight
            # a bound on this.
            self.assertLessEqual(las_reads, 100)
예제 #55
0
    def test_cursor_random_reasonable_distribution(self):
        """Check that a next_random cursor returns a reasonably uniform,
        non-sequential sample of the keys."""
        uri = self.type
        num_entries = self.records
        # Bump the leaf-page-max for tables, otherwise the page might split.
        config = 'leaf_page_max=100MB' if uri == 'table:random' else ''

        ds = SimpleDataSet(self, uri, num_entries, config=config)
        ds.populate()

        # visit_counts[k] counts how often key k came back; sequential_hits
        # counts how often a key immediately followed the previous one.
        visit_counts = [0] * (num_entries + 1)
        sequential_hits = 0
        previous = None

        cursor = self.session.open_cursor(uri, None, 'next_random=true')
        for _ in range(num_entries):
            self.assertEqual(cursor.next(), 0)
            key = int(cursor.get_key())
            visit_counts[key] += 1
            if previous is not None and key == previous + 1:
                sequential_hits += 1
            previous = key

        distinct = sum(1 for count in visit_counts if count > 0)

        # Can't test for non-sequential data when there is 1 item in the table
        if num_entries > 1:
            self.assertGreater(num_entries - 1, sequential_hits,
                'cursor is returning sequential data')
        self.assertGreater(distinct, num_entries / 4,
            'next_random random distribution not adequate')
예제 #56
0
    def test_modify_many(self):
        """Apply many modifies to a single record and verify the result.

        Repeatedly patch a 5-byte window of the value with random digits,
        mirroring each patch in a local copy, then read the record back and
        compare against the locally-computed expected value.
        """
        ds = SimpleDataSet(self,
            self.uri, 20, key_format=self.keyfmt, value_format='u')
        ds.populate()

        c = self.session.open_cursor(self.uri, None)
        c.set_key(ds.key(10))
        orig = 'abcdefghijklmnopqrstuvwxyz'
        c.set_value(orig)
        # assertEquals is a deprecated unittest alias (removed in Python
        # 3.12); use assertEqual throughout.
        self.assertEqual(c.update(), 0)
        for i in range(0, 50000):
            # xrange is Python-2-only (NameError on Python 3); use range.
            new = "".join([random.choice(string.digits) for _ in range(5)])
            # Mirror the modify locally: replace bytes [10, 15) of the value.
            orig = orig[:10] + new + orig[15:]
            mods = []
            mod = wiredtiger.Modify(new, 10, 5)
            mods.append(mod)
            self.assertEqual(c.modify(mods), 0)

        c.set_key(ds.key(10))
        self.assertEqual(c.search(), 0)
        self.assertEqual(c.get_value(), orig)
예제 #57
0
    def test_checkpoint_target(self):
        """A targeted named checkpoint updates only the listed objects."""
        # Create three objects and give each a recognizable value.
        datasets = []
        for suffix in ('1', '2', '3'):
            uri = self.uri + suffix
            ds = SimpleDataSet(self, uri, 100, key_format=self.fmt)
            ds.populate()
            self.update(uri, ds, 'ORIGINAL')
            datasets.append(ds)
        ds1, ds2, ds3 = datasets

        # Checkpoint all three objects.
        self.session.checkpoint("name=checkpoint-1")

        # Update all three objects, then re-checkpoint only the first two
        # under the same checkpoint name.
        self.update(self.uri + '1', ds1, 'UPDATE')
        self.update(self.uri + '2', ds2, 'UPDATE')
        self.update(self.uri + '3', ds3, 'UPDATE')
        target = 'target=("' + self.uri + '1"' + ',"' + self.uri + '2")'
        self.session.checkpoint("name=checkpoint-1," + target)

        # The checkpoint holds the new value in the objects that were
        # re-checkpointed and the old value in the object that was skipped.
        self.check(self.uri + '1', ds1, 'UPDATE')
        self.check(self.uri + '2', ds2, 'UPDATE')
        self.check(self.uri + '3', ds3, 'ORIGINAL')
예제 #58
0
파일: test_bug008.py 프로젝트: DINKIN/mongo
    def test_search_duplicate(self):
        """Check search_near positioning around a run of duplicate values
        that is bracketed by deleted records."""
        # Only meaningful for variable-length column-store configurations
        # (colvar is 0 otherwise); skip by returning early.
        if self.colvar == 0:
                return

        # Populate the tree.
        ds = SimpleDataSet(self, self.uri, 105, key_format=self.key_format,
                           value_format=self.value_format)
        ds.populate()

        # Set up deleted records before and after a set of duplicate records,
        # and make sure search/search-near returns the correct record.
        cursor = self.session.open_cursor(self.uri, None)
        # Keys 20..99 all get the identical value; then keys 15..24 and
        # 95..105 are removed, leaving duplicates only on keys 25..94.
        for i in range(20, 100):
            cursor[ds.key(i)] = '=== IDENTICAL VALUE ==='
        for i in range(15, 25):
            cursor.set_key(ds.key(i))
            self.assertEqual(cursor.remove(), 0)
        for i in range(95, 106):
            cursor.set_key(ds.key(i))
            self.assertEqual(cursor.remove(), 0)
        cursor.close()

        # Reopen the connection, forcing it to disk and moving the records to
        # an on-page format.
        self.reopen_conn()

        # Open a cursor.
        cursor = self.session.open_cursor(self.uri, None)

        # Search-near for a record in the deleted set before the duplicate set,
        # which should succeed, returning the first record in the duplicate set.
        # (1 means the returned key is larger than the one searched for.)
        cursor.set_key(ds.key(18))
        self.assertEqual(cursor.search_near(), 1)
        self.assertEqual(cursor.get_key(), ds.key(25))

        # Search-near for a record in the deleted set after the duplicate set,
        # which should succeed, returning the last record in the duplicate set.
        # (-1 means the returned key is smaller than the one searched for.)
        cursor.set_key(ds.key(98))
        self.assertEqual(cursor.search_near(), -1)
        self.assertEqual(cursor.get_key(), ds.key(94))