Example 1
0
class test_lsm01(wttest.WiredTigerTestCase):
    """Test LSM tree creation: build an LSM configuration string from a
    matrix of scenario-selected options and populate the table."""
    K = 1024
    M = 1024 * K
    G = 1024 * M
    uri = "lsm:test_lsm01"

    chunk_size_scenarios = wtscenario.quick_scenarios('s_chunk_size',
                                                      [1 * M, 20 * M, None],
                                                      [0.6, 0.6, 0.6])
    merge_max_scenarios = wtscenario.quick_scenarios('s_merge_max',
                                                     [2, 10, 20, None], None)
    bloom_scenarios = wtscenario.quick_scenarios('s_bloom',
                                                 [True, False, None], None)
    bloom_bit_scenarios = wtscenario.quick_scenarios('s_bloom_bit_count',
                                                     [2, 8, 20, None], None)
    bloom_hash_scenarios = wtscenario.quick_scenarios('s_bloom_hash_count',
                                                      [2, 10, 20, None], None)
    # Occasionally add a lot of records, so that merges (and bloom) happen.
    record_count_scenarios = wtscenario.quick_scenarios(
        'nrecs', [10, 10000], [0.9, 0.1])

    # LSM configuration keys; each has a matching scenario attribute
    # named 's_<key>'.
    config_vars = [
        'chunk_size', 'merge_max', 'bloom', 'bloom_bit_count',
        'bloom_hash_count'
    ]

    all_scenarios = wtscenario.multiply_scenarios(
        '_', chunk_size_scenarios, merge_max_scenarios, bloom_scenarios,
        bloom_bit_scenarios, bloom_hash_scenarios, record_count_scenarios)

    scenarios = wtscenario.prune_scenarios(all_scenarios, 500)
    scenarios = wtscenario.number_scenarios(scenarios)

    # Test an LSM table with a scenario-chosen configuration.
    def test_lsm(self):
        """Build the lsm=(...) config from the scenario values and
        populate self.uri with self.nrecs records."""
        args = 'key_format=S'
        args += ',lsm=('  # Start the LSM configuration options.
        # Append each scenario-selected variable, e.g. ',chunk_size=1048576'.
        for var in self.config_vars:
            value = getattr(self, 's_' + var)
            if value is not None:
                # 'verbose' is not in config_vars for this class; the branch
                # is kept for parity with the connection-config tests.
                if var == 'verbose':
                    value = '[' + str(value) + ']'
                # Use identity checks so integer values (e.g. 1) are never
                # mistaken for booleans by == comparison.
                if value is True:
                    value = 'true'
                elif value is False:
                    value = 'false'
                args += ',' + var + '=' + str(value)
        args += ')'  # Close the LSM configuration option group
        self.verbose(
            3, 'Test LSM with config: ' + args + ' count: ' + str(self.nrecs))
        simple_populate(self, self.uri, args, self.nrecs)
Example 2
0
class test_config03(test_base03.test_base03):
    """Test wiredtiger_open with configuration strings assembled from a
    matrix of scenario values, including cases expected to fail."""
    K = 1024
    M = 1024 * K
    G = 1024 * M

    cache_size_scenarios = wtscenario.quick_scenarios(
        's_cache_size', [1 * M, 20 * M, 100 * M, 1 * G, None],
        [0.6, 0.6, 0.6, 0.6, 0.6])
    create_scenarios = wtscenario.quick_scenarios('s_create',
                                                  [True, False, None],
                                                  [1.0, 0.2, 0.3])
    error_prefix_scenarios = wtscenario.quick_scenarios(
        's_error_prefix', [None, "errpfx:"], [1.0, 0.2])
    # eviction_target < eviction_trigger -- checked later
    eviction_target_scenarios = wtscenario.quick_scenarios(
        's_eviction_target', [10, 40, 85, 98], None)
    eviction_trigger_scenarios = wtscenario.quick_scenarios(
        's_eviction_trigger', [50, 90, 95, 99], None)
    hazard_max_scenarios = wtscenario.quick_scenarios('s_hazard_max',
                                                      [15, 50, 500],
                                                      [0.4, 0.8, 0.8])
    multiprocess_scenarios = wtscenario.quick_scenarios(
        's_multiprocess', [True, False], [1.0, 1.0])
    session_max_scenarios = wtscenario.quick_scenarios('s_session_max',
                                                       [3, 30, 300], None)
    transactional_scenarios = wtscenario.quick_scenarios(
        's_transactional', [True, False], [0.2, 1.0])

    # Note: we are not using any truly verbose scenarios until we have
    # a way to redirect verbose output to a file in Python.
    #
    #verbose_scenarios = wtscenario.quick_scenarios('s_verbose',
    #    ['block', 'evict,evictserver', 'fileops,hazard,mutex',
    #     'read,readserver,reconcile,salvage','verify,write',''], None)
    verbose_scenarios = wtscenario.quick_scenarios('s_verbose', [None], None)

    # Connection configuration keys; each has a matching scenario
    # attribute named 's_<key>'.
    config_vars = [
        'cache_size', 'create', 'error_prefix', 'eviction_target',
        'eviction_trigger', 'hazard_max', 'multiprocess', 'session_max',
        'verbose'
    ]

    all_scenarios = wtscenario.multiply_scenarios(
        '_', cache_size_scenarios, create_scenarios, error_prefix_scenarios,
        eviction_target_scenarios, eviction_trigger_scenarios,
        hazard_max_scenarios, multiprocess_scenarios, session_max_scenarios,
        transactional_scenarios, verbose_scenarios)

    scenarios = wtscenario.prune_scenarios(all_scenarios, 1000)
    scenarios = wtscenario.number_scenarios(scenarios)

    #wttest.WiredTigerTestCase.printVerbose(2, 'test_config03: running ' + \
    #                      str(len(scenarios)) + ' of ' + \
    #                      str(len(all_scenarios)) + ' possible scenarios')

    def setUpConnectionOpen(self, dir):
        """Open the connection with a scenario-built config string.

        If the scenario's config is expected to fail (missing create,
        or eviction_target >= eviction_trigger), first verify that the
        open fails with the right message, then retry with a corrected
        config and return the resulting connection."""
        args = ''
        # Append each scenario-selected variable, e.g. ',session_max=30'.
        for var in self.config_vars:
            value = getattr(self, 's_' + var)
            if value is not None:
                # 'verbose' values are bracketed lists in the config syntax.
                if var == 'verbose':
                    value = '[' + str(value) + ']'
                # Identity checks so integer values are never mistaken
                # for booleans by == comparison.
                if value is True:
                    value = 'true'
                elif value is False:
                    value = 'false'
                args += ',' + var + '=' + str(value)
        args += ','
        self.pr('wiredtiger_open with args: ' + args)

        expect_fail = False
        successargs = args
        if self.s_create is False:
            # create=false on a nonexistent home directory must fail.
            successargs = successargs.replace(',create=false,', ',create,')
            expect_fail = True
            fail_msg = '/(No such file or directory|The system cannot find the file specified)/'
        elif self.s_create is None:
            # Omitting create entirely also fails on a fresh directory.
            successargs = successargs + 'create=true,'
            expect_fail = True
            fail_msg = '/(No such file or directory|The system cannot find the file specified)/'

        if self.s_eviction_target >= self.s_eviction_trigger:
            # construct args that guarantee that target < trigger
            # we know that trigger >= 1
            repfrom = ',eviction_target=' + str(self.s_eviction_target)
            repto = ',eviction_target=' + str(self.s_eviction_trigger - 1)
            successargs = successargs.replace(repfrom, repto)
            if not expect_fail:
                expect_fail = True
                fail_msg = \
                    '/eviction target must be lower than the eviction trigger/'

        if expect_fail:
            self.verbose(3, 'wiredtiger_open (should fail) with args: ' + args)
            self.assertRaisesWithMessage(
                wiredtiger.WiredTigerError,
                lambda: wiredtiger.wiredtiger_open(dir, args), fail_msg)
            args = successargs

        self.verbose(3, 'wiredtiger_open with args: ' + args)
        conn = wiredtiger.wiredtiger_open(dir, args)
        # repr() replaces the Python 2-only backtick syntax.
        self.pr(repr(conn))
        return conn
Example 3
0
class test_txn02(wttest.WiredTigerTestCase, suite_subprocess):
    """Run scenario-driven sequences of insert/remove/update operations
    inside commit/rollback transactions, and verify isolation-level
    visibility, recovery from a backup copy, and log archiving."""
    logmax = "100K"
    tablename = 'test_txn02'
    uri = 'table:' + tablename
    archive_list = ['true', 'false']
    conn_list = ['reopen', 'stay_open']
    sync_list = [
        '(method=dsync,enabled)', '(method=fsync,enabled)',
        '(method=none,enabled)', '(enabled=false)'
    ]

    # Table types: row-store, variable- and fixed-length column-store.
    types = [
        ('row',
         dict(tabletype='row', create_params='key_format=i,value_format=i')),
        ('var',
         dict(tabletype='var', create_params='key_format=r,value_format=i')),
        ('fix',
         dict(tabletype='fix', create_params='key_format=r,value_format=8t')),
    ]
    # Four operations per scenario, each an (op, key) pair.
    op1s = [
        ('i4', dict(op1=('insert', 4))),
        ('r1', dict(op1=('remove', 1))),
        ('u10', dict(op1=('update', 10))),
    ]
    op2s = [
        ('i6', dict(op2=('insert', 6))),
        ('r4', dict(op2=('remove', 4))),
        ('u4', dict(op2=('update', 4))),
    ]
    op3s = [
        ('i12', dict(op3=('insert', 12))),
        ('r4', dict(op3=('remove', 4))),
        ('u4', dict(op3=('update', 4))),
    ]
    op4s = [
        ('i14', dict(op4=('insert', 14))),
        ('r12', dict(op4=('remove', 12))),
        ('u12', dict(op4=('update', 12))),
    ]
    # Each operation's transaction either commits or rolls back.
    txn1s = [('t1c', dict(txn1='commit')), ('t1r', dict(txn1='rollback'))]
    txn2s = [('t2c', dict(txn2='commit')), ('t2r', dict(txn2='rollback'))]
    txn3s = [('t3c', dict(txn3='commit')), ('t3r', dict(txn3='rollback'))]
    txn4s = [('t4c', dict(txn4='commit')), ('t4r', dict(txn4='rollback'))]

    all_scenarios = multiply_scenarios('.', types, op1s, txn1s, op2s, txn2s,
                                       op3s, txn3s, op4s, txn4s)

    # This test generates thousands of potential scenarios.
    # For default runs, we'll use a small subset of them, for
    # long runs (when --long is set) we'll set a much larger limit.
    scenarios = number_scenarios(prune_scenarios(all_scenarios, 20, 5000))

    # Each check_log() call takes a second, so we don't call it for
    # every scenario, we'll limit it to the value of checklog_calls.
    checklog_calls = 100 if wttest.islongtest() else 2
    # Floor division keeps the modulus an integer on both Python 2 and 3.
    checklog_mod = (len(scenarios) // checklog_calls + 1)

    # Overrides WiredTigerTestCase
    def setUpConnectionOpen(self, dir):
        """Open a logged connection; pick transaction_sync and zero_fill
        deterministically from the scenario number."""
        self.home = dir
        # Cycle through the different transaction_sync values in a
        # deterministic manner.
        self.txn_sync = self.sync_list[self.scenario_number %
                                       len(self.sync_list)]
        #
        # We don't want to run zero fill with only the same settings, such
        # as archive or sync, which are an even number of options.
        #
        freq = 3
        zerofill = 'false'
        if self.scenario_number % freq == 0:
            zerofill = 'true'
        self.backup_dir = os.path.join(self.home, "WT_BACKUP")
        conn_params = \
                'log=(archive=false,enabled,file_max=%s),' % self.logmax + \
                'log=(zero_fill=%s),' % zerofill + \
                'create,error_prefix="%s: ",' % self.shortid() + \
                'transaction_sync="%s",' % self.txn_sync
        conn = wiredtiger_open(dir, conn_params)
        # repr() replaces the Python 2-only backtick syntax.
        self.pr(repr(conn))
        self.session2 = conn.open_session()
        return conn

    def check(self, session, txn_config, expected):
        """Check that a cursor (optionally started in a new transaction)
        sees exactly the expected key/value pairs."""
        if txn_config:
            session.begin_transaction(txn_config)
        c = session.open_cursor(self.uri, None)
        # Fixed-length column stores return 0 for removed rows;
        # filter those out of the observed contents.
        actual = dict((k, v) for k, v in c if v != 0)
        # Search for the expected items as well as iterating.
        # items() is portable; iteritems() is Python 2 only.
        for k, v in expected.items():
            self.assertEqual(c[k], v)
        c.close()
        if txn_config:
            session.commit_transaction()
        self.assertEqual(actual, expected)

    def check_all(self, current, committed):
        """Verify visibility at each isolation level and after recovery."""
        # Transactions see their own changes.
        # Read-uncommitted transactions see all changes.
        # Snapshot and read-committed transactions should not see changes.
        self.check(self.session, None, current)
        self.check(self.session2, "isolation=snapshot", committed)
        self.check(self.session2, "isolation=read-committed", committed)
        self.check(self.session2, "isolation=read-uncommitted", current)

        # Opening a clone of the database home directory should run
        # recovery and see the committed results.
        self.backup(self.backup_dir)
        backup_conn_params = 'log=(enabled,file_max=%s)' % self.logmax
        backup_conn = wiredtiger_open(self.backup_dir, backup_conn_params)
        try:
            self.check(backup_conn.open_session(), None, committed)
        finally:
            backup_conn.close()

    def check_log(self, committed):
        """Repeatedly recover a backup and verify log archiving behavior
        and that printlog neither fails nor advances the logs."""
        self.backup(self.backup_dir)
        #
        # Open and close the backup connection a few times to force
        # repeated recovery and log archiving even if later recoveries
        # are essentially no-ops. Confirm that the backup contains
        # the committed operations after recovery.
        #
        # Cycle through the different archive values in a
        # deterministic manner.
        self.archive = self.archive_list[self.scenario_number %
                                         len(self.archive_list)]
        backup_conn_params = \
            'log=(enabled,file_max=%s,archive=%s)' % (self.logmax, self.archive)
        orig_logs = fnmatch.filter(os.listdir(self.backup_dir), "*Log*")
        endcount = 2
        count = 0
        while count < endcount:
            backup_conn = wiredtiger_open(self.backup_dir, backup_conn_params)
            try:
                self.check(backup_conn.open_session(), None, committed)
            finally:
                # Sleep long enough so that the archive thread is guaranteed
                # to run before we close the connection.
                time.sleep(1.0)
                backup_conn.close()
            count += 1
        #
        # Check logs after repeated openings. The first log should
        # have been archived if configured. Subsequent openings would not
        # archive because no checkpoint is written due to no modifications.
        #
        cur_logs = fnmatch.filter(os.listdir(self.backup_dir), "*Log*")
        for o in orig_logs:
            if self.archive == 'true':
                self.assertEqual(False, o in cur_logs)
            else:
                self.assertEqual(True, o in cur_logs)
        #
        # Run printlog and make sure it exits with zero status.
        # Printlog should not run recovery nor advance the logs.  Make sure
        # it does not.
        #
        self.runWt(['-h', self.backup_dir, 'printlog'],
                   outfilename='printlog.out')
        pr_logs = fnmatch.filter(os.listdir(self.backup_dir), "*Log*")
        self.assertEqual(cur_logs, pr_logs)

    def test_ops(self):
        """Apply the scenario's four operations, each in its own
        transaction, checking visibility after every step."""
        self.session.create(self.uri, self.create_params)
        # Set up the table with entries for 1, 2, 10 and 11.
        # We use the overwrite config so insert can update as needed.
        c = self.session.open_cursor(self.uri, None, 'overwrite')
        c[1] = c[2] = c[10] = c[11] = 1
        current = {1: 1, 2: 1, 10: 1, 11: 1}
        committed = current.copy()

        # Deterministically alternate between reopening the connection
        # and keeping it open across operations.
        reopen = self.conn_list[self.scenario_number % len(self.conn_list)]
        ops = (self.op1, self.op2, self.op3, self.op4)
        txns = (self.txn1, self.txn2, self.txn3, self.txn4)
        for i, ot in enumerate(zip(ops, txns)):
            ok, txn = ot
            op, k = ok

            # Close and reopen the connection and cursor.
            if reopen == 'reopen':
                self.reopen_conn()
                c = self.session.open_cursor(self.uri, None, 'overwrite')

            # Alternate sync/default transactions by scenario number.
            self.session.begin_transaction(
                'sync' if self.scenario_number % 2 else None)
            # Test multiple operations per transaction by always
            # doing the same operation on key k + 1.
            k1 = k + 1
            if op in ('insert', 'update'):
                c[k] = c[k1] = i + 2
                current[k] = current[k1] = i + 2
            elif op == 'remove':
                c.set_key(k)
                c.remove()
                c.set_key(k1)
                c.remove()
                if k in current:
                    del current[k]
                if k1 in current:
                    del current[k1]

            # Check the state after each operation.
            self.check_all(current, committed)

            if txn == 'commit':
                committed = current.copy()
                self.session.commit_transaction()
            elif txn == 'rollback':
                current = committed.copy()
                self.session.rollback_transaction()

            # Check the state after each commit/rollback.
            self.check_all(current, committed)

        # check_log() is slow, we don't run it on every scenario.
        if self.scenario_number % test_txn02.checklog_mod == 0:
            self.check_log(committed)
Example 4
0
class test_txn09(wttest.WiredTigerTestCase, suite_subprocess):
    """Like test_txn02, but toggle logging on and off across connection
    reopens and verify transactional visibility stays correct."""
    tablename = 'test_txn09'
    uri = 'table:' + tablename
    # Toggled before every reopen in test_ops().
    log_enabled = True

    # Table types: row-store, variable- and fixed-length column-store.
    types = [
        ('row',
         dict(tabletype='row', create_params='key_format=i,value_format=i')),
        ('var',
         dict(tabletype='var', create_params='key_format=r,value_format=i')),
        ('fix',
         dict(tabletype='fix', create_params='key_format=r,value_format=8t')),
    ]
    # Four operations per scenario, each an (op, key) pair.
    op1s = [
        ('i4', dict(op1=('insert', 4))),
        ('r1', dict(op1=('remove', 1))),
        ('u10', dict(op1=('update', 10))),
    ]
    op2s = [
        ('i6', dict(op2=('insert', 6))),
        ('r4', dict(op2=('remove', 4))),
        ('u4', dict(op2=('update', 4))),
    ]
    op3s = [
        ('i12', dict(op3=('insert', 12))),
        ('r4', dict(op3=('remove', 4))),
        ('u4', dict(op3=('update', 4))),
    ]
    op4s = [
        ('i14', dict(op4=('insert', 14))),
        ('r12', dict(op4=('remove', 12))),
        ('u12', dict(op4=('update', 12))),
    ]
    # Each operation's transaction either commits or rolls back.
    txn1s = [('t1c', dict(txn1='commit')), ('t1r', dict(txn1='rollback'))]
    txn2s = [('t2c', dict(txn2='commit')), ('t2r', dict(txn2='rollback'))]
    txn3s = [('t3c', dict(txn3='commit')), ('t3r', dict(txn3='rollback'))]
    txn4s = [('t4c', dict(txn4='commit')), ('t4r', dict(txn4='rollback'))]

    all_scenarios = multiply_scenarios('.', types, op1s, txn1s, op2s, txn2s,
                                       op3s, txn3s, op4s, txn4s)

    # This test generates thousands of potential scenarios.
    # For default runs, we'll use a small subset of them, for
    # long runs (when --long is set) we'll set a much larger limit.
    scenarios = number_scenarios(prune_scenarios(all_scenarios, 20, 5000))

    # Overrides WiredTigerTestCase
    def setUpConnectionOpen(self, dir):
        """Open a connection with logging enabled or disabled according
        to the current value of self.log_enabled."""
        self.home = dir
        conn_params = \
                'create,error_prefix="%s: ",' % self.shortid() + \
                'log=(archive=false,enabled=%s),' % int(self.log_enabled) + \
                'transaction_sync=(enabled=false),'

        conn = wiredtiger_open(dir, conn_params)
        # repr() replaces the Python 2-only backtick syntax.
        self.pr(repr(conn))
        self.session2 = conn.open_session()
        return conn

    def check(self, session, txn_config, expected):
        """Check that a cursor (optionally started in a new transaction)
        sees exactly the expected key/value pairs."""
        if txn_config:
            session.begin_transaction(txn_config)
        c = session.open_cursor(self.uri, None)
        # Fixed-length column stores return 0 for removed rows;
        # filter those out of the observed contents.
        actual = dict((k, v) for k, v in c if v != 0)
        # Search for the expected items as well as iterating.
        # items() is portable; iteritems() is Python 2 only.
        for k, v in expected.items():
            self.assertEqual(c[k], v)
        c.close()
        if txn_config:
            session.commit_transaction()
        self.assertEqual(actual, expected)

    def check_all(self, current, committed):
        """Verify visibility of current vs. committed data at each
        isolation level."""
        # Transactions see their own changes.
        # Read-uncommitted transactions see all changes.
        # Snapshot and read-committed transactions should not see changes.
        self.check(self.session, None, current)
        self.check(self.session2, "isolation=snapshot", committed)
        self.check(self.session2, "isolation=read-committed", committed)
        self.check(self.session2, "isolation=read-uncommitted", current)

    def test_ops(self):
        """Apply the scenario's four operations, toggling logging across
        a reopen before each one, checking visibility after every step."""
        self.session.create(self.uri, self.create_params)
        # Set up the table with entries for 1, 2, 10 and 11.
        # We use the overwrite config so insert can update as needed.
        c = self.session.open_cursor(self.uri, None, 'overwrite')
        c[1] = c[2] = c[10] = c[11] = 1
        current = {1: 1, 2: 1, 10: 1, 11: 1}
        committed = current.copy()

        ops = (self.op1, self.op2, self.op3, self.op4)
        txns = (self.txn1, self.txn2, self.txn3, self.txn4)
        for i, ot in enumerate(zip(ops, txns)):
            ok, txn = ot
            op, k = ok

            # Close and reopen the connection and cursor, toggling the log
            self.log_enabled = not self.log_enabled
            self.reopen_conn()
            c = self.session.open_cursor(self.uri, None, 'overwrite')

            # Alternate sync/default transactions by scenario number.
            self.session.begin_transaction(
                'sync' if self.scenario_number % 2 else None)
            # Test multiple operations per transaction by always
            # doing the same operation on key k + 1.
            k1 = k + 1
            if op in ('insert', 'update'):
                c[k] = c[k1] = i + 2
                current[k] = current[k1] = i + 2
            elif op == 'remove':
                del c[k]
                del c[k1]
                if k in current:
                    del current[k]
                if k1 in current:
                    del current[k1]

            # Check the state after each operation.
            self.check_all(current, committed)

            if txn == 'commit':
                committed = current.copy()
                self.session.commit_transaction()
            elif txn == 'rollback':
                current = committed.copy()
                self.session.rollback_transaction()

            # Check the state after each commit/rollback.
            self.check_all(current, committed)
Example 5
0
class test_schema03(wttest.WiredTigerTestCase):
    """
    Test schemas - a 'predictably random' assortment of columns,
    column groups and indices are created within tables, and are
    created in various orders as much as the API allows.  On some runs
    the connection will be closed and reopened at a particular point
    to test that the schemas (and data) are saved and read correctly.

    The test is run multiple times, using scenarios.
    The test always follows these steps:
    - table:      create tables
    - colgroup0:  create (some) colgroups
    - index0:     create (some) indices
    - colgroup1:  create (more) colgroups
    - index1:     create (more) indices
    - populate0:  populate 1st time
    - index2:     create (more) indices
    - populate1:  populate 2nd time (more key/values)
    - check:      check key/values

    The variations represented by scenarios are:
    - how many tables to create
    - how many colgroups to create at each step (may be 0)
    - how many indices to create at each step (may be 0)
    - between each step, whether to close/reopen the connection
    """

    ################################################################
    # These three variables can be altered to help generate
    # and pare down failing test cases.

    # Set to true to get python test program fragment on stdout,
    # used by show_python() below.
    SHOW_PYTHON = False

    # When SHOW_PYTHON is set, we print an enormous amount of output.
    # To only print for a given scenario, set this
    SHOW_PYTHON_ONLY_SCEN = None  # could be e.g. [2] or [0,1]

    # To print verbosely for only a given table, set this
    SHOW_PYTHON_ONLY_TABLE = None # could be e.g. [2] or [0,1]

    ################################################################

    # Set whenever we are working with a table
    current_table = None

    # Upper bound on the number of rows inserted per table across the
    # two populate steps.
    nentries = 50

    # We need to have a large number of open files available
    # to run this test.  We probably don't need quite this many,
    # but boost it up to this limit anyway.
    OPEN_FILE_LIMIT = 1000

    # Each scenario names the step(s) after which the connection is
    # closed and reopened (s_restart), with selection probability P.
    restart_scenarios = [('table', dict(s_restart=['table'],P=0.3)),
                         ('colgroup0', dict(s_restart=['colgroup0'],P=0.3)),
                         ('index0', dict(s_restart=['index0'],P=0.3)),
                         ('colgroup1', dict(s_restart=['colgroup1'],P=0.3)),
                         ('index1', dict(s_restart=['index1'],P=0.3)),
                         ('populate0', dict(s_restart=['populate0'],P=0.3)),
                         ('index2', dict(s_restart=['index2'],P=0.3)),
                         ('populate1', dict(s_restart=['populate1'],P=0.3)),
                         ('ipop', dict(s_restart=['index0','populate0'],P=0.3)),
                         ('all', dict(s_restart=['table','colgroup0','index0','colgroup1','index1','populate0','index2','populate1'],P=1.0))]

    # How many tables, colgroups per createset, indices per createset,
    # and which extra creation arguments to use.
    ntable_scenarios = wtscenario.quick_scenarios('s_ntable',
        [1,2,5,8], [1.0,0.4,0.5,0.5])
    ncolgroup_scenarios = wtscenario.quick_scenarios('s_colgroup',
        [[1,0],[0,1],[2,4],[8,5]], [1.0,0.2,0.3,1.0])
    nindex_scenarios = wtscenario.quick_scenarios('s_index',
        [[1,1,1],[3,2,1],[5,1,3]], [1.0,0.5,1.0])
    idx_args_scenarios = wtscenario.quick_scenarios('s_index_args',
        ['', ',type=file', ',type=lsm'], [0.5, 0.3, 0.2])
    table_args_scenarios = wtscenario.quick_scenarios('s_extra_table_args',
        ['', ',type=file', ',type=lsm'], [0.5, 0.3, 0.2])

    all_scenarios = wtscenario.multiply_scenarios('_', restart_scenarios, ntable_scenarios, ncolgroup_scenarios, nindex_scenarios, idx_args_scenarios, table_args_scenarios)

    # Prune the scenarios according to the probabilities given above.
    scenarios = wtscenario.prune_scenarios(all_scenarios, 30)
    scenarios = wtscenario.number_scenarios(scenarios)

    # Note: the set can be reduced here for debugging, e.g.
    # scenarios = scenarios[40:44]
    #   or
    # scenarios = [ scenarios[0], scenarios[30], scenarios[40] ]

    #wttest.WiredTigerTestCase.printVerbose(2, 'test_schema03: running ' + \
    #                      str(len(scenarios)) + ' of ' + \
    #                      str(len(all_scenarios)) + ' possible scenarios')

    # This test requires a large number of open files.
    # Increase our resource limits before we start
    def setUp(self):
        """Raise the open-file limit before the base class starts the test."""
        if os.name == "nt":
            self.skipTest('Unix specific test skipped on Windows')

        # Remember the original limits so tearDown can restore them.
        self.origFileLimit = resource.getrlimit(resource.RLIMIT_NOFILE)
        hard_limit = self.origFileLimit[1]
        wanted = (self.OPEN_FILE_LIMIT, hard_limit)
        if wanted[0] > wanted[1]:
            self.skipTest('Require %d open files, only %d available' % wanted)
        resource.setrlimit(resource.RLIMIT_NOFILE, wanted)
        super(test_schema03, self).setUp()

    def setUpConnectionOpen(self, dir):
        """Open the test connection with a large cache and session count."""
        conn = wiredtiger.wiredtiger_open(dir,
            'create,cache_size=100m,session_max=1000')
        # repr() replaces the Python 2-only backtick syntax.
        self.pr(repr(conn))
        return conn

    def tearDown(self):
        # Restore the original RLIMIT_NOFILE raised in setUp(), after
        # the base class has finished its own cleanup.
        super(test_schema03, self).tearDown()
        resource.setrlimit(resource.RLIMIT_NOFILE, self.origFileLimit)

    def gen_formats(self, rand, n, iskey):
        """Return a random format string of n 'S'/'i' characters.

        Each position is 'S' when rand.rand_range(0, 2) yields 0, 'i'
        otherwise.  iskey is currently unused: keys and values draw
        from the same alphabet.
        """
        return ''.join(['S' if rand.rand_range(0, 2) == 0 else 'i'
                        for _ in range(0, n)])

    def show_python(self, s):
        """Print s as a python program fragment when SHOW_PYTHON is set,
        optionally filtered to specific tables/scenarios."""
        if self.SHOW_PYTHON:
            # Identity checks against None; print() is portable across
            # Python 2 and 3 (the original used the py2 print statement).
            if self.SHOW_PYTHON_ONLY_TABLE is None or self.current_table in self.SHOW_PYTHON_ONLY_TABLE:
                if self.SHOW_PYTHON_ONLY_SCEN is None or self.scenario_number in self.SHOW_PYTHON_ONLY_SCEN:
                    print('        ' + s)

    def join_names(self, sep, prefix, values):
        """Return the values joined by sep, each prefixed with prefix.

        The parameter was renamed from 'list' to avoid shadowing the
        builtin; all callers in this file pass it positionally.
        """
        return sep.join([prefix + str(val) for val in values])

    def create(self, what, tablename, whatname, columnlist, extra_args=''):
        """Create a colgroup/index ('what') on tablename and verify success."""
        name = what + ":" + tablename + ":" + whatname
        config = "columns=(" + self.join_names(',', 'c', columnlist) + ")" + extra_args
        self.show_python("self.session.create('" + name + "', '" + config + "')")
        self.assertEqual(self.session.create(name, config), 0)

    def finished_step(self, name):
        """Reopen the connection if the scenario requests a restart here.

        s_restart is a *list* of step names (see restart_scenarios), so
        membership -- not equality -- decides whether to restart; the
        original '==' compared a list to a string and was never true.
        """
        if name in self.s_restart:
            print("  # Reopening connection at step: " + name)
            self.reopen_conn()

    def test_schema(self):
        rand = suite_random.suite_random()
        if self.SHOW_PYTHON:
            print '  ################################################'
            print '  # Running scenario ' + str(self.scenario_number)

        ntables = self.s_ntable

        # Report known limitations in the test,
        # we'll work around these later, in a loop where we don't want to print.
        self.KNOWN_LIMITATION('Indices created after data population will have no entries')
        self.KNOWN_LIMITATION('Column groups created after indices confuses things')

        # Column groups are created in two different times.
        # We call these two batches 'createsets'.
        # So we don't have the exactly the same number of column groups
        # for each table, for tests that indicate >1 colgroup, we
        # increase the number of column groups for each table
        tabconfigs = []
        for i in range(0, ntables):
            self.current_table = i
            tc = tabconfig()
            tc.tablename = 't' + str(i)
            tc.tableidx = i
            tabconfigs.append(tc)

            for createset in range(0, 2):
                ncg = self.s_colgroup[createset]
                if ncg > 1:
                    ncg += i
                for k in range(0, ncg):
                    thiscg = cgconfig()
                    thiscg.createset = createset

                    # KNOWN LIMITATION: Column groups created after
                    # indices confuses things.  So for now, put all
                    # column group creation in the first set.
                    # Remove this statement when the limitation is fixed.
                    thiscg.createset = 0
                    # END KNOWN LIMITATION

                    thiscg.cgname = 'g' + str(len(tc.cglist))
                    tc.cglist.append(thiscg)

            # The same idea for indices, except that we create them in
            # three sets
            for createset in range(0, 3):
                nindex = self.s_index[createset]
                if nindex > 1:
                    nindex += i
                for k in range(0, nindex):
                    thisidx = idxconfig()
                    thisidx.createset = createset
                    thisidx.idxname = 'i' + str(len(tc.idxlist))
                    thisidx.tab = tc
                    tc.idxlist.append(thisidx)

            # We'll base the number of key/value columns
            # loosely on the number of column groups and indices.

            colgroups = len(tc.cglist)
            indices = len(tc.idxlist)
            nall = colgroups * 2 + indices
            k = rand.rand_range(1, nall)
            v = rand.rand_range(0, nall)
            # we need at least one value per column group
            if v < colgroups:
                v = colgroups
            tc.nkeys = k
            tc.nvalues = v
            tc.keyformats = self.gen_formats(rand, tc.nkeys, True)
            tc.valueformats = self.gen_formats(rand, tc.nvalues, False)

            # Simple naming (we'll test odd naming elsewhere):
            #  tables named 't0' --> 't<N>'
            #  within each table:
            #     columns named 'c0' --> 'c<N>'
            #     colgroups named 'g0' --> 'g<N>'
            #     indices named 'i0' --> 'i<N>'

            config = ""
            config += "key_format=" + tc.keyformats
            config += ",value_format=" + tc.valueformats
            config += ",columns=("
            for j in range(0, tc.nkeys + tc.nvalues):
                if j != 0:
                    config += ","
                config += "c" + str(j)
            config += "),colgroups=("
            for j in range(0, len(tc.cglist)):
                if j != 0:
                    config += ","
                config += "g" + str(j)
            config += ")"
            config += self.s_extra_table_args
            # indices are not declared here
            self.show_python("self.session.create('table:" + tc.tablename + "', '" + config + "')")
            self.session.create("table:" + tc.tablename, config)

            tc.columns_for_groups(range(tc.nkeys, tc.nkeys + tc.nvalues))
            tc.columns_for_indices(range(0, tc.nkeys + tc.nvalues))

        self.finished_step('table')

        for createset in (0, 1):
            # Create column groups in this set
            # e.g. self.session.create("colgroup:t0:g1", "columns=(c3,c4)")
            for tc in tabconfigs:
                self.current_table = tc.tableidx
                for cg in tc.cglist:
                    if cg.createset == createset:
                        self.create('colgroup', tc.tablename, cg.cgname, cg.columns)

            self.finished_step('colgroup' + str(createset))

            # Create indices in this set
            # e.g. self.session.create("index:t0:i1", "columns=(c3,c4)")
            for tc in tabconfigs:
                self.current_table = tc.tableidx
                for idx in tc.idxlist:
                    if idx.createset == createset:
                        self.create('index', tc.tablename, idx.idxname, idx.columns, self.s_index_args)

            self.finished_step('index' + str(createset))

        # populate first batch
        for tc in tabconfigs:
            self.current_table = tc.tableidx
            max = rand.rand_range(0, self.nentries)
            self.populate(tc, xrange(0, max))

        self.finished_step('populate0')

#TODO
        # Create indices in third set
#        for tc in tabconfigs:
#            for idx in tc.idxlist:
#                if idx.createset == 2:
#                    self.create('index', tc.tablename, idx.idxname, idx.columns)

        self.finished_step('index2')

        # populate second batch
        for tc in tabconfigs:
            self.current_table = tc.tableidx
            self.populate(tc, xrange(tc.nentries, self.nentries))

        self.finished_step('populate1')

        for tc in tabconfigs:
            self.current_table = tc.tableidx
            self.check_entries(tc)

    def populate(self, tc, insertrange):
        # Insert generated key/value rows for every index in insertrange
        # into the table described by tc, mirroring each API call through
        # show_python so the run can be replayed as a standalone script.
        self.show_python(
            "cursor = self.session.open_cursor('table:%s', None, None)" %
            tc.tablename)
        cursor = self.session.open_cursor('table:%s' % tc.tablename,
                                          None, None)
        for rownum in insertrange:
            keys = tc.gen_keys(rownum)
            vals = tc.gen_values(rownum)
            self.show_python("cursor.set_key(*%s)" % str(keys))
            cursor.set_key(*keys)
            self.show_python("cursor.set_value(*%s)" % str(vals))
            cursor.set_value(*vals)
            self.show_python("cursor.insert()")
            cursor.insert()
            # Track the row count so later verification knows what to expect.
            tc.nentries += 1
        self.show_python("cursor.close()")
        cursor.close()

    def check_one(self, name, cursor, key, val):
        # Position the cursor on a single key via search() and verify
        # that both the returned key and value match what was inserted.
        keyrepr = str(key)
        valrepr = str(val)
        self.show_python('# search[%s](%s)' % (name, keyrepr))
        self.show_python("cursor.set_key(*%s)" % keyrepr)
        cursor.set_key(*key)
        self.show_python("ok = cursor.search()")
        ok = cursor.search()
        self.show_python("self.assertEqual(ok, 0)")
        self.assertEqual(ok, 0)
        self.show_python("self.assertEqual(%s, cursor.get_keys())" % keyrepr)
        self.assertEqual(key, cursor.get_keys())
        self.show_python("self.assertEqual(%s, cursor.get_values())" % valrepr)
        self.assertEqual(val, cursor.get_values())

    def check_entries(self, tc):
        """
        Verify entries in the primary and index table
        related to the tabconfig: the primary must contain exactly
        tc.nentries rows, each searchable by its generated key, and each
        (populated) index must expose the same rows via its own keys.
        """
        self.show_python('# check_entries: ' + tc.tablename)
        self.show_python("cursor = self.session.open_cursor('table:" + tc.tablename + "', None, None)")
        cursor = self.session.open_cursor('table:' + tc.tablename, None, None)
        # A full scan of the primary must yield exactly the inserted rows.
        count = sum(1 for _ in cursor)
        self.assertEqual(count, tc.nentries)
        # Then each individual row must be reachable by search().
        for i in range(0, tc.nentries):
            key = tc.gen_keys(i)
            val = tc.gen_values(i)
            self.check_one(tc.tablename, cursor, key, val)
        # Consistency fix: log the close *before* performing it, matching
        # the show-then-do convention used everywhere else in this file.
        self.show_python("cursor.close()")
        cursor.close()

        # for each index, check each entry
        for idx in tc.idxlist:
            # KNOWN LIMITATION: Indices created after data population
            # will have no entries, so don't bother with them here
            # Remove these statements when the limitation is fixed.
            if idx.createset == 2:
                continue
            # END KNOWN LIMITATION

            # Although it's possible to open an index on some partial
            # list of columns, we'll keep it simple here, and always
            # use all columns.
            full_idxname = 'index:' + tc.tablename + ':' + idx.idxname
            cols = '(' + ','.join([('c' + str(x)) for x in range(tc.nkeys, tc.nvalues + tc.nkeys)]) + ')'
            self.show_python('# check_entries: ' + full_idxname + cols)
            self.show_python("cursor = self.session.open_cursor('" + full_idxname + cols + "', None, None)")
            cursor = self.session.open_cursor(full_idxname + cols, None, None)
            # The index must also contain one entry per primary row.
            count = sum(1 for _ in cursor)
            self.assertEqual(count, tc.nentries)
            for i in range(0, tc.nentries):
                # Index cursors are keyed by the index's own columns.
                key = idx.gen_keys(i)
                val = tc.gen_values(i)
                self.check_one(full_idxname, cursor, key, val)
            # Consistency fix: show-then-do ordering, as above.
            self.show_python("cursor.close()")
            cursor.close()
Ejemplo n.º 6
0
class test_sweep01(wttest.WiredTigerTestCase, suite_subprocess):
    tablebase = 'test_sweep01'
    uri = 'table:' + tablebase
    numfiles = 500
    numkv = 100
    ckpt_list = [
        ('off', dict(ckpt=0)),
        ('on', dict(ckpt=20)),
    ]

    types = [
        ('row',
         dict(tabletype='row', create_params='key_format=i,value_format=i')),
        ('var',
         dict(tabletype='var', create_params='key_format=r,value_format=i')),
        ('fix',
         dict(tabletype='fix', create_params='key_format=r,value_format=8t')),
    ]

    scenarios = number_scenarios(
        prune_scenarios(multiply_scenarios('.', types, ckpt_list), 1, 100))

    # Overrides WiredTigerTestCase
    def setUpConnectionOpen(self, dir):
        self.home = dir
        self.backup_dir = os.path.join(self.home, "WT_BACKUP")
        conn_params = \
                ',create,error_prefix="%s: ",' % self.shortid() + \
                'checkpoint=(wait=%d),' % self.ckpt + \
                'statistics=(fast),'
        # print "Creating conn at '%s' with config '%s'" % (dir, conn_params)
        try:
            conn = wiredtiger_open(dir, conn_params)
        except wiredtiger.WiredTigerError as e:
            print "Failed conn at '%s' with config '%s'" % (dir, conn_params)
        self.pr( ` conn `)
        self.session2 = conn.open_session()
        return conn

    def test_ops(self):

        #
        # Set up numfiles with numkv entries.  We just want some data in there
        # we don't care what it is.
        #
        for f in range(self.numfiles):
            uri = '%s.%d' % (self.uri, f)
            # print "Creating %s with config '%s'" % (uri, self.create_params)
            self.session.create(uri, self.create_params)
            c = self.session.open_cursor(uri, None)
            c.set_value(1)
            for k in range(self.numkv):
                c.set_key(k + 1)
                c.insert()
            c.close()
            if f % 20 == 0:
                time.sleep(1)

        stat_cursor = self.session.open_cursor('statistics:', None, None)
        close1 = stat_cursor[stat.conn.dh_conn_handles][2]
        sweep1 = stat_cursor[stat.conn.dh_conn_sweeps][2]
        sclose1 = stat_cursor[stat.conn.dh_session_handles][2]
        ssweep1 = stat_cursor[stat.conn.dh_session_sweeps][2]
        tod1 = stat_cursor[stat.conn.dh_conn_tod][2]
        ref1 = stat_cursor[stat.conn.dh_conn_ref][2]
        nfile1 = stat_cursor[stat.conn.file_open][2]
        stat_cursor.close()
        # Inactive time on a handle must be a minute or more.
        # At some point the sweep thread will run and set the time of death
        # to be a minute later.  So sleep 2 minutes to make sure it has run
        # enough times to timeout the handles.
        uri = '%s.test' % self.uri
        self.session.create(uri, self.create_params)
        #
        # Keep inserting data to keep at least one handle active and give
        # checkpoint something to do.  Make sure checkpoint doesn't adjust
        # the time of death for inactive handles.
        #
        c = self.session.open_cursor(uri, None)
        k = 0
        sleep = 0
        while sleep < 120:
            k = k + 1
            c.set_key(k)
            c.set_value(1)
            c.insert()
            sleep += 10
            time.sleep(10)
        c.close()

        stat_cursor = self.session.open_cursor('statistics:', None, None)
        close2 = stat_cursor[stat.conn.dh_conn_handles][2]
        sweep2 = stat_cursor[stat.conn.dh_conn_sweeps][2]
        sclose2 = stat_cursor[stat.conn.dh_session_handles][2]
        ssweep2 = stat_cursor[stat.conn.dh_session_sweeps][2]
        nfile2 = stat_cursor[stat.conn.file_open][2]
        tod2 = stat_cursor[stat.conn.dh_conn_tod][2]
        ref2 = stat_cursor[stat.conn.dh_conn_ref][2]
        stat_cursor.close()
        # print "checkpoint: " + str(self.ckpt)
        # print "nfile1: " + str(nfile1) + " nfile2: " + str(nfile2)
        # print "close1: " + str(close1) + " close2: " + str(close2)
        # print "sweep1: " + str(sweep1) + " sweep2: " + str(sweep2)
        # print "ssweep1: " + str(ssweep1) + " ssweep2: " + str(ssweep2)
        # print "sclose1: " + str(sclose1) + " sclose2: " + str(sclose2)
        # print "tod1: " + str(tod1) + " tod2: " + str(tod2)
        # print "ref1: " + str(ref1) + " ref2: " + str(ref2)

        #
        # The files are all closed.  Check that sweep did its work even
        # in the presence of recent checkpoints.
        #
        if (close1 >= close2):
            print "XX: close1: " + str(close1) + " close2: " + str(close2)
            print "sweep1: " + str(sweep1) + " sweep2: " + str(sweep2)
            print "sclose1: " + str(sclose1) + " sclose2: " + str(sclose2)
            print "ssweep1: " + str(ssweep1) + " ssweep2: " + str(ssweep2)
            print "tod1: " + str(tod1) + " tod2: " + str(tod2)
            print "ref1: " + str(ref1) + " ref2: " + str(ref2)
            print "nfile1: " + str(nfile1) + " nfile2: " + str(nfile2)
        self.assertEqual(close1 < close2, True)
        if (sweep1 >= sweep2):
            print "close1: " + str(close1) + " close2: " + str(close2)
            print "XX: sweep1: " + str(sweep1) + " sweep2: " + str(sweep2)
            print "sclose1: " + str(sclose1) + " sclose2: " + str(sclose2)
            print "ssweep1: " + str(ssweep1) + " ssweep2: " + str(ssweep2)
            print "tod1: " + str(tod1) + " tod2: " + str(tod2)
            print "ref1: " + str(ref1) + " ref2: " + str(ref2)
        self.assertEqual(sweep1 < sweep2, True)
        if (nfile2 >= nfile1):
            print "close1: " + str(close1) + " close2: " + str(close2)
            print "sweep1: " + str(sweep1) + " sweep2: " + str(sweep2)
            print "sclose1: " + str(sclose1) + " sclose2: " + str(sclose2)
            print "ssweep1: " + str(ssweep1) + " ssweep2: " + str(ssweep2)
            print "tod1: " + str(tod1) + " tod2: " + str(tod2)
            print "ref1: " + str(ref1) + " ref2: " + str(ref2)
            print "XX: nfile1: " + str(nfile1) + " nfile2: " + str(nfile2)
        self.assertEqual(nfile2 < nfile1, True)
        # The only files that should be left is the metadata and the active one.
        if (nfile2 != 2):
            print "close1: " + str(close1) + " close2: " + str(close2)
            print "sweep1: " + str(sweep1) + " sweep2: " + str(sweep2)
            print "sclose1: " + str(sclose1) + " sclose2: " + str(sclose2)
            print "ssweep1: " + str(ssweep1) + " ssweep2: " + str(ssweep2)
            print "tod1: " + str(tod1) + " tod2: " + str(tod2)
            print "ref1: " + str(ref1) + " ref2: " + str(ref2)
            print "XX2: nfile1: " + str(nfile1) + " nfile2: " + str(nfile2)
        self.assertEqual(nfile2 == 2, True)