Example No. 1
def prune_scenarios(scenes, count=-1):
    """
    Use the listed probabilities for pruning the list of scenarios.
    That is, scenarios with the highest probability (the value of P
    in the scenario) are chosen more often.  With a second argument,
    only the given number of scenarios is returned.  With no second
    argument, a scenario with P == .5 is returned half the time, etc.
    """
    r = suite_random.suite_random()
    result = []
    if count == -1:
        # Missing second arg - return those with P == .3 at
        # 30% probability, for example.
        for scene in scenes:
            if "P" in scene[1]:
                p = scene[1]["P"]
                if p < r.rand_float():
                    continue
            result.append(scene)
        return result
    else:
        # With second arg, we want exactly 'count' items
        # returned.  So we'll sort them all and choose
        # the top number.  Not the most efficient solution,
        # but it's easy.
        for scene in scenes:
            scene[1]["_rand"] = r.rand_float()
        scenes = sorted(scenes, key=prune_sorter_key)
        for scene in scenes:
            del scene[1]["_rand"]
        l = len(scenes)
        return scenes[l - count : l]
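
Read on its own, the probabilistic branch of prune_scenarios reduces to a few lines. The following self-contained sketch shows the same idea using the standard library's random module in place of suite_random; the scenario shape, a (name, config-dict) pair with an optional 'P' probability, is assumed from the scene[1]['P'] accesses above.

import random

# A minimal sketch of the P-based pruning idea, assuming each scenario
# is a (name, config) pair and that an optional 'P' entry is the
# probability the scenario survives a pruning pass.
def prune_by_probability(scenes, rng=random.random):
    result = []
    for name, conf in scenes:
        p = conf.get('P', 1.0)            # no P means always keep
        if p >= rng():                    # keep with probability ~P
            result.append((name, conf))
    return result

scenarios = [
    ('small_cache', {'P': 0.3}),          # kept ~30% of the time
    ('big_cache',   {'P': 0.9}),          # kept ~90% of the time
    ('default',     {}),                  # always kept
]
print(prune_by_probability(scenarios))
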
Example No. 2
    def columns_for_indices(self, collist):
        totalindices = len(self.idxlist)
        ncolumns = len(collist)
        startcol = 0

        # KNOWN LIMITATION: Indices should not include primary keys
        # Remove this statement when the limitation is fixed.
        #startcol = self.nkeys
        # END KNOWN LIMITATION.

        rand = suite_random.suite_random(ncolumns, totalindices)

        # Initially, all indices get one column from the collist.
        # Overlaps are allowed.  Then, probabilistically, add some
        # more columns.
        for idx in self.idxlist:
            prob = 1.0
            for i in range(0, ncolumns - startcol):
                if rand.rand_float() > prob:
                    break
                colno = collist[rand.rand_range(startcol, ncolumns)]
                if not any(x == colno for x in idx.columns):
                    idx.columns.append(colno)
                    if colno < self.nkeys:
                        # ASSUME: each format is 1 char
                        idx.formats += self.keyformats[colno]
                    else:
                        # ASSUME: each format is 1 char
                        idx.formats += self.valueformats[colno - self.nkeys]
                prob *= 0.5
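
The prob *= 0.5 loop gives each index a geometrically decaying chance of gaining extra columns: the first draw always happens (prob starts at 1.0), the second survives the rand_float() test half the time, and each later draw is half as likely again. The following self-contained simulation models just that decay, with random.random() standing in for rand.rand_float() and the ncolumns bound ignored; averaged over many trials it converges to about 1.64 draws per index.

import random

# Simulate how many column draws one index attempts under the
# 'prob *= 0.5' scheme above; a sketch, not suite_random itself.
def draws_attempted(rng=random.random):
    prob, n = 1.0, 0
    while True:
        if rng() > prob:          # same test as rand.rand_float() > prob
            break
        n += 1
        prob *= 0.5
    return n

trials = [draws_attempted() for _ in range(100000)]
print(sum(trials) / len(trials))  # converges to about 1.64 draws
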
Example No. 3
    def columns_for_indices(self, collist):
        totalindices = len(self.idxlist)
        ncolumns = len(collist)
        startcol = 0

        # KNOWN LIMITATION: Indices should not include primary keys
        # Remove this statement when the limitation is fixed.
        #startcol = self.nkeys
        # END KNOWN LIMITATION.

        rand = suite_random.suite_random(ncolumns, totalindices)

        # Initially, all indices get one column from the collist.
        # Overlaps are allowed.  Then, probabilistically, add some
        # more columns.
        for idx in self.idxlist:
            prob = 1.0
            for i in range(0, ncolumns - startcol):
                if rand.rand_float() > prob:
                    break
                colno = collist[rand.rand_range(startcol, ncolumns)]
                if not any(x == colno for x in idx.columns):
                    idx.columns.append(colno)
                    if colno < self.nkeys:
                        # ASSUME: each format is 1 char
                        idx.formats += self.keyformats[colno]
                    else:
                        # ASSUME: each format is 1 char
                        idx.formats += self.valueformats[colno - self.nkeys]
                prob *= 0.5
Example No. 4
def prune_scenarios(scenes, count=-1):
    """
    Use the listed probabilities for pruning the list of scenarios.
    That is, scenarios with the highest probability (the value of P
    in the scenario) are chosen more often.  With a second argument,
    only the given number of scenarios is returned.  With no second
    argument, a scenario with P == .5 is returned half the time, etc.
    """
    r = suite_random.suite_random()
    result = []
    if count == -1:
        # Missing second arg - return those with P == .3 at
        # 30% probability, for example.
        for scene in scenes:
            if 'P' in scene[1]:
                p = scene[1]['P']
                if p < r.rand_float():
                    continue
            result.append(scene)
        return result
    else:
        # With second arg, we want exactly 'count' items
        # returned.  So we'll sort them all and choose
        # the top number.  Not the most efficient solution,
        # but it's easy.
        for scene in scenes:
            scene[1]['_rand'] = r.rand_float()
        scenes = sorted(scenes, key=prune_sorter_key)
        for scene in scenes:
            del scene[1]['_rand']
        l = len(scenes)
        return scenes[l - count:l]
Example No. 5
    def columns_for_groups(self, collist):
        totalgroups = len(self.cglist)
        ncolumns = len(collist)
        rand = suite_random.suite_random(ncolumns, totalgroups)

        # Each columngroup must have at least one column, so
        # the only choice about distribution is with the
        # excess columns.
        excess = ncolumns - totalgroups
        if excess < 0:
            raise ValueError('columns_for_groups expects a column list (len=' +
                             str(ncolumns) +
                             ') at least as large as the column group list ' +
                             '(len=' + str(totalgroups) + ')')

        # Initially, all groups get one column from the collist
        for cg in self.cglist:
            (colno, collist) = extract_random_from_list(rand, collist)
            cg.columns.append(colno)

        # Then divvy up the remainder of the collist
        for i in range(0, excess):
            pos = rand.rand_range(0, totalgroups)
            cg = self.cglist[pos]
            (colno, collist) = extract_random_from_list(rand, collist)
            cg.columns.append(colno)

        # collist should now be empty
        if len(collist) != 0:
            raise AssertionError('column list did not get emptied')
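
extract_random_from_list is called above but not shown. A plausible, minimal stand-in (an assumption, not necessarily the suite's actual helper) removes one element at a random position and returns it together with the shrunken list, matching the (colno, collist) = extract_random_from_list(rand, collist) reassignment pattern:

import random

# Hypothetical sketch of the helper: pick a random position, return
# that element plus the list with the element removed.
def extract_random_from_list(rand, alist):
    pos = rand.randrange(len(alist))  # the suite likely uses rand.rand_range()
    return (alist[pos], alist[:pos] + alist[pos + 1:])

rng = random.Random(42)
cols = [3, 4, 5, 6]
(colno, cols) = extract_random_from_list(rng, cols)
print(colno, cols)                    # e.g. 3 [4, 5, 6]
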
Example No. 6
    def columns_for_groups(self, collist):
        totalgroups = len(self.cglist)
        ncolumns = len(collist)
        rand = suite_random.suite_random(ncolumns, totalgroups)

        # Each columngroup must have at least one column, so
        # the only choice about distribution is with the
        # excess columns.
        excess = ncolumns - totalgroups
        if excess < 0:
            raise ValueError('columns_for_groups expects a column list (len=' +
                             str(ncolumns) +
                             ') at least as large as the column group list ' +
                             '(len=' + str(totalgroups) + ')')

        # Initially, all groups get one column from the collist
        for cg in self.cglist:
            (colno, collist) = extract_random_from_list(rand, collist)
            cg.columns.append(colno)

        # Then divvy up the remainder of the collist
        for i in range(0, excess):
            pos = rand.rand_range(0, totalgroups)
            cg = self.cglist[pos]
            (colno, collist) = extract_random_from_list(rand, collist)
            cg.columns.append(colno)

        # collist should now be empty
        if len(collist) != 0:
            raise AssertionError('column list did not get emptied')
Example No. 7
    def test_cursor_sweep(self):
        rand = suite_random()

        # Create a large number (self.nuris) of uris, and for each one,
        # create some number (self.deep) of cached cursors.
        urimap = {}
        for i in xrange(0, self.nuris):
            uri = self.uriname(i)
            cursors = []
            self.session.create(uri, None)
            for j in xrange(0, self.deep):
                cursors.append(self.session.open_cursor(uri, None))
            for c in cursors:
                c.close()

            # Each map entry has a list of the open cursors.
            # We start with none.
            urimap[uri] = []

        # At this point, we'll randomly open/close lots of cursors, keeping
        # track of how many of each. As long as we don't have more than [deep]
        # cursors open for each uri, we should always be taking them from
        # the set of cached cursors.
        self.cursor_stats_init()
        begin_stats = self.caching_stats()
        #self.tty('stats before = ' + str(begin_stats))

        opencount = 0
        closecount = 0

        while opencount < self.nopens:
            uri = self.uriname(rand.rand_range(0, self.nuris))
            cursors = urimap[uri]
            ncursors = len(cursors)

            # Keep the range of open cursors between 0 and [deep],
            # with some random fluctuation
            if ncursors == 0:
                do_open = True
            elif ncursors == self.deep:
                do_open = False
            else:
                do_open = (rand.rand_range(0, 2) == 0)
            if do_open:
                cursors.append(self.session.open_cursor(uri, None))
                opencount += 1
            else:
                i = rand.rand_range(0, ncursors)
                cursors.pop(i).close()
                closecount += 1

        end_stats = self.caching_stats()

        #self.tty('opens = ' + str(opencount) + ', closes = ' + str(closecount))
        #self.tty('stats after = ' + str(end_stats))
        self.assertEquals(end_stats[0] - begin_stats[0], closecount)
        self.assertEquals(end_stats[1] - begin_stats[1], opencount)
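
The open/close policy in the loop above is a bounded random walk: always open at zero cursors, always close at deep, otherwise flip a coin. That keeps each uri's open count inside [0, deep], so every reopen should be served from the cursor cache. A self-contained sketch of just the walk and its invariant:

import random

# Sketch of the open/close random walk used above, with the
# 0 <= ncursors <= deep invariant the test relies on made explicit.
def sweep_walk(deep, steps, rng=random.Random(1)):
    ncursors, opens, closes = 0, 0, 0
    for _ in range(steps):
        if ncursors == 0:
            do_open = True
        elif ncursors == deep:
            do_open = False
        else:
            do_open = (rng.randrange(2) == 0)
        if do_open:
            ncursors += 1
            opens += 1
        else:
            ncursors -= 1
            closes += 1
        assert 0 <= ncursors <= deep
    return (opens, closes)

print(sweep_walk(deep=3, steps=1000))
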
Example No. 8
def prune_scenarios(scenes, default_count=-1, long_count=-1):
    """
    Use the listed probabilities for pruning the list of scenarios.
    That is, scenarios with the highest probability (the value of P
    in the scenario) are chosen more often.  With just one argument,
    a scenario with P == .5 is returned half the time, etc.  A second
    argument limits the number of scenarios.  When a third argument is
    present, it is a separate limit for a long run.
    """
    global _is_long_run
    r = suite_random.suite_random()
    result = []
    if default_count == -1:
        # Missing second arg - return those with P == .3 at
        # 30% probability, for example.
        for scene in scenes:
            if 'P' in scene[1]:
                p = scene[1]['P']
                if p < r.rand_float():
                    continue
            result.append(scene)
        return result
    else:
        # With at least a second arg present, we'll want a specific count
        # of items returned.  So we'll sort them all and choose
        # the top number.  Not the most efficient solution,
        # but it's easy.
        if _is_long_run and long_count != -1:
            count = long_count
        else:
            count = default_count

        l = len(scenes)
        if l <= count:
            return scenes
        if count == 0:
            return []
        order = 0
        for scene in scenes:
            scene[1]['_rand'] = r.rand_float()
            scene[1]['_order'] = order
            order += 1
        scenes = sorted(scenes,
                        key=prune_sorter_key)  # random sort driven by P
        scenes = scenes[l - count:l]  # truncate to get best
        scenes = sorted(scenes, key=prune_resort_key)  # original order
        for scene in scenes:
            del scene[1]['_rand']
            del scene[1]['_order']
        return check_scenarios(scenes)
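
prune_sorter_key and prune_resort_key are referenced above but not shown. Plausible one-liners consistent with the '_rand' and '_order' bookkeeping (assumptions, not necessarily the suite's exact code) weight the random draw by P, so high-P scenarios tend to land in the kept slice, then restore the original ordering:

# Hypothetical sketches of the two sort keys used above.
def prune_sorter_key(scene):
    # Weight the random draw by P: higher P tends to sort later, into
    # the scenes[l - count:l] slice that survives truncation.
    return scene[1]['_rand'] * scene[1].get('P', 1.0)

def prune_resort_key(scene):
    # Undo the shuffle: sort the survivors back into original order.
    return scene[1]['_order']
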
Example No. 9
def prune_scenarios(scenes, default_count = -1, long_count = -1):
    """
    Use the listed probabilities for pruning the list of scenarios.
    That is, scenarios with the highest probability (the value of P
    in the scenario) are chosen more often.  With just one argument,
    a scenario with P == .5 is returned half the time, etc.  A second
    argument limits the number of scenarios.  When a third argument is
    present, it is a separate limit for a long run.
    """
    global _is_long_run
    r = suite_random.suite_random()
    result = []
    if default_count == -1:
        # Missing second arg - return those with P == .3 at
        # 30% probability, for example.
        for scene in scenes:
            if 'P' in scene[1]:
                p = scene[1]['P']
                if p < r.rand_float():
                    continue
            result.append(scene)
        return result
    else:
        # With at least a second arg present, we'll want a specific count
        # of items returned.  So we'll sort them all and choose
        # the top number.  Not the most efficient solution,
        # but it's easy.
        if _is_long_run and long_count != -1:
            count = long_count
        else:
            count = default_count

        l = len(scenes)
        if l <= count:
            return scenes
        if count == 0:
            return []
        order = 0
        for scene in scenes:
            scene[1]['_rand'] = r.rand_float()
            scene[1]['_order'] = order
            order += 1
        scenes = sorted(scenes, key=prune_sorter_key) # random sort driven by P
        scenes = scenes[l-count:l]                    # truncate to get best
        scenes = sorted(scenes, key=prune_resort_key) # original order
        for scene in scenes:
            del scene[1]['_rand']
            del scene[1]['_order']
        return check_scenarios(scenes)
Example No. 10
    def test_cursor_sweep(self):
        rand = suite_random()

        uri_map = self.create_uri_map(self.uri)
        self.cursor_stats_init()
        begin_stats = self.caching_stats()
        begin_sweep_stats = self.sweep_stats()
        #self.tty('stats before = ' + str(begin_stats))
        #self.tty('sweep stats before = ' + str(begin_sweep_stats))

        for round_cnt in range(0, self.rounds):
            if round_cnt % 2 == 1:
                # Close cursors in half of the range, and don't
                # use them during this round, so they will be
                # closed by sweep.
                half = self.nuris / 2
                self.close_uris(uri_map, xrange(0, half))
                bottom_range = half
                # Let the dhandle sweep run and find the closed cursors.
                time.sleep(3.0)
            else:
                bottom_range = 0

            i = 0
            while self.opencount < (1 + round_cnt) * self.opens_per_round:
                i += 1
                if i % 100 == 0:
                    time.sleep(0.0)  # Let other threads run
                self.open_or_close(uri_map, rand, bottom_range, self.nuris)

        end_stats = self.caching_stats()
        end_sweep_stats = self.sweep_stats()

        #self.tty('opens = ' + str(self.opencount) + \
        #         ', closes = ' + str(self.closecount))
        #self.tty('stats after = ' + str(end_stats))
        #self.tty('sweep stats after = ' + str(end_sweep_stats))
        self.assertEquals(end_stats[0] - begin_stats[0], self.closecount)
        swept = end_sweep_stats[3] - begin_sweep_stats[3]
        min_swept = self.deep * self.nuris
        self.assertGreaterEqual(swept, min_swept)

        # No strict equality test for the reopen stats. When we've swept
        # some closed cursors, we'll have fewer reopens. It's different
        # by approximately the number of swept cursors, but it's less
        # predictable.
        self.assertGreater(end_stats[1] - begin_stats[1], 0)
Example No. 11
    def test_cursor_big(self):
        rand = suite_random()
        uri_map = self.create_uri_map(self.uri)
        self.cursor_stats_init()
        begin_stats = self.caching_stats()
        #self.tty('stats before = ' + str(begin_stats))

        # At this point, we'll randomly open/close lots of cursors, keeping
        # track of how many of each. As long as we don't have more than [deep]
        # cursors open for each uri, we should always be taking them from
        # the set of cached cursors.
        while self.opencount < self.nopens:
            self.open_or_close(uri_map, rand, 0, self.nuris)

        end_stats = self.caching_stats()

        #self.tty('opens = ' + str(self.opencount) + \
        #         ', closes = ' + str(self.closecount))
        #self.tty('stats after = ' + str(end_stats))
        self.assertEquals(end_stats[0] - begin_stats[0], self.closecount)
        self.assertEquals(end_stats[1] - begin_stats[1], self.opencount)
Example No. 12
    def test_cursor_big(self):
        rand = suite_random()
        uri_map = self.create_uri_map(self.uri)
        self.cursor_stats_init()
        begin_stats = self.caching_stats()
        #self.tty('stats before = ' + str(begin_stats))

        # At this point, we'll randomly open/close lots of cursors, keeping
        # track of how many of each. As long as we don't have more than [deep]
        # cursors open for each uri, we should always be taking them from
        # the set of cached cursors.
        while self.opencount < self.nopens:
            self.open_or_close(uri_map, rand, 0, self.nuris)

        end_stats = self.caching_stats()

        #self.tty('opens = ' + str(self.opencount) + \
        #         ', closes = ' + str(self.closecount))
        #self.tty('stats after = ' + str(end_stats))
        self.assertEquals(end_stats[0] - begin_stats[0], self.closecount)
        self.assertEquals(end_stats[1] - begin_stats[1], self.opencount)
Example No. 13
    def test_cursor_big(self):
        rand = suite_random()
        uri_map = self.create_uri_map(self.uri)
        self.cursor_stats_init()
        begin_stats = self.caching_stats()
        #self.tty('stats before = ' + str(begin_stats))

        # At this point, we'll randomly open/close lots of cursors, keeping
        # track of how many of each. As long as we don't have more than [deep]
        # cursors open for each uri, we should always be taking them from
        # the set of cached cursors.
        while self.opencount < self.nopens:
            self.open_or_close(uri_map, rand, 0, self.nuris)

        end_stats = self.caching_stats()

        #self.tty('opens = ' + str(self.opencount) + \
        #         ', closes = ' + str(self.closecount))
        #self.tty('stats after = ' + str(end_stats))

        # Stats won't be exact because they may include operations triggered by other
        # threads (e.g., eviction) opening and closing history store cursors.
        self.assertGreaterEqual(end_stats[0] - begin_stats[0], self.closecount)
        self.assertGreaterEqual(end_stats[1] - begin_stats[1], self.opencount)
Example No. 14
    def test_cursor_sweep(self):
        rand = suite_random()

        uri_map = self.create_uri_map(self.uri)
        self.cursor_stats_init()
        begin_stats = self.caching_stats()
        begin_sweep_stats = self.sweep_stats()
        #self.tty('stats before = ' + str(begin_stats))
        #self.tty('sweep stats before = ' + str(begin_sweep_stats))
        potential_dead = 0

        for round_cnt in range(0, self.rounds):
            if round_cnt % 2 == 1:
                # Close cursors in half of the range, and don't
                # use them during this round, so they will be
                # closed by sweep.
                half = self.nuris // 2
                potential_dead += self.close_uris(uri_map, list(range(0,
                                                                      half)))
                bottom_range = half
                # Let the dhandle sweep run and find the closed cursors.
                time.sleep(3.0)
            else:
                bottom_range = 0

            # The session cursor sweep runs at most once a second and
            # traverses a fraction of the cached cursors.  We'll run for
            # ten seconds with pauses to make sure we see sweep activity.
            pause_point = self.opens_per_round // 100
            if pause_point == 0:
                pause_point = 1
            pause_duration = 0.1

            i = 0
            while self.opencount < (1 + round_cnt) * self.opens_per_round:
                i += 1
                if i % pause_point == 0:
                    time.sleep(pause_duration)  # over time, let sweep run
                self.open_or_close(uri_map, rand, bottom_range, self.nuris)

        end_stats = self.caching_stats()
        end_sweep_stats = self.sweep_stats()

        #self.tty('opens = ' + str(self.opencount) + \
        #         ', closes = ' + str(self.closecount))
        #self.tty('stats after = ' + str(end_stats))
        #self.tty('sweep stats after = ' + str(end_sweep_stats))
        self.assertEquals(end_stats[0] - begin_stats[0], self.closecount)
        swept = end_sweep_stats[3] - begin_sweep_stats[3]

        # Although this is subject to tuning parameters, we know that
        # in an active session, we'll sweep through a minimum of 1% of
        # the cached cursors per second.  We've set this test to run
        # 5 rounds. In 2 of the 5 rounds (sandwiched between the others),
        # some of the uris are allowed to close. So during the 'closing rounds'
        # we'll sweep a minimum of 20% of the uri space, and in the other
        # rounds we'll be referencing the closed uris again.

        # We'll pass the test if we see at least 20% of the 'potentially
        # dead' cursors swept.  There may be more, since the 1% per second
        # is a minimum.
        min_swept = 2 * potential_dead // 10
        self.assertGreaterEqual(swept, min_swept)

        # No strict equality test for the reopen stats. When we've swept
        # some closed cursors, we'll have fewer reopens. It's different
        # by approximately the number of swept cursors, but it's less
        # predictable.
        self.assertGreater(end_stats[1] - begin_stats[1], 0)
Example No. 15
    def reinit_joinconfig(self):
        self.rand = suite_random.suite_random(self.seed)
        self.seed += 1
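
Bumping self.seed on every call keeps test runs reproducible while still varying the random sequence between successive reinitializations. The same pattern with the standard library, assuming suite_random seeds deterministically the way random.Random does:

import random

# Sketch: deterministic but distinct generators per call, mirroring
# the self.seed += 1 pattern above.
class JoinConfig(object):
    def __init__(self, seed=0):
        self.seed = seed

    def reinit(self):
        self.rand = random.Random(self.seed)  # reproducible for a given seed
        self.seed += 1                        # the next reinit differs

jc = JoinConfig()
jc.reinit(); a = jc.rand.random()
jc.reinit(); b = jc.rand.random()
print(a != b)   # different sequences per reinit, identical across reruns
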
Example No. 16
class test_timestamp22(wttest.WiredTigerTestCase):
    conn_config = 'cache_size=50MB'

    # Keep the number of rows low, as each additional row does
    # not test any new code paths.
    nrows = 3
    uri = "table:test_timestamp22"
    rand = suite_random.suite_random()
    oldest_ts = 0
    stable_ts = 0
    last_durable = 0
    SUCCESS = 'success'
    FAILURE = 'failure'

    format_values = [
        ('integer-row', dict(key_format='i', value_format='S')),
        ('column', dict(key_format='r', value_format='S')),
        ('column-fix', dict(key_format='r', value_format='8t')),
    ]
    scenarios = make_scenarios(format_values)

    # Control execution of an operation, looking for exceptions and error messages.
    # Usage:
    #  with self.expect(self.FAILURE, 'some operation'):
    #     some_operation()  # In this case, we expect it will fail
    #
    # "expected" argument can be self.SUCCESS, self.FAILURE, True, False, for convenience.
    @contextmanager
    def expect(self, expected, message):
        if expected == True:
            expected = self.SUCCESS
        elif expected == False:
            expected = self.FAILURE

        self.pr('TRYING: ' + message + ', expect ' + expected)
        got = None
        # If there are stray error messages from a previous operation,
        # let's find out now.  It can be confusing if we do something illegal
        # here and we have multiple messages to sort out.
        self.checkStderr()

        # 'yield' runs the subordinate operation; we'll catch any resulting exceptions.
        try:
            if expected == self.FAILURE:
                # Soak up any error messages that happen as a result of the failure.
                with self.expectedStderrPattern(r'^.*$',
                                                re_flags=re.MULTILINE):
                    yield
            else:
                yield
            got = self.SUCCESS
        except:
            got = self.FAILURE
            self.cleanStderr()

        message += ' got ' + got

        # If we're about to assert, show some extra info
        if expected != got:
            message += ': ERROR expected ' + expected
            self.checkStderr()
        self.pr(message)
        self.assertEquals(expected, got)

    # Create a predictable value based on the iteration number and timestamp.
    def gen_value(self, iternum, ts):
        if self.value_format == '8t':
            return (iternum * 7 + ts * 13) % 255
        return str(iternum) + '_' + str(ts) + '_' + 'x' * 1000

    # Given a number representing an "approximate timestamp", generate a timestamp
    # that is near that number, either plus or minus.
    def gen_ts(self, approx_ts):
        # a number between -10 and 10:
        n = self.rand.rand32() % 21 - 10
        ts = approx_ts + n
        if ts <= 0:
            ts = 1
        return ts

    # Asks whether we should do an illegal operation now.  Returns True 5% of the time.
    def do_illegal(self):
        return self.rand.rand32() % 20 == 0

    def report(self, func, arg=None):
        self.pr('DOING: ' + func + ('' if arg == None else '(' + arg + ')'))

    # Insert a set of rows, each insert in its own transaction, with the
    # given timestamps.
    def updates(self, value, ds, do_prepare, commit_ts, durable_ts, read_ts):

        # Generate a configuration for a timestamp_transaction() call.
        # Returns: 1) whether it expects success, 2) the config string, 3) the new running commit timestamp
        def timestamp_txn_config(commit_ts, running_commit_ts):
            ok = True
            config = ''
            this_commit_ts = -1
            if self.do_illegal():
                # Setting the durable timestamp must come after the prepare call.
                config += ',durable_timestamp=' + self.timestamp_str(
                    self.gen_ts(commit_ts))
                ok = False

            # We don't do the next part if we set an illegal durable timestamp.  It turns out
            # if we do set the durable timestamp illegally, with a valid commit timestamp,
            # the timestamp_transaction() call will fail, but may set the commit timestamp.
            # It makes testing more complex, so we just don't do it.
            elif self.rand.rand32() % 2 == 0:
                if self.do_illegal():
                    this_commit_ts = self.oldest_ts - 1
                elif self.do_illegal():
                    this_commit_ts = self.stable_ts - 1
                else:
                    # It's possible this will succeed, we'll check below.
                    this_commit_ts = self.gen_ts(commit_ts)

                    # OOD does not work with prepared updates. Hence, the commit ts should always be
                    # greater than the last durable ts.
                    if this_commit_ts <= self.last_durable:
                        this_commit_ts = self.last_durable + 1

                config += ',commit_timestamp=' + self.timestamp_str(
                    this_commit_ts)

            if this_commit_ts >= 0:
                if this_commit_ts < running_commit_ts:
                    ok = False
                if this_commit_ts < self.stable_ts:
                    ok = False
                if this_commit_ts < self.oldest_ts:
                    ok = False
            if not ok:
                this_commit_ts = -1
            if this_commit_ts >= 0:
                running_commit_ts = this_commit_ts
            return (ok, config, running_commit_ts)

        session = self.session
        needs_rollback = False
        prepare_config = None
        commit_config = 'commit_timestamp=' + self.timestamp_str(commit_ts)
        tstxn1_config = ''
        tstxn2_config = ''

        ok_commit = do_prepare or not self.do_illegal()
        ok_prepare = True
        ok_tstxn1 = True
        ok_tstxn2 = True

        # Occasionally put a durable timestamp on a commit without a
        # prepare; that will be an error.
        if do_prepare or not ok_commit:
            commit_config += ',durable_timestamp=' + self.timestamp_str(
                durable_ts)
        cursor = session.open_cursor(self.uri)
        prepare_ts = self.gen_ts(commit_ts)
        prepare_config = 'prepare_timestamp=' + self.timestamp_str(prepare_ts)
        begin_config = '' if read_ts < 0 else 'read_timestamp=' + self.timestamp_str(
            read_ts)

        # We might do timestamp_transaction calls either before/after inserting
        # values, or both.
        do_tstxn1 = (self.rand.rand32() % 10 == 0)
        do_tstxn2 = (self.rand.rand32() % 10 == 0)

        # Keep track of the commit timestamp that we'll set through the transaction.
        # If it decreases, it will trigger an error.  At the final commit_transaction
        # operation, we'll use the commit_ts.
        running_commit_ts = -1
        first_commit_ts = -1

        if do_tstxn1:
            (ok_tstxn1, tstxn1_config, running_commit_ts) = \
                timestamp_txn_config(commit_ts, running_commit_ts)
            if first_commit_ts < 0:
                first_commit_ts = running_commit_ts

        if do_tstxn2:
            (ok_tstxn2, tstxn2_config, running_commit_ts) = \
                timestamp_txn_config(commit_ts, running_commit_ts)
            if first_commit_ts < 0:
                first_commit_ts = running_commit_ts

        # If a call to set a timestamp fails, a subsequent prepare may assert in diagnostic mode.
        # We consider that acceptable, but we don't test it as it will crash the test suite.
        if not ok_tstxn1 or not ok_tstxn2:
            do_prepare = False  # AVOID ASSERT
            ok_prepare = False
            ok_commit = False

        if running_commit_ts >= 0 and do_prepare:
            # Cannot set prepare timestamp after commit timestamp is successfully set.
            ok_prepare = False

        if do_prepare:
            if commit_ts < prepare_ts:
                ok_commit = False
            if prepare_ts < self.oldest_ts:
                ok_prepare = False

        # If the final commit is too old, we'll fail.
        if commit_ts < self.oldest_ts or commit_ts < self.stable_ts:
            ok_commit = False

        # ODDITY: We don't have to move the commit_ts ahead, but it has to be
        # at least the value of the first commit timestamp set.
        if commit_ts < first_commit_ts:
            ok_commit = False

        # If a prepare fails, the commit fails as well.
        if not ok_prepare:
            ok_commit = False

        msg = 'inserts with commit config(' + commit_config + ')'

        try:
            for i in range(1, self.nrows + 1):
                needs_rollback = False
                if self.do_illegal():
                    # Illegal outside of transaction
                    self.report('prepare_transaction', prepare_config)
                    with self.expect(False, 'prepare outside of transaction'):
                        session.prepare_transaction(prepare_config)

                with self.expect(True,
                                 'begin_transaction(' + begin_config + ')'):
                    session.begin_transaction()
                    needs_rollback = True

                if do_tstxn1:
                    with self.expect(
                            ok_tstxn1,
                            'timestamp_transaction(' + tstxn1_config + ')'):
                        session.timestamp_transaction(tstxn1_config)

                self.report('set key/value')
                with self.expect(True, 'cursor insert'):
                    cursor[ds.key(i)] = value

                if do_tstxn2:
                    with self.expect(
                            ok_tstxn2,
                            'timestamp_transaction(' + tstxn2_config + ')'):
                        session.timestamp_transaction(tstxn2_config)

                if do_prepare:
                    self.report('prepare_transaction', prepare_config)
                    with self.expect(ok_prepare, 'prepare'):
                        session.prepare_transaction(prepare_config)

                # Doing anything else after the prepare, like a timestamp_transaction(), will fail
                # with a WT panic.  Don't do that, or else we can't do anything more in this test.

                # If we did a successful prepare and are set up (by virtue of bad timestamps)
                # to do a bad commit, WT will panic, and the test cannot continue.
                # Only proceed with the commit if we don't have that particular case.
                if ok_commit or not do_prepare or not ok_prepare:
                    needs_rollback = False
                    self.report('commit_transaction', commit_config)
                    with self.expect(ok_commit, 'commit'):
                        session.commit_transaction(commit_config)
                        self.commit_value = value
                        if do_prepare:
                            self.last_durable = durable_ts
                if needs_rollback:
                    # Rollback this one transaction, and continue the loop
                    self.report('rollback_transaction')
                    needs_rollback = False
                    session.rollback_transaction()
        except Exception as e:
            # We don't expect any exceptions; they should be caught as part of self.expect statements.
            self.pr(msg + 'UNEXPECTED EXCEPTION!')
            self.pr(msg + 'fail: ' + str(e))
            raise e
        cursor.close()

    def make_timestamp_config(self, oldest, stable, durable):
        configs = []
        # Get list of 'oldest_timestamp=value' etc. that have non-negative values.
        for ts_name in ['oldest', 'stable', 'durable']:
            val = eval(ts_name)
            if val >= 0:
                configs.append(ts_name + '_timestamp=' +
                               self.timestamp_str(val))
        return ','.join(configs)

    # Determine whether we expect the set_timestamp to succeed.
    def expected_result_set_timestamp(self, oldest, stable, durable):

        # Update the current expected value.  ts is the timestamp being set.
        # If "ts" is negative, ignore it, it's not being set in this call.
        # It is unexpected if "ts" is before the "before" timestamp.
        # The "before" timestamp could be updated during this call
        # with value "before_arg", if not, use the global value for "before".
        def expected_newer(expected, ts, before_arg, before_global):
            if expected and ts >= 0:
                if before_arg >= 0:
                    if before_arg > ts:
                        expected = self.FAILURE
                else:
                    if before_global > ts:
                        expected = self.FAILURE
            return expected

        expected = self.SUCCESS

        # It is a no-op to provide oldest or stable behind the current global
        # values, so if provided behind, we treat them as if not provided at all.
        if oldest <= self.oldest_ts:
            oldest = -1
        if stable <= self.stable_ts:
            stable = -1

        if oldest >= 0 and stable < 0:
            expected = expected_newer(expected, self.stable_ts, oldest,
                                      self.oldest_ts)
        expected = expected_newer(expected, stable, oldest, self.oldest_ts)
        expected = expected_newer(expected, durable, oldest, self.oldest_ts)
        expected = expected_newer(expected, durable, stable, self.stable_ts)

        return expected

    def set_global_timestamps(self, oldest, stable, durable):
        config = self.make_timestamp_config(oldest, stable, durable)
        expected = self.expected_result_set_timestamp(oldest, stable, durable)

        with self.expect(expected, 'set_timestamp(' + config + ')'):
            self.conn.set_timestamp(config)

        # Predict what we expect to happen to the timestamps.
        if expected == self.SUCCESS:
            # If that passes, then independently, oldest and stable can advance, but if they
            # are less than the current value, that is silently ignored.
            if oldest >= self.oldest_ts:
                self.oldest_ts = oldest
                self.pr('updating oldest: ' + str(oldest))
            if stable >= self.stable_ts:
                self.stable_ts = stable
                self.pr('updating stable: ' + str(stable))

        # Make sure the state of global timestamps is what we think.
        expect_query_oldest = self.timestamp_str(self.oldest_ts)
        expect_query_stable = self.timestamp_str(self.stable_ts)
        query_oldest = self.conn.query_timestamp('get=oldest_timestamp')
        query_stable = self.conn.query_timestamp('get=stable_timestamp')

        self.assertEquals(expect_query_oldest, query_oldest)
        self.assertEquals(expect_query_stable, query_stable)
        self.pr('oldest now: ' + query_oldest)
        self.pr('stable now: ' + query_stable)

        if expected == self.FAILURE:
            self.cleanStderr()

    def test_timestamp_randomizer(self):
        # Local function to generate a random timestamp, or return -1
        def maybe_ts(do_gen, iternum):
            if do_gen:
                return self.gen_ts(iternum)
            else:
                return -1

        if wttest.islongtest():
            iterations = 100000
        else:
            iterations = 1000

        create_params = 'key_format={},value_format={}'.format(
            self.key_format, self.value_format)
        self.session.create(self.uri, create_params)

        self.set_global_timestamps(1, 1, -1)

        # Create tables with no entries
        ds = SimpleDataSet(self,
                           self.uri,
                           0,
                           key_format=self.key_format,
                           value_format=self.value_format)

        # We do a bunch of iterations, doing transactions, prepare, and global timestamp calls
        # with timestamps that are sometimes valid, sometimes not. We use the iteration number
        # as an "approximate timestamp", and generate timestamps for our calls that are near
        # that number (within 10).  Thus, as the test runs, the timestamps generally get larger.
        # We always know the state of global timestamps, so we can predict the success/failure
        # on each call.
        self.commit_value = '<NOT_SET>'
        for iternum in range(1, iterations):
            self.pr('\n===== ITERATION ' + str(iternum) + '/' +
                    str(iterations))
            self.pr('RANDOM: ({0},{1})'.format(self.rand.seedw,
                                               self.rand.seedz))
            if self.rand.rand32() % 10 != 0:
                commit_ts = self.gen_ts(iternum)
                durable_ts = self.gen_ts(iternum)
                do_prepare = (self.rand.rand32() % 20 == 0)
                if self.rand.rand32() % 2 == 0:
                    read_ts = self.gen_ts(iternum)
                else:
                    read_ts = -1  # no read_timestamp used in txn

                # OOD does not work with prepared updates. Hence, the commit ts should always be
                # greater than the last durable ts.
                if commit_ts <= self.last_durable:
                    commit_ts = self.last_durable + 1

                if do_prepare:
                    # If we're doing a prepare, we must abide by some
                    # additional rules.  If we don't, we'll immediately panic.
                    if commit_ts < self.oldest_ts:
                        commit_ts = self.oldest_ts
                    if durable_ts < commit_ts:
                        durable_ts = commit_ts
                    if durable_ts <= self.stable_ts:
                        durable_ts = self.stable_ts + 1
                value = self.gen_value(iternum, commit_ts)
                self.updates(value, ds, do_prepare, commit_ts, durable_ts,
                             read_ts)

            if self.rand.rand32() % 2 == 0:
                # Set some combination of the global timestamps
                r = self.rand.rand32() % 16
                oldest = maybe_ts((r & 0x1) != 0, iternum)
                stable = maybe_ts((r & 0x2) != 0, iternum)
                commit = maybe_ts((r & 0x4) != 0, iternum)
                durable = maybe_ts((r & 0x8) != 0, iternum)
                self.set_global_timestamps(oldest, stable, durable)

        # Make sure the resulting rows are what we expect.
        cursor = self.session.open_cursor(self.uri)
        expect_key = 1
        expect_value = self.commit_value
        for k, v in cursor:
            self.assertEquals(k, expect_key)
            self.assertEquals(v, expect_value)
            expect_key += 1

        # Although it's theoretically possible to never successfully update a single row,
        # with a large number of iterations that should never happen.  I'd rather catch
        # a test code error where we mistakenly don't update any rows.
        self.assertGreater(expect_key, 1)
        cursor.close()
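
All of the success/failure predictions in this test derive from WiredTiger's timestamp ordering rule: oldest may not pass stable, and a durable timestamp may not lag behind either. The following condensed sketch captures the check that expected_result_set_timestamp encodes, under the test's convention that -1 means "not being set"; it is a simplification for illustration, not the engine's actual validation:

# Condensed sketch of the ordering rule, using the test's convention
# that -1 means the timestamp is not being set in this call.
def set_timestamp_ok(oldest, stable, durable, cur_oldest, cur_stable):
    new_oldest = max(oldest, cur_oldest)      # values behind are ignored
    new_stable = max(stable, cur_stable)
    if new_oldest > new_stable:               # oldest may not pass stable
        return False
    if 0 <= durable < max(new_oldest, new_stable):
        return False                          # durable may not lag behind
    return True

print(set_timestamp_ok(5, 10, 12, cur_oldest=1, cur_stable=1))   # True
print(set_timestamp_ok(20, 10, -1, cur_oldest=1, cur_stable=1))  # False
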
Example No. 17
    def test_schema(self):
        rand = suite_random.suite_random()
        if self.SHOW_PYTHON:
            print '  ################################################'
            print '  # Running scenario ' + str(self.scenario_number)

        ntables = self.s_ntable

        # Report known limitations in the test; we'll work around
        # these later, in a loop where we don't want to print.
        self.KNOWN_LIMITATION('Indices created after data population will have no entries')
        self.KNOWN_LIMITATION('Column groups created after indices confuse things')

        # Column groups are created at two different times.
        # We call these two batches 'createsets'.
        # So that we don't have exactly the same number of column
        # groups for each table, for tests that indicate >1 colgroup,
        # we increase the number of column groups for each table.
        tabconfigs = []
        for i in range(0, ntables):
            self.current_table = i
            tc = tabconfig()
            tc.tablename = 't' + str(i)
            tc.tableidx = i
            tabconfigs.append(tc)

            for createset in range(0, 2):
                ncg = self.s_colgroup[createset]
                if ncg > 1:
                    ncg += i
                for k in range(0, ncg):
                    thiscg = cgconfig()
                    thiscg.createset = createset

                    # KNOWN LIMITATION: Column groups created after
                    # indices confuse things.  So for now, put all
                    # column group creation in the first set.
                    # Remove this statement when the limitation is fixed.
                    thiscg.createset = 0
                    # END KNOWN LIMITATION

                    thiscg.cgname = 'g' + str(len(tc.cglist))
                    tc.cglist.append(thiscg)

            # The same idea for indices, except that we create them in
            # three sets
            for createset in range(0, 3):
                nindex = self.s_index[createset]
                if nindex > 1:
                    nindex += i
                for k in range(0, nindex):
                    thisidx = idxconfig()
                    thisidx.createset = createset
                    thisidx.idxname = 'i' + str(len(tc.idxlist))
                    thisidx.tab = tc
                    tc.idxlist.append(thisidx)

            # We'll base the number of key/value columns
            # loosely on the number of column groups and indices.

            colgroups = len(tc.cglist)
            indices = len(tc.idxlist)
            nall = colgroups * 2 + indices
            k = rand.rand_range(1, nall)
            v = rand.rand_range(0, nall)
            # we need at least one value per column group
            if v < colgroups:
                v = colgroups
            tc.nkeys = k
            tc.nvalues = v
            tc.keyformats = self.gen_formats(rand, tc.nkeys, True)
            tc.valueformats = self.gen_formats(rand, tc.nvalues, False)

            # Simple naming (we'll test odd naming elsewhere):
            #  tables named 't0' --> 't<N>'
            #  within each table:
            #     columns named 'c0' --> 'c<N>'
            #     colgroups named 'g0' --> 'g<N>'
            #     indices named 'i0' --> 'i<N>'

            config = ""
            config += "key_format=" + tc.keyformats
            config += ",value_format=" + tc.valueformats
            config += ",columns=("
            for j in range(0, tc.nkeys + tc.nvalues):
                if j != 0:
                    config += ","
                config += "c" + str(j)
            config += "),colgroups=("
            for j in range(0, len(tc.cglist)):
                if j != 0:
                    config += ","
                config += "g" + str(j)
            config += ")"
            config += self.s_extra_table_args
            # indices are not declared here
            self.show_python("self.session.create('table:" + tc.tablename + "', '" + config + "')")
            self.session.create("table:" + tc.tablename, config)

            tc.columns_for_groups(range(tc.nkeys, tc.nkeys + tc.nvalues))
            tc.columns_for_indices(range(0, tc.nkeys + tc.nvalues))

        self.finished_step('table')

        for createset in (0, 1):
            # Create column groups in this set
            # e.g. self.session.create("colgroup:t0:g1", "columns=(c3,c4)")
            for tc in tabconfigs:
                self.current_table = tc.tableidx
                for cg in tc.cglist:
                    if cg.createset == createset:
                        self.create('colgroup', tc.tablename, cg.cgname, cg.columns)

            self.finished_step('colgroup' + str(createset))

            # Create indices in this set
            # e.g. self.session.create("index:t0:i1", "columns=(c3,c4)")
            for tc in tabconfigs:
                self.current_table = tc.tableidx
                for idx in tc.idxlist:
                    if idx.createset == createset:
                        self.create('index', tc.tablename, idx.idxname, idx.columns, self.s_index_args)

            self.finished_step('index' + str(createset))

        # populate first batch
        for tc in tabconfigs:
            self.current_table = tc.tableidx
            max = rand.rand_range(0, self.nentries)
            self.populate(tc, xrange(0, max))

        self.finished_step('populate0')

#TODO
        # Create indices in third set
#        for tc in tabconfigs:
#            for idx in tc.idxlist:
#                if idx.createset == 2:
#                    self.create('index', tc.tablename, idx.idxname, idx.columns)

        self.finished_step('index2')

        # populate second batch
        for tc in tabconfigs:
            self.current_table = tc.tableidx
            self.populate(tc, xrange(tc.nentries, self.nentries))

        self.finished_step('populate1')

        for tc in tabconfigs:
            self.current_table = tc.tableidx
            self.check_entries(tc)
Example No. 18
    def test_schema(self):
        rand = suite_random.suite_random()
        if self.SHOW_PYTHON:
            print('  ################################################')
            print('  # Running scenario ' + str(self.scenario_number))

        ntables = self.s_ntable

        # Report known limitations in the test; we'll work around
        # these later, in a loop where we don't want to print.
        self.KNOWN_LIMITATION(
            'Column groups created after indices confuse things')

        # Column groups are created at two different times.
        # We call these two batches 'createsets'.
        # So that we don't have exactly the same number of column
        # groups for each table, for tests that indicate >1 colgroup,
        # we increase the number of column groups for each table.
        tabconfigs = []
        for i in range(0, ntables):
            self.current_table = i
            tc = tabconfig()
            tc.tablename = 't' + str(i)
            tc.tableidx = i
            tabconfigs.append(tc)

            for createset in range(0, 2):
                ncg = self.s_colgroup[createset]
                if ncg > 1:
                    ncg += i
                for k in range(0, ncg):
                    thiscg = cgconfig()
                    thiscg.createset = createset

                    # KNOWN LIMITATION: Column groups created after
                    # indices confuse things.  So for now, put all
                    # column group creation in the first set.
                    # Remove this statement when the limitation is fixed.
                    thiscg.createset = 0
                    # END KNOWN LIMITATION

                    thiscg.cgname = 'g' + str(len(tc.cglist))
                    tc.cglist.append(thiscg)

            # The same idea for indices, except that we create them in
            # three sets
            for createset in range(0, 3):
                nindex = self.s_index[createset]
                if nindex > 1:
                    nindex += i
                for k in range(0, nindex):
                    thisidx = idxconfig()
                    thisidx.createset = createset
                    thisidx.idxname = 'i' + str(len(tc.idxlist))
                    thisidx.tab = tc
                    tc.idxlist.append(thisidx)

            # We'll base the number of key/value columns
            # loosely on the number of column groups and indices.

            colgroups = len(tc.cglist)
            indices = len(tc.idxlist)
            nall = colgroups * 2 + indices
            k = rand.rand_range(1, nall)
            v = rand.rand_range(0, nall)
            # we need at least one value per column group
            if v < colgroups:
                v = colgroups
            tc.nkeys = k
            tc.nvalues = v
            tc.keyformats = self.gen_formats(rand, tc.nkeys, True)
            tc.valueformats = self.gen_formats(rand, tc.nvalues, False)

            # Simple naming (we'll test odd naming elsewhere):
            #  tables named 't0' --> 't<N>'
            #  within each table:
            #     columns named 'c0' --> 'c<N>'
            #     colgroups named 'g0' --> 'g<N>'
            #     indices named 'i0' --> 'i<N>'

            config = ""
            config += "key_format=" + tc.keyformats
            config += ",value_format=" + tc.valueformats
            config += ",columns=("
            for j in range(0, tc.nkeys + tc.nvalues):
                if j != 0:
                    config += ","
                config += "c" + str(j)
            config += "),colgroups=("
            for j in range(0, len(tc.cglist)):
                if j != 0:
                    config += ","
                config += "g" + str(j)
            config += ")"
            config += self.s_extra_table_args
            # indices are not declared here
            self.show_python("self.session.create('table:" + tc.tablename +
                             "', '" + config + "')")
            self.session.create("table:" + tc.tablename, config)

            tc.columns_for_groups(list(range(tc.nkeys, tc.nkeys + tc.nvalues)))
            tc.columns_for_indices(list(range(0, tc.nkeys + tc.nvalues)))

        self.finished_step('table')

        for createset in (0, 1):
            # Create column groups in this set
            # e.g. self.session.create("colgroup:t0:g1", "columns=(c3,c4)")
            for tc in tabconfigs:
                self.current_table = tc.tableidx
                for cg in tc.cglist:
                    if cg.createset == createset:
                        self.create('colgroup', tc.tablename, cg.cgname,
                                    cg.columns)

            self.finished_step('colgroup' + str(createset))

            # Create indices in this set
            # e.g. self.session.create("index:t0:i1", "columns=(c3,c4)")
            for tc in tabconfigs:
                self.current_table = tc.tableidx
                for idx in tc.idxlist:
                    if idx.createset == createset:
                        self.create('index', tc.tablename, idx.idxname,
                                    idx.columns, self.s_index_args)

            self.finished_step('index' + str(createset))

        # populate first batch
        for tc in tabconfigs:
            self.current_table = tc.tableidx
            max = rand.rand_range(0, self.nentries)
            self.populate(tc, list(range(0, max)))

        self.finished_step('populate0')

        # Create indices in third set
        for tc in tabconfigs:
            for idx in tc.idxlist:
                if idx.createset == 2:
                    self.create('index', tc.tablename, idx.idxname,
                                idx.columns)

        self.finished_step('index2')

        # populate second batch
        for tc in tabconfigs:
            self.current_table = tc.tableidx
            self.populate(tc, list(range(tc.nentries, self.nentries)))

        self.finished_step('populate1')

        for tc in tabconfigs:
            self.current_table = tc.tableidx
            self.check_entries(tc)
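
gen_formats is used by both test_schema variants but not shown. Given the 'ASSUME: each format is 1 char' comments in the columns_for_indices examples, a plausible stand-in (an assumption, not the suite's code) emits one single-character WiredTiger format code per column:

import random

# Hypothetical sketch of gen_formats: one single-character WiredTiger
# format code per column, e.g. 'i' (int32), 'S' (string), 'q' (int64).
def gen_formats(rand, ncols, iskey):
    choices = 'iS' if iskey else 'iSq'
    return ''.join(rand.choice(choices) for _ in range(ncols))

rng = random.Random(0)
print(gen_formats(rng, 3, True))   # e.g. 'SiS'
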
Example No. 19
    def test_cursor_sweep(self):
        rand = suite_random()

        uri_map = self.create_uri_map(self.uri)
        self.cursor_stats_init()
        begin_stats = self.caching_stats()
        begin_sweep_stats = self.sweep_stats()
        #self.tty('stats before = ' + str(begin_stats))
        #self.tty('sweep stats before = ' + str(begin_sweep_stats))
        potential_dead = 0

        for round_cnt in range(0, self.rounds):
            if round_cnt % 2 == 1:
                # Close cursors in half of the range, and don't
                # use them during this round, so they will be
                # closed by sweep.
                half = self.nuris // 2
                potential_dead += self.close_uris(uri_map, list(range(0, half)))
                bottom_range = half
                # Let the dhandle sweep run and find the closed cursors.
                time.sleep(3.0)
            else:
                bottom_range = 0

            # The session cursor sweep runs at most once a second and
            # traverses a fraction of the cached cursors.  We'll run for
            # ten seconds with pauses to make sure we see sweep activity.
            pause_point = self.opens_per_round // 100
            if pause_point == 0:
                pause_point = 1
            pause_duration = 0.1

            i = 0
            while self.opencount < (1 + round_cnt) * self.opens_per_round:
                i += 1
                if i % pause_point == 0:
                    time.sleep(pause_duration)   # over time, let sweep run
                self.open_or_close(uri_map, rand, bottom_range, self.nuris)

        end_stats = self.caching_stats()
        end_sweep_stats = self.sweep_stats()

        #self.tty('opens = ' + str(self.opencount) + \
        #         ', closes = ' + str(self.closecount))
        #self.tty('stats after = ' + str(end_stats))
        #self.tty('sweep stats after = ' + str(end_sweep_stats))
        self.assertEquals(end_stats[0] - begin_stats[0], self.closecount)
        swept = end_sweep_stats[3] - begin_sweep_stats[3]

        # Although this is subject to tuning parameters, we know that
        # in an active session, we'll sweep through a minimum of 1% of
        # the cached cursors per second.  We've set this test to run
        # 5 rounds. In 2 of the 5 rounds (sandwiched between the others),
        # some of the uris are allowed to close. So during the 'closing rounds'
        # we'll sweep a minimum of 20% of the uri space, and in the other
        # rounds we'll be referencing the closed uris again.

        # We'll pass the test if we see at least 20% of the 'potentially
        # dead' cursors swept.  There may be more, since the 1% per second
        # is a minimum.
        min_swept = 2 * potential_dead // 10
        self.assertGreaterEqual(swept, min_swept)

        # No strict equality test for the reopen stats. When we've swept
        # some closed cursors, we'll have fewer reopens. It's different
        # by approximately the number of swept cursors, but it's less
        # predictable.
        self.assertGreater(end_stats[1] - begin_stats[1], 0)