Example #1
    def append_range_test(self, instructions, args, num_pairs, kv_length):
        instructions.append('NEW_TRANSACTION')

        instructions.push_args(self.workspace.key())
        instructions.append('CLEAR_RANGE_STARTS_WITH')

        kvpairs = []
        for i in range(0, num_pairs * 2):
            kvpairs.append(self.workspace.pack((b'foo', bytes([random.randint(0, 254) for i in range(0, kv_length)]))))

        kvpairs = list(set(kvpairs))
        if len(kvpairs) % 2 == 1:
            kvpairs = kvpairs[:-1]
        kvpairs.sort()

        instructions.push_args(*kvpairs)
        for i in range(0, len(kvpairs) // 2):
            instructions.append('SET')
            if i % 100 == 99:
                test_util.blocking_commit(instructions)
                self.add_result(instructions, args, b'RESULT_NOT_PRESENT')

        foo_range = self.workspace.range((b'foo',))
        instructions.push_args(foo_range.start, foo_range.stop, 0, 0, -1)
        instructions.append('GET_RANGE')
        self.add_result(instructions, args, fdb.tuple.pack(tuple(kvpairs)))
        instructions.push_args(self.workspace.key(), 0, 0, -1)
        instructions.append('GET_RANGE_STARTS_WITH')
        self.add_result(instructions, args, fdb.tuple.pack(tuple(kvpairs)))
        instructions.push_args(foo_range.start, 0, 1, foo_range.stop, 0, 1, 0, 0, -1, b'')
        instructions.append('GET_RANGE_SELECTOR')
        self.add_result(instructions, args, fdb.tuple.pack(tuple(kvpairs)))
        test_util.blocking_commit(instructions)
        self.add_result(instructions, args, b'RESULT_NOT_PRESENT')
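Example #1 commits every 100 SET operations through test_util.blocking_commit and then expects b'RESULT_NOT_PRESENT'. A minimal sketch of what such a helper presumably does is shown below; the exact instruction names emitted by the real helper are an assumption, not taken from this listing.

def blocking_commit(instructions):
    # assumed behavior: issue the commit, then wait on the resulting future so
    # that later instructions run against the committed state
    instructions.append('COMMIT')
    instructions.append('WAIT_FUTURE')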
Example #2
    def preload_database(self, instructions, num):
        for i in range(num):
            self.ensure_key_value(instructions)
            instructions.append('SET')

            if i % 100 == 99:
                test_util.blocking_commit(instructions)

        test_util.blocking_commit(instructions)
        self.add_stack_items(1)
Example #3
def create_default_directory_subspace(instructions, path, random):
    test_util.blocking_commit(instructions)
    instructions.push_args(3)
    instructions.append('DIRECTORY_CHANGE')
    prefix = random.random_string(16)
    instructions.push_args(1, path, '', '%s-%s' % (DEFAULT_DIRECTORY_PREFIX, prefix))
    instructions.append('DIRECTORY_CREATE_DATABASE')

    instructions.push_args(DEFAULT_DIRECTORY_INDEX)
    instructions.append('DIRECTORY_CHANGE')
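Both snippets above drive an instruction buffer through two calls only: push_args to stage operands and append to record an opcode. A minimal stand-in with that interface, useful for tracing what the examples emit (an illustrative sketch, not the real InstructionSet from the bindings tester):

class SimpleInstructionSet:
    def __init__(self):
        self.ops = []                     # recorded instructions, in order

    def push_args(self, *args):
        # one PUSH per operand -- an assumption about how the real class encodes them
        for arg in args:
            self.ops.append(('PUSH', arg))

    def append(self, op):
        self.ops.append((op,))            # an opcode with no inline operands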
Example #4
    def generate(self, args, thread_number):
        instructions = InstructionSet()

        min_value = -2**self.max_int_bits + 1
        max_value = 2**self.max_int_bits - 1

        instructions.append('NEW_TRANSACTION')

        # Test integer encoding
        mutations = 0
        for i in range(0, self.max_int_bits + 1):
            for sign in [-1, 1]:
                sign_str = '' if sign == 1 else '-'
                for offset in range(-10, 11):
                    val = (2**i) * sign + offset
                    if val >= min_value and val <= max_value:
                        if offset == 0:
                            add_str = ''
                        elif offset > 0:
                            add_str = '+%d' % offset
                        else:
                            add_str = '%d' % offset

                        instructions.push_args(1, val)
                        instructions.append('TUPLE_PACK')
                        instructions.push_args(
                            self.workspace.pack(
                                ('%s2^%d%s' % (sign_str, i, add_str), )))
                        instructions.append('SET')
                        mutations += 1

            if mutations >= 5000:
                test_util.blocking_commit(instructions)
                mutations = 0

        instructions.begin_finalization()

        test_util.blocking_commit(instructions)
        instructions.push_args(self.stack_subspace.key())
        instructions.append('LOG_STACK')

        test_util.blocking_commit(instructions)

        return instructions
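Example #4 writes one key per integer near each power-of-two boundary. The same value set can be enumerated standalone, which makes the intended coverage easy to inspect (max_int_bits = 64 is an assumed value for illustration; the test reads it from self.max_int_bits):

def boundary_values(max_int_bits=64):
    # every value within +/-10 of +/-2**i that fits in the representable range,
    # mirroring the loop in Example #4
    min_value = -2**max_int_bits + 1
    max_value = 2**max_int_bits - 1
    vals = set()
    for i in range(max_int_bits + 1):
        for sign in (-1, 1):
            for offset in range(-10, 11):
                val = (2**i) * sign + offset
                if min_value <= val <= max_value:
                    vals.add(val)
    return sorted(vals)

print(len(boundary_values()))  # number of distinct integers the test packs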
Example #5
    def generate(self, args, thread_number):
        instructions = InstructionSet()

        op_choices = ['NEW_TRANSACTION', 'COMMIT']

        general = ['DIRECTORY_CREATE_SUBSPACE', 'DIRECTORY_CREATE_LAYER']

        op_choices += general

        directory_mutations = ['DIRECTORY_CREATE_OR_OPEN', 'DIRECTORY_CREATE', 'DIRECTORY_MOVE', 'DIRECTORY_MOVE_TO',
                               'DIRECTORY_REMOVE', 'DIRECTORY_REMOVE_IF_EXISTS']
        directory_reads = ['DIRECTORY_EXISTS', 'DIRECTORY_OPEN', 'DIRECTORY_LIST']

        directory_db_mutations = [x + '_DATABASE' for x in directory_mutations]
        directory_db_reads = [x + '_DATABASE' for x in directory_reads]
        directory_snapshot_reads = [x + '_SNAPSHOT' for x in directory_reads]

        directory = []
        directory += directory_mutations
        directory += directory_reads
        directory += directory_db_mutations
        directory += directory_db_reads

        if not args.no_directory_snapshot_ops:
            directory += directory_snapshot_reads

        subspace = ['DIRECTORY_PACK_KEY', 'DIRECTORY_UNPACK_KEY', 'DIRECTORY_RANGE', 'DIRECTORY_CONTAINS', 'DIRECTORY_OPEN_SUBSPACE']

        instructions.append('NEW_TRANSACTION')

        default_path = u'default%d' % self.next_path
        self.next_path += 1
        self.dir_list = directory_util.setup_directories(instructions, default_path, self.random)
        self.root = self.dir_list[0]

        instructions.push_args(0)
        instructions.append('DIRECTORY_CHANGE')

        # Generate some directories that we are going to create in advance. This tests that other bindings
        # are compatible with the Python implementation
        self.prepopulated_dirs = [(generate_path(min_length=1), self.generate_layer()) for i in range(5)]

        for path, layer in self.prepopulated_dirs:
            instructions.push_args(layer)
            instructions.push_args(*test_util.with_length(path))
            instructions.append('DIRECTORY_OPEN')
            self.dir_list.append(self.root.add_child(path, DirectoryStateTreeNode(True, True, has_known_prefix=False, is_partition=(layer=='partition'))))
            # print('%d. Selected %s, dir=%s, dir_id=%s, has_known_prefix=%s, dir_list_len=%d' \
            #       % (len(instructions), 'DIRECTORY_OPEN', repr(self.dir_index), self.dir_list[-1].dir_id, False, len(self.dir_list)-1))

        instructions.setup_complete()

        for i in range(args.num_ops):
            if random.random() < 0.5:
                while True:
                    self.dir_index = random.randrange(0, len(self.dir_list))
                    if not self.dir_list[self.dir_index].state.is_partition or not self.dir_list[self.dir_index].state.deleted:
                        break

                instructions.push_args(self.dir_index)
                instructions.append('DIRECTORY_CHANGE')

            dir_entry = self.dir_list[self.dir_index]

            choices = op_choices[:]
            if dir_entry.state.is_directory:
                choices += directory
            if dir_entry.state.is_subspace:
                choices += subspace

            op = random.choice(choices)

            # print('%d. Selected %s, dir=%d, dir_id=%d, has_known_prefix=%d, dir_list_len=%d' \
            #       % (len(instructions), op, self.dir_index, dir_entry.dir_id, dir_entry.state.has_known_prefix, len(self.dir_list)))

            if op.endswith('_DATABASE') or op.endswith('_SNAPSHOT'):
                root_op = op[0:-9]
            else:
                root_op = op

            if root_op == 'NEW_TRANSACTION':
                instructions.append(op)

            elif root_op == 'COMMIT':
                test_util.blocking_commit(instructions)

            elif root_op == 'DIRECTORY_CREATE_SUBSPACE':
                path = generate_path()
                instructions.push_args(generate_prefix(require_unique=False, is_partition=True))
                instructions.push_args(*test_util.with_length(path))
                instructions.append(op)
                self.dir_list.append(DirectoryStateTreeNode(False, True, has_known_prefix=True))

            elif root_op == 'DIRECTORY_CREATE_LAYER':
                indices = []
                
                prefixes = [generate_prefix(require_unique=args.concurrency==1, is_partition=True) for i in range(2)]
                for i in range(2):
                    instructions.push_args(prefixes[i])
                    instructions.push_args(*test_util.with_length(generate_path()))
                    instructions.append('DIRECTORY_CREATE_SUBSPACE')
                    indices.append(len(self.dir_list))
                    self.dir_list.append(DirectoryStateTreeNode(False, True, has_known_prefix=True))

                instructions.push_args(random.choice([0, 1]))
                instructions.push_args(*indices)
                instructions.append(op)
                self.dir_list.append(DirectoryStateTreeNode.get_layer(prefixes[0]))

            elif root_op == 'DIRECTORY_CREATE_OR_OPEN':
                # Because allocated prefixes are non-deterministic, we cannot have overlapping
                # transactions that allocate/remove these prefixes in a comparison test
                if op.endswith('_DATABASE') and args.concurrency == 1:
                    test_util.blocking_commit(instructions)

                path = generate_path()
                op_args = test_util.with_length(path) + (self.generate_layer(),)
                directory_util.push_instruction_and_record_prefix(instructions, op, op_args, path, len(self.dir_list), self.random, self.prefix_log)

                if not op.endswith('_DATABASE') and args.concurrency == 1:
                    test_util.blocking_commit(instructions)

                child_entry = dir_entry.get_descendent(path)
                if child_entry is None:
                    child_entry = DirectoryStateTreeNode(True, True)

                child_entry.state.has_known_prefix = False  
                self.dir_list.append(dir_entry.add_child(path, child_entry))

            elif root_op == 'DIRECTORY_CREATE':
                layer = self.generate_layer()
                is_partition = layer == 'partition'

                prefix = generate_prefix(require_unique=is_partition and args.concurrency==1, is_partition=is_partition, min_length=0)

                # Because allocated prefixes are non-deterministic, we cannot have overlapping
                # transactions that allocate/remove these prefixes in a comparison test
                if op.endswith('_DATABASE') and args.concurrency == 1:  # and allow_empty_prefix:
                    test_util.blocking_commit(instructions)

                path = generate_path()
                op_args = test_util.with_length(path) + (layer, prefix)
                if prefix is None:
                    directory_util.push_instruction_and_record_prefix(
                        instructions, op, op_args, path, len(self.dir_list), self.random, self.prefix_log)
                else:
                    instructions.push_args(*op_args)
                    instructions.append(op)

                if not op.endswith('_DATABASE') and args.concurrency == 1:  # and allow_empty_prefix:
                    test_util.blocking_commit(instructions)

                child_entry = dir_entry.get_descendent(path)
                if child_entry is None:
                    child_entry = DirectoryStateTreeNode(True, True, has_known_prefix=bool(prefix))
                elif not bool(prefix):
                    child_entry.state.has_known_prefix = False

                if is_partition:
                    child_entry.state.is_partition = True

                self.dir_list.append(dir_entry.add_child(path, child_entry))

            elif root_op == 'DIRECTORY_OPEN':
                path = generate_path()
                instructions.push_args(self.generate_layer())
                instructions.push_args(*test_util.with_length(path))
                instructions.append(op)

                child_entry = dir_entry.get_descendent(path)
                if child_entry is None:
                    self.dir_list.append(DirectoryStateTreeNode(False, False, has_known_prefix=False))
                else:
                    self.dir_list.append(dir_entry.add_child(path, child_entry))

            elif root_op == 'DIRECTORY_MOVE':
                old_path = generate_path()
                new_path = generate_path()
                instructions.push_args(*(test_util.with_length(old_path) + test_util.with_length(new_path)))
                instructions.append(op)

                child_entry = dir_entry.get_descendent(old_path)
                if child_entry is None:
                    self.dir_list.append(DirectoryStateTreeNode(False, False, has_known_prefix=False))
                else:
                    self.dir_list.append(dir_entry.add_child(new_path, child_entry))

                # Make sure that the default directory subspace still exists after moving the specified directory
                if dir_entry.state.is_directory and not dir_entry.state.is_subspace and old_path == (u'',):
                    self.ensure_default_directory_subspace(instructions, default_path)

            elif root_op == 'DIRECTORY_MOVE_TO':
                new_path = generate_path()
                instructions.push_args(*test_util.with_length(new_path))
                instructions.append(op)

                child_entry = dir_entry.get_descendent(())
                if child_entry is None:
                    self.dir_list.append(DirectoryStateTreeNode(False, False, has_known_prefix=False))
                else:
                    self.dir_list.append(dir_entry.add_child(new_path, child_entry))

                # Make sure that the default directory subspace still exists after moving the current directory
                self.ensure_default_directory_subspace(instructions, default_path)

            elif root_op == 'DIRECTORY_REMOVE' or root_op == 'DIRECTORY_REMOVE_IF_EXISTS':
                # Because allocated prefixes are non-deterministic, we cannot have overlapping
                # transactions that allocate/remove these prefixes in a comparison test
                if op.endswith('_DATABASE') and args.concurrency == 1:
                    test_util.blocking_commit(instructions)

                path = ()
                count = random.randint(0, 1)
                if count == 1:
                    path = generate_path()
                    instructions.push_args(*test_util.with_length(path))

                instructions.push_args(count)
                instructions.append(op)

                dir_entry.delete(path)

                # Make sure that the default directory subspace still exists after removing the specified directory
                if path == () or (dir_entry.state.is_directory and not dir_entry.state.is_subspace and path == (u'',)):
                    self.ensure_default_directory_subspace(instructions, default_path)

            elif root_op == 'DIRECTORY_LIST' or root_op == 'DIRECTORY_EXISTS':
                path = ()
                count = random.randint(0, 1)
                if count == 1:
                    path = generate_path()
                    instructions.push_args(*test_util.with_length(path))
                instructions.push_args(count)
                instructions.append(op)

            elif root_op == 'DIRECTORY_PACK_KEY':
                t = self.random.random_tuple(5)
                instructions.push_args(*test_util.with_length(t))
                instructions.append(op)
                instructions.append('DIRECTORY_STRIP_PREFIX')

            elif root_op == 'DIRECTORY_UNPACK_KEY' or root_op == 'DIRECTORY_CONTAINS':
                if not dir_entry.state.has_known_prefix or random.random() < 0.2 or root_op == 'DIRECTORY_UNPACK_KEY':
                    t = self.random.random_tuple(5)
                    instructions.push_args(*test_util.with_length(t))
                    instructions.append('DIRECTORY_PACK_KEY')
                    instructions.append(op)
                else:
                    instructions.push_args(fdb.tuple.pack(self.random.random_tuple(5)))
                    instructions.append(op)

            elif root_op == 'DIRECTORY_RANGE' or root_op == 'DIRECTORY_OPEN_SUBSPACE':
                t = self.random.random_tuple(5)
                instructions.push_args(*test_util.with_length(t))
                instructions.append(op)
                if root_op == 'DIRECTORY_OPEN_SUBSPACE':
                    self.dir_list.append(DirectoryStateTreeNode(False, True, dir_entry.state.has_known_prefix))
                else:
                    test_util.to_front(instructions, 1)
                    instructions.append('DIRECTORY_STRIP_PREFIX')
                    test_util.to_front(instructions, 1)
                    instructions.append('DIRECTORY_STRIP_PREFIX')

        instructions.begin_finalization()

        test_util.blocking_commit(instructions)

        instructions.append('NEW_TRANSACTION')

        for i, dir_entry in enumerate(self.dir_list):
            instructions.push_args(i)
            instructions.append('DIRECTORY_CHANGE')
            if dir_entry.state.is_directory:
                instructions.push_args(self.directory_log.key())
                instructions.append('DIRECTORY_LOG_DIRECTORY')
            if dir_entry.state.has_known_prefix and dir_entry.state.is_subspace:
                # print('%d. Logging subspace: %d' % (i, dir_entry.dir_id))
                instructions.push_args(self.subspace_log.key())
                instructions.append('DIRECTORY_LOG_SUBSPACE')
            if (i + 1) % 100 == 0:
                test_util.blocking_commit(instructions)

        test_util.blocking_commit(instructions)

        instructions.push_args(self.stack_subspace.key())
        instructions.append('LOG_STACK')

        test_util.blocking_commit(instructions)
        return instructions
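Example #5 always pushes paths and tuples through test_util.with_length before an opcode that consumes a variable number of items. Based on Example #7, which pushes push_args(len(tup), *tup) for the same TUPLE_PACK pattern, the helper presumably looks like the sketch below (an inference from this listing, not the helper's actual source):

def with_length(tup):
    # element count first, then the elements themselves
    return (len(tup),) + tup

# e.g. with_length(('a', 'b')) == (2, 'a', 'b')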
Example #6
    def generate(self, args, thread_number):
        self.results = []

        test_instructions = ThreadedInstructionSet()
        main_thread = test_instructions.create_thread()

        foo = [self.workspace.pack(('foo%d' % i, )) for i in range(0, 6)]

        main_thread.append('NEW_TRANSACTION')
        main_thread.push_args(1020)
        main_thread.append('ON_ERROR')
        self.add_result(main_thread, args, 'RESULT_NOT_PRESENT')
        main_thread.append('GET_READ_VERSION')
        main_thread.push_args(foo[1], 'bar')
        main_thread.append('SET')
        main_thread.push_args(foo[1])
        main_thread.append('GET')
        self.add_result(main_thread, args, 'bar')
        test_util.blocking_commit(main_thread)
        self.add_result(main_thread, args, 'RESULT_NOT_PRESENT')

        main_thread.push_args(2000)
        main_thread.append('ON_ERROR')
        self.add_result(main_thread, args, test_util.error_string(2000))

        main_thread.append('NEW_TRANSACTION')
        main_thread.push_args(0)
        main_thread.append('ON_ERROR')
        self.add_result(main_thread, args, test_util.error_string(2000))

        main_thread.append('NEW_TRANSACTION')
        main_thread.push_args(foo[1])
        main_thread.append('DUP')
        main_thread.append('DUP')
        main_thread.append('GET')
        self.add_result(main_thread, args, 'bar')
        main_thread.append('CLEAR')
        main_thread.append('GET_SNAPSHOT')
        self.add_result(main_thread, args, 'RESULT_NOT_PRESENT')
        main_thread.push_args(foo[1])
        main_thread.append('GET_DATABASE')
        self.add_result(main_thread, args, 'bar')
        test_util.blocking_commit(main_thread)
        self.add_result(main_thread, args, 'RESULT_NOT_PRESENT')

        main_thread.append('SET_READ_VERSION')
        main_thread.push_args(foo[1])
        main_thread.append('DUP')
        main_thread.append('GET')
        self.add_result(main_thread, args, 'RESULT_NOT_PRESENT')
        main_thread.append('CLEAR')
        test_util.blocking_commit(main_thread)
        self.add_result(main_thread, args, test_util.error_string(1020))

        main_thread.push_args(foo[1])
        main_thread.append('GET_SNAPSHOT')
        self.add_result(main_thread, args, 'RESULT_NOT_PRESENT')
        main_thread.push_args(foo[1])
        main_thread.append('CLEAR')
        main_thread.append('COMMIT')
        main_thread.append('WAIT_FUTURE')
        self.add_result(main_thread, args, 'RESULT_NOT_PRESENT')
        main_thread.append('GET_COMMITTED_VERSION')
        main_thread.append('RESET')
        main_thread.append('EMPTY_STACK')

        main_thread.append('NEW_TRANSACTION')
        main_thread.push_args(1, 'bar', foo[1], foo[2], 'bar2', foo[3], 'bar3',
                              foo[4], 'bar4', foo[5], 'bar5')
        main_thread.append('SWAP')
        main_thread.append('SET')
        main_thread.append('SET')
        main_thread.append('SET')
        main_thread.append('SET')
        main_thread.append('SET_DATABASE')
        test_util.blocking_commit(main_thread)
        self.add_result(main_thread, args, 'RESULT_NOT_PRESENT')

        main_thread.append('SET_READ_VERSION')
        main_thread.push_args(foo[2])
        main_thread.append('GET')
        self.add_result(main_thread, args, 'RESULT_NOT_PRESENT')

        main_thread.append('NEW_TRANSACTION')
        main_thread.push_args('', 0, -1, '')
        main_thread.append('GET_KEY')
        self.add_result(main_thread, args, '')

        main_thread.append('NEW_TRANSACTION')
        main_thread.append('GET_READ_VERSION_SNAPSHOT')
        main_thread.push_args('random', foo[1], foo[3], 0, 1, 1)
        main_thread.append('POP')
        main_thread.append('GET_RANGE')
        self.add_result(main_thread, args,
                        fdb.tuple.pack((foo[2], 'bar2', foo[1], 'bar')))
        main_thread.push_args(foo[1], foo[3], 1, 1, 0)
        main_thread.append('GET_RANGE_SNAPSHOT')
        self.add_result(main_thread, args, fdb.tuple.pack((foo[2], 'bar2')))
        main_thread.push_args(foo[1], foo[3], 0, 0, 4)
        main_thread.append('GET_RANGE_DATABASE')
        self.add_result(main_thread, args,
                        fdb.tuple.pack((foo[1], 'bar', foo[2], 'bar2')))
        test_util.blocking_commit(main_thread)
        self.add_result(main_thread, args, 'RESULT_NOT_PRESENT')

        main_thread.push_args(foo[3], foo[5])
        main_thread.append('CLEAR_RANGE')
        main_thread.push_args(foo[1], 0, 3, '')
        main_thread.append('GET_KEY')
        self.add_result(main_thread, args, foo[5])
        main_thread.push_args(foo[1], 1, 2, '')
        main_thread.append('GET_KEY_SNAPSHOT')
        self.add_result(main_thread, args, foo[5])
        main_thread.push_args(foo[5], 0, -2, '')
        main_thread.append('GET_KEY_DATABASE')
        self.add_result(main_thread, args, foo[2])
        main_thread.push_args(self.workspace.key(), 2, 0, 2)
        main_thread.append('GET_RANGE_STARTS_WITH')
        self.add_result(main_thread, args,
                        fdb.tuple.pack((foo[1], 'bar', foo[2], 'bar2')))
        main_thread.push_args(self.workspace.key(), 4, 0, 3)
        main_thread.append('GET_RANGE_STARTS_WITH_SNAPSHOT')
        self.add_result(
            main_thread, args,
            fdb.tuple.pack((foo[1], 'bar', foo[2], 'bar2', foo[5], 'bar5')))
        main_thread.push_args(self.workspace.key(), 3, 1, -1)
        main_thread.append('GET_RANGE_STARTS_WITH_DATABASE')
        self.add_result(
            main_thread, args,
            fdb.tuple.pack((foo[5], 'bar5', foo[4], 'bar4', foo[3], 'bar3')))
        main_thread.push_args(foo[1], 0, 1, foo[1], 0, 3, 0, 0, -1, '')
        main_thread.append('GET_RANGE_SELECTOR')
        self.add_result(main_thread, args,
                        fdb.tuple.pack((foo[1], 'bar', foo[2], 'bar2')))
        main_thread.push_args(foo[1], 1, 0, foo[1], 1, 3, 0, 0, -1, '')
        main_thread.append('GET_RANGE_SELECTOR_SNAPSHOT')
        self.add_result(
            main_thread, args,
            fdb.tuple.pack((foo[1], 'bar', foo[2], 'bar2', foo[5], 'bar5')))
        main_thread.push_args(foo[1], 0, 1, foo[1], 1, 3, 0, 0, -1, '')
        main_thread.append('GET_RANGE_SELECTOR_DATABASE')
        self.add_result(
            main_thread, args,
            fdb.tuple.pack((foo[1], 'bar', foo[2], 'bar2', foo[3], 'bar3')))
        test_util.blocking_commit(main_thread)
        self.add_result(main_thread, args, 'RESULT_NOT_PRESENT')

        main_thread.push_args(self.workspace.key())
        main_thread.append('CLEAR_RANGE_STARTS_WITH')
        main_thread.push_args(self.workspace.key(), 0, 0, -1)
        main_thread.append('GET_RANGE_STARTS_WITH')
        self.add_result(main_thread, args, '')
        test_util.blocking_commit(main_thread)
        self.add_result(main_thread, args, 'RESULT_NOT_PRESENT')

        main_thread.append('SET_READ_VERSION')
        main_thread.push_args(foo[1])
        main_thread.append('GET')
        self.add_result(main_thread, args, 'bar')
        test_util.blocking_commit(main_thread)
        self.add_result(main_thread, args, 'RESULT_NOT_PRESENT')

        main_thread.push_args(foo[1], 'bar', foo[2], 'bar2', foo[3], 'bar3',
                              foo[4], 'bar4', foo[5], 'bar5')
        main_thread.append('SET')
        main_thread.append('SET')
        main_thread.append('SET')
        main_thread.append('SET')
        main_thread.append('SET')
        test_util.blocking_commit(main_thread)
        self.add_result(main_thread, args, 'RESULT_NOT_PRESENT')

        main_thread.push_args(foo[2])
        main_thread.append('CLEAR_DATABASE')
        main_thread.append('WAIT_FUTURE')
        main_thread.push_args(self.workspace.key(), 0, 0, -1)
        main_thread.append('GET_RANGE_STARTS_WITH_DATABASE')
        self.add_result(
            main_thread, args,
            fdb.tuple.pack((foo[1], 'bar', foo[3], 'bar3', foo[4], 'bar4',
                            foo[5], 'bar5')))

        main_thread.push_args(foo[3], foo[5])
        main_thread.append('CLEAR_RANGE_DATABASE')
        main_thread.append('WAIT_FUTURE')
        main_thread.push_args(self.workspace.key(), 0, 0, -1)
        main_thread.append('GET_RANGE_STARTS_WITH_DATABASE')
        self.add_result(main_thread, args,
                        fdb.tuple.pack((foo[1], 'bar', foo[5], 'bar5')))

        main_thread.push_args(self.workspace.key())
        main_thread.append('CLEAR_RANGE_STARTS_WITH_DATABASE')
        main_thread.append('WAIT_FUTURE')
        main_thread.push_args(self.workspace.key(), 0, 0, -1)
        main_thread.append('GET_RANGE_STARTS_WITH_DATABASE')
        self.add_result(main_thread, args, '')

        test_util.blocking_commit(main_thread)
        self.add_result(main_thread, args, 'RESULT_NOT_PRESENT')

        main_thread.append('NEW_TRANSACTION')
        main_thread.push_args(foo[1], foo[5], 0, 0, 0)
        main_thread.append('GET_RANGE')
        self.add_result(main_thread, args, test_util.error_string(2210))
        main_thread.push_args(foo[1], foo[5], 0, 0, 0)
        main_thread.append('GET_RANGE_DATABASE')
        self.add_result(main_thread, args, test_util.error_string(2210))

        self.append_range_test(main_thread, args, 100, 256)
        self.append_range_test(main_thread, args, 1000, 8)

        main_thread.append('EMPTY_STACK')
        tup = (0, 'foo', -1093, u'unicode\u9348test', 0xffffffff + 100,
               'bar\x00\xff')
        main_thread.push_args(*test_util.with_length(tup))
        main_thread.append('TUPLE_PACK')
        main_thread.append('DUP')
        self.add_result(main_thread, args, fdb.tuple.pack(tup))
        main_thread.append('TUPLE_UNPACK')
        for item in reversed(tup):
            self.add_result(main_thread, args, fdb.tuple.pack((item, )))

        main_thread.push_args(0xffffffff, -100)
        main_thread.append('SUB')
        main_thread.push_args(1)
        main_thread.append('TUPLE_PACK')
        self.add_result(main_thread, args, fdb.tuple.pack(
            (0xffffffff + 100, )))

        main_thread.append('EMPTY_STACK')
        main_thread.push_args(*test_util.with_length(tup))
        main_thread.append('TUPLE_RANGE')
        rng = fdb.tuple.range(tup)
        self.add_result(main_thread, args, rng.stop)
        self.add_result(main_thread, args, rng.start)

        stampKey = 'stampedXXXXXXXXXXsuffix'
        stampKeyIndex = stampKey.find('XXXXXXXXXX')
        main_thread.push_args(u'SET_VERSIONSTAMPED_KEY',
                              self.versionstamp_key(stampKey, stampKeyIndex),
                              'stampedBar')
        main_thread.append('ATOMIC_OP')
        main_thread.push_args(u'SET_VERSIONSTAMPED_VALUE', 'stampedValue',
                              self.versionstamp_value('XXXXXXXXXX'))
        main_thread.append('ATOMIC_OP')

        if self.api_version >= 520:
            stampValue = 'stampedXXXXXXXXXXsuffix'
            stampValueIndex = stampValue.find('XXXXXXXXXX')
            main_thread.push_args(
                u'SET_VERSIONSTAMPED_VALUE', 'stampedValue2',
                self.versionstamp_value(stampValue, stampValueIndex))
            main_thread.append('ATOMIC_OP')

        main_thread.push_args('suffix')
        main_thread.append('GET_VERSIONSTAMP')
        test_util.blocking_commit(main_thread)
        self.add_result(main_thread, args, 'RESULT_NOT_PRESENT')
        main_thread.push_args('stamped')
        main_thread.append('CONCAT')
        main_thread.append('CONCAT')
        main_thread.append('GET')
        self.add_result(main_thread, args, 'stampedBar')

        main_thread.push_args('stampedValue', 'suffix')
        main_thread.append('GET')
        main_thread.push_args('stamped')
        main_thread.append('CONCAT')
        main_thread.append('CONCAT')
        main_thread.append('GET')
        self.add_result(main_thread, args, 'stampedBar')

        if self.api_version >= 520:
            main_thread.push_args('stampedValue2')
            main_thread.append('GET')
            main_thread.append('GET')
            self.add_result(main_thread, args, 'stampedBar')

        main_thread.append('GET_VERSIONSTAMP')
        test_util.blocking_commit(main_thread)
        self.add_result(main_thread, args, 'RESULT_NOT_PRESENT')
        self.add_result(main_thread, args, test_util.error_string(2021))

        main_thread.push_args('sentinel')
        main_thread.append('UNIT_TESTS')
        self.add_result(main_thread, args, 'sentinel')

        if not args.no_threads:
            wait_key = 'waitKey'
            # threads = [self.thread_subspace[i] for i in range(0, 2)]
            threads = ['thread_spec%d' % i for i in range(0, 2)]
            for thread_spec in threads:
                main_thread.push_args(
                    self.workspace.pack((wait_key, thread_spec)), '')
                main_thread.append('SET_DATABASE')
                main_thread.append('WAIT_FUTURE')

            for thread_spec in threads:
                main_thread.push_args(thread_spec)
                # if len(main_thread) < args.num_ops:
                main_thread.append('START_THREAD')
                thread = test_instructions.create_thread(
                    fdb.Subspace((thread_spec, )))
                thread.append('NEW_TRANSACTION')
                thread.push_args(foo[1], foo[1], 'bar%s' % thread_spec,
                                 self.workspace.pack((wait_key, thread_spec)),
                                 self.workspace.pack((wait_key, thread_spec)))
                thread.append('GET')
                thread.append('POP')
                thread.append('SET')
                thread.append('CLEAR')
                test_util.blocking_commit(thread)
                thread.append('POP')
                thread.append('CLEAR_DATABASE')
                thread.push_args(self.workspace.pack((wait_key, )))
                thread.append('WAIT_EMPTY')

                thread.append('NEW_TRANSACTION')
                thread.push_args(foo[1])
                thread.append('GET')
                self.add_result(thread, args, 'barthread_spec0',
                                'barthread_spec1')

        main_thread.append('EMPTY_STACK')
        # if len(main_thread) > args.num_ops:
        #     main_thread[args.num_ops:] = []

        return test_instructions
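The GET_KEY calls in Example #6 take (key, or_equal, offset, prefix) and the expected results follow FoundationDB key-selector resolution. A small standalone illustration of that resolution over a sorted key list (a simplification: real resolution clamps to the keyspace bounds instead of returning None, and the prefix argument is ignored here):

import bisect

def resolve_selector(keys, key, or_equal, offset):
    # position of the last key <= key (or < key when or_equal is 0), then move by offset
    i = bisect.bisect_right(keys, key) - 1 if or_equal else bisect.bisect_left(keys, key) - 1
    i += offset
    return keys[i] if 0 <= i < len(keys) else None

keys = ['foo1', 'foo2', 'foo5']                 # foo3/foo4 cleared, as in the example
print(resolve_selector(keys, 'foo1', 0, 3))     # -> 'foo5', matching the expected foo[5]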
Example #7
    def generate(self, args, thread_number):
        instructions = InstructionSet()

        op_choices = ['NEW_TRANSACTION', 'COMMIT']

        reads = [
            'GET', 'GET_KEY', 'GET_RANGE', 'GET_RANGE_STARTS_WITH',
            'GET_RANGE_SELECTOR'
        ]
        mutations = [
            'SET', 'CLEAR', 'CLEAR_RANGE', 'CLEAR_RANGE_STARTS_WITH',
            'ATOMIC_OP'
        ]
        snapshot_reads = [x + '_SNAPSHOT' for x in reads]
        database_reads = [x + '_DATABASE' for x in reads]
        database_mutations = [x + '_DATABASE' for x in mutations]
        mutations += ['VERSIONSTAMP']
        versions = [
            'GET_READ_VERSION', 'SET_READ_VERSION', 'GET_COMMITTED_VERSION'
        ]
        snapshot_versions = ['GET_READ_VERSION_SNAPSHOT']
        tuples = [
            'TUPLE_PACK', 'TUPLE_UNPACK', 'TUPLE_RANGE', 'TUPLE_SORT', 'SUB',
            'ENCODE_FLOAT', 'ENCODE_DOUBLE', 'DECODE_DOUBLE', 'DECODE_FLOAT'
        ]
        if 'versionstamp' in args.types:
            tuples.append('TUPLE_PACK_WITH_VERSIONSTAMP')
        resets = ['ON_ERROR', 'RESET', 'CANCEL']
        read_conflicts = ['READ_CONFLICT_RANGE', 'READ_CONFLICT_KEY']
        write_conflicts = [
            'WRITE_CONFLICT_RANGE', 'WRITE_CONFLICT_KEY',
            'DISABLE_WRITE_CONFLICT'
        ]

        op_choices += reads
        op_choices += mutations
        op_choices += snapshot_reads
        op_choices += database_reads
        op_choices += database_mutations
        op_choices += versions
        op_choices += snapshot_versions
        op_choices += tuples
        op_choices += read_conflicts
        op_choices += write_conflicts
        op_choices += resets

        idempotent_atomic_ops = [
            u'BIT_AND', u'BIT_OR', u'MAX', u'MIN', u'BYTE_MIN', u'BYTE_MAX'
        ]
        atomic_ops = idempotent_atomic_ops + [u'ADD', u'BIT_XOR']

        if args.concurrency > 1:
            self.max_keys = random.randint(100, 1000)
        else:
            self.max_keys = random.randint(100, 10000)

        instructions.append('NEW_TRANSACTION')
        instructions.append('GET_READ_VERSION')

        self.preload_database(instructions, self.max_keys)

        instructions.setup_complete()

        for i in range(args.num_ops):
            op = random.choice(op_choices)
            index = len(instructions)
            read_performed = False

            # print 'Adding instruction %s at %d' % (op, index)

            if args.concurrency == 1 and (op in database_mutations):
                self.wait_for_reads(instructions)
                test_util.blocking_commit(instructions)
                self.can_get_commit_version = False
                self.add_stack_items(1)

            if op in resets or op == 'NEW_TRANSACTION':
                if args.concurrency == 1:
                    self.wait_for_reads(instructions)

                self.outstanding_ops = []

            if op == 'NEW_TRANSACTION':
                instructions.append(op)
                self.can_get_commit_version = True
                self.can_set_version = True
                self.can_use_key_selectors = True

            elif op == 'ON_ERROR':
                instructions.push_args(random.randint(0, 5000))
                instructions.append(op)

                self.outstanding_ops.append(
                    (self.stack_size, len(instructions) - 1))
                if args.concurrency == 1:
                    self.wait_for_reads(instructions)

                instructions.append('NEW_TRANSACTION')
                self.can_get_commit_version = True
                self.can_set_version = True
                self.can_use_key_selectors = True
                self.add_strings(1)

            elif op == 'GET' or op == 'GET_SNAPSHOT' or op == 'GET_DATABASE':
                self.ensure_key(instructions, 1)
                instructions.append(op)
                self.add_strings(1)
                self.can_set_version = False
                read_performed = True

            elif op == 'GET_KEY' or op == 'GET_KEY_SNAPSHOT' or op == 'GET_KEY_DATABASE':
                if op.endswith('_DATABASE') or self.can_use_key_selectors:
                    self.ensure_key(instructions, 1)
                    instructions.push_args(self.workspace.key())
                    instructions.push_args(
                        *self.random.random_selector_params())
                    test_util.to_front(instructions, 3)
                    instructions.append(op)

                    # Don't add key here because we may be outside of our prefix
                    self.add_strings(1)
                    self.can_set_version = False
                    read_performed = True

            elif op == 'GET_RANGE' or op == 'GET_RANGE_SNAPSHOT' or op == 'GET_RANGE_DATABASE':
                self.ensure_key(instructions, 2)
                range_params = self.random.random_range_params()
                instructions.push_args(*range_params)
                test_util.to_front(instructions, 4)
                test_util.to_front(instructions, 4)
                instructions.append(op)

                if 1 <= range_params[0] <= 1000:  # avoid adding a string if the limit is large
                    self.add_strings(1)
                else:
                    self.add_stack_items(1)

                self.can_set_version = False
                read_performed = True

            elif op == 'GET_RANGE_STARTS_WITH' or op == 'GET_RANGE_STARTS_WITH_SNAPSHOT' or op == 'GET_RANGE_STARTS_WITH_DATABASE':
                # TODO: not tested well
                self.ensure_key(instructions, 1)
                range_params = self.random.random_range_params()
                instructions.push_args(*range_params)
                test_util.to_front(instructions, 3)
                instructions.append(op)

                if 1 <= range_params[0] <= 1000:  # avoid adding a string if the limit is large
                    self.add_strings(1)
                else:
                    self.add_stack_items(1)

                self.can_set_version = False
                read_performed = True

            elif op == 'GET_RANGE_SELECTOR' or op == 'GET_RANGE_SELECTOR_SNAPSHOT' or op == 'GET_RANGE_SELECTOR_DATABASE':
                if op.endswith('_DATABASE') or self.can_use_key_selectors:
                    self.ensure_key(instructions, 2)
                    instructions.push_args(self.workspace.key())
                    range_params = self.random.random_range_params()
                    instructions.push_args(*range_params)
                    instructions.push_args(
                        *self.random.random_selector_params())
                    test_util.to_front(instructions, 6)
                    instructions.push_args(
                        *self.random.random_selector_params())
                    test_util.to_front(instructions, 9)
                    instructions.append(op)

                    if 1 <= range_params[0] <= 1000:  # avoid adding a string if the limit is large
                        self.add_strings(1)
                    else:
                        self.add_stack_items(1)

                    self.can_set_version = False
                    read_performed = True

            elif op == 'GET_READ_VERSION' or op == 'GET_READ_VERSION_SNAPSHOT':
                instructions.append(op)
                self.has_version = self.can_set_version
                self.add_strings(1)

            elif op == 'SET' or op == 'SET_DATABASE':
                self.ensure_key_value(instructions)
                instructions.append(op)
                if op == 'SET_DATABASE':
                    self.add_stack_items(1)

            elif op == 'SET_READ_VERSION':
                if self.has_version and self.can_set_version:
                    instructions.append(op)
                    self.can_set_version = False

            elif op == 'CLEAR' or op == 'CLEAR_DATABASE':
                self.ensure_key(instructions, 1)
                instructions.append(op)
                if op == 'CLEAR_DATABASE':
                    self.add_stack_items(1)

            elif op == 'CLEAR_RANGE' or op == 'CLEAR_RANGE_DATABASE':
                # Protect against inverted range
                key1 = self.workspace.pack(self.random.random_tuple(5))
                key2 = self.workspace.pack(self.random.random_tuple(5))

                if key1 > key2:
                    key1, key2 = key2, key1

                instructions.push_args(key1, key2)

                instructions.append(op)
                if op == 'CLEAR_RANGE_DATABASE':
                    self.add_stack_items(1)

            elif op == 'CLEAR_RANGE_STARTS_WITH' or op == 'CLEAR_RANGE_STARTS_WITH_DATABASE':
                self.ensure_key(instructions, 1)
                instructions.append(op)
                if op == 'CLEAR_RANGE_STARTS_WITH_DATABASE':
                    self.add_stack_items(1)

            elif op == 'ATOMIC_OP' or op == 'ATOMIC_OP_DATABASE':
                self.ensure_key_value(instructions)
                if op == 'ATOMIC_OP' or args.concurrency > 1:
                    instructions.push_args(random.choice(atomic_ops))
                else:
                    instructions.push_args(
                        random.choice(idempotent_atomic_ops))

                instructions.append(op)
                if op == 'ATOMIC_OP_DATABASE':
                    self.add_stack_items(1)

            elif op == 'VERSIONSTAMP':
                rand_str1 = self.random.random_string(100)
                key1 = self.versionstamped_values.pack((rand_str1, ))

                split = random.randint(0, 70)
                rand_str2 = (self.random.random_string(20 + split)
                             + fdb.tuple.Versionstamp._UNSET_TR_VERSION
                             + self.random.random_string(70 - split))
                key2 = self.versionstamped_keys.pack() + rand_str2
                index = key2.find(fdb.tuple.Versionstamp._UNSET_TR_VERSION)
                key2 += chr(index % 256) + chr(index // 256)

                instructions.push_args(
                    u'SET_VERSIONSTAMPED_VALUE', key1,
                    fdb.tuple.Versionstamp._UNSET_TR_VERSION + rand_str2)
                instructions.append('ATOMIC_OP')

                instructions.push_args(u'SET_VERSIONSTAMPED_KEY', key2,
                                       rand_str1)
                instructions.append('ATOMIC_OP')
                self.can_use_key_selectors = False

            elif op == 'READ_CONFLICT_RANGE' or op == 'WRITE_CONFLICT_RANGE':
                self.ensure_key(instructions, 2)
                instructions.append(op)
                self.add_strings(1)

            elif op == 'READ_CONFLICT_KEY' or op == 'WRITE_CONFLICT_KEY':
                self.ensure_key(instructions, 1)
                instructions.append(op)
                self.add_strings(1)

            elif op == 'DISABLE_WRITE_CONFLICT':
                instructions.append(op)

            elif op == 'COMMIT':
                if args.concurrency == 1 or i < self.max_keys or random.random() < 0.9:
                    if args.concurrency == 1:
                        self.wait_for_reads(instructions)
                    test_util.blocking_commit(instructions)
                    self.can_get_commit_version = False
                    self.add_stack_items(1)
                    self.can_set_version = True
                    self.can_use_key_selectors = True
                else:
                    instructions.append(op)
                    self.add_strings(1)

            elif op == 'RESET':
                instructions.append(op)
                self.can_get_commit_version = False
                self.can_set_version = True
                self.can_use_key_selectors = True

            elif op == 'CANCEL':
                instructions.append(op)
                self.can_set_version = False

            elif op == 'GET_COMMITTED_VERSION':
                if self.can_get_commit_version:
                    do_commit = random.random() < 0.5

                    if do_commit:
                        instructions.append('COMMIT')
                        instructions.append('WAIT_FUTURE')
                        self.add_stack_items(1)

                    instructions.append(op)

                    self.has_version = True
                    self.add_strings(1)

                    if do_commit:
                        instructions.append('RESET')
                        self.can_get_commit_version = False
                        self.can_set_version = True
                        self.can_use_key_selectors = True

            elif op == 'TUPLE_PACK' or op == 'TUPLE_RANGE':
                tup = self.random.random_tuple(10)
                instructions.push_args(len(tup), *tup)
                instructions.append(op)
                if op == 'TUPLE_PACK':
                    self.add_strings(1)
                else:
                    self.add_strings(2)

            elif op == 'TUPLE_PACK_WITH_VERSIONSTAMP':
                tup = (self.random.random_string(20),
                       ) + self.random.random_tuple(
                           10, incomplete_versionstamps=True)
                instructions.push_args(self.versionstamped_keys.pack(),
                                       len(tup), *tup)
                instructions.append(op)
                self.add_strings(1)

                version_key = self.versionstamped_keys.pack(tup)
                first_incomplete = version_key.find(
                    fdb.tuple.Versionstamp._UNSET_TR_VERSION)
                second_incomplete = -1 if first_incomplete < 0 else \
                    version_key.find(fdb.tuple.Versionstamp._UNSET_TR_VERSION, first_incomplete + len(fdb.tuple.Versionstamp._UNSET_TR_VERSION) + 1)

                # If there is exactly one incomplete versionstamp, perform the versionstamped key operation.
                if first_incomplete >= 0 and second_incomplete < 0:
                    rand_str = self.random.random_string(100)

                    instructions.push_args(rand_str)
                    test_util.to_front(instructions, 1)
                    instructions.push_args(u'SET_VERSIONSTAMPED_KEY')
                    instructions.append('ATOMIC_OP')

                    version_value_key = self.versionstamped_values.pack(
                        (rand_str, ))
                    instructions.push_args(
                        u'SET_VERSIONSTAMPED_VALUE', version_value_key,
                        fdb.tuple.Versionstamp._UNSET_TR_VERSION +
                        fdb.tuple.pack(tup))
                    instructions.append('ATOMIC_OP')
                    self.can_use_key_selectors = False

            elif op == 'TUPLE_UNPACK':
                tup = self.random.random_tuple(10)
                instructions.push_args(len(tup), *tup)
                instructions.append('TUPLE_PACK')
                instructions.append(op)
                self.add_strings(len(tup))

            elif op == 'TUPLE_SORT':
                tups = self.random.random_tuple_list(10, 30)
                for tup in tups:
                    instructions.push_args(len(tup), *tup)
                    instructions.append('TUPLE_PACK')
                instructions.push_args(len(tups))
                instructions.append(op)
                self.add_strings(len(tups))

            # Use SUB to test if integers are correctly unpacked
            elif op == 'SUB':
                a = self.random.random_int() // 2
                b = self.random.random_int() // 2
                instructions.push_args(0, a, b)
                instructions.append(op)
                instructions.push_args(1)
                instructions.append('SWAP')
                instructions.append(op)
                instructions.push_args(1)
                instructions.append('TUPLE_PACK')
                self.add_stack_items(1)

            elif op == 'ENCODE_FLOAT':
                f = self.random.random_float(8)
                f_bytes = struct.pack('>f', f)
                instructions.push_args(f_bytes)
                instructions.append(op)
                self.add_stack_items(1)

            elif op == 'ENCODE_DOUBLE':
                d = self.random.random_float(11)
                d_bytes = struct.pack('>d', d)
                instructions.push_args(d_bytes)
                instructions.append(op)
                self.add_stack_items(1)

            elif op == 'DECODE_FLOAT':
                f = self.random.random_float(8)
                instructions.push_args(fdb.tuple.SingleFloat(f))
                instructions.append(op)
                self.add_strings(1)

            elif op == 'DECODE_DOUBLE':
                d = self.random.random_float(11)
                instructions.push_args(d)
                instructions.append(op)
                self.add_strings(1)

            else:
                assert False

            if read_performed and op not in database_reads:
                self.outstanding_ops.append(
                    (self.stack_size, len(instructions) - 1))

            if args.concurrency == 1 and (op in database_reads
                                          or op in database_mutations):
                instructions.append('WAIT_FUTURE')

        instructions.begin_finalization()

        if args.concurrency == 1:
            self.wait_for_reads(instructions)
            test_util.blocking_commit(instructions)
            self.add_stack_items(1)

        instructions.append('NEW_TRANSACTION')
        instructions.push_args(self.stack_subspace.key())
        instructions.append('LOG_STACK')

        test_util.blocking_commit(instructions)

        return instructions
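In the VERSIONSTAMP branch of Example #7, the two bytes appended to key2 encode the placeholder's byte offset as a little-endian 16-bit integer (apparently the pre-520 two-byte form; newer API versions use a wider offset field). A quick standalone check of that encoding:

import struct

def offset_suffix(index):
    # same arithmetic as chr(index % 256) + chr(index // 256), expressed as bytes
    return bytes([index % 256, index // 256])

assert offset_suffix(300) == struct.pack('<H', 300)
print(offset_suffix(300).hex())  # '2c01'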
Example #8
    def commit_transactions(self, instructions, args):
        for tr in self.transactions:
            if random.random() < 0.8 or args.api_version < 300:
                instructions.push_args(tr)
                instructions.append('USE_TRANSACTION')
                test_util.blocking_commit(instructions)
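commit_transactions above switches between named transactions with USE_TRANSACTION before committing each one. A hypothetical sketch of the bookkeeping a stack-machine implementation might keep for that instruction (illustrative only; db.create_transaction() is the standard fdb Database call, everything else is assumed):

transactions = {}                        # transaction name -> transaction object

def use_transaction(name, db):
    # hypothetical helper: create the named transaction on first use,
    # then make it the active transaction for subsequent instructions
    if name not in transactions:
        transactions[name] = db.create_transaction()
    return transactions[name]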
Example #9
    def generate(self, args, thread_number):
        instructions = InstructionSet()

        op_choices = ['NEW_TRANSACTION', 'COMMIT']

        reads = [
            'GET', 'GET_KEY', 'GET_RANGE', 'GET_RANGE_STARTS_WITH',
            'GET_RANGE_SELECTOR'
        ]
        mutations = [
            'SET', 'CLEAR', 'CLEAR_RANGE', 'CLEAR_RANGE_STARTS_WITH',
            'ATOMIC_OP'
        ]
        snapshot_reads = [x + '_SNAPSHOT' for x in reads]
        database_reads = [x + '_DATABASE' for x in reads]
        database_mutations = [x + '_DATABASE' for x in mutations]
        mutations += ['VERSIONSTAMP']
        versions = [
            'GET_READ_VERSION', 'SET_READ_VERSION', 'GET_COMMITTED_VERSION'
        ]
        snapshot_versions = ['GET_READ_VERSION_SNAPSHOT']
        tuples = [
            'TUPLE_PACK', 'TUPLE_UNPACK', 'TUPLE_RANGE', 'TUPLE_SORT', 'SUB',
            'ENCODE_FLOAT', 'ENCODE_DOUBLE', 'DECODE_DOUBLE', 'DECODE_FLOAT'
        ]
        if 'versionstamp' in args.types:
            tuples.append('TUPLE_PACK_WITH_VERSIONSTAMP')
        resets = ['ON_ERROR', 'RESET', 'CANCEL']
        read_conflicts = ['READ_CONFLICT_RANGE', 'READ_CONFLICT_KEY']
        write_conflicts = [
            'WRITE_CONFLICT_RANGE', 'WRITE_CONFLICT_KEY',
            'DISABLE_WRITE_CONFLICT'
        ]
        txn_sizes = ['GET_APPROXIMATE_SIZE']
        storage_metrics = [
            'GET_ESTIMATED_RANGE_SIZE', 'GET_RANGE_SPLIT_POINTS'
        ]
        tenants = [
            'TENANT_CREATE', 'TENANT_DELETE', 'TENANT_SET_ACTIVE',
            'TENANT_CLEAR_ACTIVE', 'TENANT_LIST'
        ]

        op_choices += reads
        op_choices += mutations
        op_choices += snapshot_reads
        op_choices += database_reads
        op_choices += database_mutations
        op_choices += versions
        op_choices += snapshot_versions
        op_choices += tuples
        op_choices += read_conflicts
        op_choices += write_conflicts
        op_choices += resets
        op_choices += txn_sizes
        op_choices += storage_metrics

        if not args.no_tenants:
            op_choices += tenants

        idempotent_atomic_ops = [
            'BIT_AND', 'BIT_OR', 'MAX', 'MIN', 'BYTE_MIN', 'BYTE_MAX'
        ]
        atomic_ops = idempotent_atomic_ops + [
            'ADD', 'BIT_XOR', 'APPEND_IF_FITS'
        ]

        if args.concurrency > 1:
            self.max_keys = random.randint(100, 1000)
        else:
            self.max_keys = random.randint(100, 10000)

        instructions.append('NEW_TRANSACTION')
        instructions.append('GET_READ_VERSION')

        self.preload_database(instructions, self.max_keys)

        instructions.setup_complete()

        for i in range(args.num_ops):
            op = random.choice(op_choices)
            index = len(instructions)
            read_performed = False

            # print 'Adding instruction %s at %d' % (op, index)

            if args.concurrency == 1 and (
                    op in database_mutations
                    or op in ['TENANT_CREATE', 'TENANT_DELETE']):
                self.wait_for_reads(instructions)
                test_util.blocking_commit(instructions)
                self.can_get_commit_version = False
                self.add_stack_items(1)

            if op in resets or op == 'NEW_TRANSACTION':
                if args.concurrency == 1:
                    self.wait_for_reads(instructions)

                self.outstanding_ops = []

            if op == 'NEW_TRANSACTION':
                instructions.append(op)
                self.can_get_commit_version = True
                self.can_set_version = True
                self.can_use_key_selectors = True

            elif op == 'ON_ERROR':
                instructions.push_args(random.randint(0, 5000))
                instructions.append(op)

                self.outstanding_ops.append(
                    (self.stack_size, len(instructions) - 1))
                if args.concurrency == 1:
                    self.wait_for_reads(instructions)

                instructions.append('NEW_TRANSACTION')
                self.can_get_commit_version = True
                self.can_set_version = True
                self.can_use_key_selectors = True
                self.add_strings(1)

            elif op == 'GET' or op == 'GET_SNAPSHOT' or op == 'GET_DATABASE':
                self.ensure_key(instructions, 1)
                instructions.append(op)
                self.add_strings(1)
                self.can_set_version = False
                read_performed = True

            elif op == 'GET_KEY' or op == 'GET_KEY_SNAPSHOT' or op == 'GET_KEY_DATABASE':
                if op.endswith('_DATABASE') or self.can_use_key_selectors:
                    self.ensure_key(instructions, 1)
                    instructions.push_args(self.workspace.key())
                    instructions.push_args(
                        *self.random.random_selector_params())
                    test_util.to_front(instructions, 3)
                    instructions.append(op)

                    # Don't add key here because we may be outside of our prefix
                    self.add_strings(1)
                    self.can_set_version = False
                    read_performed = True

            elif op == 'GET_RANGE' or op == 'GET_RANGE_SNAPSHOT' or op == 'GET_RANGE_DATABASE':
                self.ensure_key(instructions, 2)
                range_params = self.random.random_range_params()
                instructions.push_args(*range_params)
                test_util.to_front(instructions, 4)
                test_util.to_front(instructions, 4)
                instructions.append(op)

                if 1 <= range_params[0] <= 1000:  # avoid adding a string if the limit is large
                    self.add_strings(1)
                else:
                    self.add_stack_items(1)

                self.can_set_version = False
                read_performed = True

            elif op == 'GET_RANGE_STARTS_WITH' or op == 'GET_RANGE_STARTS_WITH_SNAPSHOT' or op == 'GET_RANGE_STARTS_WITH_DATABASE':
                # TODO: not tested well
                self.ensure_key(instructions, 1)
                range_params = self.random.random_range_params()
                instructions.push_args(*range_params)
                test_util.to_front(instructions, 3)
                instructions.append(op)

                # avoid adding a string if the limit is large
                if 1 <= range_params[0] <= 1000:
                    self.add_strings(1)
                else:
                    self.add_stack_items(1)

                self.can_set_version = False
                read_performed = True

            elif op in ('GET_RANGE_SELECTOR', 'GET_RANGE_SELECTOR_SNAPSHOT',
                        'GET_RANGE_SELECTOR_DATABASE'):
                if op.endswith('_DATABASE') or self.can_use_key_selectors:
                    self.ensure_key(instructions, 2)
                    instructions.push_args(self.workspace.key())
                    range_params = self.random.random_range_params()
                    instructions.push_args(*range_params)
                    instructions.push_args(
                        *self.random.random_selector_params())
                    test_util.to_front(instructions, 6)
                    instructions.push_args(
                        *self.random.random_selector_params())
                    test_util.to_front(instructions, 9)
                    instructions.append(op)

                    # avoid adding a string if the limit is large
                    if 1 <= range_params[0] <= 1000:
                        self.add_strings(1)
                    else:
                        self.add_stack_items(1)

                    self.can_set_version = False
                    read_performed = True

            elif op == 'GET_READ_VERSION' or op == 'GET_READ_VERSION_SNAPSHOT':
                instructions.append(op)
                self.has_version = self.can_set_version
                self.add_strings(1)

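            # Simple writes: the _DATABASE variants leave an extra item on the stack, which
            # is accounted for below.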
            elif op == 'SET' or op == 'SET_DATABASE':
                self.ensure_key_value(instructions)
                instructions.append(op)
                if op == 'SET_DATABASE':
                    self.add_stack_items(1)

            elif op == 'SET_READ_VERSION':
                if self.has_version and self.can_set_version:
                    instructions.append(op)
                    self.can_set_version = False

            elif op == 'CLEAR' or op == 'CLEAR_DATABASE':
                self.ensure_key(instructions, 1)
                instructions.append(op)
                if op == 'CLEAR_DATABASE':
                    self.add_stack_items(1)

            elif op == 'CLEAR_RANGE' or op == 'CLEAR_RANGE_DATABASE':
                # Protect against inverted range
                key1 = self.workspace.pack(self.random.random_tuple(5))
                key2 = self.workspace.pack(self.random.random_tuple(5))

                if key1 > key2:
                    key1, key2 = key2, key1

                instructions.push_args(key1, key2)

                instructions.append(op)
                if op == 'CLEAR_RANGE_DATABASE':
                    self.add_stack_items(1)

            elif op == 'CLEAR_RANGE_STARTS_WITH' or op == 'CLEAR_RANGE_STARTS_WITH_DATABASE':
                self.ensure_key(instructions, 1)
                instructions.append(op)
                if op == 'CLEAR_RANGE_STARTS_WITH_DATABASE':
                    self.add_stack_items(1)

            elif op == 'ATOMIC_OP' or op == 'ATOMIC_OP_DATABASE':
                self.ensure_key_value(instructions)
                if op == 'ATOMIC_OP' or args.concurrency > 1:
                    instructions.push_args(random.choice(atomic_ops))
                else:
                    instructions.push_args(
                        random.choice(idempotent_atomic_ops))

                instructions.append(op)
                if op == 'ATOMIC_OP_DATABASE':
                    self.add_stack_items(1)

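            # Versionstamp test: store versionstamped values under two subspaces (the
            # second only on API 520+) and a versionstamped key, tying rand_str1 and
            # rand_str2 together so the stamped results can be checked later.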
            elif op == 'VERSIONSTAMP':
                rand_str1 = self.random.random_string(100)
                key1 = self.versionstamped_values.pack((rand_str1, ))
                key2 = self.versionstamped_values_2.pack((rand_str1, ))

                split = random.randint(0, 70)
                prefix = self.random.random_string(20 + split)
                if prefix.endswith(b'\xff'):
                    # Necessary to make sure that the SET_VERSIONSTAMPED_VALUE check
                    # correctly finds where the version is supposed to fit in.
                    prefix += b'\x00'
                suffix = self.random.random_string(70 - split)
                rand_str2 = prefix + fdb.tuple.Versionstamp._UNSET_TR_VERSION + suffix
                key3 = self.versionstamped_keys.pack() + rand_str2
                index = len(self.versionstamped_keys.pack()) + len(prefix)
                key3 = self.versionstamp_key(key3, index)

                instructions.push_args(
                    'SET_VERSIONSTAMPED_VALUE', key1,
                    self.versionstamp_value(
                        fdb.tuple.Versionstamp._UNSET_TR_VERSION + rand_str2))
                instructions.append('ATOMIC_OP')

                if args.api_version >= 520:
                    instructions.push_args(
                        'SET_VERSIONSTAMPED_VALUE', key2,
                        self.versionstamp_value(rand_str2, len(prefix)))
                    instructions.append('ATOMIC_OP')

                instructions.push_args('SET_VERSIONSTAMPED_KEY', key3,
                                       rand_str1)
                instructions.append('ATOMIC_OP')
                self.can_use_key_selectors = False

            elif op == 'READ_CONFLICT_RANGE' or op == 'WRITE_CONFLICT_RANGE':
                self.ensure_key(instructions, 2)
                instructions.append(op)
                self.add_strings(1)

            elif op == 'READ_CONFLICT_KEY' or op == 'WRITE_CONFLICT_KEY':
                self.ensure_key(instructions, 1)
                instructions.append(op)
                self.add_strings(1)

            elif op == 'DISABLE_WRITE_CONFLICT':
                instructions.append(op)

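            # Commits are usually made blocking (commit followed by a wait) so the stack
            # stays predictable; with higher concurrency a plain COMMIT future is
            # occasionally left on the stack instead.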
            elif op == 'COMMIT':
                if args.concurrency == 1 or i < self.max_keys or random.random() < 0.9:
                    if args.concurrency == 1:
                        self.wait_for_reads(instructions)
                    test_util.blocking_commit(instructions)
                    self.can_get_commit_version = False
                    self.add_stack_items(1)
                    self.can_set_version = True
                    self.can_use_key_selectors = True
                else:
                    instructions.append(op)
                    self.add_strings(1)

            elif op == 'RESET':
                instructions.append(op)
                self.can_get_commit_version = False
                self.can_set_version = True
                self.can_use_key_selectors = True

            elif op == 'CANCEL':
                instructions.append(op)
                self.can_set_version = False

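            # Half the time, commit first so that a committed version actually exists,
            # then reset the transaction afterwards to return to a clean state.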
            elif op == 'GET_COMMITTED_VERSION':
                if self.can_get_commit_version:
                    do_commit = random.random() < 0.5

                    if do_commit:
                        instructions.append('COMMIT')
                        instructions.append('WAIT_FUTURE')
                        self.add_stack_items(1)

                    instructions.append(op)

                    self.has_version = True
                    self.add_strings(1)

                    if do_commit:
                        instructions.append('RESET')
                        self.can_get_commit_version = False
                        self.can_set_version = True
                        self.can_use_key_selectors = True

            elif op == 'GET_APPROXIMATE_SIZE':
                instructions.append(op)
                self.add_strings(1)

            elif op == 'TUPLE_PACK' or op == 'TUPLE_RANGE':
                tup = self.random.random_tuple(10)
                instructions.push_args(len(tup), *tup)
                instructions.append(op)
                if op == 'TUPLE_PACK':
                    self.add_strings(1)
                else:
                    self.add_strings(2)

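            # Pack a tuple that may contain incomplete versionstamps; if exactly one is
            # present, the packed bytes are also written back with SET_VERSIONSTAMPED_KEY
            # and SET_VERSIONSTAMPED_VALUE for later validation.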
            elif op == 'TUPLE_PACK_WITH_VERSIONSTAMP':
                tup = (self.random.random_string(20),) + \
                    self.random.random_tuple(10, incomplete_versionstamps=True)
                prefix = self.versionstamped_keys.pack()
                instructions.push_args(prefix, len(tup), *tup)
                instructions.append(op)
                self.add_strings(1)

                versionstamp_param = prefix + fdb.tuple.pack(tup)
                first_incomplete = versionstamp_param.find(
                    fdb.tuple.Versionstamp._UNSET_TR_VERSION)
                second_incomplete = -1 if first_incomplete < 0 else \
                    versionstamp_param.find(
                        fdb.tuple.Versionstamp._UNSET_TR_VERSION,
                        first_incomplete + len(fdb.tuple.Versionstamp._UNSET_TR_VERSION) + 1)

                # If there is exactly one incomplete versionstamp, perform the versionstamp operation.
                if first_incomplete >= 0 and second_incomplete < 0:
                    rand_str = self.random.random_string(100)

                    instructions.push_args(rand_str)
                    test_util.to_front(instructions, 1)
                    instructions.push_args('SET_VERSIONSTAMPED_KEY')
                    instructions.append('ATOMIC_OP')

                    if self.api_version >= 520:
                        version_value_key_2 = self.versionstamped_values_2.pack(
                            (rand_str, ))
                        versionstamped_value = self.versionstamp_value(
                            fdb.tuple.pack(tup),
                            first_incomplete - len(prefix))
                        instructions.push_args('SET_VERSIONSTAMPED_VALUE',
                                               version_value_key_2,
                                               versionstamped_value)
                        instructions.append('ATOMIC_OP')

                    version_value_key = self.versionstamped_values.pack(
                        (rand_str, ))
                    instructions.push_args(
                        'SET_VERSIONSTAMPED_VALUE', version_value_key,
                        self.versionstamp_value(
                            fdb.tuple.Versionstamp._UNSET_TR_VERSION +
                            fdb.tuple.pack(tup)))
                    instructions.append('ATOMIC_OP')
                    self.can_use_key_selectors = False

            elif op == 'TUPLE_UNPACK':
                tup = self.random.random_tuple(10)
                instructions.push_args(len(tup), *tup)
                instructions.append('TUPLE_PACK')
                instructions.append(op)
                self.add_strings(len(tup))

            elif op == 'TUPLE_SORT':
                tups = self.random.random_tuple_list(10, 30)
                for tup in tups:
                    instructions.push_args(len(tup), *tup)
                    instructions.append('TUPLE_PACK')
                instructions.push_args(len(tups))
                instructions.append(op)
                self.add_strings(len(tups))

            # Use SUB to test if integers are correctly unpacked
            elif op == 'SUB':
                a = self.random.random_int() // 2
                b = self.random.random_int() // 2
                instructions.push_args(0, a, b)
                instructions.append(op)
                instructions.push_args(1)
                instructions.append('SWAP')
                instructions.append(op)
                instructions.push_args(1)
                instructions.append('TUPLE_PACK')
                self.add_stack_items(1)

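            # Floating-point ops: push raw big-endian IEEE-754 bytes for the encode ops and
            # float values for the decode ops, exercising the binding's float handling.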
            elif op == 'ENCODE_FLOAT':
                f = self.random.random_float(8)
                f_bytes = struct.pack('>f', f)
                instructions.push_args(f_bytes)
                instructions.append(op)
                self.add_stack_items(1)

            elif op == 'ENCODE_DOUBLE':
                d = self.random.random_float(11)
                d_bytes = struct.pack('>d', d)
                instructions.push_args(d_bytes)
                instructions.append(op)
                self.add_stack_items(1)

            elif op == 'DECODE_FLOAT':
                f = self.random.random_float(8)
                instructions.push_args(fdb.tuple.SingleFloat(f))
                instructions.append(op)
                self.add_strings(1)

            elif op == 'DECODE_DOUBLE':
                d = self.random.random_float(11)
                instructions.push_args(d)
                instructions.append(op)
                self.add_strings(1)
            elif op == 'GET_ESTIMATED_RANGE_SIZE':
                # Protect against inverted range and identical keys
                key1 = self.workspace.pack(self.random.random_tuple(1))
                key2 = self.workspace.pack(self.random.random_tuple(1))

                while key1 == key2:
                    key1 = self.workspace.pack(self.random.random_tuple(1))
                    key2 = self.workspace.pack(self.random.random_tuple(1))

                if key1 > key2:
                    key1, key2 = key2, key1

                instructions.push_args(key1, key2)
                instructions.append(op)
                self.add_strings(1)
            elif op == 'GET_RANGE_SPLIT_POINTS':
                # Protect against inverted range and identical keys
                key1 = self.workspace.pack(self.random.random_tuple(1))
                key2 = self.workspace.pack(self.random.random_tuple(1))

                while key1 == key2:
                    key1 = self.workspace.pack(self.random.random_tuple(1))
                    key2 = self.workspace.pack(self.random.random_tuple(1))

                if key1 > key2:
                    key1, key2 = key2, key1

                # TODO: randomize chunkSize, but it should not exceed 100M (the shard size limit)
                chunkSize = 10000000  # 10M
                instructions.push_args(key1, key2, chunkSize)
                instructions.append(op)
                self.add_strings(1)
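            # Tenant ops: choose_tenant picks a tenant name (its argument appears to be the
            # probability of reusing a known name), and allocated_tenants tracks which
            # tenants this test expects to exist.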
            elif op == 'TENANT_CREATE':
                tenant_name = self.choose_tenant(0.8)
                self.allocated_tenants.add(tenant_name)
                instructions.push_args(tenant_name)
                instructions.append(op)
                self.add_strings(1)
            elif op == 'TENANT_DELETE':
                tenant_name = self.choose_tenant(0.2)
                if tenant_name in self.allocated_tenants:
                    self.allocated_tenants.remove(tenant_name)
                instructions.push_args(tenant_name)
                instructions.append(op)
                self.add_strings(1)
            elif op == 'TENANT_SET_ACTIVE':
                tenant_name = self.choose_tenant(0.8)
                instructions.push_args(tenant_name)
                instructions.append(op)
            elif op == 'TENANT_CLEAR_ACTIVE':
                instructions.append(op)
            elif op == 'TENANT_LIST':
                self.ensure_string(instructions, 2)
                instructions.push_args(self.random.random_int())
                test_util.to_front(instructions, 2)
                test_util.to_front(instructions, 2)
                instructions.append(op)
                self.add_strings(1)
            else:
                assert False, 'Unknown operation: ' + op

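            # Reads done inside the transaction are recorded as outstanding so that a later
            # wait_for_reads call can wait on their futures before the next commit.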
            if read_performed and op not in database_reads:
                self.outstanding_ops.append(
                    (self.stack_size, len(instructions) - 1))

            if args.concurrency == 1 and (
                    op in database_reads or op in database_mutations
                    or op in ['TENANT_CREATE', 'TENANT_DELETE']):
                instructions.append('WAIT_FUTURE')

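        # Finalization: clear any active tenant, flush remaining work when single-threaded,
        # then log the expected stack contents under the stack subspace.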
        instructions.begin_finalization()

        if not args.no_tenants:
            instructions.append('TENANT_CLEAR_ACTIVE')

        if args.concurrency == 1:
            self.wait_for_reads(instructions)
            test_util.blocking_commit(instructions)
            self.add_stack_items(1)

        instructions.append('NEW_TRANSACTION')
        instructions.push_args(self.stack_subspace.key())
        instructions.append('LOG_STACK')

        test_util.blocking_commit(instructions)

        return instructions