def _generate_test(self):
    """Build the instruction database for one test run.

    Seeds the RNG from self.test_seed so generation is reproducible, then
    returns a dict mapping an fdb.Subspace (the prefix a tester reads its
    instructions from) to the InstructionSet stored under it.
    """
    util.get_logger().info('Generating %s test at seed %d with %d op(s) and %d concurrent tester(s)...' %
                           (self.args.test_name, self.args.seed, self.args.num_ops, self.args.concurrency))

    random.seed(self.test_seed)

    if self.args.concurrency == 1:
        # Single tester: one instruction stream under the main prefix.
        self.test.setup(self.args)
        test_instructions = {fdb.Subspace((bytes(self.args.instruction_prefix, 'utf-8'),)): self.test.generate(self.args, 0)}
    else:
        # Concurrent testers: one instruction stream per thread, plus a main
        # stream under the instruction prefix that only starts the threads.
        test_instructions = {}
        main_thread = InstructionSet()
        for i in range(self.args.concurrency):
            # thread_spec = fdb.Subspace(('thread_spec', i))
            thread_spec = b'thread_spec%d' % i
            main_thread.push_args(thread_spec)
            main_thread.append('START_THREAD')
            # setup() is re-run per thread so each stream generates against
            # fresh per-test state.
            self.test.setup(self.args)
            test_instructions[fdb.Subspace((thread_spec,))] = self.test.generate(self.args, i)

        test_instructions[fdb.Subspace((bytes(self.args.instruction_prefix, 'utf-8'),))] = main_thread

    return test_instructions
def generate(self, args, thread_number):
    """Generate the instruction stream for the directory HCA test.

    Creates directories in randomly-sized batches across several
    transactions, inserting barriers between concurrent testers so all
    threads agree on transaction boundaries.

    :param args: parsed command-line options (num_ops, concurrency, ...)
    :param thread_number: index of the tester thread this stream is for
    :return: the populated InstructionSet
    """
    instructions = InstructionSet()
    instructions.append('NEW_TRANSACTION')

    # FIX: previously wrapped in the Python 2-only builtin unicode(),
    # which raises NameError on Python 3; str is already unicode there.
    default_path = 'default%d' % self.next_path
    self.next_path += 1
    dir_list = directory_util.setup_directories(instructions, default_path, self.random)
    num_dirs = len(dir_list)

    instructions.push_args(directory_util.DEFAULT_DIRECTORY_INDEX)
    instructions.append('DIRECTORY_CHANGE')

    instructions.setup_complete()

    current_op = 0
    while current_op < args.num_ops:
        if args.concurrency > 1:
            self.barrier(instructions, thread_number)

        instructions.push_args(random.choice(self.transactions))
        instructions.append('USE_TRANSACTION')

        if thread_number == 0 and args.concurrency > 1:
            num_directories = 1
        else:
            # pow(x, 4) skews batch sizes toward 1; cap so we never exceed
            # the requested total op count.
            num_directories = int(max(1, pow(random.random(), 4) *
                                      min(self.max_directories_per_transaction, args.num_ops - current_op)))

        for i in range(num_directories):
            path = (self.random.random_unicode_str(16),)
            # ('', None) are the remaining DIRECTORY_CREATE args —
            # presumably layer and prefix (None => allocate); confirm
            # against the stack-machine spec.
            op_args = test_util.with_length(path) + ('', None)
            directory_util.push_instruction_and_record_prefix(
                instructions, 'DIRECTORY_CREATE', op_args, path, num_dirs, self.random, self.prefix_log)
            num_dirs += 1

        current_op += num_directories

        if args.concurrency > 1:
            self.barrier(instructions, thread_number, thread_ending=(current_op >= args.num_ops))

    if thread_number == 0:
        self.commit_transactions(instructions, args)

    return instructions
def generate(self, args, thread_number):
    """Generate the instruction stream for the tuple integer-encoding test.

    For every power of two within the supported bit width, in both signs
    and nudged by small offsets around the boundary, SETs a key labelled
    with the value's description to the tuple-packed integer, committing
    periodically, then logs the stack.
    """
    instructions = InstructionSet()

    min_value = -2**self.max_int_bits + 1
    max_value = 2**self.max_int_bits - 1

    instructions.append('NEW_TRANSACTION')

    # Test integer encoding
    pending_mutations = 0
    for bit in range(self.max_int_bits + 1):
        for sign in (-1, 1):
            sign_str = '-' if sign < 0 else ''
            for offset in range(-10, 11):
                val = sign * 2**bit + offset
                if not (min_value <= val <= max_value):
                    continue

                # '%+d' renders '+3' / '-3', matching the original labels;
                # an exact power of two gets no suffix.
                add_str = '' if offset == 0 else '%+d' % offset

                instructions.push_args(1, val)
                instructions.append('TUPLE_PACK')
                instructions.push_args(self.workspace.pack(('%s2^%d%s' % (sign_str, bit, add_str),)))
                instructions.append('SET')
                pending_mutations += 1

                # Flush periodically so no single transaction grows too large.
                if pending_mutations >= 5000:
                    test_util.blocking_commit(instructions)
                    pending_mutations = 0

    instructions.begin_finalization()

    test_util.blocking_commit(instructions)
    instructions.push_args(self.stack_subspace.key())
    instructions.append('LOG_STACK')

    test_util.blocking_commit(instructions)
    return instructions
def generate(self, args, thread_number):
    """Generate the instruction stream for the directory-layer test.

    Randomly exercises directory, subspace and directory-layer operations
    against a mirrored local model (self.dir_list of DirectoryStateTreeNode),
    then logs every known directory/subspace and the stack for comparison.

    FIX: default_path previously used the Python 2-only builtin unicode(),
    which raises NameError on Python 3 (str is already unicode there).

    :param args: parsed command-line options
    :param thread_number: index of the tester thread this stream is for
    :return: the populated InstructionSet
    """
    instructions = InstructionSet()

    op_choices = ['NEW_TRANSACTION', 'COMMIT']

    general = ['DIRECTORY_CREATE_SUBSPACE', 'DIRECTORY_CREATE_LAYER']

    op_choices += general

    directory_mutations = ['DIRECTORY_CREATE_OR_OPEN', 'DIRECTORY_CREATE', 'DIRECTORY_MOVE', 'DIRECTORY_MOVE_TO',
                           'DIRECTORY_REMOVE', 'DIRECTORY_REMOVE_IF_EXISTS']
    directory_reads = ['DIRECTORY_EXISTS', 'DIRECTORY_OPEN', 'DIRECTORY_LIST']

    directory_db_mutations = [x + '_DATABASE' for x in directory_mutations]
    directory_db_reads = [x + '_DATABASE' for x in directory_reads]
    directory_snapshot_reads = [x + '_SNAPSHOT' for x in directory_reads]

    directory = []
    directory += directory_mutations
    directory += directory_reads
    directory += directory_db_mutations
    directory += directory_db_reads

    if not args.no_directory_snapshot_ops:
        directory += directory_snapshot_reads

    subspace = ['DIRECTORY_PACK_KEY', 'DIRECTORY_UNPACK_KEY', 'DIRECTORY_RANGE', 'DIRECTORY_CONTAINS',
                'DIRECTORY_OPEN_SUBSPACE']

    instructions.append('NEW_TRANSACTION')

    default_path = 'default%d' % self.next_path  # was: unicode(...) — py2-only
    self.next_path += 1
    self.dir_list = directory_util.setup_directories(instructions, default_path, self.random)
    self.root = self.dir_list[0]

    instructions.push_args(0)
    instructions.append('DIRECTORY_CHANGE')

    # Generate some directories that we are going to create in advance. This tests that other bindings
    # are compatible with the Python implementation
    self.prepopulated_dirs = [(generate_path(min_length=1), self.generate_layer()) for i in range(5)]

    for path, layer in self.prepopulated_dirs:
        instructions.push_args(layer)
        instructions.push_args(*test_util.with_length(path))
        instructions.append('DIRECTORY_OPEN')
        self.dir_list.append(self.root.add_child(path, DirectoryStateTreeNode(
            True, True, has_known_prefix=False, is_partition=(layer == 'partition'))))
        # print('%d. Selected %s, dir=%s, dir_id=%s, has_known_prefix=%s, dir_list_len=%d' \
        #     % (len(instructions), 'DIRECTORY_OPEN', repr(self.dir_index), self.dir_list[-1].dir_id, False, len(self.dir_list)-1))

    instructions.setup_complete()

    for i in range(args.num_ops):
        # Half the time, switch the "current" directory to a random live entry.
        if random.random() < 0.5:
            while True:
                self.dir_index = random.randrange(0, len(self.dir_list))
                if not self.dir_list[self.dir_index].state.is_partition or not self.dir_list[self.dir_index].state.deleted:
                    break

            instructions.push_args(self.dir_index)
            instructions.append('DIRECTORY_CHANGE')

        dir_entry = self.dir_list[self.dir_index]

        # Only offer ops valid for the current entry's kind.
        choices = op_choices[:]
        if dir_entry.state.is_directory:
            choices += directory
        if dir_entry.state.is_subspace:
            choices += subspace

        op = random.choice(choices)
        # print('%d. Selected %s, dir=%d, dir_id=%d, has_known_prefix=%d, dir_list_len=%d' \
        #     % (len(instructions), op, self.dir_index, dir_entry.dir_id, dir_entry.state.has_known_prefix, len(self.dir_list)))

        # Both suffixes are 9 characters long, hence the [0:-9] strip.
        if op.endswith('_DATABASE') or op.endswith('_SNAPSHOT'):
            root_op = op[0:-9]
        else:
            root_op = op

        if root_op == 'NEW_TRANSACTION':
            instructions.append(op)

        elif root_op == 'COMMIT':
            test_util.blocking_commit(instructions)

        elif root_op == 'DIRECTORY_CREATE_SUBSPACE':
            path = generate_path()
            instructions.push_args(generate_prefix(require_unique=False, is_partition=True))
            instructions.push_args(*test_util.with_length(path))
            instructions.append(op)
            self.dir_list.append(DirectoryStateTreeNode(False, True, has_known_prefix=True))

        elif root_op == 'DIRECTORY_CREATE_LAYER':
            indices = []

            prefixes = [generate_prefix(require_unique=args.concurrency == 1, is_partition=True) for i in range(2)]
            for i in range(2):
                instructions.push_args(prefixes[i])
                instructions.push_args(*test_util.with_length(generate_path()))
                instructions.append('DIRECTORY_CREATE_SUBSPACE')
                indices.append(len(self.dir_list))
                self.dir_list.append(DirectoryStateTreeNode(False, True, has_known_prefix=True))

            instructions.push_args(random.choice([0, 1]))
            instructions.push_args(*indices)
            instructions.append(op)
            self.dir_list.append(DirectoryStateTreeNode.get_layer(prefixes[0]))

        elif root_op == 'DIRECTORY_CREATE_OR_OPEN':
            # Because allocated prefixes are non-deterministic, we cannot have overlapping
            # transactions that allocate/remove these prefixes in a comparison test
            if op.endswith('_DATABASE') and args.concurrency == 1:
                test_util.blocking_commit(instructions)

            path = generate_path()
            op_args = test_util.with_length(path) + (self.generate_layer(),)
            directory_util.push_instruction_and_record_prefix(
                instructions, op, op_args, path, len(self.dir_list), self.random, self.prefix_log)

            if not op.endswith('_DATABASE') and args.concurrency == 1:
                test_util.blocking_commit(instructions)

            child_entry = dir_entry.get_descendent(path)
            if child_entry is None:
                child_entry = DirectoryStateTreeNode(True, True)

            # Allocated prefix is unknown to the model.
            child_entry.state.has_known_prefix = False
            self.dir_list.append(dir_entry.add_child(path, child_entry))

        elif root_op == 'DIRECTORY_CREATE':
            layer = self.generate_layer()
            is_partition = layer == 'partition'

            prefix = generate_prefix(require_unique=is_partition and args.concurrency == 1,
                                     is_partition=is_partition, min_length=0)

            # Because allocated prefixes are non-deterministic, we cannot have overlapping
            # transactions that allocate/remove these prefixes in a comparison test
            if op.endswith('_DATABASE') and args.concurrency == 1:  # and allow_empty_prefix:
                test_util.blocking_commit(instructions)

            path = generate_path()
            op_args = test_util.with_length(path) + (layer, prefix)
            if prefix is None:
                # No explicit prefix: one will be allocated, so record it.
                directory_util.push_instruction_and_record_prefix(
                    instructions, op, op_args, path, len(self.dir_list), self.random, self.prefix_log)
            else:
                instructions.push_args(*op_args)
                instructions.append(op)

            if not op.endswith('_DATABASE') and args.concurrency == 1:  # and allow_empty_prefix:
                test_util.blocking_commit(instructions)

            child_entry = dir_entry.get_descendent(path)
            if child_entry is None:
                child_entry = DirectoryStateTreeNode(True, True, has_known_prefix=bool(prefix))
            elif not bool(prefix):
                child_entry.state.has_known_prefix = False

            if is_partition:
                child_entry.state.is_partition = True

            self.dir_list.append(dir_entry.add_child(path, child_entry))

        elif root_op == 'DIRECTORY_OPEN':
            path = generate_path()
            instructions.push_args(self.generate_layer())
            instructions.push_args(*test_util.with_length(path))
            instructions.append(op)

            child_entry = dir_entry.get_descendent(path)
            if child_entry is None:
                # Opening a nonexistent path: model it as an error entry.
                self.dir_list.append(DirectoryStateTreeNode(False, False, has_known_prefix=False))
            else:
                self.dir_list.append(dir_entry.add_child(path, child_entry))

        elif root_op == 'DIRECTORY_MOVE':
            old_path = generate_path()
            new_path = generate_path()
            instructions.push_args(*(test_util.with_length(old_path) + test_util.with_length(new_path)))
            instructions.append(op)

            child_entry = dir_entry.get_descendent(old_path)
            if child_entry is None:
                self.dir_list.append(DirectoryStateTreeNode(False, False, has_known_prefix=False))
            else:
                self.dir_list.append(dir_entry.add_child(new_path, child_entry))

            # Make sure that the default directory subspace still exists after moving the specified directory
            if dir_entry.state.is_directory and not dir_entry.state.is_subspace and old_path == (u'',):
                self.ensure_default_directory_subspace(instructions, default_path)

        elif root_op == 'DIRECTORY_MOVE_TO':
            new_path = generate_path()
            instructions.push_args(*test_util.with_length(new_path))
            instructions.append(op)

            child_entry = dir_entry.get_descendent(())
            if child_entry is None:
                self.dir_list.append(DirectoryStateTreeNode(False, False, has_known_prefix=False))
            else:
                self.dir_list.append(dir_entry.add_child(new_path, child_entry))

            # Make sure that the default directory subspace still exists after moving the current directory
            self.ensure_default_directory_subspace(instructions, default_path)

        elif root_op == 'DIRECTORY_REMOVE' or root_op == 'DIRECTORY_REMOVE_IF_EXISTS':
            # Because allocated prefixes are non-deterministic, we cannot have overlapping
            # transactions that allocate/remove these prefixes in a comparison test
            if op.endswith('_DATABASE') and args.concurrency == 1:
                test_util.blocking_commit(instructions)

            path = ()
            count = random.randint(0, 1)
            if count == 1:
                path = generate_path()
                instructions.push_args(*test_util.with_length(path))

            instructions.push_args(count)
            instructions.append(op)

            dir_entry.delete(path)

            # Make sure that the default directory subspace still exists after removing the specified directory
            if path == () or (dir_entry.state.is_directory and not dir_entry.state.is_subspace and path == (u'',)):
                self.ensure_default_directory_subspace(instructions, default_path)

        elif root_op == 'DIRECTORY_LIST' or root_op == 'DIRECTORY_EXISTS':
            path = ()
            count = random.randint(0, 1)
            if count == 1:
                path = generate_path()
                instructions.push_args(*test_util.with_length(path))
            instructions.push_args(count)
            instructions.append(op)

        elif root_op == 'DIRECTORY_PACK_KEY':
            t = self.random.random_tuple(5)
            instructions.push_args(*test_util.with_length(t))
            instructions.append(op)
            instructions.append('DIRECTORY_STRIP_PREFIX')

        elif root_op == 'DIRECTORY_UNPACK_KEY' or root_op == 'DIRECTORY_CONTAINS':
            # UNPACK always needs a key packed by this directory; CONTAINS
            # usually does too, but sometimes gets an arbitrary packed tuple.
            if not dir_entry.state.has_known_prefix or random.random() < 0.2 or root_op == 'DIRECTORY_UNPACK_KEY':
                t = self.random.random_tuple(5)
                instructions.push_args(*test_util.with_length(t))
                instructions.append('DIRECTORY_PACK_KEY')
                instructions.append(op)
            else:
                instructions.push_args(fdb.tuple.pack(self.random.random_tuple(5)))
                instructions.append(op)

        elif root_op == 'DIRECTORY_RANGE' or root_op == 'DIRECTORY_OPEN_SUBSPACE':
            t = self.random.random_tuple(5)
            instructions.push_args(*test_util.with_length(t))
            instructions.append(op)
            if root_op == 'DIRECTORY_OPEN_SUBSPACE':
                self.dir_list.append(DirectoryStateTreeNode(False, True, dir_entry.state.has_known_prefix))
            else:
                # Strip the directory prefix from both range endpoints.
                test_util.to_front(instructions, 1)
                instructions.append('DIRECTORY_STRIP_PREFIX')
                test_util.to_front(instructions, 1)
                instructions.append('DIRECTORY_STRIP_PREFIX')

    instructions.begin_finalization()

    test_util.blocking_commit(instructions)
    instructions.append('NEW_TRANSACTION')

    # Log every modeled directory/subspace so results can be compared.
    for i, dir_entry in enumerate(self.dir_list):
        instructions.push_args(i)
        instructions.append('DIRECTORY_CHANGE')
        if dir_entry.state.is_directory:
            instructions.push_args(self.directory_log.key())
            instructions.append('DIRECTORY_LOG_DIRECTORY')
        if dir_entry.state.has_known_prefix and dir_entry.state.is_subspace:
            # print('%d. Logging subspace: %d' % (i, dir_entry.dir_id))
            instructions.push_args(self.subspace_log.key())
            instructions.append('DIRECTORY_LOG_SUBSPACE')
        if (i + 1) % 100 == 0:
            test_util.blocking_commit(instructions)

    test_util.blocking_commit(instructions)

    instructions.push_args(self.stack_subspace.key())
    instructions.append('LOG_STACK')

    test_util.blocking_commit(instructions)
    return instructions
def generate(self, args, thread_number):
    """Generate the instruction stream for the core API test.

    Randomly mixes reads, mutations, snapshot/database variants, version
    ops, tuple ops, conflict ranges, and transaction resets, tracking what
    the stack machine's stack should contain, then logs the stack.

    FIXES:
    - 'index / 256' used Python 3 true division, making chr() receive a
      float (TypeError); '//' is intended and identical on Python 2.
    - 'random_int() / 2' likewise produced floats, which would be packed
      as doubles and defeat the SUB integer-unpacking check; now '//'.

    :param args: parsed command-line options
    :param thread_number: index of the tester thread this stream is for
    :return: the populated InstructionSet
    """
    instructions = InstructionSet()

    op_choices = ['NEW_TRANSACTION', 'COMMIT']

    reads = ['GET', 'GET_KEY', 'GET_RANGE', 'GET_RANGE_STARTS_WITH', 'GET_RANGE_SELECTOR']
    mutations = ['SET', 'CLEAR', 'CLEAR_RANGE', 'CLEAR_RANGE_STARTS_WITH', 'ATOMIC_OP']
    snapshot_reads = [x + '_SNAPSHOT' for x in reads]
    database_reads = [x + '_DATABASE' for x in reads]
    database_mutations = [x + '_DATABASE' for x in mutations]
    # VERSIONSTAMP is added after the _DATABASE list is built on purpose:
    # there is no database variant of it.
    mutations += ['VERSIONSTAMP']
    versions = ['GET_READ_VERSION', 'SET_READ_VERSION', 'GET_COMMITTED_VERSION']
    snapshot_versions = ['GET_READ_VERSION_SNAPSHOT']
    tuples = ['TUPLE_PACK', 'TUPLE_UNPACK', 'TUPLE_RANGE', 'TUPLE_SORT', 'SUB',
              'ENCODE_FLOAT', 'ENCODE_DOUBLE', 'DECODE_DOUBLE', 'DECODE_FLOAT']
    if 'versionstamp' in args.types:
        tuples.append('TUPLE_PACK_WITH_VERSIONSTAMP')
    resets = ['ON_ERROR', 'RESET', 'CANCEL']
    read_conflicts = ['READ_CONFLICT_RANGE', 'READ_CONFLICT_KEY']
    write_conflicts = ['WRITE_CONFLICT_RANGE', 'WRITE_CONFLICT_KEY', 'DISABLE_WRITE_CONFLICT']

    op_choices += reads
    op_choices += mutations
    op_choices += snapshot_reads
    op_choices += database_reads
    op_choices += database_mutations
    op_choices += versions
    op_choices += snapshot_versions
    op_choices += tuples
    op_choices += read_conflicts
    op_choices += write_conflicts
    op_choices += resets

    # Non-idempotent atomic ops are unsafe to retry against the database
    # in single-tester comparison runs.
    idempotent_atomic_ops = [u'BIT_AND', u'BIT_OR', u'MAX', u'MIN', u'BYTE_MIN', u'BYTE_MAX']
    atomic_ops = idempotent_atomic_ops + [u'ADD', u'BIT_XOR']

    if args.concurrency > 1:
        self.max_keys = random.randint(100, 1000)
    else:
        self.max_keys = random.randint(100, 10000)

    instructions.append('NEW_TRANSACTION')
    instructions.append('GET_READ_VERSION')

    self.preload_database(instructions, self.max_keys)

    instructions.setup_complete()

    for i in range(args.num_ops):
        op = random.choice(op_choices)
        index = len(instructions)
        read_performed = False

        # print 'Adding instruction %s at %d' % (op, index)

        if args.concurrency == 1 and (op in database_mutations):
            # Flush the transaction first so database-level mutations are
            # observed deterministically in comparison runs.
            self.wait_for_reads(instructions)
            test_util.blocking_commit(instructions)
            self.can_get_commit_version = False
            self.add_stack_items(1)

        if op in resets or op == 'NEW_TRANSACTION':
            if args.concurrency == 1:
                self.wait_for_reads(instructions)

            self.outstanding_ops = []

        if op == 'NEW_TRANSACTION':
            instructions.append(op)
            self.can_get_commit_version = True
            self.can_set_version = True
            self.can_use_key_selectors = True

        elif op == 'ON_ERROR':
            instructions.push_args(random.randint(0, 5000))
            instructions.append(op)

            self.outstanding_ops.append((self.stack_size, len(instructions) - 1))
            if args.concurrency == 1:
                self.wait_for_reads(instructions)

            instructions.append('NEW_TRANSACTION')
            self.can_get_commit_version = True
            self.can_set_version = True
            self.can_use_key_selectors = True
            self.add_strings(1)

        elif op == 'GET' or op == 'GET_SNAPSHOT' or op == 'GET_DATABASE':
            self.ensure_key(instructions, 1)
            instructions.append(op)
            self.add_strings(1)
            self.can_set_version = False
            read_performed = True

        elif op == 'GET_KEY' or op == 'GET_KEY_SNAPSHOT' or op == 'GET_KEY_DATABASE':
            if op.endswith('_DATABASE') or self.can_use_key_selectors:
                self.ensure_key(instructions, 1)
                instructions.push_args(self.workspace.key())
                instructions.push_args(*self.random.random_selector_params())
                test_util.to_front(instructions, 3)
                instructions.append(op)

                # Don't add key here because we may be outside of our prefix
                self.add_strings(1)
                self.can_set_version = False
                read_performed = True

        elif op == 'GET_RANGE' or op == 'GET_RANGE_SNAPSHOT' or op == 'GET_RANGE_DATABASE':
            self.ensure_key(instructions, 2)
            range_params = self.random.random_range_params()
            instructions.push_args(*range_params)
            test_util.to_front(instructions, 4)
            test_util.to_front(instructions, 4)
            instructions.append(op)

            if range_params[0] >= 1 and range_params[0] <= 1000:  # avoid adding a string if the limit is large
                self.add_strings(1)
            else:
                self.add_stack_items(1)

            self.can_set_version = False
            read_performed = True

        elif op == 'GET_RANGE_STARTS_WITH' or op == 'GET_RANGE_STARTS_WITH_SNAPSHOT' \
                or op == 'GET_RANGE_STARTS_WITH_DATABASE':
            # TODO: not tested well
            self.ensure_key(instructions, 1)
            range_params = self.random.random_range_params()
            instructions.push_args(*range_params)
            test_util.to_front(instructions, 3)
            instructions.append(op)

            if range_params[0] >= 1 and range_params[0] <= 1000:  # avoid adding a string if the limit is large
                self.add_strings(1)
            else:
                self.add_stack_items(1)

            self.can_set_version = False
            read_performed = True

        elif op == 'GET_RANGE_SELECTOR' or op == 'GET_RANGE_SELECTOR_SNAPSHOT' \
                or op == 'GET_RANGE_SELECTOR_DATABASE':
            if op.endswith('_DATABASE') or self.can_use_key_selectors:
                self.ensure_key(instructions, 2)
                instructions.push_args(self.workspace.key())
                range_params = self.random.random_range_params()
                instructions.push_args(*range_params)
                instructions.push_args(*self.random.random_selector_params())
                test_util.to_front(instructions, 6)
                instructions.push_args(*self.random.random_selector_params())
                test_util.to_front(instructions, 9)
                instructions.append(op)

                if range_params[0] >= 1 and range_params[0] <= 1000:  # avoid adding a string if the limit is large
                    self.add_strings(1)
                else:
                    self.add_stack_items(1)

                self.can_set_version = False
                read_performed = True

        elif op == 'GET_READ_VERSION' or op == 'GET_READ_VERSION_SNAPSHOT':
            instructions.append(op)
            self.has_version = self.can_set_version
            self.add_strings(1)

        elif op == 'SET' or op == 'SET_DATABASE':
            self.ensure_key_value(instructions)
            instructions.append(op)
            if op == 'SET_DATABASE':
                self.add_stack_items(1)

        elif op == 'SET_READ_VERSION':
            if self.has_version and self.can_set_version:
                instructions.append(op)
                self.can_set_version = False

        elif op == 'CLEAR' or op == 'CLEAR_DATABASE':
            self.ensure_key(instructions, 1)
            instructions.append(op)
            if op == 'CLEAR_DATABASE':
                self.add_stack_items(1)

        elif op == 'CLEAR_RANGE' or op == 'CLEAR_RANGE_DATABASE':
            # Protect against inverted range
            key1 = self.workspace.pack(self.random.random_tuple(5))
            key2 = self.workspace.pack(self.random.random_tuple(5))

            if key1 > key2:
                key1, key2 = key2, key1

            instructions.push_args(key1, key2)
            instructions.append(op)
            if op == 'CLEAR_RANGE_DATABASE':
                self.add_stack_items(1)

        elif op == 'CLEAR_RANGE_STARTS_WITH' or op == 'CLEAR_RANGE_STARTS_WITH_DATABASE':
            self.ensure_key(instructions, 1)
            instructions.append(op)
            if op == 'CLEAR_RANGE_STARTS_WITH_DATABASE':
                self.add_stack_items(1)

        elif op == 'ATOMIC_OP' or op == 'ATOMIC_OP_DATABASE':
            self.ensure_key_value(instructions)
            if op == 'ATOMIC_OP' or args.concurrency > 1:
                instructions.push_args(random.choice(atomic_ops))
            else:
                instructions.push_args(random.choice(idempotent_atomic_ops))

            instructions.append(op)
            if op == 'ATOMIC_OP_DATABASE':
                self.add_stack_items(1)

        elif op == 'VERSIONSTAMP':
            rand_str1 = self.random.random_string(100)
            key1 = self.versionstamped_values.pack((rand_str1,))

            split = random.randint(0, 70)
            rand_str2 = self.random.random_string(20 + split) \
                + fdb.tuple.Versionstamp._UNSET_TR_VERSION + self.random.random_string(70 - split)
            key2 = self.versionstamped_keys.pack() + rand_str2
            index = key2.find(fdb.tuple.Versionstamp._UNSET_TR_VERSION)
            # Append the little-endian 16-bit offset of the versionstamp
            # placeholder, as SET_VERSIONSTAMPED_KEY requires.
            # FIX: was 'index / 256' (float under true division).
            # NOTE(review): chr() assumes key material is str here — on a
            # bytes pipeline this would need bytes([...]); confirm what
            # random_string/pack return.
            key2 += chr(index % 256) + chr(index // 256)

            instructions.push_args(u'SET_VERSIONSTAMPED_VALUE', key1,
                                   fdb.tuple.Versionstamp._UNSET_TR_VERSION + rand_str2)
            instructions.append('ATOMIC_OP')

            instructions.push_args(u'SET_VERSIONSTAMPED_KEY', key2, rand_str1)
            instructions.append('ATOMIC_OP')
            self.can_use_key_selectors = False

        elif op == 'READ_CONFLICT_RANGE' or op == 'WRITE_CONFLICT_RANGE':
            self.ensure_key(instructions, 2)
            instructions.append(op)
            self.add_strings(1)

        elif op == 'READ_CONFLICT_KEY' or op == 'WRITE_CONFLICT_KEY':
            self.ensure_key(instructions, 1)
            instructions.append(op)
            self.add_strings(1)

        elif op == 'DISABLE_WRITE_CONFLICT':
            instructions.append(op)

        elif op == 'COMMIT':
            if args.concurrency == 1 or i < self.max_keys or random.random() < 0.9:
                if args.concurrency == 1:
                    self.wait_for_reads(instructions)
                test_util.blocking_commit(instructions)
                self.can_get_commit_version = False
                self.add_stack_items(1)
                self.can_set_version = True
                self.can_use_key_selectors = True
            else:
                instructions.append(op)
                self.add_strings(1)

        elif op == 'RESET':
            instructions.append(op)
            self.can_get_commit_version = False
            self.can_set_version = True
            self.can_use_key_selectors = True

        elif op == 'CANCEL':
            instructions.append(op)
            self.can_set_version = False

        elif op == 'GET_COMMITTED_VERSION':
            if self.can_get_commit_version:
                do_commit = random.random() < 0.5

                if do_commit:
                    instructions.append('COMMIT')
                    instructions.append('WAIT_FUTURE')
                    self.add_stack_items(1)

                instructions.append(op)

                self.has_version = True
                self.add_strings(1)

                if do_commit:
                    instructions.append('RESET')
                    self.can_get_commit_version = False
                    self.can_set_version = True
                    self.can_use_key_selectors = True

        elif op == 'TUPLE_PACK' or op == 'TUPLE_RANGE':
            tup = self.random.random_tuple(10)
            instructions.push_args(len(tup), *tup)
            instructions.append(op)
            if op == 'TUPLE_PACK':
                self.add_strings(1)
            else:
                self.add_strings(2)

        elif op == 'TUPLE_PACK_WITH_VERSIONSTAMP':
            tup = (self.random.random_string(20),) + self.random.random_tuple(10, incomplete_versionstamps=True)
            instructions.push_args(self.versionstamped_keys.pack(), len(tup), *tup)
            instructions.append(op)
            self.add_strings(1)

            version_key = self.versionstamped_keys.pack(tup)
            first_incomplete = version_key.find(fdb.tuple.Versionstamp._UNSET_TR_VERSION)
            second_incomplete = -1 if first_incomplete < 0 else \
                version_key.find(fdb.tuple.Versionstamp._UNSET_TR_VERSION,
                                 first_incomplete + len(fdb.tuple.Versionstamp._UNSET_TR_VERSION) + 1)

            # If there is exactly one incomplete versionstamp, perform the versionstamped key operation.
            if first_incomplete >= 0 and second_incomplete < 0:
                rand_str = self.random.random_string(100)

                instructions.push_args(rand_str)
                test_util.to_front(instructions, 1)
                instructions.push_args(u'SET_VERSIONSTAMPED_KEY')
                instructions.append('ATOMIC_OP')

                version_value_key = self.versionstamped_values.pack((rand_str,))
                instructions.push_args(u'SET_VERSIONSTAMPED_VALUE', version_value_key,
                                       fdb.tuple.Versionstamp._UNSET_TR_VERSION + fdb.tuple.pack(tup))
                instructions.append('ATOMIC_OP')
                self.can_use_key_selectors = False

        elif op == 'TUPLE_UNPACK':
            tup = self.random.random_tuple(10)
            instructions.push_args(len(tup), *tup)
            instructions.append('TUPLE_PACK')
            instructions.append(op)
            self.add_strings(len(tup))

        elif op == 'TUPLE_SORT':
            tups = self.random.random_tuple_list(10, 30)
            for tup in tups:
                instructions.push_args(len(tup), *tup)
                instructions.append('TUPLE_PACK')
            instructions.push_args(len(tups))
            instructions.append(op)
            self.add_strings(len(tups))

        # Use SUB to test if integers are correctly unpacked
        elif op == 'SUB':
            # FIX: was true division '/', which yields floats on Python 3;
            # halving keeps a-b representable within the int encoding.
            a = self.random.random_int() // 2
            b = self.random.random_int() // 2
            instructions.push_args(0, a, b)
            instructions.append(op)
            instructions.push_args(1)
            instructions.append('SWAP')
            instructions.append(op)
            instructions.push_args(1)
            instructions.append('TUPLE_PACK')
            self.add_stack_items(1)

        elif op == 'ENCODE_FLOAT':
            f = self.random.random_float(8)
            f_bytes = struct.pack('>f', f)
            instructions.push_args(f_bytes)
            instructions.append(op)
            self.add_stack_items(1)

        elif op == 'ENCODE_DOUBLE':
            d = self.random.random_float(11)
            d_bytes = struct.pack('>d', d)
            instructions.push_args(d_bytes)
            instructions.append(op)
            self.add_stack_items(1)

        elif op == 'DECODE_FLOAT':
            f = self.random.random_float(8)
            instructions.push_args(fdb.tuple.SingleFloat(f))
            instructions.append(op)
            self.add_strings(1)

        elif op == 'DECODE_DOUBLE':
            d = self.random.random_float(11)
            instructions.push_args(d)
            instructions.append(op)
            self.add_strings(1)

        else:
            assert False

        if read_performed and op not in database_reads:
            self.outstanding_ops.append((self.stack_size, len(instructions) - 1))

        if args.concurrency == 1 and (op in database_reads or op in database_mutations):
            instructions.append('WAIT_FUTURE')

    instructions.begin_finalization()

    if args.concurrency == 1:
        self.wait_for_reads(instructions)
        test_util.blocking_commit(instructions)
        self.add_stack_items(1)

    instructions.append('NEW_TRANSACTION')
    instructions.push_args(self.stack_subspace.key())
    instructions.append('LOG_STACK')

    test_util.blocking_commit(instructions)
    return instructions
def generate(self, args, thread_number):
    """Generate a randomized binding-tester instruction stream for one thread.

    Builds an InstructionSet of ``args.num_ops`` randomly chosen stack-machine
    operations (reads, mutations, tuple ops, conflict ranges, tenants, ...),
    interleaved with the bookkeeping this class uses to predict the tester's
    stack contents (add_strings / add_stack_items / outstanding_ops and the
    ``can_*`` state flags). Finishes by committing and logging the stack.

    Args:
        args: parsed command-line options (num_ops, concurrency, api_version,
            types, no_tenants, ...).
        thread_number: index of the concurrent tester this stream is for
            (unused here beyond the shared signature — randomness comes from
            the module-level ``random`` stream seeded by the caller).

    Returns:
        InstructionSet: the complete instruction stream for this thread.
    """
    instructions = InstructionSet()

    # --- Build the pool of candidate operations -------------------------
    op_choices = ['NEW_TRANSACTION', 'COMMIT']
    reads = ['GET', 'GET_KEY', 'GET_RANGE', 'GET_RANGE_STARTS_WITH', 'GET_RANGE_SELECTOR']
    mutations = ['SET', 'CLEAR', 'CLEAR_RANGE', 'CLEAR_RANGE_STARTS_WITH', 'ATOMIC_OP']
    # Snapshot/database variants are derived from the base op names; note
    # VERSIONSTAMP is appended only AFTER database_mutations is built, so it
    # has no '_DATABASE' form.
    snapshot_reads = [x + '_SNAPSHOT' for x in reads]
    database_reads = [x + '_DATABASE' for x in reads]
    database_mutations = [x + '_DATABASE' for x in mutations]
    mutations += ['VERSIONSTAMP']
    versions = ['GET_READ_VERSION', 'SET_READ_VERSION', 'GET_COMMITTED_VERSION']
    snapshot_versions = ['GET_READ_VERSION_SNAPSHOT']
    tuples = ['TUPLE_PACK', 'TUPLE_UNPACK', 'TUPLE_RANGE', 'TUPLE_SORT', 'SUB', 'ENCODE_FLOAT', 'ENCODE_DOUBLE', 'DECODE_DOUBLE', 'DECODE_FLOAT']
    if 'versionstamp' in args.types:
        tuples.append('TUPLE_PACK_WITH_VERSIONSTAMP')
    resets = ['ON_ERROR', 'RESET', 'CANCEL']
    read_conflicts = ['READ_CONFLICT_RANGE', 'READ_CONFLICT_KEY']
    write_conflicts = ['WRITE_CONFLICT_RANGE', 'WRITE_CONFLICT_KEY', 'DISABLE_WRITE_CONFLICT']
    txn_sizes = ['GET_APPROXIMATE_SIZE']
    storage_metrics = ['GET_ESTIMATED_RANGE_SIZE', 'GET_RANGE_SPLIT_POINTS']
    tenants = ['TENANT_CREATE', 'TENANT_DELETE', 'TENANT_SET_ACTIVE', 'TENANT_CLEAR_ACTIVE', 'TENANT_LIST']
    op_choices += reads
    op_choices += mutations
    op_choices += snapshot_reads
    op_choices += database_reads
    op_choices += database_mutations
    op_choices += versions
    op_choices += snapshot_versions
    op_choices += tuples
    op_choices += read_conflicts
    op_choices += write_conflicts
    op_choices += resets
    op_choices += txn_sizes
    op_choices += storage_metrics
    if not args.no_tenants:
        op_choices += tenants

    # Idempotent atomic ops are safe to retry; the full set (ADD, BIT_XOR,
    # APPEND_IF_FITS) is only used where a duplicate apply cannot happen.
    idempotent_atomic_ops = ['BIT_AND', 'BIT_OR', 'MAX', 'MIN', 'BYTE_MIN', 'BYTE_MAX']
    atomic_ops = idempotent_atomic_ops + ['ADD', 'BIT_XOR', 'APPEND_IF_FITS']

    # Use a smaller keyspace when several testers run concurrently.
    if args.concurrency > 1:
        self.max_keys = random.randint(100, 1000)
    else:
        self.max_keys = random.randint(100, 10000)

    # --- Setup phase: open a transaction and preload data ---------------
    instructions.append('NEW_TRANSACTION')
    instructions.append('GET_READ_VERSION')
    self.preload_database(instructions, self.max_keys)
    instructions.setup_complete()

    # --- Main loop: one randomly chosen operation per iteration ---------
    # Statement order inside this loop is significant: each branch must keep
    # the predicted stack size (add_strings/add_stack_items) and the
    # outstanding_ops/can_* flags in sync with what the tester will do.
    for i in range(args.num_ops):
        op = random.choice(op_choices)
        index = len(instructions)
        # NOTE(review): ``index`` is only read by the debug print below and
        # is reassigned in the VERSIONSTAMP branch — confirm it is otherwise
        # intentionally unused.
        read_performed = False

        # print 'Adding instruction %s at %d' % (op, index)

        # Database ops and tenant create/delete run as standalone futures;
        # in single-threaded mode, flush the current transaction first.
        if args.concurrency == 1 and (op in database_mutations or op in ['TENANT_CREATE', 'TENANT_DELETE']):
            self.wait_for_reads(instructions)
            test_util.blocking_commit(instructions)
            self.can_get_commit_version = False
            self.add_stack_items(1)

        # Resets invalidate any futures still pending on the stack.
        if op in resets or op == 'NEW_TRANSACTION':
            if args.concurrency == 1:
                self.wait_for_reads(instructions)
            self.outstanding_ops = []

        if op == 'NEW_TRANSACTION':
            instructions.append(op)
            self.can_get_commit_version = True
            self.can_set_version = True
            self.can_use_key_selectors = True

        elif op == 'ON_ERROR':
            instructions.push_args(random.randint(0, 5000))
            instructions.append(op)
            # ON_ERROR leaves a future on the stack; remember its position.
            self.outstanding_ops.append((self.stack_size, len(instructions) - 1))
            if args.concurrency == 1:
                self.wait_for_reads(instructions)
            instructions.append('NEW_TRANSACTION')
            self.can_get_commit_version = True
            self.can_set_version = True
            self.can_use_key_selectors = True
            self.add_strings(1)

        elif op == 'GET' or op == 'GET_SNAPSHOT' or op == 'GET_DATABASE':
            self.ensure_key(instructions, 1)
            instructions.append(op)
            self.add_strings(1)
            self.can_set_version = False
            read_performed = True

        elif op == 'GET_KEY' or op == 'GET_KEY_SNAPSHOT' or op == 'GET_KEY_DATABASE':
            # Key selectors may be disallowed after a versionstamp op within
            # the same transaction; database variants are always safe.
            if op.endswith('_DATABASE') or self.can_use_key_selectors:
                self.ensure_key(instructions, 1)
                instructions.push_args(self.workspace.key())
                instructions.push_args(*self.random.random_selector_params())
                test_util.to_front(instructions, 3)
                instructions.append(op)

                # Don't add key here because we may be outside of our prefix
                self.add_strings(1)
                self.can_set_version = False
                read_performed = True

        elif op == 'GET_RANGE' or op == 'GET_RANGE_SNAPSHOT' or op == 'GET_RANGE_DATABASE':
            self.ensure_key(instructions, 2)
            range_params = self.random.random_range_params()
            instructions.push_args(*range_params)
            test_util.to_front(instructions, 4)
            test_util.to_front(instructions, 4)
            instructions.append(op)

            if range_params[0] >= 1 and range_params[0] <= 1000:  # avoid adding a string if the limit is large
                self.add_strings(1)
            else:
                self.add_stack_items(1)

            self.can_set_version = False
            read_performed = True

        elif op == 'GET_RANGE_STARTS_WITH' or op == 'GET_RANGE_STARTS_WITH_SNAPSHOT' or op == 'GET_RANGE_STARTS_WITH_DATABASE':
            # TODO: not tested well
            self.ensure_key(instructions, 1)
            range_params = self.random.random_range_params()
            instructions.push_args(*range_params)
            test_util.to_front(instructions, 3)
            instructions.append(op)

            if range_params[0] >= 1 and range_params[0] <= 1000:  # avoid adding a string if the limit is large
                self.add_strings(1)
            else:
                self.add_stack_items(1)

            self.can_set_version = False
            read_performed = True

        elif op == 'GET_RANGE_SELECTOR' or op == 'GET_RANGE_SELECTOR_SNAPSHOT' or op == 'GET_RANGE_SELECTOR_DATABASE':
            if op.endswith('_DATABASE') or self.can_use_key_selectors:
                self.ensure_key(instructions, 2)
                instructions.push_args(self.workspace.key())
                range_params = self.random.random_range_params()
                instructions.push_args(*range_params)
                instructions.push_args(*self.random.random_selector_params())
                test_util.to_front(instructions, 6)
                instructions.push_args(*self.random.random_selector_params())
                test_util.to_front(instructions, 9)
                instructions.append(op)

                if range_params[0] >= 1 and range_params[0] <= 1000:  # avoid adding a string if the limit is large
                    self.add_strings(1)
                else:
                    self.add_stack_items(1)

                self.can_set_version = False
                read_performed = True

        elif op == 'GET_READ_VERSION' or op == 'GET_READ_VERSION_SNAPSHOT':
            instructions.append(op)
            self.has_version = self.can_set_version
            self.add_strings(1)

        elif op == 'SET' or op == 'SET_DATABASE':
            self.ensure_key_value(instructions)
            instructions.append(op)
            if op == 'SET_DATABASE':
                self.add_stack_items(1)

        elif op == 'SET_READ_VERSION':
            # Only valid if we previously fetched a version and haven't
            # already set one (or performed a read) in this transaction.
            if self.has_version and self.can_set_version:
                instructions.append(op)
                self.can_set_version = False

        elif op == 'CLEAR' or op == 'CLEAR_DATABASE':
            self.ensure_key(instructions, 1)
            instructions.append(op)
            if op == 'CLEAR_DATABASE':
                self.add_stack_items(1)

        elif op == 'CLEAR_RANGE' or op == 'CLEAR_RANGE_DATABASE':
            # Protect against inverted range
            key1 = self.workspace.pack(self.random.random_tuple(5))
            key2 = self.workspace.pack(self.random.random_tuple(5))
            if key1 > key2:
                key1, key2 = key2, key1

            instructions.push_args(key1, key2)
            instructions.append(op)
            if op == 'CLEAR_RANGE_DATABASE':
                self.add_stack_items(1)

        elif op == 'CLEAR_RANGE_STARTS_WITH' or op == 'CLEAR_RANGE_STARTS_WITH_DATABASE':
            self.ensure_key(instructions, 1)
            instructions.append(op)
            if op == 'CLEAR_RANGE_STARTS_WITH_DATABASE':
                self.add_stack_items(1)

        elif op == 'ATOMIC_OP' or op == 'ATOMIC_OP_DATABASE':
            self.ensure_key_value(instructions)
            # Database atomic ops retry internally, so restrict them to
            # idempotent mutations in single-threaded (verifiable) runs.
            if op == 'ATOMIC_OP' or args.concurrency > 1:
                instructions.push_args(random.choice(atomic_ops))
            else:
                instructions.push_args(random.choice(idempotent_atomic_ops))

            instructions.append(op)
            if op == 'ATOMIC_OP_DATABASE':
                self.add_stack_items(1)

        elif op == 'VERSIONSTAMP':
            rand_str1 = self.random.random_string(100)
            key1 = self.versionstamped_values.pack((rand_str1,))
            key2 = self.versionstamped_values_2.pack((rand_str1,))

            # Build a key with an 80-byte random payload containing one
            # incomplete versionstamp at a random split point.
            split = random.randint(0, 70)
            prefix = self.random.random_string(20 + split)
            if prefix.endswith(b'\xff'):
                # Necessary to make sure that the SET_VERSIONSTAMPED_VALUE check
                # correctly finds where the version is supposed to fit in.
                prefix += b'\x00'

            suffix = self.random.random_string(70 - split)
            rand_str2 = prefix + fdb.tuple.Versionstamp._UNSET_TR_VERSION + suffix
            key3 = self.versionstamped_keys.pack() + rand_str2
            index = len(self.versionstamped_keys.pack()) + len(prefix)
            key3 = self.versionstamp_key(key3, index)

            instructions.push_args('SET_VERSIONSTAMPED_VALUE', key1, self.versionstamp_value(fdb.tuple.Versionstamp._UNSET_TR_VERSION + rand_str2))
            instructions.append('ATOMIC_OP')

            # API 520 added offset support for SET_VERSIONSTAMPED_VALUE.
            if args.api_version >= 520:
                instructions.push_args('SET_VERSIONSTAMPED_VALUE', key2, self.versionstamp_value(rand_str2, len(prefix)))
                instructions.append('ATOMIC_OP')

            instructions.push_args('SET_VERSIONSTAMPED_KEY', key3, rand_str1)
            instructions.append('ATOMIC_OP')
            self.can_use_key_selectors = False

        elif op == 'READ_CONFLICT_RANGE' or op == 'WRITE_CONFLICT_RANGE':
            self.ensure_key(instructions, 2)
            instructions.append(op)
            self.add_strings(1)

        elif op == 'READ_CONFLICT_KEY' or op == 'WRITE_CONFLICT_KEY':
            self.ensure_key(instructions, 1)
            instructions.append(op)
            self.add_strings(1)

        elif op == 'DISABLE_WRITE_CONFLICT':
            instructions.append(op)

        elif op == 'COMMIT':
            # Usually commit-and-wait; occasionally (concurrent runs, late in
            # the stream) leave the commit future on the stack instead.
            if args.concurrency == 1 or i < self.max_keys or random.random() < 0.9:
                if args.concurrency == 1:
                    self.wait_for_reads(instructions)

                test_util.blocking_commit(instructions)
                self.can_get_commit_version = False
                self.add_stack_items(1)
                self.can_set_version = True
                self.can_use_key_selectors = True
            else:
                instructions.append(op)
                self.add_strings(1)

        elif op == 'RESET':
            instructions.append(op)
            self.can_get_commit_version = False
            self.can_set_version = True
            self.can_use_key_selectors = True

        elif op == 'CANCEL':
            instructions.append(op)
            self.can_set_version = False

        elif op == 'GET_COMMITTED_VERSION':
            if self.can_get_commit_version:
                # Half the time, commit first so a committed version exists,
                # then RESET afterwards to start fresh.
                do_commit = random.random() < 0.5
                if do_commit:
                    instructions.append('COMMIT')
                    instructions.append('WAIT_FUTURE')
                    self.add_stack_items(1)

                instructions.append(op)
                self.has_version = True
                self.add_strings(1)

                if do_commit:
                    instructions.append('RESET')
                    self.can_get_commit_version = False
                    self.can_set_version = True
                    self.can_use_key_selectors = True

        elif op == 'GET_APPROXIMATE_SIZE':
            instructions.append(op)
            self.add_strings(1)

        elif op == 'TUPLE_PACK' or op == 'TUPLE_RANGE':
            tup = self.random.random_tuple(10)
            instructions.push_args(len(tup), *tup)
            instructions.append(op)
            # TUPLE_RANGE pushes both range endpoints.
            if op == 'TUPLE_PACK':
                self.add_strings(1)
            else:
                self.add_strings(2)

        elif op == 'TUPLE_PACK_WITH_VERSIONSTAMP':
            tup = (self.random.random_string(20),) + self.random.random_tuple(10, incomplete_versionstamps=True)
            prefix = self.versionstamped_keys.pack()
            instructions.push_args(prefix, len(tup), *tup)
            instructions.append(op)
            self.add_strings(1)

            # Find how many incomplete versionstamps the packed tuple holds;
            # the op is only usable as a key when there is exactly one.
            versionstamp_param = prefix + fdb.tuple.pack(tup)
            first_incomplete = versionstamp_param.find(fdb.tuple.Versionstamp._UNSET_TR_VERSION)
            second_incomplete = -1 if first_incomplete < 0 else \
                versionstamp_param.find(fdb.tuple.Versionstamp._UNSET_TR_VERSION, first_incomplete + len(fdb.tuple.Versionstamp._UNSET_TR_VERSION) + 1)

            # If there is exactly one incomplete versionstamp, perform the versionstamp operation.
            if first_incomplete >= 0 and second_incomplete < 0:
                rand_str = self.random.random_string(100)

                instructions.push_args(rand_str)
                test_util.to_front(instructions, 1)
                instructions.push_args('SET_VERSIONSTAMPED_KEY')
                instructions.append('ATOMIC_OP')

                # NOTE(review): this branch checks ``self.api_version`` while
                # the VERSIONSTAMP branch above checks ``args.api_version`` —
                # confirm the two always agree.
                if self.api_version >= 520:
                    version_value_key_2 = self.versionstamped_values_2.pack((rand_str,))
                    versionstamped_value = self.versionstamp_value(fdb.tuple.pack(tup), first_incomplete - len(prefix))
                    instructions.push_args('SET_VERSIONSTAMPED_VALUE', version_value_key_2, versionstamped_value)
                    instructions.append('ATOMIC_OP')

                version_value_key = self.versionstamped_values.pack((rand_str,))
                instructions.push_args('SET_VERSIONSTAMPED_VALUE', version_value_key, self.versionstamp_value(fdb.tuple.Versionstamp._UNSET_TR_VERSION + fdb.tuple.pack(tup)))
                instructions.append('ATOMIC_OP')
                self.can_use_key_selectors = False

        elif op == 'TUPLE_UNPACK':
            tup = self.random.random_tuple(10)
            instructions.push_args(len(tup), *tup)
            instructions.append('TUPLE_PACK')
            instructions.append(op)
            self.add_strings(len(tup))

        elif op == 'TUPLE_SORT':
            tups = self.random.random_tuple_list(10, 30)
            for tup in tups:
                instructions.push_args(len(tup), *tup)
                instructions.append('TUPLE_PACK')

            instructions.push_args(len(tups))
            instructions.append(op)
            self.add_strings(len(tups))

        # Use SUB to test if integers are correctly unpacked
        elif op == 'SUB':
            a = self.random.random_int() // 2
            b = self.random.random_int() // 2
            instructions.push_args(0, a, b)
            instructions.append(op)
            instructions.push_args(1)
            instructions.append('SWAP')
            instructions.append(op)
            instructions.push_args(1)
            instructions.append('TUPLE_PACK')
            self.add_stack_items(1)

        elif op == 'ENCODE_FLOAT':
            f = self.random.random_float(8)
            f_bytes = struct.pack('>f', f)
            instructions.push_args(f_bytes)
            instructions.append(op)
            self.add_stack_items(1)

        elif op == 'ENCODE_DOUBLE':
            d = self.random.random_float(11)
            d_bytes = struct.pack('>d', d)
            instructions.push_args(d_bytes)
            instructions.append(op)
            self.add_stack_items(1)

        elif op == 'DECODE_FLOAT':
            f = self.random.random_float(8)
            instructions.push_args(fdb.tuple.SingleFloat(f))
            instructions.append(op)
            self.add_strings(1)

        elif op == 'DECODE_DOUBLE':
            d = self.random.random_float(11)
            instructions.push_args(d)
            instructions.append(op)
            self.add_strings(1)

        elif op == 'GET_ESTIMATED_RANGE_SIZE':
            # Protect against inverted range and identical keys
            key1 = self.workspace.pack(self.random.random_tuple(1))
            key2 = self.workspace.pack(self.random.random_tuple(1))
            while key1 == key2:
                key1 = self.workspace.pack(self.random.random_tuple(1))
                key2 = self.workspace.pack(self.random.random_tuple(1))

            if key1 > key2:
                key1, key2 = key2, key1

            instructions.push_args(key1, key2)
            instructions.append(op)
            self.add_strings(1)

        elif op == 'GET_RANGE_SPLIT_POINTS':
            # Protect against inverted range and identical keys
            key1 = self.workspace.pack(self.random.random_tuple(1))
            key2 = self.workspace.pack(self.random.random_tuple(1))
            while key1 == key2:
                key1 = self.workspace.pack(self.random.random_tuple(1))
                key2 = self.workspace.pack(self.random.random_tuple(1))

            if key1 > key2:
                key1, key2 = key2, key1

            # TODO: randomize chunkSize but should not exceed 100M(shard limit)
            chunkSize = 10000000  # 10M
            instructions.push_args(key1, key2, chunkSize)
            instructions.append(op)
            self.add_strings(1)

        elif op == 'TENANT_CREATE':
            # choose_tenant(0.8): mostly pick a fresh name — TODO confirm
            # the probability's meaning against choose_tenant's definition.
            tenant_name = self.choose_tenant(0.8)
            self.allocated_tenants.add(tenant_name)
            instructions.push_args(tenant_name)
            instructions.append(op)
            self.add_strings(1)

        elif op == 'TENANT_DELETE':
            tenant_name = self.choose_tenant(0.2)
            if tenant_name in self.allocated_tenants:
                self.allocated_tenants.remove(tenant_name)

            instructions.push_args(tenant_name)
            instructions.append(op)
            self.add_strings(1)

        elif op == 'TENANT_SET_ACTIVE':
            tenant_name = self.choose_tenant(0.8)
            instructions.push_args(tenant_name)
            instructions.append(op)

        elif op == 'TENANT_CLEAR_ACTIVE':
            instructions.append(op)

        elif op == 'TENANT_LIST':
            self.ensure_string(instructions, 2)
            instructions.push_args(self.random.random_int())
            test_util.to_front(instructions, 2)
            test_util.to_front(instructions, 2)
            instructions.append(op)
            self.add_strings(1)

        else:
            assert False, 'Unknown operation: ' + op

        # In-transaction reads leave futures to wait on before commit.
        if read_performed and op not in database_reads:
            self.outstanding_ops.append((self.stack_size, len(instructions) - 1))

        # Database ops (and tenant create/delete) resolve immediately in
        # single-threaded mode.
        if args.concurrency == 1 and (op in database_reads or op in database_mutations or op in ['TENANT_CREATE', 'TENANT_DELETE']):
            instructions.append('WAIT_FUTURE')

    # --- Finalization: flush pending work and log the stack --------------
    instructions.begin_finalization()

    if not args.no_tenants:
        instructions.append('TENANT_CLEAR_ACTIVE')

    if args.concurrency == 1:
        self.wait_for_reads(instructions)
        test_util.blocking_commit(instructions)
        self.add_stack_items(1)

    instructions.append('NEW_TRANSACTION')
    instructions.push_args(self.stack_subspace.key())
    instructions.append('LOG_STACK')

    test_util.blocking_commit(instructions)
    return instructions