def push_instruction_and_record_prefix(instructions, op, op_args, path, dir_index, random, subspace):
    """Append directory operation `op` to `instructions`, bracketed by bookkeeping.

    For non-`_DATABASE` ops, first records whether the directory at `path`
    exists, then (after running the op) switches to the directory at
    `dir_index`, packs a key under it, and stores it in `subspace` keyed by the
    existence flag so the result can be checked later. Always finishes by
    switching back to the default directory.

    Args:
        instructions: instruction list with push_args()/append() methods.
        op: directory operation name; ops ending in '_DATABASE' skip the
            existence/prefix bookkeeping.
        op_args: arguments pushed onto the stack for `op`.
        path: directory path tuple used for the DIRECTORY_EXISTS check.
        dir_index: index of the directory to pack a key under afterwards.
        random: source of random_string() for the packed key payload.
        subspace: subspace the recorded (exists, packed_key, ...) tuple is
            written into.
    """
    if not op.endswith('_DATABASE'):
        instructions.push_args(1, *test_util.with_length(path))
        instructions.append('DIRECTORY_EXISTS')

    # This op must leave the stack in the state it is in at this point, with the exception
    # that it may leave an error on the stack
    instructions.push_args(*op_args)
    instructions.append(op)

    if not op.endswith('_DATABASE'):
        instructions.push_args(dir_index)
        instructions.append('DIRECTORY_CHANGE')
        instructions.push_args(1, b'', random.random_string(16), b'')
        instructions.append('DIRECTORY_PACK_KEY')
        test_util.to_front(instructions, 3)  # move the existence result up to the front of the stack

        t = util.subspace_to_tuple(subspace)
        instructions.push_args(len(t) + 3, *t)

        instructions.append('TUPLE_PACK')  # subspace[<exists>][<packed_key>][random.random_string(16)] = b''
        instructions.append('SET')

    instructions.push_args(DEFAULT_DIRECTORY_INDEX)
    instructions.append('DIRECTORY_CHANGE')
def check_for_errors(self):
    """Compare the per-tester result lists and count mismatches.

    Walks all testers' result lists in lockstep, aligning results either by
    sequence number (when present) or by minimal key, and flags any iteration
    where a tester is missing a result or the values don't match.

    Returns:
        (num_errors, has_filtered_error): number of mismatched result groups,
        and whether any result matched the global error filter.
    """
    # A single tester has nothing to compare against.
    if len(self.tester_results) == 1:
        return (0, False)

    util.get_logger().info('Comparing results from \'%s\'...' % repr(util.subspace_to_tuple(self.specification.subspace)))

    num_errors = 0
    has_filtered_error = False

    # Tracks the current result being evaluated for each tester
    indices = [0 for i in range(len(self.tester_results))]

    # Column width for aligned output of tester names.
    name_length = max([len(name) for name in self.tester_results.keys()])

    while True:
        # Gets the next result for each tester
        results = {i: r[indices[i]] for i, r in enumerate(self.tester_results.values()) if len(r) > indices[i]}
        if len(results) == 0:
            break

        # Attempt to 'align' the results. If two results have matching sequence numbers, then they should be compared.
        # Only those testers which have a result matching the minimum current sequence number will be included. All
        # others are considered to have not produced a result and will be evaluated in a future iteration.
        sequence_nums = [r.sequence_num(self.specification) for r in results.values()]
        if any([s is not None for s in sequence_nums]):
            results = {i: r for i, r in results.items() if r.sequence_num(self.specification) == min(sequence_nums)}

        # If these results aren't using sequence numbers, then we match two results based on whether they share the same key
        else:
            min_key = reduce(ResultSet._min_tuple, [r.key(self.specification) for r in results.values()])
            results = {i: r for i, r in results.items() if Result.tuples_match(r.key(self.specification), min_key)}

        # Increment the indices for those testers which produced a result in this iteration
        for i in results.keys():
            indices[i] += 1

        # Fill in 'None' values for testers that didn't produce a result and generate an output string describing the results
        all_results = {i: results[i] if i in results else None for i in range(len(self.tester_results))}
        result_keys = list(self.tester_results.keys())
        result_str = '\n'.join([' %-*s - %s' % (name_length, result_keys[i], r) for i, r in all_results.items()])

        result_list = list(results.values())

        # If any of our results matches the global error filter, we ignore the result
        if any(r.matches_global_error_filter(self.specification) for r in result_list):
            has_filtered_error = True

        # The result is considered correct if every tester produced a value and all the values meet the matching criteria
        if len(results) < len(all_results) or not all(result_list[0].matches(r, self.specification) for r in result_list):
            util.get_logger().error('\nIncorrect result: \n%s' % result_str)
            num_errors += 1
        else:
            util.get_logger().debug('\nCorrect result: \n%s' % result_str)

    if num_errors > 0:
        util.get_logger().error('')
    else:
        util.get_logger().debug('')

    return (num_errors, has_filtered_error)
def _get_results(self, subspace, instruction_index=None):
    """Read back every key-value pair stored under `subspace` as Result objects.

    Pages through the database in batches of 1000 until the subspace's key
    range is exhausted.

    Args:
        subspace: the subspace whose key range is scanned.
        instruction_index: unused; kept for interface compatibility with callers.

    Returns:
        List of Result objects, one per key-value pair, in key order.
    """
    util.get_logger().info('Reading results from \'%s\'...' % repr(util.subspace_to_tuple(subspace)))

    results = []
    # Hoist the loop-invariant range computation; the original recomputed
    # subspace.range() on every iteration of the paging loop.
    subspace_range = subspace.range()
    next_key = subspace_range.start
    while True:
        next_results = self.db.get_range(next_key, subspace_range.stop, 1000)
        if len(next_results) == 0:
            break

        results += [Result(subspace, kv.key, (kv.value,)) for kv in next_results]
        # Resume just past the last key returned in this batch.
        next_key = fdb.KeySelector.first_greater_than(next_results[-1].key)

    return results
def check_for_errors(self):
    """Compare the per-tester result lists and count mismatches.

    Walks all testers' result lists in lockstep, aligning results either by
    sequence number (when present) or by matching against the minimum result,
    and flags any iteration where a tester is missing a result or the values
    don't match.

    Returns:
        (num_errors, has_filtered_error): number of mismatched result groups,
        and whether any result matched the global error filter.
    """
    # A single tester has nothing to compare against.
    if len(self.tester_results) == 1:
        return (0, False)

    util.get_logger().info('Comparing results from \'%s\'...' % repr(util.subspace_to_tuple(self.specification.subspace)))

    num_errors = 0
    # Tracks the current result being evaluated for each tester.
    indices = [0 for i in range(len(self.tester_results))]
    # Column width for aligned output of tester names.
    name_length = max([len(name) for name in self.tester_results.keys()])
    has_filtered_error = False

    while True:
        # Gets the next result for each tester.
        results = {i: r[indices[i]] for i, r in enumerate(self.tester_results.values()) if len(r) > indices[i]}
        if len(results) == 0:
            break

        # Align on minimum sequence number when sequence numbers are present;
        # otherwise match against the minimum result.
        sequence_nums = [r.sequence_num(self.specification) for r in results.values()]
        if any([s is not None for s in sequence_nums]):
            results = {i: r for i, r in results.items() if r.sequence_num(self.specification) == min(sequence_nums)}
        else:
            # NOTE(review): relies on Result defining an ordering for min() — confirm.
            results = {i: r for i, r in results.items() if r.matches(min(results.values()), self.specification)}

        # Increment the indices for the testers that produced a result this iteration.
        for i in results.keys():
            indices[i] += 1

        all_results = {i: results[i] if i in results else None for i in range(len(self.tester_results))}
        # BUG FIX: dict.keys() is a view in Python 3 and is not subscriptable;
        # materialize it once before indexing.
        result_keys = list(self.tester_results.keys())
        result_str = '\n'.join([' %-*s - %s' % (name_length, result_keys[i], r) for i, r in all_results.items()])

        # BUG FIX: dict.values() is a view in Python 3; result_list[0] below
        # would raise TypeError without materializing to a list.
        result_list = list(results.values())

        # If any of our results matches the global error filter, we ignore the result.
        if any(r.matches_global_error_filter(self.specification) for r in result_list):
            has_filtered_error = True

        # Correct only if every tester produced a value and all values match.
        if len(results) < len(all_results) or not all(result_list[0].matches(r, self.specification) for r in result_list):
            util.get_logger().error('\nIncorrect result: \n%s' % result_str)
            num_errors += 1
        else:
            util.get_logger().debug('\nCorrect result: \n%s' % result_str)

    if num_errors > 0:
        util.get_logger().error('')
    else:
        util.get_logger().debug('')

    return (num_errors, has_filtered_error)
def print_test(self):
    """Log every thread of the generated test, one numbered instruction per line.

    When args.print_all is set, every instruction of every thread is shown;
    otherwise only the core instructions are shown (offset by where the core
    test begins) and stack-noise ops ('SWAP', 'PUSH') are suppressed.
    """
    logger = util.get_logger()
    generated = self._generate_test()
    show_all = self.args.print_all

    for top_subspace, top_thread in generated.items():
        for prefix, thread in top_thread.get_threads(top_subspace).items():
            logger.error('\nThread at prefix %r:' % util.subspace_to_tuple(prefix))

            if show_all:
                ops, base = thread, 0
            else:
                ops, base = thread.core_instructions(), thread.core_test_begin

            for idx, op in enumerate(ops):
                if show_all or op.operation not in ('SWAP', 'PUSH'):
                    logger.error(' %d. %r' % (idx + base, op))

            logger.error('')
def create_thread(self, subspace=None, thread_instructions=None):
    """Register a new instruction thread under `subspace`.

    Args:
        subspace: key identifying the thread; must not already be registered.
        thread_instructions: instruction set to register; a fresh
            InstructionSet() is created when None.

    Returns:
        The registered instruction set.

    Raises:
        ValueError: if a thread already exists for `subspace`.
    """
    if subspace in self.threads:
        # BUG FIX: `raise '<string>'` is a TypeError in Python 3 — exceptions
        # must derive from BaseException. Raise a real exception instead.
        raise ValueError('An instruction set with the subspace %r has already been created' %
                         util.subspace_to_tuple(subspace))

    # Use identity comparison with None per PEP 8 (was `== None`).
    if thread_instructions is None:
        thread_instructions = InstructionSet()

    self.threads[subspace] = thread_instructions
    return thread_instructions