def _m(self, program_file, validator, new_test_cases):
    """Create witnesses for the given test cases and validate each one.

    Returns a VerdictFalse for the first witness a validator confirms,
    or VerdictUnknown if no witness is confirmed.
    """
    produced_witnesses = self.create_all_witnesses(program_file,
                                                   new_test_cases)
    for witness in produced_witnesses:
        logging.debug('Looking at witness %s .', witness['name'])
        witness_name = witness['name']
        content_to_write = witness['content']
        self.counter_size_witnesses.inc(len(content_to_write))
        with open(witness_name, 'w+') as outp:
            outp.write(content_to_write)

        self.timer_witness_validation.start()
        self.timer_validation.start()
        try:
            verdicts = validator.run(program_file, witness_name)
        finally:
            self.timer_witness_validation.stop()
            self.timer_validation.stop()
        self.counter_handled_test_cases.inc()

        logging.debug('Results for %s: %s', witness_name, str(verdicts))
        if any('false' in v.lower() for v in verdicts):
            self.final_test_vector_size.value = len(witness['vector'])
            return utils.VerdictFalse(witness['origin'], witness['vector'],
                                      None, witness_name)
    return utils.VerdictUnknown()
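# Illustrative note (assumption, not from the original module): judging by
# the keys accessed in _m, each witness dictionary is expected to have
# roughly this shape; the concrete values below are hypothetical.
#
#     witness = {
#         'name': 'witness_0.graphml',        # file the witness is written to
#         'content': '<graphml>...</graphml>',
#         'origin': 'test_0',                 # test case the witness stems from
#         'vector': [0, 1, 42],               # test vector; its len() is recorded
#     }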
def check_inputs(self,
                 program_file,
                 is_ready_func,
                 stop_event,
                 tests_directory=None):
    logging.debug('Checking inputs for file %s', program_file)
    logging.debug('Considering test case directory %s', tests_directory)
    result = None
    if self.config.use_klee_replay:
        result = self.perform_klee_replay_validation(
            program_file, is_ready_func, stop_event, tests_directory)
        logging.info("Klee-replay validation says: %s", result)

    if (not result or not result.is_positive()) and self.config.use_execution:
        result = self.perform_execution_validation(
            program_file, is_ready_func, stop_event, tests_directory)
        logging.info("Execution validation says: %s", result)

    if (not result or
            not result.is_positive()) and self.config.use_witness_validation:
        result = self.perform_witness_validation(
            program_file, is_ready_func, stop_event, tests_directory)
        logging.info("Witness validation says: %s", result)

    if result is None:
        return utils.VerdictUnknown(), None

    elif result.is_positive():
        stop_event.set()
        if result.test_vector is None:
            result.test_vector = self.get_test_vector(result.test)
        # This currently won't work with AFL due to its string-style input
        if result.witness is None and 'afl' not in self.get_name().lower():
            nondet_methods = utils.get_nondet_methods()
            witness = self.create_witness(program_file, result.test.origin,
                                          result.test_vector, nondet_methods)
            with open(witness['name'], 'w+') as outp:
                outp.write(witness['content'])
            result.witness = witness['name']
        if result.harness is None:
            nondet_methods = utils.get_nondet_methods()
            harness = self.create_harness(result.test_vector.origin,
                                          result.test_vector, nondet_methods)
            with open(harness['name'], 'wb+') as outp:
                outp.write(harness['content'])
            result.harness = harness['name']

    return result, self.statistics
def decide_final_verdict(self, results):
    """
    Decides the final verdict for the given sequence of verdicts.

    :param results: sequence of Verdicts
    :return: verdict 'false' if any false verdict exists. Otherwise,
        verdict 'unknown' if naive verification is not used, and
        verdict 'true' if naive verification is used
    """
    if any(r.is_positive() for r in results):
        return next(r for r in results if r.is_positive())
    elif not self.naive_verification:
        return utils.VerdictUnknown()
    else:
        return utils.VerdictTrue()
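# Minimal usage sketch (assumption, not part of the original module),
# illustrating the precedence in decide_final_verdict: a single positive
# (false) verdict wins over any number of unknowns, and with
# naive_verification enabled an all-unknown sequence yields VerdictTrue.
#
#     results = [utils.VerdictUnknown(), utils.VerdictFalse('test_7')]
#     assert self.decide_final_verdict(results).is_positive()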
def _k(self, program_file, validator, new_test_cases):
    """Run the validator on each test case directly.

    Returns a VerdictFalse for the first test that produces a FALSE
    verdict, or VerdictUnknown otherwise.
    """
    for test in new_test_cases:
        self.timer_execution_validation.start()
        self.timer_validation.start()
        try:
            verdicts = validator.run(program_file, test)
        finally:
            self.timer_execution_validation.stop()
            self.timer_validation.stop()
        self.counter_handled_test_cases.inc()

        logging.debug('Results for %s: %s', test, str(verdicts))
        if any(v == FALSE for v in verdicts):
            return utils.VerdictFalse(test)
    return utils.VerdictUnknown()
def process_inputs(self,
                   program_file,
                   error_method,
                   nondet_methods,
                   is_ready_func,
                   stop_event,
                   tests_directory=None):
    logging.debug('Checking inputs for file %s', program_file)
    logging.debug('Considering test-case directory: %s', tests_directory)
    result = None
    if self.config.use_klee_replay:
        result = self.perform_klee_replay_validation(
            program_file, is_ready_func, stop_event, tests_directory,
            error_method, nondet_methods)
        logging.info("Klee-replay validation says: %s", result)

    if (not result or not result.is_positive()) and self.config.use_execution:
        result = self.perform_execution_validation(
            program_file, is_ready_func, stop_event, tests_directory,
            error_method, nondet_methods)
        logging.info("Execution validation says: %s", result)

    if not result and self.config.write_xml:
        self.get_testvectors_continuously(program_file, is_ready_func,
                                          stop_event, tests_directory,
                                          error_method, nondet_methods)

    if result is None:
        return utils.VerdictUnknown(), None

    elif result.is_positive():
        if result.test_vector is None:
            result.test_vector = self._extractor.get_test_vector(result.test)
        if result.harness is None:
            harness = self.create_harness(result.test_vector.origin,
                                          result.test_vector, error_method,
                                          nondet_methods)
            with open(harness['name'], 'wb+') as outp:
                outp.write(harness['content'])
            result.harness = harness['name']

    return result, self.statistics
def _hs(self, program_file, validator, new_test_cases):
    """Extract test vectors from the test cases and execute each one.

    Returns a VerdictFalse for the first vector that produces a FALSE
    verdict, or VerdictUnknown otherwise.
    """
    test_vectors = self.create_all_test_vectors(new_test_cases)
    for vector in test_vectors:
        self.timer_execution_validation.start()
        self.timer_validation.start()
        try:
            verdicts = validator.run(program_file, vector)
        finally:
            self.timer_execution_validation.stop()
            self.timer_validation.stop()
        self.counter_handled_test_cases.inc()

        logging.debug('Results for %s: %s', vector, str(verdicts))
        if any(v == FALSE for v in verdicts):
            self.final_test_vector_size.value = len(vector)
            return utils.VerdictFalse(vector, vector)
    return utils.VerdictUnknown()
def run(args, stop_all_event=None):
    """
    Runs tbf with the given arguments in the current working directory.
    All created files are put in a directory `created_files`.

    :param args: parsed command-line arguments
    :param stop_all_event: event used to signal all components to stop
    :return: None
    """
    if args.use_error_method:
        error_method = args.error_method
    else:
        error_method = None
    default_err = "Unknown error"
    processing_result = utils.VerdictUnknown()
    filename = args.file
    processing_stats = None
    generator_stats = None
    old_dir_abs = os.path.abspath('.')
    if args.keep_files:
        created_dir = utils.provide_directory(
            utils.get_output_path('created_files'))
        work_dir = created_dir
    else:
        work_dir = utils.create_temp()
    try:
        _change_dir(work_dir)
        if error_method:
            error_method_exclude = [error_method]
            specification = utils.get_error_spec(error_method)
        else:
            error_method_exclude = ()
            specification = utils.get_coverage_spec()
        nondet_methods = utils.find_nondet_methods(filename,
                                                   args.svcomp_nondets_only,
                                                   error_method_exclude)

        input_generator = _get_input_generator(args)
        test_processor = _get_test_processor(args, args.write_xml,
                                             nondet_methods)

        if args.write_xml:
            testcase_converter.write_metadata(filename,
                                              input_generator.get_name(),
                                              specification,
                                              args.machine_model,
                                              directory=XML_DIR)

        assert not stop_all_event.is_set(), \
            "Stop event is already set before starting input generation"
        stop_input_generator_event = StopEvent(stop_all_event)
        generator_pool = mp.Pool(processes=1)
        if args.existing_tests_dir is None:
            # Define the methods for running test generation and test
            # processing in parallel/sequentially
            if args.run_parallel and _is_processing_necessary(args):
                generator_function = generator_pool.apply_async

                def get_generation_result(res):
                    return res.get(3)

                def is_ready0(r):
                    return r.ready()
            else:
                generator_function = generator_pool.apply

                def get_generation_result(res):
                    return res

                def is_ready0(r):
                    return True

            if args.ig_timelimit:
                utils.set_stop_timer(args.ig_timelimit,
                                     stop_input_generator_event)

            generation_result = generator_function(
                input_generator.generate_input,
                args=(filename, error_method, nondet_methods,
                      stop_input_generator_event))
        else:
            generation_result = None

            def get_generation_result(res):
                return True, None

            def is_ready0(r):
                return True

        # We can't use a def here because we pass this function to a
        # different function, in which the def wouldn't be defined
        is_ready = lambda: is_ready0(generation_result)

        if stop_all_event.is_set():
            stop_input_generator_event.set()
            logging.info("Stop-all event is set, returning from execution")
            return

        processing_result, processing_stats = test_processor.process_inputs(
            filename, error_method, nondet_methods, is_ready, stop_all_event,
            args.existing_tests_dir)
        stop_input_generator_event.set()
        stop_all_event.set()
        logging.debug("Processing terminated and got results")

        try:
            generation_success, generator_stats = get_generation_result(
                generation_result)
            generation_done = True
        except TimeoutError:
            logging.warning("Couldn't get result of input generation")
            generation_done = False

        generator_pool.terminate()
        logging.debug("Input generation terminated and got results")
        _change_dir(old_dir_abs)

        if processing_result.is_positive():
            test_name = os.path.basename(processing_result.test_vector.origin)
            persistent_test = utils.get_output_path(test_name)
            shutil.copy(processing_result.test_vector.origin, persistent_test)

            if processing_result.harness is not None:
                persistent_harness = utils.get_output_path('harness.c')
                shutil.copy(processing_result.harness, persistent_harness)

                # Create an ExecutionRunner only for the purpose of
                # compiling the persistent harness
                validator_for_compilation = ExecutionRunner(
                    args.machine_model, processing_result.test)
                final_harness_name = utils.get_output_path('a.out')
                validator_for_compilation.compile(filename, persistent_harness,
                                                  final_harness_name)
        elif not generation_done:
            processing_result = utils.VerdictUnknown()

    except utils.CompileError as e:
        # This is a proper error because the program can't be compiled,
        # so no tests can be executed
        logging.error("Compile error: %s", e.msg if e.msg else default_err)
    except utils.ParseError as e:
        # This is a proper error because even parsing of the program failed,
        # so preparation for the test execution was not possible
        logging.error("Parse error: %s", e.msg if e.msg else default_err)
    except FileNotFoundError as e:
        logging.error("File not found: %s", e.filename)
    finally:
        # In case an exception occurred before we went back to the
        # original directory
        _change_dir(old_dir_abs)

    statistics = ""
    if generator_stats:
        statistics += str(generator_stats)
    if processing_stats:
        if statistics:
            # If other statistics are there, add some spacing
            statistics += "\n\n"
        statistics += str(processing_stats)

    if not error_method:
        verdict = utils.DONE
    else:
        verdict = processing_result.verdict.upper()
    verdict_str = "\nTBF verdict: " + verdict

    with open(utils.get_output_path('Statistics.txt'), 'w+') as stats:
        stats.write(statistics)
        stats.write('\n')
        stats.write(verdict_str)
        stats.write('\n')

    if args.print_stats:
        print("Statistics:")
        print(statistics)
        print(verdict_str)

    if not args.keep_files:
        shutil.rmtree(work_dir, ignore_errors=True)
def _decide_single_verdict(result, test_origin, test_vector=None):
    if any(r == FALSE for r in result):
        return utils.VerdictFalse(test_origin, test_vector)
    else:
        return utils.VerdictUnknown()
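# Usage sketch (hypothetical values; UNKNOWN is assumed to be a verdict
# constant analogous to FALSE): maps the raw verdicts of one execution
# to a single tbf verdict.
#
#     _decide_single_verdict([UNKNOWN, FALSE], 'test_3', vector)
#     # -> utils.VerdictFalse('test_3', vector)
#     _decide_single_verdict([UNKNOWN], 'test_3')
#     # -> utils.VerdictUnknown()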
def run(args, stop_all_event=None):
    default_err = "Unknown error"
    validation_result = utils.VerdictUnknown()

    filename = os.path.abspath(args.file)
    input_generator = _get_input_generator(args)
    validator = _get_validator(args, input_generator)

    validator_stats = None
    generator_stats = None
    old_dir_abs = os.path.abspath('.')
    try:
        os.chdir(utils.tmp)
        utils.find_nondet_methods(filename, args.svcomp_nondets_only)
        assert not stop_all_event.is_set(), \
            "Stop event is already set before starting input generation"

        if args.existing_tests_dir is None:
            generator_pool = ThreadPool(processes=1)
            # Define the methods for running input generation and
            # validation in parallel/sequentially
            if args.run_parallel:
                generator_function = generator_pool.apply_async

                def get_generation_result(res):
                    return res.get(3)

                def is_ready0(r):
                    return r.ready()
            else:
                generator_function = generator_pool.apply

                def get_generation_result(res):
                    return res

                def is_ready0(r):
                    return True

            generation_result = generator_function(
                input_generator.generate_input,
                args=(filename, stop_all_event))
        else:
            generation_result = None

            def get_generation_result(res):
                return True, None

            def is_ready0(r):
                return True

        # We can't use a def here because we pass this function to a
        # different function, in which the def wouldn't be defined
        is_ready = lambda: is_ready0(generation_result)

        if stop_all_event.is_set():
            logging.info("Stop-all event is set, returning from execution")
            return

        validation_result, validator_stats = validator.check_inputs(
            filename, is_ready, stop_all_event, args.existing_tests_dir)

        try:
            generation_success, generator_stats = get_generation_result(
                generation_result)
            generation_done = True
        except TimeoutError:
            logging.warning(
                "Couldn't get result of input generation due to timeout")
            generation_done = False

        if validation_result.is_positive():
            test_name = os.path.basename(validation_result.test_vector.origin)
            persistent_test = utils.get_file_path(test_name, temp_dir=False)
            shutil.copy(validation_result.test_vector.origin, persistent_test)

            if validation_result.harness is not None:
                persistent_harness = utils.get_file_path(
                    'harness.c', temp_dir=False)
                shutil.copy(validation_result.harness, persistent_harness)

                # Create an ExecutionRunner only for the purpose of
                # compiling the persistent harness
                validator = ExecutionRunner(args.machine_model,
                                            validation_result.test)
                final_harness_name = utils.get_file_path(
                    'a.out', temp_dir=False)
                validator.compile(filename, persistent_harness,
                                  final_harness_name)

            if validation_result.witness is not None:
                persistent_witness = utils.get_file_path(
                    'witness.graphml', temp_dir=False)
                shutil.copy(validation_result.witness, persistent_witness)
        elif not generation_done:
            validation_result = utils.VerdictUnknown()

    except utils.CompileError as e:
        logging.error("Compile error: %s", e.msg if e.msg else default_err)
    except utils.InputGenerationError as e:
        logging.error("Input generation error: %s",
                      e.msg if e.msg else default_err)
    except utils.ParseError as e:
        logging.error("Parse error: %s", e.msg if e.msg else default_err)
    except FileNotFoundError as e:
        logging.error("File not found: %s", e.filename)
    finally:
        os.chdir(old_dir_abs)

    statistics = ""
    if generator_stats:
        statistics += str(generator_stats)
    if validator_stats:
        if statistics:
            # If other statistics are there, add some spacing
            statistics += "\n\n"
        statistics += str(validator_stats)

    verdict_str = "\nTBF verdict: " + validation_result.verdict.upper()

    with open(utils.get_file_path('Statistics.txt', temp_dir=False),
              'w+') as stats:
        stats.write(statistics)
        stats.write('\n')
        stats.write(verdict_str)
        stats.write('\n')

    if args.print_stats:
        print("Statistics:")
        print(statistics)
        print(verdict_str)

    if args.keep_files:
        created_dir = utils.get_file_path('created_files', temp_dir=False)
        logging.info("Moving created files to %s .", created_dir)
        if os.path.exists(created_dir):
            # Remove any leftover directory tree; ignore_errors=True
            # suppresses errors raised during removal
            shutil.rmtree(created_dir, ignore_errors=True)
        shutil.move(utils.tmp, created_dir)
    else:
        shutil.rmtree(utils.tmp, ignore_errors=True)