Example #1
    def create_input_generation_cmds(self, filename):
        if self.machine_model.is_32:
            mm_args = ['-arch', 'i386']
        elif self.machine_model.is_64:
            mm_args = ['-arch', 'x86_64']
        else:
            raise AssertionError("Unhandled machine model: " +
                                 self.machine_model.name)

        compiled_file = '.'.join(
            os.path.basename(filename).split('.')[:-1] + ['bc'])
        compiled_file = utils.get_file_path(compiled_file, temp_dir=True)
        compile_cmd = ['clang'] + mm_args + [
            '-I', include_dir, '-emit-llvm', '-c', '-g', '-o', compiled_file,
            filename
        ]
        input_generation_cmd = ['klee']
        if self.timelimit > 0:
            input_generation_cmd += ['-max-time', str(self.timelimit)]
        input_generation_cmd.append('-only-output-states-covering-new')
        input_generation_cmd += ['-search=' + h for h in self.search_heuristic]
        input_generation_cmd += ['-output-dir=' + tests_dir]
        input_generation_cmd += [compiled_file]

        return [compile_cmd, input_generation_cmd]
Example #2
    def create_witness(self, program_file, test_name, test_vector,
                       nondet_methods):
        """
        Creates a witness for the test file produced by crest.
        Test files produced by our version of crest specify one test value per
        line, without any mention of the variable the value is assigned to.
        Because of this, we have to build a fancy witness automaton of the
        following format: for each test value specified in the test file,
        there is one predecessor and one successor state. These two states
        are connected by one transition for each call to a CREST_x(..)
        function. Each of these transitions carries the assumption that the
        variable specified in the corresponding CREST_x(..) function has the
        current test value.
        """
        witness = self.witness_creator.create_witness(
            producer=self.get_name(),
            program_file=program_file,
            test_vector=test_vector,
            nondet_methods=nondet_methods,
            machine_model=self.machine_model,
            error_lines=self.get_error_lines(program_file))

        witness_file = test_name + ".witness.graphml"
        witness_file = utils.get_file_path(witness_file)

        return {'name': witness_file, 'content': witness}
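
Note: the witness automaton described in the docstring above can be hard to picture from prose alone. The following sketch is purely illustrative and not part of tbf (the helper name and the printed example data are made up); it builds the described shape as plain Python data, with one predecessor/successor state pair per test value and one transition per CREST_x(..) call carrying the assumption that the corresponding variable equals that value.

# Illustrative sketch only -- build_witness_automaton is hypothetical and not
# part of tbf. It models the automaton described in create_witness's docstring.
def build_witness_automaton(test_values, crest_variables):
    """Return (states, transitions): one predecessor/successor state pair per
    test value, one transition per CREST_x(..) call, each carrying the
    assumption that the corresponding variable equals the current test value."""
    states = ['q0']
    transitions = []
    predecessor = 'q0'
    for i, value in enumerate(test_values, start=1):
        successor = 'q{}'.format(i)
        states.append(successor)
        for variable in crest_variables:
            transitions.append({
                'source': predecessor,
                'target': successor,
                'assumption': '{} == {}'.format(variable, value),
            })
        predecessor = successor
    return states, transitions

# Two test values and two CREST_x(..) calls yield three states and four transitions.
print(build_witness_automaton(['5', '0'], ['x', 'y']))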
Example #3
    def _create_executable_harness(self, program_file):
        nondet_methods = utils.get_nondet_methods()
        harness_content = self.harness_generator.create_harness(
            nondet_methods, utils.error_method)
        with open(self.harness_file, 'wb+') as outp:
            outp.write(harness_content)
        output_file = utils.get_file_path('a.out', temp_dir=True)
        return self.compile(program_file, self.harness_file, output_file)
Example #4
    def _create_testcase_dir(self):
        testcase_dir = utils.get_file_path('initial_testcases', temp_dir=True)
        os.mkdir(testcase_dir)
        initial_testcase = os.path.join(testcase_dir, '0.afl-test')
        with open(initial_testcase, 'w+') as outp:
            # FIXME: This is an unreliable first test case
            outp.write(1000 * '0\n')
        return testcase_dir
Example #5
    def create_harness(self, test_name, test_vector, nondet_methods):
        harness = self.harness_creator.create_harness(
            nondet_methods=nondet_methods,
            error_method=utils.error_method,
            test_vector=test_vector)
        harness_file = test_name + '.harness.c'
        harness_file = utils.get_file_path(harness_file)

        return {'name': harness_file, 'content': harness}
Example #6
    def create_input_generation_cmds(self, filename):
        compiled_file = '.'.join(os.path.basename(filename).split('.')[:-1])
        compiled_file = utils.get_file_path(compiled_file, temp_dir=True)
        machinem_arg = self.machine_model.compile_parameter
        compile_cmd = [
            'gcc', '-std=gnu11', machinem_arg, '-I', include_dir, '-o',
            compiled_file, generator_harness, filename, '-lm'
        ]
        input_generation_cmd = [random_runner, compiled_file]

        return [compile_cmd, input_generation_cmd]
Example #7
    def _get_cmd(self, program_file, witness_file):
        if not self.executable:
            import shutil
            self.executable = self.tool.executable()
            self.cpa_directory = os.path.join(
                os.path.dirname(self.executable), '..')
            config_copy_dir = utils.get_file_path('config', temp_dir=True)
            if not os.path.exists(config_copy_dir):
                copy_dir = os.path.join(self.cpa_directory, 'config')
                shutil.copytree(copy_dir, config_copy_dir)
        return [self.executable] + \
            utils.get_cpachecker_options(witness_file) + \
            ['-witnessValidation', program_file]
Example #8
    def create_input_generation_cmds(self, filename):
        import shutil
        config_copy_dir = utils.get_file_path('config', temp_dir=True)
        if not os.path.exists(config_copy_dir):
            copy_dir = os.path.join(base_dir, 'config')
            shutil.copytree(copy_dir, config_copy_dir)

        input_generation_cmd = [binary]
        if self.timelimit > 0:
            input_generation_cmd += ['-timelimit', str(self.timelimit)]
        input_generation_cmd += [
            '-tiger-variants', '-outputpath', tests_dir, '-spec',
            utils.spec_file, filename
        ]

        return [input_generation_cmd]
Example #9
from tbf.input_generation import BaseInputGenerator
from tbf.testcase_validation import TestValidator as BaseTestValidator
import os
import tbf.utils as utils
import glob
from tbf.harness_generation import HarnessCreator
import pathlib

module_dir = os.path.dirname(os.path.realpath(__file__))
bin_dir = os.path.join(module_dir, 'afl/bin')
findings_dir = utils.get_file_path('findings', temp_dir=True)
name = 'afl-fuzz'
tests_dir = utils.tmp


class InputGenerator(BaseInputGenerator):
    def create_input_generation_cmds(self, program_file):
        instrumented_program = 'tested.out'
        compile_cmd = [
            os.path.join(bin_dir, 'afl-gcc'),
            self.machine_model.compile_parameter,
            '-o', instrumented_program, program_file
        ]

        testcase_dir = self._create_testcase_dir()
        input_gen_cmd = [
            os.path.join(bin_dir, 'afl-fuzz'), '-i', testcase_dir, '-o',
            findings_dir, '--',
            os.path.abspath(instrumented_program)
        ]
        return [compile_cmd, input_gen_cmd]
Example #10
def run(args, stop_all_event=None):
    default_err = "Unknown error"

    validation_result = utils.VerdictUnknown()

    filename = os.path.abspath(args.file)
    input_generator = _get_input_generator(args)
    validator = _get_validator(args, input_generator)

    validator_stats = None
    generator_stats = None
    old_dir_abs = os.path.abspath('.')
    try:
        os.chdir(utils.tmp)

        utils.find_nondet_methods(filename, args.svcomp_nondets_only)
        assert not stop_all_event.is_set(), \
            "Stop event is already set before starting input generation"

        if args.existing_tests_dir is None:
            generator_pool = ThreadPool(processes=1)
            # Define how input generation is dispatched: in parallel with the
            # validation below, or sequentially (see the standalone sketch
            # after this example).
            if args.run_parallel:
                generator_function = generator_pool.apply_async

                def get_generation_result(res):
                    return res.get(3)

                def is_ready0(r):
                    return r.ready()
            else:
                generator_function = generator_pool.apply

                def get_generation_result(res):
                    return res

                def is_ready0(r):
                    return True

            generation_result = generator_function(
                input_generator.generate_input, args=(filename, stop_all_event))

        else:
            generation_result = None

            def get_generation_result(res):
                return True, None

            def is_ready0(r):
                return True

        # Bind generation_result into a zero-argument callable so the
        # readiness check can be handed to the validator below.
        is_ready = lambda: is_ready0(generation_result)

        if stop_all_event.is_set():
            logging.info("Stop-all event is set, returning from execution")
            return

        validation_result, validator_stats = validator.check_inputs(
            filename, is_ready, stop_all_event, args.existing_tests_dir)

        try:
            generation_success, generator_stats = get_generation_result(
                generation_result)
            generation_done = True
        except (TimeoutError, multiprocessing.TimeoutError):
            # AsyncResult.get raises multiprocessing.TimeoutError rather than
            # the built-in TimeoutError; catching both assumes `import
            # multiprocessing` at module level (not shown in this snippet).
            logging.warning(
                "Couldn't get result of input generation due to timeout")
            generation_done = False

        if validation_result.is_positive():
            test_name = os.path.basename(validation_result.test_vector.origin)
            persistent_test = utils.get_file_path(test_name, temp_dir=False)
            shutil.copy(validation_result.test_vector.origin, persistent_test)

            if validation_result.harness is not None:
                persistent_harness = utils.get_file_path(
                    'harness.c', temp_dir=False)
                shutil.copy(validation_result.harness, persistent_harness)

                # Create an ExecutionRunner only for the purpose of
                # compiling the persistent harness
                validator = ExecutionRunner(args.machine_model,
                                            validation_result.test)
                final_harness_name = utils.get_file_path('a.out', temp_dir=False)
                validator.compile(filename, persistent_harness, final_harness_name)

            if validation_result.witness is not None:
                persistent_witness = utils.get_file_path(
                    'witness.graphml', temp_dir=False)
                shutil.copy(validation_result.witness, persistent_witness)

        elif not generation_done:
            validation_result = utils.VerdictUnknown()

    except utils.CompileError as e:
        logging.error("Compile error: %s", e.msg if e.msg else default_err)
    except utils.InputGenerationError as e:
        logging.error("Input generation error: %s",
                      e.msg if e.msg else default_err)
    except utils.ParseError as e:
        logging.error("Parse error: %s", e.msg if e.msg else default_err)
    except FileNotFoundError as e:
        logging.error("File not found: %s", e.filename)
    finally:
        os.chdir(old_dir_abs)

        statistics = ""
        if generator_stats:
            statistics += str(generator_stats)
        if validator_stats:
            if statistics:  # If other statistics are there, add some spacing
                statistics += "\n\n"
            statistics += str(validator_stats)
        verdict_str = "\nTBF verdict: " + validation_result.verdict.upper()
        with open(utils.get_file_path('Statistics.txt', temp_dir=False),
                  'w+') as stats:
            stats.write(statistics)
            stats.write('\n')
            stats.write(verdict_str)
            stats.write('\n')

        if args.print_stats:
            print("Statistics:")
            print(statistics)
        print(verdict_str)

        if args.keep_files:
            created_dir = utils.get_file_path('created_files', temp_dir=False)
            logging.info("Moving created files to %s .", created_dir)
            if os.path.exists(created_dir):
                # Remove the previous result directory; ignore_errors=True
                # suppresses errors (e.g. vanished or read-only files) during removal.
                shutil.rmtree(created_dir, ignore_errors=True)
            shutil.move(utils.tmp, created_dir)
        else:
            shutil.rmtree(utils.tmp, ignore_errors=True)
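
Note: the dispatch logic near the top of run() (see the comment in Example #10) can be read in isolation. The following standalone sketch is not part of tbf and all names in it are invented; it shows the same pattern: the call site stays identical whether input generation runs in parallel with validation (ThreadPool.apply_async) or sequentially (ThreadPool.apply), and only the result accessors differ.

# Standalone sketch of the parallel/sequential dispatch pattern used in run().
# Not part of tbf; `dispatch` and the trivial work function are hypothetical.
from multiprocessing.pool import ThreadPool


def dispatch(work, run_parallel):
    pool = ThreadPool(processes=1)
    if run_parallel:
        submit = pool.apply_async
        # AsyncResult accessors: get(timeout) may raise multiprocessing.TimeoutError
        get_result = lambda res: res.get(3)
        is_ready = lambda res: res.ready()
    else:
        submit = pool.apply  # blocks until work() returns
        get_result = lambda res: res
        is_ready = lambda res: True
    return submit(work), get_result, is_ready


result, get_result, is_ready = dispatch(lambda: 42, run_parallel=True)
while not is_ready(result):
    pass
print(get_result(result))  # prints 42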