def run_forest_run(self):
    if self.output_dir is not None and not os.path.exists(self.output_dir):
        os.mkdir(self.output_dir)
    new_file_path = os.path.join(self.config.tmp_dir, "feelingLucky.txt")
    cmd = self.config.get_gdb_command_line(self.config.get_most_standard_binary(), new_file_path, self.gdb_script_path)
    for path, _, files in os.walk(self.search_dir):
        for filename in files:
            eips = []
            indexes = []
            if filename.endswith(self.config.run_extension):
                continue
            Logger.info("Trying my luck with", filename)
            filepath = os.path.join(path, filename)
            orig_file = file(filepath, "rb").read()
            Logger.debug(filepath, debug_level=4)
            #Slide the lucky hex values over every offset of the input file
            for index in xrange(0, len(orig_file) - len(self.lucky_hex_values)):
                new_file = orig_file[:index] + self.lucky_hex_values + orig_file[index + len(self.lucky_hex_values):]
                #Logger.debug(new_file[:100])
                file(new_file_path, "wb").write(new_file)
                crash_eip = self.get_crash_eip(cmd)
                if crash_eip:
                    if crash_eip not in eips:
                        eips.append(crash_eip)
                        indexes.append(index)
                    if self.lucky_hex_values <= crash_eip <= self.lucky_hex_values_upper_bound:
                        o = os.path.join(self.output_dir, filename)
                        Logger.info("WTF, we actually were able to control EIP! See file", o)
                        file(o, "wb").write(new_file)
#                    else:
#                        Logger.debug("Binary crashed, but at eip:", hex(crash_eip), "index to put lucky hex value in file:", index, debug_level=7)
            Logger.info("Seen the following crashing eips for this file:", list_as_intervals(eips, as_hex=True))
            Logger.info("File indexes that lead to different crashes for this file:", list_as_intervals(indexes))
def divide_by_exploitability(self, function=shutil.move):
    if self.output_dir is not None and not os.path.exists(self.output_dir):
        os.mkdir(self.output_dir)
    for path, _, files in os.walk(self.search_dir):
        for filename in files:
            if filename.endswith(self.config.run_extension):
                continue
            filepath = os.path.join(path, filename)
            gdb_out_filepath = filepath + self.config.get_gdb_exploitable_file_extension()
            if os.path.exists(gdb_out_filepath):
                file_content = file(gdb_out_filepath, "rb").read()
                out_dir_main = self.output_dir
                if out_dir_main is None:
                    out_dir_main = path
                out_dir = os.path.join(out_dir_main, "UNCATEGORIZED") + os.path.sep
                for classification in self.classifications:
                    if self._get_search_string_for_classification(classification) in file_content:
                        out_dir = os.path.join(out_dir_main, classification) + os.path.sep
                        break
                if not os.path.exists(out_dir):
                    os.mkdir(out_dir)
                Logger.debug("Moving", filepath + "* to", out_dir, debug_level=4)
                for file_all_extensions in glob.glob(filepath + "*"):
                    function(file_all_extensions, out_dir)
            else:
                Logger.warning("Seems like there is no gdb output file %s, cannot determine exploitability" % gdb_out_filepath)
def divide_by_signal(self, confirmation_loops=0, function=shutil.copyfile):
    if self.output_dir is not None and not os.path.exists(self.output_dir):
        os.mkdir(self.output_dir)
    ex = Executer(self.config)
    for path, _, files in os.walk(self.search_dir):
        for filename in files:
            if filename.endswith(self.config.run_extension):
                continue
            filepath = os.path.join(path, filename)
            command = self.config.get_command_line(self.binary_to_use, filepath)
            Logger.debug("Executing:", command, debug_level=4)
            Logger.busy()
            signal = ex.run_command(command, env=self.config.env)
            #Optionally re-run the same testcase to confirm the signal is reproducible
            while confirmation_loops > 0:
                Logger.busy()
                new_signal = ex.run_command(command, env=self.config.env)
                if new_signal == signal:
                    signal = new_signal
                    confirmation_loops -= 1
                else:
                    Logger.info("Detected varying return codes for exactly the same run")
                    signal = SignalFinder.VARYING_SIGNAL
                    break
            Logger.debug("We consider signal %i for input file %s" % (signal, filename), debug_level=5)
            destination_dir = self.get_folder_path_for_signal(signal)
            if not os.path.exists(destination_dir):
                os.mkdir(destination_dir)
            function(filepath, os.path.join(destination_dir, filename))
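# Added usage sketch (hypothetical, mirrors main() further below): with
# confirmation_loops > 0 each testcase is re-run, and any deviation files it
# under SignalFinder.VARYING_SIGNAL instead of a concrete signal folder:
# sf = SignalFinder(chosen_config)
# sf.divide_by_signal(confirmation_loops=2, function=shutil.copyfile)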
def run_command(self, command, timeout=None, env={}, stdout=file("/dev/null", "w"), stderr=file("/dev/null", "w")):
    #TODO: make stdout / stderr configurable
    #NOTE: env is accepted but not forwarded to Popen here; main() below works
    #around that by exporting the ASAN variables in the parent process
    if not timeout:
        timeout = self.config.run_timeout
    process = subprocess.Popen(command, stdin=None, shell=False, stdout=stdout, stderr=stderr)
    self.current_process = process
    signal.signal(signal.SIGALRM, self._handle_alarm)
    #We also had a problem that memory corruptions...
    signal.signal(signal.SIGTTOU, self._handle_sigttou)
    signal.alarm(timeout)
    self.timeout_flag = False
    self.sigttou_flag = False
    #TODO: get rid of magic number
    ret_signal = self.TIMEOUT_SIGNAL
    #blocking call:
    process.communicate()
    signal.alarm(0)
    #timeout_flag was set by _handle_alarm if the alarm fired
    if self.timeout_flag:
        Logger.debug("Process was killed as it exceeded the time limit", debug_level=3)
        ret_signal = self.TIMEOUT_SIGNAL
    elif self.sigttou_flag:
        Logger.debug("Some memory corruption resulted in a SIGTTOU signal being thrown (usually stops process). We caught it.", debug_level=3)
        ret_signal = signal.SIGTTOU
    else:
        ret_signal = process.returncode
    return ret_signal
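# Added sketch (assumption): run_command() relies on two handlers that are not
# part of this listing. Plausible minimal versions, assuming the flags and the
# current_process attribute used above; the project's real handlers may differ:
def _handle_alarm(self, signum, frame):
    #The timeout fired: remember it and kill the hanging child process
    self.timeout_flag = True
    if self.current_process.poll() is None:
        self.current_process.kill()

def _handle_sigttou(self, signum, frame):
    #Remember the SIGTTOU so the caller reports it instead of a bogus return code
    self.sigttou_flag = True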
def divide_by_signal(self, confirmation_loops=0, function=shutil.copyfile):
    if self.output_dir is not None and not os.path.exists(self.output_dir):
        os.mkdir(self.output_dir)
    ex = Executer(self.config)
    for path, _, files in os.walk(self.search_dir):
        for filename in files:
            if filename.endswith(self.config.run_extension):
                continue
            filepath = os.path.join(path, filename)
            command = self.config.get_command_line(self.binary_to_use, filepath)
            Logger.debug("Executing:", command, debug_level=4)
            Logger.busy()
            signal = ex.get_signal_for_run(command, env=self.config.env)
            while confirmation_loops > 0:
                Logger.busy()
                new_signal = ex.get_signal_for_run(command, env=self.config.env)
                if new_signal == signal:
                    signal = new_signal
                    confirmation_loops -= 1
                else:
                    Logger.info("Detected varying return codes for exactly the same run")
                    signal = SignalFinder.VARYING_SIGNAL
                    break
            Logger.debug("We consider signal %i for input file %s" % (signal, filename), debug_level=5)
            destination_dir = self.get_folder_path_for_signal(signal)
            if not os.path.exists(destination_dir):
                os.mkdir(destination_dir)
            function(filepath, os.path.join(destination_dir, filename))
def minimize_testcases(self):
    executer = Executer(self.config)
    for path, _, files in os.walk(self.search_dir):
        for filename in files:
            if filename.endswith(self.config.run_extension):
                continue
            Logger.info("Minimizing", filename)
            filepath = os.path.join(path, filename)
            cmd = self.config.get_afl_tmin_command_line(filepath, os.path.join(self.output_dir, filename))
            Logger.debug("Executing:", cmd)
            signal = executer.get_signal_for_run(cmd, self.config.run_timeout_tmin, env=self.config.env)
            if signal == SignalFinder.TIMEOUT_SIGNAL:
                Logger.error("Minimizing this file took too long, aborted")
def run_command(self, command, timeout=None, env={}, stdout=file("/dev/null", "w"), stderr=file("/dev/null", "w")):
    #TODO: make stdout / stderr configurable
    if not timeout:
        timeout = self.config.run_timeout

    # Somewhat of a hack to support stdin without any architectural changes:
    # a trailing "< path" element means "feed that file to the child's stdin"
    if command[-1].strip().startswith("<"):
        stdin = file(command[-1].strip()[1:].strip(), 'r')  # everything except the "<"
    else:
        stdin = None

    process = subprocess.Popen(command, stdin=stdin, shell=False, stdout=stdout, stderr=stderr)
    self.current_process = process
    signal.signal(signal.SIGALRM, self._handle_alarm)
    #We also had a problem that memory corruptions...
    signal.signal(signal.SIGTTOU, self._handle_sigttou)
    signal.alarm(timeout)
    self.timeout_flag = False
    self.sigttou_flag = False
    #TODO: get rid of magic number
    ret_signal = self.TIMEOUT_SIGNAL
    #blocking call:
    process.communicate()
    signal.alarm(0)
    if stdin:
        stdin.close()

    if self.timeout_flag:
        #timeout_flag was set by _handle_alarm if the alarm fired
        Logger.debug("Process was killed as it exceeded the time limit", debug_level=3)
        ret_signal = self.TIMEOUT_SIGNAL
    elif self.sigttou_flag:
        Logger.debug("Some memory corruption resulted in a SIGTTOU signal being thrown (usually stops process). We caught it.", debug_level=3)
        ret_signal = signal.SIGTTOU
    else:
        ret_signal = process.returncode
    return ret_signal
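# Added usage sketch (hypothetical paths and binary): a trailing "< path"
# element in the command list makes this run_command() variant redirect that
# file to the child's stdin:
# executer = Executer(config)
# ret_signal = executer.run_command(["./target", "--parse", "< crashes/id0001"])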
def _combined_stdout_stderr(self, binary, gdb_run, hint):
    executer = Executer(self.config)
    for path, _, files in os.walk(self.search_dir):
        for filename in files:
            if filename.endswith(self.config.run_extension):
                continue
            filepath = os.path.join(path, filename)
            if gdb_run:
                command = self.config.get_gdb_command_line(binary, filepath)
                new_filename = filename + "-" + os.path.basename(binary) + hint + self.config.gdb_prefix
            else:
                command = self.config.get_command_line(binary, filepath)
                new_filename = filename + "-" + os.path.basename(binary) + hint
            Logger.debug("Looking for stdout/stderr output:", command, debug_level=4)
            if self.output_dir:
                output_file_name = get_new_output_file_name(self.output_dir, new_filename, self.config.run_extension, self.config.max_digets)
                new_filepath = os.path.join(self.output_dir, output_file_name)
            else:
                output_file_name = get_new_output_file_name(path, new_filename, self.config.run_extension, self.config.max_digets)
                new_filepath = os.path.join(path, output_file_name)
            fp = file(new_filepath, "w")
            executer.get_output_for_run(command, fp, env=self.config.env)
            fp.close()
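# Added worked example (illustrative): for filename "id0001", binary ".../gm",
# hint "-plain" and gdb_run=True, new_filename becomes
# "id0001-gm-plain" + config.gdb_prefix; get_new_output_file_name()
# presumably appends a unique counter (up to config.max_digets digits) plus
# config.run_extension, so repeated runs do not overwrite earlier output.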
def _combined_stdout_stderr(self, binary, gdb_run, hint):
    executer = Executer(self.config)
    for path, _, files in os.walk(self.search_dir):
        for filename in files:
            if filename.endswith(self.config.run_extension):
                continue
            filepath = os.path.join(path, filename)
            if gdb_run:
                command = self.config.get_gdb_command_line(binary, filepath)
                new_filename = filename + "-" + os.path.basename(binary) + hint + self.config.gdb_prefix
            else:
                command = self.config.get_command_line(binary, filepath)
                new_filename = filename + "-" + os.path.basename(binary) + hint
            Logger.debug("Looking for stdout/stderr output:", command, debug_level=4)
            if self.output_dir:
                output_file_name = get_new_output_file_name(self.output_dir, new_filename, self.config.run_extension, self.config.max_digets)
                new_filepath = os.path.join(self.output_dir, output_file_name)
            else:
                output_file_name = get_new_output_file_name(path, new_filename, self.config.run_extension, self.config.max_digets)
                new_filepath = os.path.join(path, output_file_name)
            fp = file(new_filepath, "w")
            Logger.busy()
            executer.run_command(command, env=self.config.env, stdout=fp, stderr=fp)
            fp.close()
def __init__(self, config, search_dir=None, output_dir=None):
    self.config = config
    self.search_dir = search_dir
    if self.search_dir is None:
        self.search_dir = self.config.original_crashes_directory
    self.output_dir = output_dir
    if self.output_dir is None:
        self.output_dir = self.config.default_signal_directory
    if config.target_binary_plain:
        Logger.debug("Using", self.config.target_binary_plain, "for signal run")
        self.binary_to_use = self.config.target_binary_plain
    elif config.target_binary_asan:
        Logger.debug("Using", self.config.target_binary_asan, "for signal run")
        self.binary_to_use = self.config.target_binary_asan
    else:
        Logger.debug("Using", self.config.target_binary_instrumented, "for signal run")
        self.binary_to_use = self.config.target_binary_instrumented
def main():
    #Read the README before you start.
    
    Logger.info("Setting up configuration")

    gdb_script_64bit = r"""printf "[+] Disabling verbose and complaints\n"
set verbose off
set complaints 0
printf "[+] Backtrace:\n"
bt
printf "[+] info reg:\n"
info reg
printf "[+] exploitable:\n"
exploitable
printf "[+] disassemble $rip, $rip+16:\n"
disassemble $rip, $rip+16
"""
    gdb_script_32bit = r"""printf "[+] Disabling verbose and complaints\n"
set verbose off
set complaints 0
printf "[+] Backtrace:\n"
bt
printf "[+] info reg:\n"
info reg
printf "[+] exploitable:\n"
exploitable
printf "[+] disassemble $eip, $eip+16:\n"
disassemble $eip, $eip+16
"""
    where_this_python_script_lives = os.path.dirname(os.path.realpath(__file__))
    
    gdb_command = "gdb"
    gdb_command_osx = "/opt/local/bin/gdb-apple"
    
    config_gm = CrashAnalysisConfig(where_this_python_script_lives, 
                            target_binary_instrumented=where_this_python_script_lives+"/test-cases/gm/graphicsmagick-afl/utilities/gm", 
                            args_before="identify", 
                            args_after="", 
                            target_binary_plain=where_this_python_script_lives+"/test-cases/gm/graphicsmagick-plain/utilities/gm", 
                            target_binary_asan=where_this_python_script_lives+"/test-cases/gm/graphicsmagick-asan/utilities/gm",
                            env={"ASAN_SYMBOLIZER_PATH": "/usr/bin/llvm-symbolizer-3.4", "ASAN_OPTIONS": "symbolize=1:redzone=512:quarantine_size=512Mb:exitcode=1"},
                            crash_dir=where_this_python_script_lives+"/test-cases/gm/crashes",
                            gdb_script=gdb_script_32bit,
                            gdb_binary=gdb_command
                            )
    
#    config_ffmpeg = CrashAnalysisConfig(where_this_python_script_lives, 
#                        target_binary_instrumented=where_this_python_script_lives+"/test-cases/ffmpeg/ffmpeg-afl/ffmpeg", 
#                        args_before="-i", 
#                        args_after="-loglevel quiet", 
#                        target_binary_plain=where_this_python_script_lives+"/test-cases/ffmpeg/ffmpeg-plain/ffmpeg", 
##                        target_binary_asan=where_this_python_script_lives+"/test-cases/ffmpeg/ffmpeg-asan/ffmpeg",
#                        env={"ASAN_SYMBOLIZER_PATH": "/usr/bin/llvm-symbolizer-3.4", "ASAN_OPTIONS": "symbolize=1:redzone=512:quarantine_size=512Mb:exitcode=1"},
#                        crash_dir=where_this_python_script_lives+"/test-cases/ffmpeg/crashes",
#                        gdb_script=gdb_script_32bit,
#                        gdb_binary=gdb_command
#                        )

    #
    Logger.info("Input crashes directory operations")
    #
    
    Logger.info("Removing README.txt files")
    fdf = FileDuplicateFinder(config_gm, config_gm.original_crashes_directory)
    fdf.remove_readmes()
    
    Logger.info("Removing duplicates from original crashes folder (same file size + MD5)")
    fdf.delete_duplicates_recursively()
    
    Logger.info("Renaming files from original crashes folder so that the filename is a unique identifier. This allows us to copy all crash files into one directory (eg. for tmin output) if necessary, without name collisions")
    fdf.rename_same_name_files()
    
    #
    Logger.info("Finding interesting signals (all crashes)")
    #
    sf_all_crashes = SignalFinder(config_gm)
    if os.path.exists(config_gm.default_signal_directory):
        Logger.warning("Seems like all crashes were already categorized by signal, skipping. If you want to rerun: rm -r", config_gm.default_signal_directory)
    else:
        Logger.debug("Dividing files to output folder according to their signal")
        sf_all_crashes.divide_by_signal()
    
    #Interesting signals: negative on OSX, 129 and above on Linux (a shell reports 128+signum, e.g. 139 = 128 + SIGSEGV(11))
    #Uninteresting signals: we usually don't care about signals 0, 1, 2, etc. up to 128
    uninteresting_signals = range(0, 129)
    
    analyze_output_and_exploitability(config_gm, sf_all_crashes, uninteresting_signals, message_prefix="Interesting signals /")
        
    Logger.info("Interesting signals / Minimizing input (afl-tmin)")
    if os.path.exists(config_gm.default_minimized_crashes_directory):
        Logger.warning("Seems like crashes were already minimized, skipping. If you want to rerun: rm -r", config_gm.default_minimized_crashes_directory)
    else:
        for signal, signal_folder in sf_all_crashes.get_folder_paths_for_signals_if_exist(uninteresting_signals):
            Logger.debug("Minimizing inputs resulting in signal %i" % signal)
            im = InputMinimizer(config_gm, signal_folder)
            im.minimize_testcases()
        
        Logger.info("Interesting signals / Minimized inputs / Deduplication")
        fdf_minimized = FileDuplicateFinder(config_gm, config_gm.default_minimized_crashes_directory)
        fdf_minimized.delete_duplicates_recursively()
        
    #
    Logger.info("Interesting signals / Minimized inputs / Finding interesting signals")
    #
    sf_minimized_crashes = SignalFinder(config_gm, config_gm.default_minimized_crashes_directory, os.path.join(config_gm.output_dir, "minimized-per-signal"))
    if os.path.exists(sf_minimized_crashes.output_dir):
        Logger.warning("Seems like minimized crashes were already categorized by signal, skipping. If you want to rerun: rm -r", sf_minimized_crashes.output_dir)
    else:
        os.mkdir(sf_minimized_crashes.output_dir)
        Logger.info("Dividing files to output folder according to their signal")
        sf_minimized_crashes.divide_by_signal(0)
    
    
    analyze_output_and_exploitability(config_gm, sf_minimized_crashes, uninteresting_signals, message_prefix="Interesting signals / Minimized inputs /")
    
    
#     # If you are in the mood to waste a little CPU time, run this
#     Logger.info("Found interesting_signals (interesting interesting_signals) / Minimized inputs (interested interesting_signals) / Feeling lucky auto exploitation")
#     #
#     fle = FeelingLuckyExploiter(config_gm, sf_minimized_crashes.output_dir)
#     #os.mkdir(fle.output_dir)
#     fle.run_forest_run()
    
#TODO: develop
#- peruvian were-rabbit (afl-fuzz -C crash exploration mode)?
#- exploitable script, something along: less `grep -l 'Exploitability Classification: EXPLOITABLE' output/per-signal/*/*gdb*`

    cleanup(config_gm)
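# Added sketch (assumption): cleanup() is not part of this listing; presumably
# it removes temporary working artifacts such as the feelingLucky.txt file,
# along the lines of:
# def cleanup(config):
#     for leftover in glob.glob(os.path.join(config.tmp_dir, "*")):
#         os.remove(leftover)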
def main():
    #Read the README before you start.
    
    Logger.info("Setting up configuration")

    gdb_script_64bit = r"""printf "[+] Disabling verbose and complaints\n"
set verbose off
set complaints 0
printf "[+] Backtrace:\n"
bt
printf "[+] info reg:\n"
info reg
printf "[+] exploitable:\n"
exploitable
printf "[+] disassemble $rip, $rip+16:\n"
disassemble $rip, $rip+16
printf "[+] list\n"
list
"""
    gdb_script_32bit = r"""printf "[+] Disabling verbose and complaints\n"
set verbose off
set complaints 0
printf "[+] Backtrace:\n"
bt
printf "[+] info reg:\n"
info reg
printf "[+] exploitable:\n"
exploitable
printf "[+] disassemble $eip, $eip+16:\n"
disassemble $eip, $eip+16
printf "[+] list\n"
list
"""

    #TODO: Make sure gdb script doesn't abort on error
    #ignoring errors in gdb scripts: http://stackoverflow.com/questions/17923865/gdb-stops-in-a-command-file-if-there-is-an-error-how-to-continue-despite-the-er
    gdb_script_32bit_noerror = r"""python
def my_ignore_errors(arg):
  try:
    gdb.execute("print \"" + "Executing command: " + arg + "\"")
    gdb.execute (arg)
  except:
    gdb.execute("print \"" + "ERROR: " + arg + "\"")

my_ignore_errors("p p")
my_ignore_errors("p p->v1")
gdb.execute("quit")
    """

    where_this_python_script_lives = os.path.dirname(os.path.realpath(__file__))
    
    gdb_command = "/usr/bin/gdb"
    #gdb_command_osx = "/opt/local/bin/gdb-apple"
    
    #TODO: For some reason the ASAN environment variables are not correctly set when given to the subprocess module... so let's just set it in parent process already:
    os.environ['ASAN_SYMBOLIZER_PATH'] = "/usr/bin/llvm-symbolizer-3.4"
    os.environ['ASAN_OPTIONS'] = "symbolize=1:redzone=512:quarantine_size=512Mb:exitcode=1:abort_on_error=1"
    env={"ASAN_SYMBOLIZER_PATH": "/usr/bin/llvm-symbolizer-3.4", "ASAN_OPTIONS": "symbolize=1:redzone=512:quarantine_size=512Mb:exitcode=1:abort_on_error=1"}
    
    ###
    #This import decides which testcase/binary we want to run!
    ###
    from testcases.gm.Config import create_config
    #from testcases.ffmpeg.Config import create_config
    #see CrashAnalysisConfig for more options that get passed on by create_config
    chosen_config = create_config(where_this_python_script_lives, env=env, gdb_script=gdb_script_32bit, gdb_binary=gdb_command)
    chosen_config.sanity_check()
    
    #
    Logger.info("Input crashes directory operations")
    #
    
    Logger.info("Removing README.txt files")
    fdf = FileDuplicateFinder(chosen_config, chosen_config.original_crashes_directory)
    fdf.remove_readmes()
    
    Logger.info("Removing duplicates from original crashes folder (same file size + MD5)")
    fdf.delete_duplicates_recursively()
    
    Logger.info("Renaming files from original crashes folder so that the filename is a unique identifier. This allows us to copy all crash files into one directory (eg. for tmin output) if necessary, without name collisions")
    fdf.rename_same_name_files()
    #OR:
    #Logger.info("Renaming all files to numeric values, as some programs prefer no special chars in filenames and might require a specific file extension")
    #fdf.rename_all_files(".png")
    
    #
    Logger.info("Finding interesting signals (all crashes)")
    #
    sf_all_crashes = SignalFinder(chosen_config)
    if os.path.exists(chosen_config.default_signal_directory):
        Logger.warning("Seems like all crashes were already categorized by signal, skipping. If you want to rerun: rm -r", chosen_config.default_signal_directory)
    else:
        Logger.debug("Dividing files to output folder according to their signal")
        sf_all_crashes.divide_by_signal()
    
    #Interesting signals: negative on OSX, 129 and above sometimes for Linux on the shell (a shell reports 128+signum, e.g. 139 = 128 + SIGSEGV(11), depending on the mechanism used)
    #Uninteresting signals: we usually don't care about signals 0, 1, 2, etc. up to 128
    uninteresting_signals = range(0, 129)
    
    analyze_output_and_exploitability(chosen_config, sf_all_crashes, uninteresting_signals, message_prefix="Interesting signals /")
        
    Logger.info("Interesting signals / Minimizing input (afl-tmin)")
    if os.path.exists(chosen_config.default_minimized_crashes_directory):
        Logger.warning("Seems like crashes were already minimized, skipping. If you want to rerun: rm -r", chosen_config.default_minimized_crashes_directory)
    else:
        for signal, signal_folder in sf_all_crashes.get_folder_paths_for_signals_if_exist(uninteresting_signals):
            Logger.debug("Minimizing inputs resulting in signal %i" % signal)
            im = InputMinimizer(chosen_config, signal_folder)
            im.minimize_testcases()
        
        Logger.info("Interesting signals / Minimized inputs / Deduplication")
        fdf_minimized = FileDuplicateFinder(chosen_config, chosen_config.default_minimized_crashes_directory)
        fdf_minimized.delete_duplicates_recursively()
        
    #
    Logger.info("Interesting signals / Minimized inputs / Finding interesting signals")
    #
    sf_minimized_crashes = SignalFinder(chosen_config, chosen_config.default_minimized_crashes_directory, os.path.join(chosen_config.output_dir, "minimized-per-signal"))
    if os.path.exists(sf_minimized_crashes.output_dir):
        Logger.warning("Seems like minimized crashes were already categorized by signal, skipping. If you want to rerun: rm -r", sf_minimized_crashes.output_dir)
    else:
        os.mkdir(sf_minimized_crashes.output_dir)
        Logger.info("Dividing files to output folder according to their signal")
        sf_minimized_crashes.divide_by_signal(0)
    
    
    analyze_output_and_exploitability(chosen_config, sf_minimized_crashes, uninteresting_signals, message_prefix="Interesting signals / Minimized inputs /")
    
#TODO:
#- Make (some) modules work as standalone applications with command line parsing
#- The FeelingLuckyExploiter thing. Need to get a small test sample where I know it should work.
#     # If you are in the mood to waste a little CPU time, run this
#     Logger.info("Found interesting_signals (interesting interesting_signals) / Minimized inputs (interested interesting_signals) / Feeling lucky auto exploitation")
#     #
#     fle = FeelingLuckyExploiter(chosen_config, sf_minimized_crashes.output_dir)
#     #os.mkdir(fle.output_dir)
#     fle.run_forest_run()


    cleanup(chosen_config)
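# Added note: the listings define main() but omit the entry-point guard;
# presumably each script ends with the standard:
if __name__ == "__main__":
    main()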