Example #1
 def divide_by_signal(self, confirmation_loops=0, function=shutil.copyfile):
     if self.output_dir is not None and not os.path.exists(self.output_dir):
         os.mkdir(self.output_dir)
     ex = Executer(self.config)
     for path, _, files in os.walk(self.search_dir):
         for filename in files:
             if filename.endswith(self.config.run_extension):
                 continue
             filepath = os.path.join(path, filename)
             command = self.config.get_command_line(self.binary_to_use,
                                                    filepath)
             Logger.debug("Executing:", command, debug_level=4)
             Logger.busy()
             signal = ex.run_command(command, env=self.config.env)
             while confirmation_loops > 0:
                 Logger.busy()
                 new_signal = ex.run_command(command, env=self.config.env)
                 if new_signal == signal:
                     signal = new_signal
                     confirmation_loops -= 1
                 else:
                     Logger.info(
                         "Detected varying return codes for exactly the same run"
                     )
                     signal = SignalFinder.VARYING_SIGNAL
                     break
             Logger.debug("We consider signal %i for input file %s" %
                          (signal, filename),
                          debug_level=5)
             destination_dir = self.get_folder_path_for_signal(signal)
             if not os.path.exists(destination_dir):
                 os.mkdir(destination_dir)
             function(filepath, os.path.join(destination_dir, filename))
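
The bucketing above works because, on POSIX, subprocess reports a negative returncode when the child dies from a signal. A minimal standalone Python 3 sketch of that convention (not part of the tool):

import subprocess

# The child kills itself with SIGSEGV (signal 11).
proc = subprocess.Popen(["sh", "-c", "kill -SEGV $$"])
proc.communicate()
# On POSIX, a child terminated by a signal reports returncode == -signal.
print(proc.returncode)  # -11
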
    def run_forest_run(self):
        if self.output_dir is not None and not os.path.exists(self.output_dir):
            os.mkdir(self.output_dir)
        new_file_path = os.path.join(self.config.tmp_dir, "feelingLucky.txt")
        cmd = self.config.get_gdb_command_line(self.config.get_most_standard_binary(), new_file_path, self.gdb_script_path)
        for path, _, files in os.walk(self.search_dir):
            for filename in files:
                eips = []
                indexes = []
                if filename.endswith(self.config.run_extension):
                    continue
                Logger.info("Trying my luck with", filename)
                filepath = os.path.join(path, filename)
                orig_file = file(filepath, "rb").read()
                Logger.debug(filepath, debug_level=4)
                for index in xrange(0,len(orig_file)-len(self.lucky_hex_values)):
                    new_file = orig_file[:index] + self.lucky_hex_values + orig_file[index+len(self.lucky_hex_values):]
                    #Logger.debug(new_file[:100])
                    file(new_file_path, "w").write(new_file)
                    crash_eip = self.get_crash_eip(cmd)
                    if crash_eip:
                        if crash_eip not in eips:
                            eips.append(crash_eip)
                            indexes.append(index)
                        if self.lucky_hex_values <= crash_eip <= self.lucky_hex_values_upper_bound:
                            o = os.path.join(self.output_dir, filename)
                            Logger.info("WTF, we actually were able to control EIP! See file ", o)
                            file(o, "w").write(new_file)
#                        else:
#                            Logger.debug("Binary crashed, but at eip:", hex(crash_eip), "index to put lucky hex value in file:", index, debug_level=7)
                Logger.info("Seen the following crashing eips for this file:", list_as_intervals(eips, as_hex=True))
                Logger.info("File indexes that lead to different crashes for this file:", list_as_intervals(indexes))
Example #3
 def run_command(self, command, timeout=None, env={}, stdout=file("/dev/null", "w"), stderr=file("/dev/null", "w")):
     #TODO: make stdout / stderr configurable
     if not timeout:
         timeout = self.config.run_timeout
     #env was accepted but never forwarded; pass it through (an empty dict falls back to inheriting ours)
     process = subprocess.Popen(command, stdin=None, shell=False, stdout=stdout, stderr=stderr, env=env or None)
     self.current_process = process
     signal.signal(signal.SIGALRM, self._handle_alarm)
     #We also had a problem that memory corruptions...
     signal.signal(signal.SIGTTOU, self._handle_sigttou)
     signal.alarm(timeout)
     self.timeout_flag = False
     self.sigttou_flag = False
     #TODO: get rid of magic number
     ret_signal = self.TIMEOUT_SIGNAL
     #blocking call:
     process.communicate()
     signal.alarm(0)
     #If the alarm fired, _handle_alarm has set timeout_flag by the time we reach this line
     if self.timeout_flag:
         Logger.debug("Process was killed as it exceeded the time limit", debug_level=3)
         ret_signal = self.TIMEOUT_SIGNAL
     elif self.sigttou_flag:
         Logger.debug("Some memory corruption resulted in a SIGTTOU signal being thrown (usually stops process). We caught it.", debug_level=3)
         ret_signal = signal.SIGTTOU
     else:
         ret_signal = process.returncode
     return ret_signal
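
run_command builds its timeout from SIGALRM because this code predates the timeout parameter in the subprocess module. In Python 3 the same guard needs no signal handling; a rough equivalent, assuming only a TIMEOUT_SIGNAL sentinel like the original's:

import subprocess

TIMEOUT_SIGNAL = 9999  # sentinel value, stands in for the original's magic number

def run_with_timeout(command, timeout, env=None):
    try:
        completed = subprocess.run(command, env=env, timeout=timeout,
                                   stdout=subprocess.DEVNULL,
                                   stderr=subprocess.DEVNULL)
        return completed.returncode
    except subprocess.TimeoutExpired:
        return TIMEOUT_SIGNAL
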
 def delete_duplicates_recursively(self):
     Logger.info("Removing duplicates in", self.search_dir)
     for duplicate in self.find_duplicate_contents(self.search_dir):
         if duplicate.endswith(self.config.run_extension):
             continue
         Logger.info("Deleting the duplicate file:", duplicate)
         os.remove(duplicate)
 def loop_listen(self) -> None:
     while True:
         data = self.client.recv(self.buffer_size).decode('utf-8')
         if data != '':
             Logger.console_log(message="Received {} of type {}".format(
                 data, type(data)),
                                status=Logger.LogStatus.SUCCESS)
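
find_duplicate_contents itself is not among these examples; the main() below describes it as "same file size + MD5". A plausible sketch under that assumption (a hypothetical helper, not the tool's actual code):

import hashlib
import os
from collections import defaultdict

def find_duplicate_contents(search_dir):
    # Group by size first: files of unique size cannot be duplicates.
    by_size = defaultdict(list)
    for path, _, files in os.walk(search_dir):
        for filename in files:
            filepath = os.path.join(path, filename)
            by_size[os.path.getsize(filepath)].append(filepath)
    seen = {}
    for same_size in by_size.values():
        if len(same_size) < 2:
            continue
        for filepath in same_size:
            with open(filepath, "rb") as f:
                digest = hashlib.md5(f.read()).hexdigest()
            if digest in seen:
                yield filepath  # every copy after the first is a duplicate
            else:
                seen[digest] = filepath
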
Example #7
 def divide_by_exploitability(self, function=shutil.move):
     if self.output_dir is not None and not os.path.exists(self.output_dir):
         os.mkdir(self.output_dir)
     for path, _, files in os.walk(self.search_dir):
         for filename in files:
             if filename.endswith(self.config.run_extension):
                 continue
             filepath = os.path.join(path, filename)
             gdb_out_filepath = filepath+self.config.get_gdb_exploitable_file_extension()
             if os.path.exists(gdb_out_filepath):
                 file_content = file(gdb_out_filepath, "rb").read()
                 out_dir_main = self.output_dir
                 if out_dir_main is None:
                     out_dir_main = path
                 out_dir = os.path.join(out_dir_main, "UNCATEGORIZED") + os.path.sep
                 for classification in self.classifications:
                     if self._get_search_string_for_classification(classification) in file_content:
                         out_dir = os.path.join(out_dir_main, classification) + os.path.sep
                         break
                 if not os.path.exists(out_dir):
                     os.mkdir(out_dir)
                 Logger.debug("Moving", filepath+"* to", out_dir, debug_level=4)
                 for file_all_extensions in glob.glob(filepath+"*"):
                     function(file_all_extensions, out_dir)
             else:
                 Logger.warning("Seems like there is no gdb output file %s, can not find exploitability" % gdb_out_filepath)
 def remove_readmes(self):
     for path, _, files in os.walk(self.search_dir):
         for filename in files:
             if filename == "README.txt":
                 filepath = os.path.join(path, filename)
                 Logger.info("Deleting the file:", filepath)
                 os.remove(filepath)
 def divide_by_signal(self, confirmation_loops=0, function=shutil.copyfile):
     if self.output_dir is not None and not os.path.exists(self.output_dir):
         os.mkdir(self.output_dir)
     ex = Executer(self.config)
     for path, _, files in os.walk(self.search_dir):
         for filename in files:
             if filename.endswith(self.config.run_extension):
                 continue
             filepath = os.path.join(path, filename)
             command = self.config.get_command_line(self.binary_to_use, filepath)
             Logger.debug("Executing:", command, debug_level=4)
             Logger.busy()
             signal = ex.get_signal_for_run(command, env=self.config.env)
             while confirmation_loops > 0:
                 Logger.busy()
                 new_signal = ex.get_signal_for_run(command, env=self.config.env)
                 if new_signal == signal:
                     signal = new_signal
                     confirmation_loops -= 1
                 else:
                     Logger.info("Detected varying return codes for exactly the same run")
                     signal = SignalFinder.VARYING_SIGNAL
                     break
             Logger.debug("We consider signal %i for input file %s" % (signal, filename), debug_level=5)
             destination_dir = self.get_folder_path_for_signal(signal)
             if not os.path.exists(destination_dir):
                 os.mkdir(destination_dir)
             function(filepath, os.path.join(destination_dir, filename))
Example #12
    def __init__(self,
                 data_refresh_rate: int = 1800,
                 relative_path_correction: str = "") -> None:
        """
        Constructor.
        :param data_refresh_rate:
        :param relative_path_correction:
        """
        Logger.console_log("Initializing Stock Scraper.",
                           status=Logger.LogStatus.COMMUNICATION)
        self.scrape = True
        self.available_cpus = cpu_count() - 1
        self.threads = {}
        self.mode_lock = Lock()
        self.executive_lock = Lock()
        self.mode = Scraper.ServerModes.INVALID
        self.stock_ticker_letters_to_survey = list(ascii_uppercase)
        self.relative_path_correction = relative_path_correction

        # Initialize Threads
        # Build worker threads for available number of cpus
        self.threads[Scraper.WorkerThread] = []
        for available_cpu_index in range(self.available_cpus):
            self.threads[Scraper.WorkerThread].append(
                Scraper.WorkerThread(thread_id=available_cpu_index,
                                     scraper_server=self))
        # Build Executive Thread
        self.threads[Scraper.ExecutiveThread] = Scraper.ExecutiveThread(
            thread_id=0,
            scraper_server=self,
            worker_threads=self.threads[Scraper.WorkerThread])
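
The sizing rule here is one worker per CPU minus one, leaving a core free for the executive thread and the interpreter itself. The count in isolation:

from multiprocessing import cpu_count

available_cpus = cpu_count() - 1  # reserve one core for coordination
print(available_cpus)
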
Example #13
 def _handle_alarm(self, signum, frame):
     # If the alarm is triggered, we're still in the communicate()
     # call, so use kill() to end the process
     self.timeout_flag = True
     try:
         self.current_process.kill()
     except OSError as ose:
         Logger.info("Kill failed. Sometimes the process exactly exits before we try to kill it... coward. Nothing to worry about.", ose)
Example #14
 def asan_combined_stdout_stderr(self, gdb_run=False):
     if not self.config.target_binary_asan:
         Logger.warning(
             "You didn't configure an ASAN enabled binary (recommended: with symbols), therefore skipping run with ASAN binary."
         )
     else:
         self._combined_stdout_stderr(self.config.target_binary_asan,
                                      gdb_run,
                                      self.config.output_prefix_asan)
Example #15
 def plain_combined_stdout_stderr(self, gdb_run=False):
     if not self.config.target_binary_plain:
         Logger.warning(
             "You didn't configure a plain binary (recommended: with symbols), therefore skipping run with plain binary."
         )
     else:
         self._combined_stdout_stderr(self.config.target_binary_plain,
                                      gdb_run,
                                      self.config.output_prefix_plain)
 def rename_all_files(self, extension=""):
     i = 1
     for path, _, files in os.walk(self.search_dir):
         for filename in files:
             formatstr = "%0"+str(self.config.max_digets+4)+"d"
             new_filename = formatstr % i
             shutil.move(os.path.join(path, filename), os.path.join(path, new_filename+extension))
             i = i+1
     Logger.info("Renamed all files starting from 1, last file was named", new_filename+extension)
Example #17
def survey_market():
    from yahoo_fin.stock_info import get_data

    Logger.console_log("Beginning survey of all possible stock symbols", 1)

    def concatenate_letters(one, two, three, four, five):
        if four is None:
            return one + two + three
        elif five is None:
            return one + two + three + four
        else:
            return one + two + three + four + five

    def log_valid_stock_symbol(symbol):
        log_location = os.path.join(os.getcwd(), "Data", "valid_stocks.txt")

        with open(log_location, 'a+') as stock_log:
            stock_log.write(symbol + '\n')

    def survey_symbol(symbol):
        try:
            try:
                data = get_data(symbol)
                #                Logger.console_log("Stock for symbol {} exists.".format(symbol), 1)
                log_valid_stock_symbol(symbol)
            except KeyError:
                #                Logger.console_log("Stock for symbol {} produced KeyError on adjclose.".format(symbol), 4)
                pass
            except ValueError:
                Logger.console_log(
                    "ValueError when attempting to retrieve data for stock symbol {}. Retrying..."
                    .format(symbol), Logger.LogStatus.FAIL)
                survey_symbol(symbol)
        except AssertionError:
            #            Logger.console_log("Stock for symbol {} does not exist.".format(symbol), 2)
            pass

    ascii_uppercase_and_none = list(ascii_uppercase)
    ascii_uppercase_and_none.append(None)

    for letter_one in ascii_uppercase[2:]:
        for letter_two in ascii_uppercase:
            for letter_three in ascii_uppercase:
                for letter_four in ascii_uppercase_and_none:
                    if letter_four is None:
                        three_letter_symbol = concatenate_letters(
                            letter_one, letter_two, letter_three, letter_four,
                            None)
                        survey_symbol(three_letter_symbol)
                    else:
                        for letter_five in ascii_uppercase_and_none:
                            four_or_five_letter_symbol = concatenate_letters(
                                letter_one, letter_two, letter_three,
                                letter_four, letter_five)
                            survey_symbol(four_or_five_letter_symbol)
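
The four nested letter loops (plus concatenate_letters) enumerate all 3-, 4- and 5-letter symbols. itertools.product expresses the same enumeration more directly, though it yields the symbols grouped by length rather than interleaved as above. A sketch:

from itertools import product
from string import ascii_uppercase

def all_symbols(min_len=3, max_len=5, first_letters=ascii_uppercase[2:]):
    # first_letters mirrors the ascii_uppercase[2:] restart point above.
    for length in range(min_len, max_len + 1):
        for first in first_letters:
            for rest in product(ascii_uppercase, repeat=length - 1):
                yield first + "".join(rest)
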
Example #18
 def _handle_alarm(self, signum, frame):
     # If the alarm is triggered, we're still in the communicate()
     # call, so use kill() to end the process
     self.timeout_flag = True
     try:
         self.current_process.kill()
     except OSError as ose:
         Logger.info(
             "Kill failed. Sometimes the process exactly exits before we try to kill it... coward. Nothing to worry about.",
             ose)
Example #19
 def rename_all_files(self, extension=""):
     i = 1
     for path, _, files in os.walk(self.search_dir):
         for filename in files:
             formatstr = "%0" + str(self.config.max_digets + 4) + "d"
             new_filename = formatstr % i
             shutil.move(os.path.join(path, filename),
                         os.path.join(path, new_filename + extension))
             i = i + 1
     Logger.info("Renamed all files starting from 1, last file was named",
                 new_filename + extension)
 def get_crash_eip(self, cmd):
     eip = None
     Logger.busy()
     gdb_output = self.executer.run_command(cmd, env=self.config.env, stdout=self.executer.pipe, stderr=self.executer.pipe)
     #Logger.debug("GDB output:", gdb_output)
     m = self.regular_expr.search(gdb_output)
     if m:
         eip = m.group(1)
         eip = int(eip, 16)
     #if not signal == SignalFinder.TIMEOUT_SIGNAL:
     #    Logger.error("Minimizing this file took too long, aborted")
     return eip
 def get_crash_eip(self, cmd):
     eip = None
     Logger.busy()
     gdb_output = self.executer.get_output_for_run(cmd, self.executer.pipe, self.config.run_timeout_tmin, env=self.config.env, stderr=self.executer.pipe)
     #Logger.debug("GDB output:", gdb_output)
     m = self.regular_expr.search(gdb_output)
     if m:
         eip = m.group(1)
         eip = int(eip, 16)
     #if not signal == SignalFinder.TIMEOUT_SIGNAL:
     #    Logger.error("Minimizing this file took too long, aborted")
     return eip
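
regular_expr is defined elsewhere; since group(1) is fed to int(..., 16), it evidently captures a hex address out of gdb's register dump. A hypothetical pattern and the same parsing step:

import re

regular_expr = re.compile(r"eip\s+(0x[0-9a-fA-F]+)")  # assumed pattern, not the tool's
gdb_output = "eip            0x41414141       0x41414141"

m = regular_expr.search(gdb_output)
if m:
    eip = int(m.group(1), 16)
    print(hex(eip))  # 0x41414141
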
Example #22
 def connection_test(self) -> bool:
     try:
         test_connection = self.create_connection()
         Logger.console_log(message="Connection test with " +
                            self.database + " located at " + self.host +
                            " was a success",
                            status=Logger.LogStatus.SUCCESS)
         return True
     except self.sql_type.value.Error as err:
         Logger.console_log(
             message="Unable to establish connection with database " +
             self.database + ". Error [" + str(err) + "] was returned",
             status=Logger.LogStatus.FAIL)
         return False
Example #23
def analyze_output_and_exploitability(config,
                                      signal_finder,
                                      uninteresting_signals,
                                      message_prefix=""):
    for signal, signal_folder in signal_finder.get_folder_paths_for_signals_if_exist(
            uninteresting_signals):
        skip = False
        for cat in ExploitableGdbPlugin.get_classifications():
            if os.path.exists(os.path.join(signal_folder, cat)):
                Logger.warning(
                    "Seems like there are already exploitability analysis results, skipping. If you want to rerun: rm -r %s"
                    % os.path.join(signal_folder, cat))
                skip = True
        if not skip:
            Logger.info(
                message_prefix,
                "Discover stdout, stderr, gdb and ASAN output (signal %s)" %
                signal)
            wildcard_for_run_output_files = os.path.join(
                signal_folder, "*" + config.run_extension)
            if glob.glob(wildcard_for_run_output_files):
                Logger.warning(
                    "Seems like there are already results from running the binaries, skipping. If you want to rerun: rm",
                    wildcard_for_run_output_files)
            else:
                of = OutputFinder(config, signal_folder)
                of.do_sane_output_runs()

            Logger.info(message_prefix,
                        "Analyzing exploitability (signal %s)" % signal)
            egp = ExploitableGdbPlugin(config, signal_folder)
            egp.divide_by_exploitability()
Example #24
    def run_command(self,
                    command,
                    timeout=None,
                    env={},
                    stdout=file("/dev/null"),
                    stderr=file("/dev/null")):
        #TODO: make stdout / stderr configurable
        if not timeout:
            timeout = self.config.run_timeout

        # Somewhat of a hack to support stdin without any architectural changes
        if command[-1].strip().startswith("<"):
            stdin = file(command[-1].strip()[1:].strip(),
                         'r')  # everything except the "<"
        else:
            stdin = None

        # env was accepted but never forwarded; pass it through (an empty dict falls back to inheriting ours)
        process = subprocess.Popen(command,
                                   stdin=stdin,
                                   shell=False,
                                   stdout=stdout,
                                   stderr=stderr,
                                   env=env or None)
        self.current_process = process
        signal.signal(signal.SIGALRM, self._handle_alarm)
        #We also had a problem that memory corruptions...
        signal.signal(signal.SIGTTOU, self._handle_sigttou)
        signal.alarm(timeout)
        self.timeout_flag = False
        self.sigttou_flag = False
        #TODO: get rid of magic number
        ret_signal = self.TIMEOUT_SIGNAL
        #blocking call:
        process.communicate()
        signal.alarm(0)
        if stdin:
            stdin.close()

        if self.timeout_flag:
            #If the alarm fired, _handle_alarm has set timeout_flag by the time we reach this line
            Logger.debug("Process was killed as it exceeded the time limit",
                         debug_level=3)
            ret_signal = self.TIMEOUT_SIGNAL
        elif self.sigttou_flag:
            Logger.debug(
                "Some memory corruption resulted in a SIGTTOU signal being thrown (usually stops process). We caught it.",
                debug_level=3)
            ret_signal = signal.SIGTTOU
        else:
            ret_signal = process.returncode
        return ret_signal
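
The stdin hack treats a trailing argument of the form "< path" as shell-style input redirection, even though the command runs with shell=False. The parsing in isolation:

command = ["./target", "--some-flag", "< /tmp/input.bin"]
last = command[-1].strip()
if last.startswith("<"):
    stdin_path = last[1:].strip()  # everything except the "<"
    print(stdin_path)              # /tmp/input.bin
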
 def rename_same_name_files(self):
     filenames = []
     for path, _, files in os.walk(self.search_dir):
         for filename in files:
             i = 1
             new_filename = filename
             name, extension = os.path.splitext(filename)
             while new_filename in filenames:
                 formatstr = "%0"+str(self.config.max_digets)+"d"
                 new_number = formatstr % i
                 new_filename = name + "_" + new_number + extension
                 i += 1
             if not new_filename == filename:
                 Logger.info("Found filename that is already taken, renaming", filename, "to", new_filename)
                 shutil.move(os.path.join(path, filename), os.path.join(path, new_filename))
             filenames.append(new_filename)
Example #26
 def survey_symbol(symbol):
     try:
         try:
             data = get_data(symbol)
             #                Logger.console_log("Stock for symbol {} exists.".format(symbol), 1)
             log_valid_stock_symbol(symbol)
         except KeyError:
             #                Logger.console_log("Stock for symbol {} produced KeyError on adjclose.".format(symbol), 4)
             pass
         except ValueError:
             Logger.console_log(
                 "ValueError when attempting to retrieve data for stock symbol {}. Retrying..."
                 .format(symbol), Logger.LogStatus.FAIL)
             survey_symbol(symbol)
     except AssertionError:
         #            Logger.console_log("Stock for symbol {} does not exist.".format(symbol), 2)
         pass
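
Retrying by calling survey_symbol recursively inside the ValueError handler grows the call stack on every transient failure. An iterative shape with a bounded retry count is safer (the bound is an addition, not in the original):

def survey_symbol(symbol, max_retries=5):
    for _ in range(max_retries):
        try:
            get_data(symbol)                  # yahoo_fin.stock_info.get_data
            log_valid_stock_symbol(symbol)
            return
        except (KeyError, AssertionError):
            return                            # symbol missing or has no adjclose
        except ValueError:
            continue                          # transient failure: retry
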
Example #27
 def _handle_sigttou(self, signum, frame):
     #Had some issues where, when memory corruptions occurred in a subprocess
     #(no matter if shielded by the multiprocessing and subprocess modules),
     #a SIGTTOU was sent to the entire Python main process.
     #According to https://en.wikipedia.org/wiki/SIGTTOU this
     #results in the process being stopped (and it looks like SIGSTOP on the command line):
     #[1]+  Stopped                 ./AflCrashAnalyzer.py
     #Of course we don't want that. Debugging was hard, but we
     #realized what happened after this program was stopped:
     #$ echo $?
     #150
     #So that's SIGTTOU, on Linux at least.
     #This handler prevents the process from stopping.
     self.sigttou_flag = True
     try:
         self.current_process.kill()
     except OSError as ose:
         Logger.info("Kill failed. Sometimes the process exactly exits before we try to kill it... coward. Nothing to worry about.", ose)
Example #28
 def rename_same_name_files(self):
     filenames = []
     for path, _, files in os.walk(self.search_dir):
         for filename in files:
             i = 1
             new_filename = filename
             name, extension = os.path.splitext(filename)
             while new_filename in filenames:
                 formatstr = "%0" + str(self.config.max_digets) + "d"
                 new_number = formatstr % i
                 new_filename = name + "_" + new_number + extension
                 i += 1
             if not new_filename == filename:
                 Logger.info(
                     "Found filename that is already taken, renaming",
                     filename, "to", new_filename)
                 shutil.move(os.path.join(path, filename),
                             os.path.join(path, new_filename))
             filenames.append(new_filename)
Example #29
    def survey_ticker(thread: Thread, ticker: str) -> bool:
        """
        Uses Scraper.get_current_ticker_data to survey the given ticker string.  The result is returned.
        :param thread: A Scraper.WorkerThread responsible for carrying out the survey.
        :param ticker: Stock ticker.
        :return: Boolean denoting whether or not the ticker was successfully scraped during the survey.
        """
        try:
            data = Scraper.WebInterface.get_current_ticker_data(ticker=ticker)

            if None in data:
                return False
            else:
                Logger.console_log(
                    "Successful scraping of ticker " + ticker + " from " +
                    str(thread), Logger.LogStatus.SUCCESS)
                return True
        except Exception:
            return False
Example #30
 def _handle_sigttou(self, signum, frame):
     #Had some issues where, when memory corruptions occurred in a subprocess
     #(no matter if shielded by the multiprocessing and subprocess modules),
     #a SIGTTOU was sent to the entire Python main process.
     #According to https://en.wikipedia.org/wiki/SIGTTOU this
     #results in the process being stopped (and it looks like SIGSTOP on the command line):
     #[1]+  Stopped                 ./AflCrashAnalyzer.py
     #Of course we don't want that. Debugging was hard, but we
     #realized what happened after this program was stopped:
     #$ echo $?
     #150
     #So that's SIGTTOU, on Linux at least.
     #This handler prevents the process from stopping.
     self.sigttou_flag = True
     try:
         self.current_process.kill()
     except OSError as ose:
         Logger.info(
             "Kill failed. Sometimes the process exactly exits before we try to kill it... coward. Nothing to worry about.",
             ose)
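
All this handler needs in order to work is to be registered before the blocking call, which run_command does; the registration itself is one line. Minimal demonstration (POSIX-only):

import signal

def _handle_sigttou(signum, frame):
    pass  # merely catching SIGTTOU keeps the shell from stopping the process

signal.signal(signal.SIGTTOU, _handle_sigttou)
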
Example #31
 def do_sane_output_runs(self):
     if self.output_dir is not None and not os.path.exists(self.output_dir):
         os.mkdir(self.output_dir)
     if self.config.target_binary_plain is None and self.config.target_binary_asan is None:
         Logger.warning("You didn't specify any non-instrumented binary, running tests with instrumented binaries")
         self.instrumented_combined_stdout_stderr()
         self.instrumented_combined_stdout_stderr(gdb_run=True)
     else:
         Logger.info("Plain run")
         self.plain_combined_stdout_stderr()
         Logger.info("Plain gdb run")
         self.plain_combined_stdout_stderr(gdb_run=True)
         Logger.info("ASAN run")
         self.asan_combined_stdout_stderr()
Example #32
 def run_command(self,
                 command,
                 timeout=None,
                 env={},
                 stdout=file("/dev/null"),
                 stderr=file("/dev/null")):
     #TODO: make stdout / stderr configurable
     if not timeout:
         timeout = self.config.run_timeout
     #env was accepted but never forwarded; pass it through (an empty dict falls back to inheriting ours)
     process = subprocess.Popen(command,
                                stdin=None,
                                shell=False,
                                stdout=stdout,
                                stderr=stderr,
                                env=env or None)
     self.current_process = process
     signal.signal(signal.SIGALRM, self._handle_alarm)
     #We also had a problem that memory corruptions...
     signal.signal(signal.SIGTTOU, self._handle_sigttou)
     signal.alarm(timeout)
     self.timeout_flag = False
     self.sigttou_flag = False
     #TODO: get rid of magic number
     ret_signal = self.TIMEOUT_SIGNAL
     #blocking call:
     process.communicate()
     signal.alarm(0)
     #If the alarm fired, _handle_alarm has set timeout_flag by the time we reach this line
     if self.timeout_flag:
         Logger.debug("Process was killed as it exceeded the time limit",
                      debug_level=3)
         ret_signal = self.TIMEOUT_SIGNAL
     elif self.sigttou_flag:
         Logger.debug(
             "Some memory corruption resulted in a SIGTTOU signal being thrown (usually stops process). We caught it.",
             debug_level=3)
         ret_signal = signal.SIGTTOU
     else:
         ret_signal = process.returncode
     return ret_signal
Example #33
 def notify_restaurant(self):
     if self.restaurant.has_gps_printer:
         # Send GPS push
         return True
     elif self.restaurant.has_merchant_app:
         # Send GCM Push
         return True
     else:
         try:
             for contact in self.restaurant.numbers.all():
                 if contact.number_type == NUMBER_TYPE[1][0]:
                     s = SMS()
                     s.send(mobile_number=contact.number, sms_text=self.order_text())
                     OrderLog.objects.create(order=self, message="Restaurant notified via SMS, to"
                                                                 " be followed up by a call",
                                             owner_type='system', owner_id=self.agent.id)
                     return True
         except ObjectDoesNotExist:
             Logger.log_ticket(order=self, message=dict(STOCK_MESSAGES)['failed_to_notify'],
                               owner=self.agent, owner_type='system', ticket_type=TICKET_TYPE[1][0])
             return False
Example #34
 def minimize_testcases(self):
     if self.output_dir is not None and not os.path.exists(self.output_dir):
         os.mkdir(self.output_dir)
     executer = Executer(self.config)
     for path, _, files in os.walk(self.search_dir):
         for filename in files:
             if filename.endswith(self.config.run_extension):
                 continue
             Logger.info("Minimizing", filename)
             filepath = os.path.join(path, filename)
             cmd = self.config.get_afl_tmin_command_line(filepath, os.path.join(self.output_dir, filename))
             Logger.debug("Executing:", cmd)
             Logger.busy()
             signal = executer.run_command(cmd, timeout=self.config.run_timeout_tmin, env=self.config.env)
             if signal == SignalFinder.TIMEOUT_SIGNAL:
                 Logger.error("Minimizing this file took too long, aborted")
 def _combined_stdout_stderr(self, binary, gdb_run, hint):
     executer = Executer(self.config)
     for path, _, files in os.walk(self.search_dir):
         for filename in files:
             if filename.endswith(self.config.run_extension):
                 continue
             filepath = os.path.join(path, filename)
             if gdb_run:
                 command = self.config.get_gdb_command_line(binary, filepath)
                 new_filename = filename+"-"+os.path.basename(binary)+hint+self.config.gdb_prefix
             else:
                 command = self.config.get_command_line(binary, filepath)
                 new_filename = filename+"-"+os.path.basename(binary)+hint
             Logger.debug("Looking for stdout/stderr output:", command, debug_level=4)
             if self.output_dir:
                 output_file_name = get_new_output_file_name(self.output_dir, new_filename, self.config.run_extension, self.config.max_digets)
                 new_filepath = os.path.join(self.output_dir, output_file_name)
             else:
                 output_file_name = get_new_output_file_name(path, new_filename, self.config.run_extension, self.config.max_digets)
                 new_filepath = os.path.join(path, output_file_name)
             fp = file(new_filepath, "w")
             executer.get_output_for_run(command, fp, env=self.config.env)
             fp.close()
Example #36
 def do_sane_output_runs(self):
     if self.output_dir is not None and not os.path.exists(self.output_dir):
         os.mkdir(self.output_dir)
     if self.config.target_binary_plain is None and self.config.target_binary_asan is None:
         Logger.warning(
             "You didn't specify any non-instrumented binary, running tests with instrumented binaries"
         )
         self.instrumented_combined_stdout_stderr()
         self.instrumented_combined_stdout_stderr(gdb_run=True)
     else:
         Logger.info("Plain run")
         self.plain_combined_stdout_stderr()
         Logger.info("Plain gdb run")
         self.plain_combined_stdout_stderr(gdb_run=True)
         Logger.info("ASAN run")
         self.asan_combined_stdout_stderr()
Example #37
 def _combined_stdout_stderr(self, binary, gdb_run, hint):
     executer = Executer(self.config)
     for path, _, files in os.walk(self.search_dir):
         for filename in files:
             if filename.endswith(self.config.run_extension):
                 continue
             filepath = os.path.join(path, filename)
             if gdb_run:
                 command = self.config.get_gdb_command_line(
                     binary, filepath)
                 new_filename = filename + "-" + os.path.basename(
                     binary) + hint + self.config.gdb_prefix
             else:
                 command = self.config.get_command_line(binary, filepath)
                 new_filename = filename + "-" + os.path.basename(
                     binary) + hint
             Logger.debug("Looking for stdout/stderr output:",
                          command,
                          debug_level=4)
             if self.output_dir:
                 output_file_name = get_new_output_file_name(
                     self.output_dir, new_filename,
                     self.config.run_extension, self.config.max_digets)
                 new_filepath = os.path.join(self.output_dir,
                                             output_file_name)
             else:
                 output_file_name = get_new_output_file_name(
                     path, new_filename, self.config.run_extension,
                     self.config.max_digets)
                 new_filepath = os.path.join(path, output_file_name)
             fp = file(new_filepath, "w")
             Logger.busy()
             executer.run_command(command,
                                  env=self.config.env,
                                  stdout=fp,
                                  stderr=fp)
             fp.close()
Example #38
    def should_call(self):
        ticket_id, numbers = None, None

        if self.restaurant.has_to_be_called:
            numbers = [c.number for c in self.restaurant.numbers.all() if c.number_type == NUMBER_TYPE[2][0]]
            ticket_id = Logger.log_ticket(order=self, message="System ticket created to place order via "
                                                  "outbound call with restaurant", owner=self.agent,
                              owner_type="Agent", ticket_type='place_order')
        #else:
        #self.notify_restaurant()

        return {
                "order": self,
                "estimated_delivery_time": 20, #abs((self.expected_delivery_time - datetime.now()).minutes),
                "contact_numbers": numbers,
                "ticket_id": ticket_id
                }
def analyze_output_and_exploitability(config, signal_finder, uninteresting_signals, message_prefix=""):
    for signal, signal_folder in signal_finder.get_folder_paths_for_signals_if_exist(uninteresting_signals):
        skip = False
        for cat in ExploitableGdbPlugin.get_classifications():
            if os.path.exists(os.path.join(signal_folder, cat)):
                Logger.warning("Seems like there are already exploitability analysis results, skipping. If you want to rerun: rm -r %s" % os.path.join(signal_folder, cat))
                skip = True
        if not skip:
            Logger.info(message_prefix, "Discover stdout, stderr, gdb and ASAN output (signal %s)" % signal)
            wildcard_for_run_output_files = os.path.join(signal_folder, "*" + config.run_extension)
            if glob.glob(wildcard_for_run_output_files):
                Logger.warning("Seems like there are already results from running the binaries, skipping. If you want to rerun: rm", wildcard_for_run_output_files)
            else:
                of = OutputFinder(config, signal_folder)
                of.do_sane_output_runs()
            
            Logger.info(message_prefix, "Analyzing exploitability (signal %s)" % signal)
            egp = ExploitableGdbPlugin(config, signal_folder)
            egp.divide_by_exploitability()
Example #40
 def minimize_testcases(self):
     if self.output_dir is not None and not os.path.exists(self.output_dir):
         os.mkdir(self.output_dir)
     executer = Executer(self.config)
     for path, _, files in os.walk(self.search_dir):
         for filename in files:
             if filename.endswith(self.config.run_extension):
                 continue
             Logger.info("Minimizing", filename)
             filepath = os.path.join(path, filename)
             cmd = self.config.get_afl_tmin_command_line(
                 filepath, os.path.join(self.output_dir, filename))
             Logger.debug("Executing:", cmd)
             Logger.busy()
             signal = executer.run_command(
                 cmd,
                 timeout=self.config.run_timeout_tmin,
                 env=self.config.env)
             if signal == SignalFinder.TIMEOUT_SIGNAL:
                 Logger.error("Minimizing this file took too long, aborted")
Example #41
    def menu(self) -> int:
        Logger.console_log(message="==== MENU ====",
                           status=Logger.LogStatus.COMMUNICATION)
        Logger.console_log(message="1) send a message",
                           status=Logger.LogStatus.COMMUNICATION)
        Logger.console_log(message="0) end connection",
                           status=Logger.LogStatus.COMMUNICATION)

        response = int(input("What would you like to do: "))

        if response == 1:
            self.prompt_for_message()
        elif response == 0:
            self.end_connection()
        else:
            Logger.console_log(
                message="Invalid menu selection.  Please try again...",
                status=Logger.LogStatus.FAIL)

        return response
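
int(input(...)) raises ValueError on non-numeric input before the menu ever checks the value. A defensive prompt loop (a sketch, not the class's actual code):

def prompt_choice(prompt="What would you like to do: "):
    while True:
        raw = input(prompt)
        try:
            return int(raw)
        except ValueError:
            print("Please enter a number.")
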
Example #42
 def __init__(self, config, search_dir=None, output_dir=None):
     self.config = config
     self.search_dir = search_dir
     if self.search_dir is None:
         self.search_dir = self.config.original_crashes_directory
     self.output_dir = output_dir
     if self.output_dir is None:
         self.output_dir = os.path.join(self.config.output_dir, "per-signal")
     if config.target_binary_plain:
         Logger.info("Using", self.config.target_binary_plain, "for signal run")
         self.binary_to_use = self.config.target_binary_plain
     elif config.target_binary_asan:
         Logger.info("Using", self.config.target_binary_asan, "for signal run")
         self.binary_to_use = self.config.target_binary_asan
     else:
         Logger.info("Using", self.config.target_binary_instrumented, "for signal run")
         self.binary_to_use = self.config.target_binary_instrumented
 def __init__(self, config, search_dir=None, output_dir=None):
     self.config = config
     self.search_dir = search_dir
     if self.search_dir is None:
         self.search_dir = self.config.original_crashes_directory
     self.output_dir = output_dir
     if self.output_dir is None:
         self.output_dir = self.config.default_signal_directory
     if config.target_binary_plain:
         Logger.debug("Using", self.config.target_binary_plain, "for signal run")
         self.binary_to_use = self.config.target_binary_plain
     elif config.target_binary_asan:
         Logger.debug("Using", self.config.target_binary_asan, "for signal run")
         self.binary_to_use = self.config.target_binary_asan
     else:
         Logger.debug("Using", self.config.target_binary_instrumented, "for signal run")
         self.binary_to_use = self.config.target_binary_instrumented
Example #44
    def connect(self, ip_address: str, port: int) -> None:
        port_description = (ip_address, port)

        Logger.console_log(
            message="Attempting to bind to port with description: " +
            str(port_description),
            status=Logger.LogStatus.EMPHASIS)

        # Bind to the port description
        try:
            self.sock.connect(port_description)
            Logger.console_log(
                message=
                "Successfully created connection with server @ address {} through port {}"
                .format(ip_address, port),
                status=Logger.LogStatus.SUCCESS)
        except Exception as err:
            Logger.console_log(
                message="Unable to send message due to error: " + str(err),
                status=Logger.LogStatus.FAIL)
Example #45
 def __init__(self, config, search_dir=None, output_dir=None):
     self.config = config
     self.search_dir = search_dir
     if self.search_dir is None:
         self.search_dir = self.config.original_crashes_directory
     self.output_dir = output_dir
     if self.output_dir is None:
         self.output_dir = self.config.default_signal_directory
     if config.target_binary_plain:
         Logger.debug("Using", self.config.target_binary_plain,
                      "for signal run")
         self.binary_to_use = self.config.target_binary_plain
     elif config.target_binary_asan:
         Logger.debug("Using", self.config.target_binary_asan,
                      "for signal run")
         self.binary_to_use = self.config.target_binary_asan
     else:
         Logger.debug("Using", self.config.target_binary_instrumented,
                      "for signal run")
         self.binary_to_use = self.config.target_binary_instrumented
    def connect(self, ip_address: str, port: int) -> None:
        port_description = (ip_address, port)

        try:
            # Bind to the port description
            self.sock.bind(port_description)

            # Sets up and start TCP Listener
            self.sock.listen(2)

            # Wait for TCP connection to arrive [Returns client]
            Logger.console_log(message="Waiting for client...",
                               status=Logger.LogStatus.EMPHASIS)
            self.client, client_address = self.sock.accept()
            Logger.console_log(
                message="Creating connection with client from " +
                str(client_address),
                status=Logger.LogStatus.SUCCESS)
        except Exception as err:
            Logger.console_log(
                message="Unable to send message due to error: " + str(err),
                status=Logger.LogStatus.FAIL)
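
Together the two connect() methods above are the two ends of a classic TCP pair: the client's sock.connect() on one side, the server's bind/listen/accept on the other. The bare-bones exchange, stripped of the logging (standalone sketch):

import socket

# Server side: bind, listen, accept one client.
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind(("127.0.0.1", 5050))
server.listen(2)
# client, client_address = server.accept()   # blocks until a client connects

# Client side (run in another process):
# client_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# client_sock.connect(("127.0.0.1", 5050))
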
def main():
    #Read the README before you start.
    
    Logger.info("Setting up configuration")

    gdb_script_64bit = r"""printf "[+] Disabling verbose and complaints\n"
set verbose off
set complaints 0
printf "[+] Backtrace:\n"
bt
printf "[+] info reg:\n"
info reg
printf "[+] exploitable:\n"
exploitable
printf "[+] disassemble $rip, $rip+16:\n"
disassemble $rip, $rip+16
"""
    gdb_script_32bit = r"""printf "[+] Disabling verbose and complaints\n"
set verbose off
set complaints 0
printf "[+] Backtrace:\n"
bt
printf "[+] info reg:\n"
info reg
printf "[+] exploitable:\n"
exploitable
printf "[+] disassemble $eip, $eip+16:\n"
disassemble $eip, $eip+16
"""
    where_this_python_script_lives = os.path.dirname(os.path.realpath(__file__))
    
    gdb_command = "gdb"
    gdb_command_osx = "/opt/local/bin/gdb-apple"
    
    config_gm = CrashAnalysisConfig(where_this_python_script_lives, 
                            target_binary_instrumented=where_this_python_script_lives+"/test-cases/gm/graphicsmagick-afl/utilities/gm", 
                            args_before="identify", 
                            args_after="", 
                            target_binary_plain=where_this_python_script_lives+"/test-cases/gm/graphicsmagick-plain/utilities/gm", 
                            target_binary_asan=where_this_python_script_lives+"/test-cases/gm/graphicsmagick-asan/utilities/gm",
                            env={"ASAN_SYMBOLIZER_PATH": "/usr/bin/llvm-symbolizer-3.4", "ASAN_OPTIONS": "symbolize=1:redzone=512:quarantine_size=512Mb:exitcode=1"},
                            crash_dir=where_this_python_script_lives+"/test-cases/gm/crashes",
                            gdb_script=gdb_script_32bit,
                            gdb_binary=gdb_command
                            )
    
#    config_ffmpeg = CrashAnalysisConfig(where_this_python_script_lives, 
#                        target_binary_instrumented=where_this_python_script_lives+"/test-cases/ffmpeg/ffmpeg-afl/ffmpeg", 
#                        args_before="-i", 
#                        args_after="-loglevel quiet", 
#                        target_binary_plain=where_this_python_script_lives+"/test-cases/ffmpeg/ffmpeg-plain/ffmpeg", 
##                        target_binary_asan=where_this_python_script_lives+"/test-cases/ffmpeg/ffmpeg-asan/ffmpeg",
#                        env={"ASAN_SYMBOLIZER_PATH": "/usr/bin/llvm-symbolizer-3.4", "ASAN_OPTIONS": "symbolize=1:redzone=512:quarantine_size=512Mb:exitcode=1"},
#                        crash_dir=where_this_python_script_lives+"/test-cases/ffmpeg/crashes",
#                        gdb_script=gdb_script_32bit,
#                        gdb_binary=gdb_command
#                        )

    #
    Logger.info("Input crashes directory operations")
    #
    
    Logger.info("Removing README.txt files")
    fdf = FileDuplicateFinder(config_gm)
    fdf.remove_readmes(config_gm.original_crashes_directory)
    
    Logger.info("Removing duplicates from original crashes folder (same file size + MD5)")
    fdf.delete_duplicates_recursively(config_gm.original_crashes_directory)
    
    Logger.info("Renaming files from original crashes folder so that the filename is a unique identifier. This allows us to copy all crash files into one directory (eg. for tmin output) if necessary, without name collisions")
    fdf.rename_same_name_files(config_gm.original_crashes_directory)
    
    #
    Logger.info("Finding signals for all crash files")
    #
    sf = SignalFinder(config_gm)
    if os.path.exists(sf.output_dir):
        Logger.warning("Seems like all crashes were already categorized by signal, skipping. Remove output directory or remove this folder if you want to rerun:", sf.output_dir)
    else:
        Logger.info("Dividing files to output folder according to their signal")
        os.mkdir(sf.output_dir)
        sf.divide_by_signal(0)
        
    
    #
    Logger.info("Running binaries to discover stdout/stderr, gdb and ASAN output for crash files that result in interesting signals")
    #
    #signals, negative on OSX, 129 and above for Linux. No harm if we go on with all of them.
    signals = (-4, -6, -11, 132, 134, 139)
    get_output_for_signals(config_gm, sf, signals)

    
    #
    Logger.info("Minimizing input files that result in interesting signals (and removing duplicates from the results)")
    #
    im = InputMinimizer(config_gm)
    if os.path.exists(im.output_dir):
        Logger.warning("Seems like minimized crashes were already categorized by signal, skipping. Remove output directory or remove this folder if you want to rerun:", im.output_dir)
    else:
        os.mkdir(im.output_dir)
        for signal in signals:
            Logger.info("Processing minimized folder for crash-minimizer for signal %i" % signal)
            signal_folder = sf.get_folder_path_for_signal(signal)
            im = InputMinimizer(config_gm, signal_folder)
            if os.path.exists(signal_folder):
                Logger.info("Minimizing inputs resulting in signal %i" % signal)
                im.minimize_testcases()
            else:
                Logger.warning("Seems that none of the crashes results in a %i signal" % signal)
        Logger.info("Removing duplicates from minimized tests")
        fdf.delete_duplicates_recursively(im.output_dir)
        
    #
    Logger.info("Finding signals for minimized crash files")
    #
    sf_minimized_crashes = SignalFinder(config_gm, im.output_dir, os.path.join(config_gm.output_dir, "minimized-inputs-per-signal"))
    if os.path.exists(sf_minimized_crashes.output_dir):
        Logger.warning("Seems like crashes were already categorized by signal, skipping.")
        Logger.warning("Remove output directory or remove this folder if you want to rerun:", sf_minimized_crashes.output_dir)
    else:
        os.mkdir(sf_minimized_crashes.output_dir)
        Logger.info("Dividing files to output folder according to their signal")
        sf_minimized_crashes.divide_by_signal(0)
        
    
    #
    Logger.info("Running binaries to discover stdout/stderr, gdb and ASAN output for minimized input files that result in interesting signals")
    #
    get_output_for_signals(config_gm, sf_minimized_crashes, signals)
    def sanity_check(self):
        ##
        # Sanity checks and initial setup
        ##
        if not os.access(self.target_binary_instrumented, os.R_OK):
            Logger.fatal("AFL target binary not accessible:", self.target_binary_instrumented+". Did you configure the CrashAnalysisConfig class?")
        if not self.target_binary_plain is None and not os.access(self.target_binary_plain, os.R_OK):
            Logger.fatal("Target binary not accessible:", self.target_binary_plain+". Did you configure the CrashAnalysisConfig class?")
        if not self.target_binary_asan is None and not os.access(self.target_binary_asan, os.R_OK):
            Logger.fatal("ASAN target binary not accessible:", self.target_binary_asan+". Did you configure the CrashAnalysisConfig class?")
        if not os.access(self.main_dir, os.F_OK):
            Logger.fatal("Your main_dir doesn't exist:", self.main_dir)
        if not os.access(self.original_crashes_directory, os.F_OK):
            Logger.fatal("Your original_crashes_directory doesn't exist:", self.original_crashes_directory)

        if os.path.exists(self.output_dir):
            Logger.warning("Your output directory already exists, did you want to move it before running?", self.output_dir)
        else:
            Logger.info("Output folder will be:", self.output_dir)
            os.mkdir(self.output_dir)
        if not os.path.exists(self.tmp_dir):
            os.mkdir(self.tmp_dir)
        self.prepare_gdb_script()
Example #49
 def plain_combined_stdout_stderr(self, gdb_run=False):
     if not self.config.target_binary_plain:
         Logger.warning("You didn't configure a plain binary (recommended: with symbols), therefore skipping run with plain binary.")
     else:
         self._combined_stdout_stderr(self.config.target_binary_plain, gdb_run, self.config.output_prefix_plain)
Example #50
 def asan_combined_stdout_stderr(self, gdb_run=False):
     if not self.config.target_binary_asan:
         Logger.warning("You didn't configure an ASAN enabled binary (recommended: with symbols), therefore skipping run with ASAN binary.")
     else:
         self._combined_stdout_stderr(self.config.target_binary_asan, gdb_run, self.config.output_prefix_asan)
def main():
    #Read the README before you start.
    
    Logger.info("Setting up configuration")

    gdb_script_64bit = r"""printf "[+] Disabling verbose and complaints\n"
set verbose off
set complaints 0
printf "[+] Backtrace:\n"
bt
printf "[+] info reg:\n"
info reg
printf "[+] exploitable:\n"
exploitable
printf "[+] disassemble $rip, $rip+16:\n"
disassemble $rip, $rip+16
"""
    gdb_script_32bit = r"""printf "[+] Disabling verbose and complaints\n"
set verbose off
set complaints 0
printf "[+] Backtrace:\n"
bt
printf "[+] info reg:\n"
info reg
printf "[+] exploitable:\n"
exploitable
printf "[+] disassemble $eip, $eip+16:\n"
disassemble $eip, $eip+16
"""
    where_this_python_script_lives = os.path.dirname(os.path.realpath(__file__))
    
    gdb_command = "gdb"
    gdb_command_osx = "/opt/local/bin/gdb-apple"
    
    config_gm = CrashAnalysisConfig(where_this_python_script_lives, 
                            target_binary_instrumented=where_this_python_script_lives+"/test-cases/gm/graphicsmagick-afl/utilities/gm", 
                            args_before="identify", 
                            args_after="", 
                            target_binary_plain=where_this_python_script_lives+"/test-cases/gm/graphicsmagick-plain/utilities/gm", 
                            target_binary_asan=where_this_python_script_lives+"/test-cases/gm/graphicsmagick-asan/utilities/gm",
                            env={"ASAN_SYMBOLIZER_PATH": "/usr/bin/llvm-symbolizer-3.4", "ASAN_OPTIONS": "symbolize=1:redzone=512:quarantine_size=512Mb:exitcode=1"},
                            crash_dir=where_this_python_script_lives+"/test-cases/gm/crashes",
                            gdb_script=gdb_script_32bit,
                            gdb_binary=gdb_command
                            )
    
#    config_ffmpeg = CrashAnalysisConfig(where_this_python_script_lives, 
#                        target_binary_instrumented=where_this_python_script_lives+"/test-cases/ffmpeg/ffmpeg-afl/ffmpeg", 
#                        args_before="-i", 
#                        args_after="-loglevel quiet", 
#                        target_binary_plain=where_this_python_script_lives+"/test-cases/ffmpeg/ffmpeg-plain/ffmpeg", 
##                        target_binary_asan=where_this_python_script_lives+"/test-cases/ffmpeg/ffmpeg-asan/ffmpeg",
#                        env={"ASAN_SYMBOLIZER_PATH": "/usr/bin/llvm-symbolizer-3.4", "ASAN_OPTIONS": "symbolize=1:redzone=512:quarantine_size=512Mb:exitcode=1"},
#                        crash_dir=where_this_python_script_lives+"/test-cases/ffmpeg/crashes",
#                        gdb_script=gdb_script_32bit,
#                        gdb_binary=gdb_command
#                        )

    #
    Logger.info("Input crashes directory operations")
    #
    
    Logger.info("Removing README.txt files")
    fdf = FileDuplicateFinder(config_gm, config_gm.original_crashes_directory)
    fdf.remove_readmes()
    
    Logger.info("Removing duplicates from original crashes folder (same file size + MD5)")
    fdf.delete_duplicates_recursively()
    
    Logger.info("Renaming files from original crashes folder so that the filename is a unique identifier. This allows us to copy all crash files into one directory (eg. for tmin output) if necessary, without name collisions")
    fdf.rename_same_name_files()
    
    #
    Logger.info("Finding interesting signals (all crashes)")
    #
    sf_all_crashes = SignalFinder(config_gm)
    if os.path.exists(config_gm.default_signal_directory):
        Logger.warning("Seems like all crashes were already categorized by signal, skipping. If you want to rerun: rm -r", config_gm.default_signal_directory)
    else:
        Logger.debug("Dividing files to output folder according to their signal")
        sf_all_crashes.divide_by_signal()
    
    #Interesting signals: negative on OSX, 129 and above on Linux
    #Uninteresting signals: we usually don't care about signals 0, 1, 2, etc. up to 128
    uninteresting_signals = range(0, 129)
    
    analyze_output_and_exploitability(config_gm, sf_all_crashes, uninteresting_signals, message_prefix="Interesting signals /")
        
    Logger.info("Interesting signals / Minimizing input (afl-tmin)")
    if os.path.exists(config_gm.default_minimized_crashes_directory):
        Logger.warning("Seems like crashes were already minimized, skipping. If you want to rerun: rm -r", config_gm.default_minimized_crashes_directory)
    else:
        for signal, signal_folder in sf_all_crashes.get_folder_paths_for_signals_if_exist(uninteresting_signals):
            Logger.debug("Minimizing inputs resulting in signal %i" % signal)
            im = InputMinimizer(config_gm, signal_folder)
            im.minimize_testcases()
        
        Logger.info("Interesting signals / Minimized inputs / Deduplication")
        fdf_minimized = FileDuplicateFinder(config_gm, config_gm.default_minimized_crashes_directory)
        fdf_minimized.delete_duplicates_recursively()
        
    #
    Logger.info("Interesting signals / Minimized inputs / Finding interesting signals")
    #
    sf_minimized_crashes = SignalFinder(config_gm, config_gm.default_minimized_crashes_directory, os.path.join(config_gm.output_dir, "minimized-per-signal"))
    if os.path.exists(sf_minimized_crashes.output_dir):
        Logger.warning("Seems like minimized crashes were already categorized by signal, skipping. If you want to rerun: rm -r", sf_minimized_crashes.output_dir)
    else:
        os.mkdir(sf_minimized_crashes.output_dir)
        Logger.info("Dividing files to output folder according to their signal")
        sf_minimized_crashes.divide_by_signal(0)
    
    
    analyze_output_and_exploitability(config_gm, sf_minimized_crashes, uninteresting_signals, message_prefix="Interesting signals / Minimized inputs /")
    
    
#     # If you are in the mood to waste a little CPU time, run this
#     Logger.info("Found interesting_signals (interesting interesting_signals) / Minimized inputs (interested interesting_signals) / Feeling lucky auto exploitation")
#     #
#     fle = FeelingLuckyExploiter(config_gm, sf_minimized_crashes.output_dir)
#     #os.mkdir(fle.output_dir)
#     fle.run_forest_run()
    
#TODO: develop
#- peruvian were-rabbit (afl-fuzz -C crash exploration mode)?
#- exploitable script, something along the lines of: less `grep -l 'Exploitability Classification: EXPLOITABLE' output/per-signal/*/*gdb*`

    cleanup(config_gm)
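
main() finishes with cleanup(config_gm), another helper that is not part of this listing. A plausible minimal version — assuming the config exposes a tmp_dir attribute holding this run's temporary files, which is purely a guess:

def cleanup(config):
    #Hypothetical sketch: remove leftover temporary files; the real helper may do more.
    for filename in os.listdir(config.tmp_dir):
        filepath = os.path.join(config.tmp_dir, filename)
        if os.path.isfile(filepath):
            os.remove(filepath)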
def get_output_for_signals(config, signal_finder, signals):
    wildcard_for_run_output_files = signal_finder.output_dir + "/*/*" + config.run_extension
    if glob.glob(wildcard_for_run_output_files):
        Logger.warning("Seems like there are already results from running the binaries, skipping. Remove output directory or run this command if you want to rerun:")
        Logger.warning("rm ", wildcard_for_run_output_files)
    else:
        Logger.info("We analyze only a couple of signals like SIGABRT, SIGSEGV, but do not care about the rest. Going for", signals)
        for signal in signals:
            Logger.info("Processing folder for output generation for signal %i" % signal)
            signal_folder = signal_finder.get_folder_path_for_signal(signal)
            if os.path.exists(signal_folder):
                Logger.info("Getting stdout and stderr of runs which result in %i. Additionally running with gdb script." % signal)
                of = OutputFinder(config, signal_folder)
                if config.target_binary_plain is None and config.target_binary_asan is None:
                    Logger.warning("You didn't specify any non-instrumented binary, running tests with instrumented binaries")
                    of.instrumented_combined_stdout_stderr()
                    of.instrumented_combined_stdout_stderr(gdb_run=True)
                else:
                    Logger.info("Plain run for", signal_folder)
                    of.plain_combined_stdout_stderr()
                    Logger.info("Plain gdb run for", signal_folder)
                    of.plain_combined_stdout_stderr(gdb_run=True)
                    Logger.info("ASAN run for", signal_folder)
                    of.asan_combined_stdout_stderr()
                    #Logger.info("ASAN gdb run for", signal_folder)
                    #of.asan_combined_stdout_stderr(gdb_run=True)
            else:
                Logger.warning("Seems that none of the crashes results in a %i signal" % signal)
Example #53
    def screenshotByInterface(self):
        logger = Logger(self.driver)
        logger.takeScreenshot()

        time.sleep(2)
        self.driver.quit()
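
Note that the Logger in this snippet is instantiated with a driver and is unrelated to the static Logger used in the crash-analysis examples. A hypothetical stand-in for its interface, assuming a Selenium-style WebDriver (save_screenshot() is the standard WebDriver call; everything else here is a guess):

import time

class Logger(object):
    #Hypothetical stand-in: wraps a WebDriver and saves timestamped screenshots.
    def __init__(self, driver):
        self.driver = driver

    def takeScreenshot(self):
        #save_screenshot() writes a PNG of the current browser window
        self.driver.save_screenshot("screenshot_%d.png" % int(time.time()))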
def main():
    #Read the README before you start.
    
    Logger.info("Setting up configuration")

    gdb_script_64bit = r"""printf "[+] Disabling verbose and complaints\n"
set verbose off
set complaints 0
printf "[+] Backtrace:\n"
bt
printf "[+] info reg:\n"
info reg
printf "[+] exploitable:\n"
exploitable
printf "[+] disassemble $rip, $rip+16:\n"
disassemble $rip, $rip+16
printf "[+] list\n"
list
"""
    gdb_script_32bit = r"""printf "[+] Disabling verbose and complaints\n"
set verbose off
set complaints 0
printf "[+] Backtrace:\n"
bt
printf "[+] info reg:\n"
info reg
printf "[+] exploitable:\n"
exploitable
printf "[+] disassemble $eip, $eip+16:\n"
disassemble $eip, $eip+16
printf "[+] list\n"
list
"""

    #TODO: Make sure gdb script doesn't abort on error
    #ignoring errors in gdb scripts: http://stackoverflow.com/questions/17923865/gdb-stops-in-a-command-file-if-there-is-an-error-how-to-continue-despite-the-er
    gdb_script_32bit_noerror = r"""python
def my_ignore_errors(arg):
  try:
    gdb.execute("print \"" + "Executing command: " + arg + "\"")
    gdb.execute(arg)
  except:
    gdb.execute("print \"" + "ERROR: " + arg + "\"")

my_ignore_errors("p p")
my_ignore_errors("p p->v1")
gdb.execute("quit")
    """

    where_this_python_script_lives = os.path.dirname(os.path.realpath(__file__))
    
    gdb_command = "/usr/bin/gdb"
    #gdb_command_osx = "/opt/local/bin/gdb-apple"
    
    #TODO: For some reason the ASAN environment variables are not correctly set when handed to the subprocess module... so let's just set them in the parent process already:
    os.environ['ASAN_SYMBOLIZER_PATH'] = "/usr/bin/llvm-symbolizer-3.4"
    os.environ['ASAN_OPTIONS'] = "symbolize=1:redzone=512:quarantine_size=512Mb:exitcode=1:abort_on_error=1"
    env={"ASAN_SYMBOLIZER_PATH": "/usr/bin/llvm-symbolizer-3.4", "ASAN_OPTIONS": "symbolize=1:redzone=512:quarantine_size=512Mb:exitcode=1:abort_on_error=1"}
    
    ###
    #This import decides which testcase/binary we want to run!
    ###
    from testcases.gm.Config import create_config
    #from testcases.ffmpeg.Config import create_config
    #see CrashAnalysisConfig for more options that get passed on by create_config
    chosen_config = create_config(where_this_python_script_lives, env=env, gdb_script=gdb_script_32bit, gdb_binary=gdb_command)
    chosen_config.sanity_check()
    
    #
    Logger.info("Input crashes directory operations")
    #
    
    Logger.info("Removing README.txt files")
    fdf = FileDuplicateFinder(chosen_config, chosen_config.original_crashes_directory)
    fdf.remove_readmes()
    
    Logger.info("Removing duplicates from original crashes folder (same file size + MD5)")
    fdf.delete_duplicates_recursively()
    
    Logger.info("Renaming files from original crashes folder so that the filename is a unique identifier. This allows us to copy all crash files into one directory (eg. for tmin output) if necessary, without name collisions")
    fdf.rename_same_name_files()
    #OR:
    #Logger.info("Renaming all files to numeric values, as some programs prefer no special chars in filenames and might require a specific file extension")
    #fdf.rename_all_files(".png")
    
    #
    Logger.info("Finding interesting signals (all crashes)")
    #
    sf_all_crashes = SignalFinder(chosen_config)
    if os.path.exists(chosen_config.default_signal_directory):
        Logger.warning("Seems like all crashes were already categorized by signal, skipping. If you want to rerun: rm -r", chosen_config.default_signal_directory)
    else:
        Logger.debug("Dividing files to output folder according to their signal")
        sf_all_crashes.divide_by_signal()
    
    #Interesting signals: negative on OSX, sometimes 129 and above on Linux shells (depending on the mechanism used)
    #Uninteresting signals: we usually don't care about signals 0, 1, 2, etc. up to 128
    uninteresting_signals = range(0, 129)
    
    analyze_output_and_exploitability(chosen_config, sf_all_crashes, uninteresting_signals, message_prefix="Interesting signals /")
        
    Logger.info("Interesting signals / Minimizing input (afl-tmin)")
    if os.path.exists(chosen_config.default_minimized_crashes_directory):
        Logger.warning("Seems like crashes were already minimized, skipping. If you want to rerun: rm -r", chosen_config.default_minimized_crashes_directory)
    else:
        for signal, signal_folder in sf_all_crashes.get_folder_paths_for_signals_if_exist(uninteresting_signals):
            Logger.debug("Minimizing inputs resulting in signal %i" % signal)
            im = InputMinimizer(chosen_config, signal_folder)
            im.minimize_testcases()
        
        Logger.info("Interesting signals / Minimized inputs / Deduplication")
        fdf_minimized = FileDuplicateFinder(chosen_config, chosen_config.default_minimized_crashes_directory)
        fdf_minimized.delete_duplicates_recursively()
        
    #
    Logger.info("Interesting signals / Minimized inputs / Finding interesting signals")
    #
    sf_minimized_crashes = SignalFinder(chosen_config, chosen_config.default_minimized_crashes_directory, os.path.join(chosen_config.output_dir, "minimized-per-signal"))
    if os.path.exists(sf_minimized_crashes.output_dir):
        Logger.warning("Seems like minimized crashes were already categorized by signal, skipping. If you want to rerun: rm -r", sf_minimized_crashes.output_dir)
    else:
        os.mkdir(sf_minimized_crashes.output_dir)
        Logger.info("Dividing files to output folder according to their signal")
        sf_minimized_crashes.divide_by_signal(0)
    
    
    analyze_output_and_exploitability(chosen_config, sf_minimized_crashes, uninteresting_signals, message_prefix="Interesting signals / Minimized inputs /")
    
#TODO:
#- Make (some) modules work as standalone applications with command line parsing
#- The FeelingLuckyExploiter thing. Need to get a small test sample where I know it should work.
#     # If you are in the mood to waste a little CPU time, run this
#     Logger.info("Found interesting_signals (interesting interesting_signals) / Minimized inputs (interested interesting_signals) / Feeling lucky auto exploitation")
#     #
#     fle = FeelingLuckyExploiter(chosen_config, sf_minimized_crashes.output_dir)
#     #os.mkdir(fle.output_dir)
#     fle.run_forest_run()


    cleanup(chosen_config)
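
The example listings define main() but never show the entry point; presumably each original script ends with the standard guard:

if __name__ == "__main__":
    main()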