def send_next_task(self, conn):
    """Hand the next unit of work to a slave over connection *conn*.

    Priority order: payloads dropped into the workdir's imports/ folder,
    then regular queue nodes. With nothing to do, the slave is told to
    busy-wait (or blind-fuzz), and repeated busy events trigger a
    coverage sanity check on the main bitmap.
    """
    # Inputs placed into imports/ take priority; this doubles as a way
    # to inject additional seeds at runtime.
    pending = glob.glob(self.config.argument_values['work_dir'] + "/imports/*")
    if pending:
        path = pending.pop()
        print("Importing payload from %s" % path)
        seed = read_binary_file(path)
        os.remove(path)
        return self.comm.send_import(conn, {
            "type": "import",
            "payload": seed,
        })

    # Otherwise serve the next item from the regular queue.
    node = self.queue.get_next()
    if node:
        return self.comm.send_node(conn, {
            "type": "node",
            "nid": node.get_id(),
        })

    # No work available - tell the slave to wait a little or attempt
    # blind fuzzing. After many busy events in a row, check whether the
    # coverage bitmap is still empty and warn the user if so.
    self.comm.send_busy(conn)
    self.busy_events += 1
    if self.busy_events >= 10:
        self.busy_events = 0
        main_bitmap = self.bitmap_storage.get_bitmap_for_node_type("regular").c_bitmap
        if mmh3.hash(main_bitmap) == self.empty_hash:
            print_note(
                "Coverage bitmap is empty?! Check -ip0 or try better seeds."
            )
def send_next_task(self, conn):
    """Dispatch the next queue node to a slave (switching-queue variant).

    Does nothing while tasks are paused. With an empty queue the slave
    is told to busy-wait; repeated busy events trigger a coverage
    sanity check on the main bitmap.
    """
    # for the switching queue.
    if self.task_paused:
        return

    # Serve the next item from the queues, counting dispatched tasks.
    node = self.queues.get_next_node()
    if node:
        self.task_count += 1
        return self.comm.send_node(conn, {
            "type": "node",
            "nid": node.get_id(),
        })

    # No work in queue - tell the slave to wait a little or attempt
    # blind fuzzing. After many busy events, check whether the coverage
    # bitmap is still empty and warn the user if so.
    self.comm.send_busy(conn)
    self.busy_events += 1
    if self.busy_events >= 10:
        self.busy_events = 0
        main_bitmap = self.bitmap_storage.get_bitmap_for_node_type("regular").c_bitmap
        if mmh3.hash(main_bitmap) == self.empty_hash:
            print_note(
                "Coverage bitmap is empty?! Check -ip0 or try better seeds."
            )
def parse_trace_file(self, trace_file, trace_id):
    """Parse one lz4-compressed PT trace file.

    Returns a dict with sets of basic blocks ('bbs'), edges ('edges')
    and trace_enable gap markers ('gaps'), or None when the file is
    missing.
    """
    if not os.path.isfile(trace_file):
        print_note("Could not find trace file %s, skipping.." % trace_file)
        return None

    gaps = set()
    bbs = set()
    edges = set()
    with lz4.LZ4FrameFile(trace_file, 'rb') as f:
        # Regex scan over the whole decoded file is slightly faster than
        # line-wise json parsing of entries such as
        # { "edge": [src, dst] } / { "trace_enable": n }.
        # Raw string: \{, \w, \[, \d are regex escapes, not string
        # escapes - the non-raw form triggers invalid-escape warnings.
        pattern = re.compile(r"\{.(\w+).: \[?(\d+),?(\d+)?\]? \}")
        for m in pattern.finditer(f.read().decode()):
            if m.group(1) == "trace_enable":
                gaps.add(m.group(2))
            if m.group(1) == "edge":
                edges.add("%s_%s" % (m.group(2), m.group(3)))
                bbs.add(m.group(2))
                bbs.add(m.group(3))
    return {'bbs': bbs, 'edges': edges, 'gaps': gaps}
def start(config):
    """IRPT entry point: self-check, prepare workdir, load the interface
    definition and run the fuzzer main loop.

    Returns -1 when the self-check fails and 1 when the workdir cannot
    be prepared; otherwise terminates the process via os._exit(0).
    """
    if not post_self_check(config):
        return -1

    work_dir = config.argument_values["work_dir"]
    if config.argument_values['v'] or config.argument_values['debug']:
        enable_logging(work_dir)

    if not prepare_working_dir(config):
        print_fail("Refuse to operate on existing work directory. Use --purge to override.")
        return 1

    # Load an interface json file.
    interface_manager.load(config.argument_values['interface'])

    # Start IRPT!
    qemu_sweep()
    proc = Process(config)
    try:
        proc.loop()
    except KeyboardInterrupt:
        print_note("Received Ctrl-C")
    finally:
        # Persist the database and shut down cleanly no matter how the
        # loop ended.
        proc.database.save()
        proc.shutdown()

    os._exit(0)
def generate_traces(config, input_list):
    """Generate PT trace files for the inputs in *input_list*, in parallel.

    Traces are written to <input>/traces/; inputs that already have a
    cached trace are skipped. Returns the trace dir on success, None on
    error or when no trace region is configured.
    """
    trace_dir = config.argument_values["input"] + "/traces/"

    # TODO What is the effect of not defining a trace region? will it trace?
    if not config.argument_values['ip0']:
        print_warning("No trace region configured!")
        return None

    start = time.time()

    if not os.path.exists(trace_dir):
        os.makedirs(trace_dir)

    # Only trace inputs that don't already have a trace file on disk.
    input_files = list()
    for input_path, _, _ in input_list:
        trace_file = trace_dir + os.path.basename(input_path) + ".lz4"
        if os.path.exists(trace_file):
            print("Skip input with existing trace: %s" % input_path)
        else:
            input_files.append(input_path)

    # os.cpu_count() is documented to return None when undeterminable -
    # fall back to a single worker in that case.
    nproc = os.cpu_count() or 1
    chunksize = ceil(len(input_files) / nproc)
    offset = 0
    workers = list()

    try:
        # Fan out fixed-size chunks of the input list to worker processes.
        for pid in range(nproc):
            sublist = input_files[offset:offset + chunksize]
            offset += chunksize
            if len(sublist) > 0:
                worker = mp.Process(target=generate_traces_worker,
                                    args=(config, pid, sublist))
                worker.start()
                workers.append(worker)

        # Poll workers; abort on the first non-zero exit code.
        for worker in workers:
            while worker.is_alive():
                time.sleep(2)
            if worker.exitcode != 0:
                return None
    except KeyboardInterrupt:
        print_note("Received Ctrl-C, killing slaves...")
        return None
    except Exception:
        # Don't swallow unexpected failures silently - report before
        # aborting (local import keeps the fix self-contained).
        import traceback
        print(traceback.format_exc())
        return None
    finally:
        graceful_exit(workers)

    end = time.time()
    print("\n\nDone. Time taken: %.2fs\n" % (end - start))
    return trace_dir
def get_cov_by_trace(self, trace_file, trace_id):
    """Parse a trace file and return (new_bbs, new_edges) counts
    relative to all coverage seen so far.

    Note the returned new-BB count depends on the order in which traces
    are parsed. Updates self.known_bbs / self.known_edges as a side
    effect.
    """
    findings = self.parse_trace_file(trace_file, trace_id)
    if not findings:
        return 0, 0

    if len(findings['gaps']) > 1:
        print_note("Got multiple gaps in trace %s" % trace_file)

    # Count what is genuinely new before merging into the known sets.
    fresh_bbs = findings['bbs'] - self.known_bbs
    fresh_edges = findings['edges'] - self.known_edges
    self.known_bbs |= findings['bbs']
    self.known_edges |= findings['edges']

    return len(fresh_bbs), len(fresh_edges)
def start(config):
    """kAFL entry point: self-check, prepare workdir and seeds, then run
    the master loop with *p* slave processes.

    Returns -1 on failed self-check, 1 on workdir/seed errors; otherwise
    terminates via sys.exit(0).
    """
    if not post_self_check(config):
        return -1

    work_dir = config.argument_values["work_dir"]
    seed_dir = config.argument_values["seed_dir"]
    num_slaves = config.argument_values['p']

    if config.argument_values['v']:
        enable_logging(work_dir)

    if not prepare_working_dir(config):
        print_fail(
            "Refuse to operate on existing work directory. Use --purge to override."
        )
        return 1

    if seed_dir and not copy_seed_files(work_dir, seed_dir):
        print_fail("Error when importing seeds. Exit.")
        return 1

    # Without -ip0, Qemu will not active PT tracing and we turn into a blind fuzzer
    if not config.argument_values['ip0']:
        print_warning("No trace region configured! PT feedback disabled!")

    master = MasterProcess(config)

    slaves = []
    for i in range(num_slaves):
        slaves.append(
            multiprocessing.Process(name="Slave " + str(i),
                                    target=slave_loader,
                                    args=(i, )))
        slaves[i].start()

    try:
        master.loop()
    except KeyboardInterrupt:
        print_note("Received Ctrl-C, killing slaves...")
    except Exception:
        # Was a bare 'except:', which would also trap SystemExit and
        # GeneratorExit - catch only genuine errors here.
        print_fail("Exception in Master. Exiting..")
        print(traceback.format_exc())
    finally:
        graceful_exit(slaves)

    time.sleep(0.2)
    qemu_sweep()
    sys.exit(0)
def get_inputs_by_time(data_dir):
    """Detect whether *data_dir* is an AFL- or kAFL-style workdir and
    return its inputs/input IDs as a list sorted by timestamp.

    Exits the process when the directory layout is not recognized.
    """
    # AFL workdirs carry these marker files plus a queue/ directory.
    looks_like_afl = (os.path.exists(data_dir + "/fuzzer_stats")
                      and os.path.exists(data_dir + "/fuzz_bitmap")
                      and os.path.exists(data_dir + "/plot_data")
                      and os.path.isdir(data_dir + "/queue"))

    if looks_like_afl:
        input_data = afl_workdir_iterator(data_dir)
    elif (os.path.isdir(data_dir + "/corpus/regular")
          and os.path.isdir(data_dir + "/metadata")):
        input_data = kafl_workdir_iterator(data_dir)
    else:
        print_note("Unrecognized target directory type «%s». Exit." % data_dir)
        sys.exit()

    # Sort entries by their timestamp field (index 2).
    input_data.sort(key=itemgetter(2))
    return input_data
def parse_trace_file(trace_file):
    """Parse one lz4-compressed PT trace file.

    Returns a dict with sets of basic blocks ('bbs'), edges ('edges')
    and trace_enable gap markers ('gaps'), or None when the file is
    missing.
    """
    if not os.path.isfile(trace_file):
        print_note("Could not find trace file %s, skipping.." % trace_file)
        return None

    gaps = set()
    bbs = set()
    edges = set()
    with lz4.LZ4FrameFile(trace_file, 'rb') as f:
        # Raw string: \{, \w, \[, \d are regex escapes, not string
        # escapes - the non-raw form triggers invalid-escape warnings.
        pattern = re.compile(r"\{.(\w+).: \[?(\d+),?(\d+)?\]? \}")
        for m in pattern.finditer(f.read().decode()):
            if m.group(1) == "trace_enable":
                gaps.add(m.group(2))
            if m.group(1) == "edge":
                edges.add("%s,%s" % (m.group(2), m.group(3)))
                bbs.add(m.group(2))
                bbs.add(m.group(3))
    return {'bbs': bbs, 'edges': edges, 'gaps': gaps}
def start(config):
    """kAFL entry point (WDM variant): self-check, prepare workdir and
    seeds, optionally load a WDM interface file, then run the master
    loop with *p* slave processes.

    Returns -1 on failed self-check, 1 on workdir/seed errors; otherwise
    terminates via sys.exit(0).
    """
    if not post_self_check(config):
        return -1

    work_dir = config.argument_values["work_dir"]
    seed_dir = config.argument_values["seed_dir"]
    num_slaves = config.argument_values['p']

    if config.argument_values['v'] or config.argument_values['debug']:
        enable_logging(work_dir)

    if not prepare_working_dir(config):
        print_fail("Refuse to operate on existing work directory. Use --purge to override.")
        return 1

    if seed_dir and not copy_seed_files(work_dir, seed_dir):
        print_fail("Error when importing seeds. Exit.")
        return 1

    if config.argument_values['wdm']:
        interface_manager.load(config.argument_values['wdm'])

    master = MasterProcess(config)

    slaves = []
    for idx in range(num_slaves):
        proc = multiprocessing.Process(name="Slave " + str(idx),
                                       target=slave_loader,
                                       args=(idx,))
        slaves.append(proc)
        proc.start()

    try:
        master.loop()
    except KeyboardInterrupt:
        print_note("Received Ctrl-C, killing slaves...")
    except SystemExit as e:
        print_fail("Master exit: " + str(e))
    finally:
        graceful_exit(slaves)

    time.sleep(0.2)
    qemu_sweep()
    sys.exit(0)
def generate_traces(config, input_list):
    """Replay each input through a single Qemu trace-mode instance and
    store lz4-compressed PT traces under <input>/traces/.

    Returns the trace dir on success (or when traces already exist),
    None on error.
    """
    work_dir = config.argument_values['work_dir']
    data_dir = config.argument_values["input"]
    trace_dir = data_dir + "/traces/"

    if data_dir == work_dir:
        print_note("Workdir must be separate from input/data dir. Aborting.")
        return None

    prepare_working_dir(config)

    if os.path.exists(trace_dir):
        print_note(
            "Input data_dir already has a traces/ subdir. Skipping trace generation..\n"
        )
        return trace_dir

    # real deal. delete trace dir if it exists and (re-)create traces
    shutil.rmtree(trace_dir, ignore_errors=True)
    os.makedirs(trace_dir)

    # TODO What is the effect of not defining a trace region? will it trace?
    if not config.argument_values['ip0']:
        print_warning("No trace region configured!")

    # Bugfix: the original check missed the path separator
    # (work_dir + "redqueen_workdir_1337"), so leftover instance files
    # were never detected. The read below uses "/redqueen_workdir_1337".
    if os.path.exists(work_dir + "/redqueen_workdir_1337"):
        print_fail(
            "Leftover files from 1337 instance. This should not happen.")
        return None

    q = qemu(1337, config, debug_mode=False)
    if not q.start():
        print_fail("Could not start Qemu. Exit.")
        return None

    start = time.time()
    try:
        for input_path, nid, timestamp in input_list:
            print("Processing: %s" % input_path)

            q.set_payload(read_binary_file(input_path))
            exec_res = q.execute_in_trace_mode(timeout_detection=False)

            if not exec_res:
                print_note("Failed to execute input %s. Continuing anyway..." % input_path)
                assert (q.restart())
                continue

            # TODO: reboot by default, persistent by option
            if exec_res.is_crash():
                q.reload()

            # Compress the raw PT trace result next to the input name.
            with open(work_dir + "/redqueen_workdir_1337/pt_trace_results.txt", 'rb') as f_in:
                with lz4.LZ4FrameFile(
                        trace_dir + os.path.basename(input_path) + ".lz4",
                        'wb',
                        compression_level=lz4.COMPRESSIONLEVEL_MINHC) as f_out:
                    shutil.copyfileobj(f_in, f_out)
    finally:
        # 'except: raise' in the original was a no-op; finally alone is
        # equivalent and always tears down the Qemu instance.
        q.async_exit()

    end = time.time()
    print("Time taken: %.2fs" % (end - start))
    return trace_dir