def start(config):
    """Entry point for the IRPT fuzzer.

    Validates the environment, prepares the working directory, loads the
    target interface description and runs the main Process loop until
    Ctrl-C or normal termination.

    Returns -1 on failed self-check, 1 on refused work dir; otherwise the
    process is terminated via os._exit(0).
    """
    if not post_self_check(config):
        return -1
    work_dir = config.argument_values["work_dir"]
    # Verbose or debug mode enables file logging inside the work dir.
    if config.argument_values['v'] or config.argument_values['debug']:
        enable_logging(work_dir)
    if not prepare_working_dir(config):
        print_fail("Refuse to operate on existing work directory. Use --purge to override.")
        return 1
    # Load an interface json file.
    interface_manager.load(config.argument_values['interface'])
    # Start IRPT!
    qemu_sweep()
    proc = Process(config)
    try:
        proc.loop()
    except KeyboardInterrupt:
        print_note("Received Ctrl-C")
    finally:
        # Persist fuzzing state and tear down Qemu regardless of how the
        # loop ended.
        proc.database.save()
        proc.shutdown()
    # Hard exit: skips interpreter cleanup so lingering child threads or
    # processes cannot block termination.
    os._exit(0)
def __debug_recv_expect(self, cmd):
    """Receive control messages until one contained in *cmd* arrives.

    Uses `res in cmd`, so *cmd* may be a string/sequence of acceptable
    message bytes. Returns False if the accepted message is PT_TRASHED,
    True otherwise. Asserts (aborts) on timeout or unexpected messages.
    """
    res = ''
    while True:
        res = self.__debug_recv()
        if res in cmd:
            break
        # TODO: the I/O handling here really sucks.
        # Below we are returning OK to set_init_state() in order to silence handshake error message during kafl_info.py.
        # We need to factor out the debug stuff and properly support required vs optional/intermediate control messages...
        elif res == qemu_protocol.INFO:
            break
        elif res is None:
            # Timeout is detected separately in debug_recv(), so we should never get here..
            assert False
        else:
            # Reaching this part typically means there is a bug in the agent or target setup which
            # messes up the expected interaction. Throw an error and exit.
            log_qemu("Fatal error in debug_recv(): Got " + str(res) + ", Expected: " + str(cmd) + ")", self.qemu_id)
            print_fail("Slave %d: Error in debug_recv(): Got %s, Expected: %s" % (self.qemu_id, str(res), str(cmd)))
            assert False
    if res == qemu_protocol.PT_TRASHED:
        # PT decoder lost data for this run; caller treats this as a
        # soft failure rather than a protocol error.
        log_qemu("PT_TRASHED", self.qemu_id)
        return False
    return True
def set_payload(self, payload):
    """Write *payload* into the fuzzer shared-memory input region.

    Layout: 4 length-prefix bytes (written in reverse index order of
    to_string_32's result) followed by the payload itself. Payloads
    longer than the usable region are silently truncated.

    Exits the process if a shutdown is in progress; logs and re-raises
    on SHM write failures (typically a crashed Qemu).
    """
    if self.exiting:
        sys.exit(0)

    # TODO: enforce single global size limit through frontend/mutations/backend
    # PAYLOAD_SIZE-sizeof(uint32)-sizeof(uint8) = 131067!
    payload_limit = self.payload_size - 16
    if len(payload) > payload_limit:
        payload = payload[:payload_limit]
    try:
        self.fs_shm.seek(0)
        input_len = to_string_32(len(payload))
        self.fs_shm.write_byte(input_len[3])
        self.fs_shm.write_byte(input_len[2])
        self.fs_shm.write_byte(input_len[1])
        self.fs_shm.write_byte(input_len[0])
        self.fs_shm.write(payload)
        self.fs_shm.flush()
    except Exception:
        # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit are
        # not intercepted; any real failure is still logged and re-raised.
        if self.exiting:
            sys.exit(0)
        # Qemu crashed. Could be due to prior payload but more likely harness/config is broken..
        print_fail("Failed to set new payload - Qemu crash?")
        log_qemu("Failed to set new payload - Qemu crash?", self.qemu_id)
        raise
def start(self):
    """Launch the Qemu child process and perform the initial handshake.

    Returns True on success, False if the VM failed to start (or a
    shutdown is already in progress).
    """
    if self.exiting:
        return False

    self.persistent_runs = 0
    # Handshake state: stage 1 = first RELEASE not yet exchanged,
    # stage 2 = first ACQUIRE not yet received.
    self.handshake_stage_1 = True
    self.handshake_stage_2 = True

    if self.qemu_id == "0" or self.qemu_id == "1337": ## 1337 is debug instance!
        # Only the first/debug instance logs the full command line.
        log_qemu("Launching virtual machine...CMD:\n" + ' '.join(self.cmd), self.qemu_id)
    else:
        log_qemu("Launching virtual machine...", self.qemu_id)

    # Launch Qemu. stderr to stdout, stdout is logged on VM exit
    # os.setpgrp() prevents signals from being propagated to Qemu, instead allowing an
    # organized shutdown via async_exit()
    self.process = subprocess.Popen(self.cmd,
                                    preexec_fn=os.setpgrp,
                                    stdin=subprocess.PIPE,
                                    #stdout=subprocess.PIPE,
                                    stderr=subprocess.STDOUT)

    try:
        self.__qemu_connect()
        self.__qemu_handshake()
    except (OSError, BrokenPipeError) as e:
        # Only report when this is an unexpected failure, not a shutdown race.
        if not self.exiting:
            print_fail("Failed to launch Qemu, please see logs. Error: " + str(e))
            log_qemu("Fatal error: Failed to launch Qemu: " + str(e), self.qemu_id)
        self.shutdown()
        return False
    return True
def start():
    """Legacy fuzzer entry point: prepare workspace, spawn slaves, run master.

    Returns -1 on failed self-check, 0 on normal/aborted exit, 1 when the
    seed directory is empty.
    """
    config = FuzzerConfiguration()
    if not post_self_check(config):
        return -1
    if config.argument_values['v']:
        enable_logging(config.argument_values["work_dir"])
    num_processes = config.argument_values['p']
    # NOTE(review): permission is requested when 'Purge' is NOT set, which
    # looks inverted compared to the sibling start() that checks
    # "if Purge and check_if_old_state_exits(...)" — confirm intent.
    if not config.argument_values['Purge']:
        if ask_for_permission("PURGE", " to wipe old workspace:"):
            print_warning("Wiping old workspace...")
            time.sleep(2)
        else:
            print_fail("Aborting...")
            return 0
    prepare_working_dir(config.argument_values['work_dir'])
    if not copy_seed_files(config.argument_values['work_dir'], config.argument_values['seed_dir']):
        print_fail("Seed directory is empty...")
        return 1
    master = MasterProcess(config)
    slaves = []
    for i in range(num_processes):
        # print() call form (was a Python-2 'print' statement); works on 2 and 3.
        print("fuzzing process {}".format(i))
        slaves.append(multiprocessing.Process(name='SLAVE' + str(i), target=slave_loader, args=(i, )))
        slaves[i].start()
    try:
        master.loop()
    except KeyboardInterrupt:
        pass
    # Ignore further Ctrl-C while waiting for slaves to wind down.
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    counter = 0
    # print_pre_exit_msg(counter, clrscr=True)
    for slave in slaves:
        while True:
            counter += 1
            # print_pre_exit_msg(counter)
            slave.join(timeout=0.25)
            if not slave.is_alive():
                break
    # print_exit_msg()
    return 0
def execute(cmd):
    """Spawn the radamsa mutator as a subprocess and return the Popen handle.

    stdout is piped so the caller can read the mutated output; launch
    failures are logged and re-raised.
    """
    log_radamsa("Radamsa cmd: " + str(cmd))
    try:
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=None)
    except OSError:
        # Narrowed from a bare 'except:': Popen raises OSError (incl.
        # FileNotFoundError) when the binary is missing or not executable,
        # which is exactly the case the message below describes.
        # Radamsa stage is experimental and does not seem very effective.
        # Need binary in current/same path as this python file.
        print_fail("Failed to launch radamsa. Do we have the binary in place?")
        raise
    return proc
def start(self):
    """Launch Qemu, open its /proc stat handle and drive the init handshake.

    Returns True on success, False on launch failure or when a shutdown
    is already in progress.
    """
    if self.exiting:
        return False

    # The original qemu_id check logged the identical message in both
    # branches (dead conditional) — log unconditionally instead.
    log_qemu("Launching virtual machine...CMD:\n" + ' '.join(self.cmd), self.qemu_id)

    self.persistent_runs = 0
    # Have not received+send first RELEASE (init handshake)
    self.handshake_stage_1 = True
    # Have not received first ACQUIRE (ready for payload execution)
    self.handshake_stage_2 = True

    # Launch Qemu. stderr to stdout, stdout is logged on VM exit
    # os.setpgrp() prevents signals from being propagated to Qemu, instead allowing an
    # organized shutdown via async_exit()
    if self.verbose:
        self.process = subprocess.Popen(self.cmd,
                                        preexec_fn=os.setpgrp,
                                        stdin=subprocess.PIPE,
                                        stdout=get_log_file(),
                                        stderr=get_log_file())
    else:
        self.process = subprocess.Popen(self.cmd,
                                        preexec_fn=os.setpgrp,
                                        stdin=subprocess.PIPE,
                                        stdout=subprocess.PIPE,
                                        stderr=subprocess.STDOUT)

    try:
        self.stat_fd = open("/proc/" + str(self.process.pid) + "/stat")
        self.init()
        self.set_init_state()
    except Exception:
        # Narrowed from a bare 'except:' so Ctrl-C during startup is not
        # swallowed and reported as a launch failure.
        if not self.exiting:
            print_fail("Failed to launch Qemu, please see logs.")
            log_qemu("Fatal error: Failed to launch Qemu.", self.qemu_id)
        self.shutdown()
        return False

    # Baseline memory usage, used elsewhere to detect runaway growth.
    self.initial_mem_usage = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss

    # Seed the coverage SHM with the virgin bitmap.
    self.kafl_shm.seek(0x0)
    self.kafl_shm.write(self.virgin_bitmap)
    self.kafl_shm.flush()
    return True
def generate_traces_worker(config, pid, input_list):
    """Worker process: replay each input through a dedicated Qemu instance
    and store the resulting PT trace as an lz4-compressed file.

    Mutates *config* to give this worker its own temporary work dir.
    Returns None on Qemu launch failure.
    """
    def sigterm_handler(signal, frame):
        # Tear down the Qemu child before this worker exits.
        if q:
            q.async_exit()
        sys.exit(0)

    # override config - workdir root should be tempdir!
    pname = mp.current_process().name
    config.argument_values['work_dir'] += "_%s" % pname
    config.argument_values['purge'] = True
    prepare_working_dir(config)

    work_dir = config.argument_values['work_dir']
    trace_dir = config.argument_values["input"] + "/traces/"

    signal.signal(signal.SIGTERM, sigterm_handler)
    # Detach from the parent's process group so signals aimed at the
    # parent don't hit Qemu directly.
    os.setpgrp()
    q = qemu(1337, config, debug_mode=False)
    if not q.start():
        print_fail("%s: Could not start Qemu. Exit." % pname)
        return None

    pbar = tqdm(total=len(input_list), desc=pname, dynamic_ncols=True, smoothing=0.1, position=pid + 1)

    try:
        for input_path in input_list:
            trace_file = trace_dir + os.path.basename(input_path) + ".lz4"
            # Skip inputs that already have a trace from a previous run.
            if os.path.exists(trace_file):
                #printf("Skipping %s.." % os.path.basename(input_path))
                pbar.update()
                continue
            #print("Processing %s.." % os.path.basename(input_path))
            if funky_trace_run(q, input_path):
                # Compress the raw PT trace results into the trace dir.
                with open(work_dir + "/redqueen_workdir_1337/pt_trace_results.txt", 'rb') as f_in:
                    with lz4.LZ4FrameFile(trace_file, 'wb', compression_level=lz4.COMPRESSIONLEVEL_MINHC) as f_out:
                        shutil.copyfileobj(f_in, f_out)
            pbar.update()
    except:
        # Ensure Qemu is torn down before propagating any error.
        q.async_exit()
        raise
    q.shutdown()
def __execute(self, data, retry=0):
    """Feed *data* to Qemu and return the execution result.

    On SHM/socket failures the Qemu instance is restarted and the payload
    retried, up to three attempts in total; after that (or if the restart
    itself fails) the error is re-raised.
    """
    try:
        self.q.set_payload(data)
        return self.q.send_payload()
    except (ValueError, BrokenPipeError):
        if retry > 2:
            # TODO if it reliably kills qemu, perhaps log to master for harvesting..
            print_fail("Slave %d aborting due to repeated SHM/socket error. Check logs." % self.slave_id)
            log_slave("Aborting due to repeated SHM/socket error. Payload: %s" % repr(data), self.slave_id)
            raise
        print_warning("SHM/socket error on Slave %d (retry %d)" % (self.slave_id, retry))
        log_slave("SHM/socket error, trying to restart qemu...", self.slave_id)
        self.statistics.event_reload()
        if self.q.restart():
            return self.__execute(data, retry=retry + 1)
        raise
def wait(self, timeout=None):
    """Poll all slave connections and return a list of (conn, message) pairs.

    New connections on the listener socket are registered for future
    polls. A dead slave is dropped; once only the listener remains,
    SystemExit is raised because no slaves are left to schedule.
    """
    results = []
    readable = select.select(self.clients, (), (), timeout)[0]
    for conn in readable:
        if conn == self.listener:
            # Incoming slave connection: accept and track it.
            self.clients.append(self.listener.accept())
            continue
        try:
            raw = conn.recv_bytes()
            results.append((conn, msgpack.unpackb(raw, raw=False, strict_map_key=False)))
        except (EOFError, IOError):
            print_fail("Slave has died - check logs!")
            conn.close()
            self.clients.remove(conn)
            if len(self.clients) == 1:
                raise SystemExit("All slaves have died.")
    return results
def wait(self, timeout=None):
    """Poll all slave connections and return a list of (sock, message) pairs.

    New connections on the listener are registered for future polls.
    Dead slaves are dropped; when only the listener remains, SystemExit
    is raised (consistent with the sibling wait() implementation, and
    resolving the exit part of the TODO below).
    """
    results = []
    r, w, e = select.select(self.clients, (), (), timeout)
    for sock_ready in r:
        if sock_ready == self.listener:
            c = self.listener.accept()
            self.clients.append(c)
        else:
            try:
                msg = sock_ready.recv_bytes()
                msg = msgpack.unpackb(msg, raw=False, strict_map_key=False)
                results.append((sock_ready, msg))
                #print("Master received: ", str(msg))
            except (EOFError, IOError):
                # TODO: try to restart slaves instead of only exiting when all are dead
                sock_ready.close()
                self.clients.remove(sock_ready)
                print_fail("Slave has died - check logs!")
                if len(self.clients) == 1:
                    raise SystemExit("All slaves have died.")
    return results
def send_irp(self, irp, retry=0):
    """Send a single IRP to the target and return send_payload()'s result.

    On SHM/socket failures Qemu is restarted and the IRP retried, up to
    three attempts in total; after that (or if the restart fails) the
    error is re-raised.
    """
    try:
        #log(f"iocode: {hex(irp.IoControlCode)}, payload: {bytes(irp.InBuffer[:0x10])}.., len: {hex(irp.InBufferLength)}", label='IRP')
        self.set_payload(irp)
        return self.send_payload()
    except (ValueError, BrokenPipeError):
        if retry > 2:
            # TODO if it reliably kills qemu, perhaps log to master for harvesting..
            print_fail("Process aborting due to repeated SHM/socket error. Check logs.")
            log_qemu("Aborting due to repeated SHM/socket error", self.qemu_id)
            raise
        print_warning("SHM/socket error on Process (retry %d)" % retry)
        log_qemu("SHM/socket error, trying to restart qemu...", self.qemu_id)
        if not self.restart():
            raise
        return self.send_irp(irp, retry=retry + 1)
def set_payload(self, payload):
    """Write *payload* into the fuzzer shared-memory input region.

    A 4-byte length prefix (bytes of to_string_32's result, emitted in
    reverse index order) is written first, then the payload itself.
    Over-long payloads are truncated; ValueError from the SHM layer is
    logged and re-raised.
    """
    if self.exiting:
        sys.exit(0)

    # actual payload is limited to payload_size - sizeof(uint32) - sizeof(uint8)
    max_len = self.payload_size - 5
    if len(payload) > max_len:
        payload = payload[:max_len]

    try:
        self.fs_shm.seek(0)
        length_prefix = to_string_32(len(payload))
        # Write bytes 3,2,1,0 of the prefix — same order as before.
        for b in reversed(length_prefix[:4]):
            self.fs_shm.write_byte(b)
        self.fs_shm.write(payload)
        self.fs_shm.flush()
    except ValueError:
        if self.exiting:
            sys.exit(0)
        # Qemu crashed. Could be due to prior payload but more likely harness/config is broken..
        print_fail("Failed to set new payload - Qemu crash?")
        log_qemu("Failed to set new payload - Qemu crash?", self.qemu_id)
        raise
def start(config):
    """kAFL fuzzer entry point: prepare workspace and seeds, spawn slaves,
    run the master loop, then shut everything down.

    Returns -1 on failed self-check, 1 on workspace/seed errors; otherwise
    exits via sys.exit(0).
    """
    if not post_self_check(config):
        return -1

    work_dir = config.argument_values["work_dir"]
    seed_dir = config.argument_values["seed_dir"]
    num_slaves = config.argument_values['p']

    if config.argument_values['v']:
        enable_logging(work_dir)

    if not prepare_working_dir(config):
        print_fail("Refuse to operate on existing work directory. Use --purge to override.")
        return 1

    if seed_dir and not copy_seed_files(work_dir, seed_dir):
        print_fail("Error when importing seeds. Exit.")
        return 1

    # Without -ip0, Qemu will not active PT tracing and we turn into a blind fuzzer
    if not config.argument_values['ip0']:
        print_warning("No trace region configured! PT feedback disabled!")

    master = MasterProcess(config)
    slaves = []
    for i in range(num_slaves):
        slaves.append(multiprocessing.Process(name="Slave " + str(i), target=slave_loader, args=(i, )))
        slaves[i].start()

    try:
        master.loop()
    except KeyboardInterrupt:
        print_note("Received Ctrl-C, killing slaves...")
    except Exception:
        # Narrowed from a bare 'except:' so SystemExit is not mislabeled as
        # "Exception in Master"; real errors still get a traceback.
        print_fail("Exception in Master. Exiting..")
        print(traceback.format_exc())
    finally:
        graceful_exit(slaves)

    time.sleep(0.2)
    qemu_sweep()
    sys.exit(0)
def start(config):
    """Fuzzer entry point (WDM variant): prepare workspace and seeds, load
    the WDM interface description if given, spawn slaves and run the
    master loop.

    Returns -1 on failed self-check, 1 on workspace/seed errors; otherwise
    exits via sys.exit(0).
    """
    if not post_self_check(config):
        return -1

    work_dir = config.argument_values["work_dir"]
    seed_dir = config.argument_values["seed_dir"]
    num_slaves = config.argument_values['p']

    if config.argument_values['v'] or config.argument_values['debug']:
        enable_logging(work_dir)

    if not prepare_working_dir(config):
        print_fail("Refuse to operate on existing work directory. Use --purge to override.")
        return 1

    if seed_dir and not copy_seed_files(work_dir, seed_dir):
        print_fail("Error when importing seeds. Exit.")
        return 1

    # Optional WDM driver interface description (IOCTL definitions).
    if config.argument_values['wdm']:
        interface_manager.load(config.argument_values['wdm'])

    master = MasterProcess(config)
    slaves = []
    for i in range(num_slaves):
        slaves.append(multiprocessing.Process(name="Slave " + str(i), target=slave_loader, args=(i,)))
        slaves[i].start()

    try:
        master.loop()
    except KeyboardInterrupt:
        print_note("Received Ctrl-C, killing slaves...")
    except SystemExit as e:
        # Deliberate: a SystemExit from the master loop is reported, then
        # the finally block still shuts the slaves down.
        print_fail("Master exit: " + str(e))
    finally:
        graceful_exit(slaves)

    time.sleep(0.2)
    qemu_sweep()
    sys.exit(0)
def error(self, message):
    """argparse error hook: print full help, then the error, and exit 1.

    Overrides ArgumentParser.error so that a bad command line shows the
    complete usage text instead of only the short usage string.
    """
    self.print_help()
    print_fail('%s\n\n' % message)
    sys.exit(1)
def main():
    """Top-level entry: set up the workspace (fresh or reloaded), start the
    communicator, server processes, slave/concolic threads, run the master
    loop and orchestrate an ordered shutdown.

    Returns 1 on workspace/seed errors.
    """
    # SIGUSR1 drops into pdb for live debugging; pid is printed so the
    # operator knows where to send the signal.
    signal.signal(signal.SIGUSR1, handle_pdb)
    print(os.getpid())
    time.sleep(1)

    config = FuzzerConfiguration()
    num_processes = config.argument_values['p']
    num_concolic = config.argument_values['concolic']
    # NOTE: 'reload' shadows the Python-2 builtin of the same name; harmless
    # on Python 3 but worth renaming eventually.
    reload = False

    if config.argument_values['Purge'] and check_if_old_state_exits(config.argument_values['work_dir']):
        print_warning("Old workspace found!")
        print_warning("Wiping old workspace...")
        prepare_working_dir(config.argument_values['work_dir'], purge=config.argument_values['Purge'])
        time.sleep(2)

    if not check_if_old_state_exits(config.argument_values['work_dir']):
        # Fresh start: create the workspace, import seeds, persist config.
        if not prepare_working_dir(config.argument_values['work_dir'], purge=config.argument_values['Purge']):
            print_fail("Working directory is weired or corrupted...")
            return 1
        if not copy_seed_files(config.argument_values['work_dir'], config.argument_values['seed_dir']):
            print_fail("Seed directory is empty...")
            return 1
        config.save_data()
    else:
        # Resume from existing state.
        log_core("Old state exist -> loading...")
        config.load_data()
        reload = True

    # UI only when available, not verbose, and a target ('f') is configured.
    DO_USE_UI = (USE_UI and not config.argument_values['verbose'] and config.argument_values['f'])

    comm = Communicator(num_processes=num_processes, concolic_thread=num_concolic)
    master = MasterProcess(comm, reload=reload)

    mapserver_process = multiprocessing.Process(name='MAPSERVER', target=mapserver_loader, args=(comm, reload))
    modelserver_process = multiprocessing.Process(name='MODELSERVER', target=modelserver_loader, args=(comm, ))
    update_process = multiprocessing.Process(name='UPDATE', target=update_loader, args=(comm, DO_USE_UI))

    slaves = []
    for i in range(num_processes):
        slave = SlaveThread(comm, i, reload=reload)
        slaves.append(slave)

    # Concolic controllers are managed alongside the slaves; their models
    # are collected for the concolic server.
    concolic_models = []
    for i in range(num_concolic):
        controller = ConcolicController(comm, num_processes, i)
        slaves.append(controller)
        concolic_models.append(controller.model)
    concserv = ConcolicServerThread(comm, num_processes, num_concolic, concolic_models)

    # Startup order matters: communicator and SHM first, then the servers,
    # then the worker threads.
    comm.start()
    comm.create_shm()
    update_process.start()
    time.sleep(.1)
    mapserver_process.start()
    modelserver_process.start()
    concserv.start()
    for slave in slaves:
        slave.start()

    # print('Starting master loop')
    try:
        master.loop()
    except KeyboardInterrupt:
        master.stop()

    print('Saving data')
    # Wait for child processes to properly exit
    mapserver_process.join()
    update_process.join()
    concserv.stop()
    # Properly stop threads
    for slave in slaves:
        slave.stop()
    time.sleep(1)
    # Stop communicator last because Queues may be in used
    comm.stop()
    master.save_data()
    print('Data saved')
def start():
    """Fuzzer entry point (state-reload variant): prepare or reload the
    workspace, start update/mapserver/slave processes and run the master
    loop; saves state on Ctrl-C.

    Returns -1 on failed self-check, 0 on normal exit, 1 on workspace or
    seed errors.
    """
    config = FuzzerConfiguration()

    if not post_self_check(config):
        return -1

    if config.argument_values['v']:
        enable_logging()

    num_processes = config.argument_values['p']

    if config.argument_values['Purge'] and check_if_old_state_exits(config.argument_values['work_dir']):
        print_warning("Old workspace found!")
        if ask_for_permission("PURGE", " to wipe old workspace:"):
            print_warning("Wiping old workspace...")
            prepare_working_dir(config.argument_values['work_dir'], purge=True)
            time.sleep(2)
        else:
            print_fail("Aborting...")
            return 0

    if not check_if_old_state_exits(config.argument_values['work_dir']):
        if not prepare_working_dir(config.argument_values['work_dir'], purge=True):
            # Message typo fixed ("weired" -> "weird").
            print_fail("Working directory is weird or corrupted...")
            return 1
        if not copy_seed_files(config.argument_values['work_dir'], config.argument_values['seed_dir']):
            print_fail("Seed directory is empty...")
            return 1
        config.save_data()
    else:
        log_core("Old state exist -> loading...")
        config.load_data()

    comm = Communicator(num_processes=num_processes,
                        tasks_per_requests=config.argument_values['t'],
                        bitmap_size=config.config_values["BITMAP_SHM_SIZE"])
    comm.create_shm()

    # Constructed for its side effects (shared Qemu lookup state).
    qlookup = QemuLookupSet()

    master = MasterProcess(comm)

    update_process = multiprocessing.Process(name='UPDATE', target=update_loader, args=(comm, ))
    mapserver_process = multiprocessing.Process(name='MAPSERVER', target=mapserver_loader, args=(comm, ))

    slaves = []
    for i in range(num_processes):
        slaves.append(multiprocessing.Process(name='SLAVE' + str(i), target=slave_loader, args=(comm, i)))
        slaves[i].start()

    update_process.start()
    mapserver_process.start()

    try:
        master.loop()
    except KeyboardInterrupt:
        master.save_data()
        # Message typo fixed ("Date saved!" -> "Data saved!").
        log_core("Data saved!")

    # Ignore further Ctrl-C while waiting for slaves to wind down.
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    counter = 0
    print_pre_exit_msg(counter, clrscr=True)
    for slave in slaves:
        while True:
            counter += 1
            print_pre_exit_msg(counter)
            slave.join(timeout=0.25)
            if not slave.is_alive():
                break
    print_exit_msg()
    return 0
def generate_traces(config, input_list):
    """Replay every input through a single Qemu instance in trace mode and
    store each PT trace as an lz4 file under <input>/traces/.

    Returns the trace directory path, or None on configuration/launch
    errors. Skips generation entirely if traces/ already exists.
    """
    work_dir = config.argument_values['work_dir']
    data_dir = config.argument_values["input"]
    trace_dir = data_dir + "/traces/"

    if data_dir == work_dir:
        print_note("Workdir must be separate from input/data dir. Aborting.")
        return None

    prepare_working_dir(config)

    if os.path.exists(trace_dir):
        print_note("Input data_dir already has a traces/ subdir. Skipping trace generation..\n")
        return trace_dir

    # real deal. delete trace dir if it exists and (re-)create traces
    shutil.rmtree(trace_dir, ignore_errors=True)
    os.makedirs(trace_dir)

    # TODO What is the effect of not defining a trace region? will it trace?
    if not config.argument_values['ip0']:
        print_warning("No trace region configured!")

    # Bug fix: the path separator was missing ("<work_dir>redqueen_workdir_1337"),
    # so this leftover check could never match the directory actually used below.
    if os.path.exists(work_dir + "/redqueen_workdir_1337"):
        print_fail("Leftover files from 1337 instance. This should not happen.")
        return None

    q = qemu(1337, config, debug_mode=False)
    if not q.start():
        print_fail("Could not start Qemu. Exit.")
        return None

    start = time.time()
    try:
        for input_path, nid, timestamp in input_list:
            print("Processing: %s" % input_path)

            q.set_payload(read_binary_file(input_path))
            exec_res = q.execute_in_trace_mode(timeout_detection=False)

            if not exec_res:
                print_note("Failed to execute input %s. Continuing anyway..." % input_path)
                assert (q.restart())
                continue

            # TODO: reboot by default, persistent by option
            if exec_res.is_crash():
                q.reload()

            with open(work_dir + "/redqueen_workdir_1337/pt_trace_results.txt", 'rb') as f_in:
                with lz4.LZ4FrameFile(trace_dir + os.path.basename(input_path) + ".lz4", 'wb',
                                      compression_level=lz4.COMPRESSIONLEVEL_MINHC) as f_out:
                    shutil.copyfileobj(f_in, f_out)
    finally:
        # The original 'except: raise' was a no-op; Qemu teardown runs
        # either way via finally.
        q.async_exit()

    end = time.time()
    print("Time taken: %.2fs" % (end - start))
    return trace_dir