Example #1
    def __debug_recv_expect(self, cmd):
        res = ''
        while True:

            res = self.__debug_recv()
            if res in cmd:
                break
            # TODO: the I/O handling here really sucks.
            # Below we are returning OK to set_init_state() in order to silence handshake error message during kafl_info.py.
            # We need to factor out the debug stuff and properly support required vs optional/intermediate control messages...
            elif res == qemu_protocol.INFO:
                break
            elif res is None:
                # Timeout is detected separately in debug_recv(), so we should never get here..
                assert False
            else:
                # Reaching this part typically means there is a bug in the agent or target setup which
                # messes up the expected interaction. Throw an error and kill Qemu. Slave may retry.
                log_qemu(
                    "Error in debug_recv(): Got " + str(res) + ", Expected: " +
                    str(cmd), self.qemu_id)
                print_warning(
                    "Slave %s: Error in debug_recv(): Got %s, Expected: %s" %
                    (self.qemu_id, str(res), str(cmd)))
                self.shutdown()
                raise ConnectionResetError(
                    "Killed Qemu due to protocol error.")
        if res == qemu_protocol.PT_TRASHED:
            log_qemu("PT_TRASHED", self.qemu_id)
            return False
        return True
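
The helper above waits for one required control message, tolerates the optional INFO message, and treats anything else as a protocol violation. A minimal generic sketch of that pattern; the names recv_message, expected and optional are illustrative, not part of the kAFL API:

def recv_expect(recv_message, expected, optional=()):
    # Accept a required or an optional/intermediate control message;
    # anything else is treated as a protocol violation.
    msg = recv_message()
    if msg in expected or msg in optional:
        return msg
    raise ConnectionResetError(
        "Protocol error: got %r, expected one of %r" % (msg, expected))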
Example #2
    def execute_in_redqueen_mode(self, payload):
        log_qemu("Performing redqueen iteration...", self.qemu_id)
        try:
            self.soft_reload()
            self.send_rq_set_light_instrumentation()
            self.send_enable_redqueen()
            self.set_payload(payload)
            self.send_payload(timeout_detection=False)
            if self.exit_reason() != "regular":
                print_warning("RQ execution returned %s", self.exit_reason())
        except Exception as e:
            log_qemu("%s" % traceback.format_exc(), self.qemu_id)
            return False

        #log_qemu("Disabling redqueen mode...", self.qemu_id)
        try:
            self.send_disable_redqueen()
            self.set_payload(payload)
            self.send_payload(timeout_detection=False)
            self.soft_reload()
            if self.exit_reason() != "regular":
                print_warning("RQ execution returned %s", self.exit_reason())
        except Exception as e:
            log_qemu("%s" % traceback.format_exc(), self.qemu_id)
            return False
        return True
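
A possible call site, sketched under the assumption of a started qemu instance q and an input read via read_binary_file (as in the trace-generation example below); this is not verbatim kAFL code:

payload = read_binary_file("corpus/payload_0")   # illustrative input path
if not q.execute_in_redqueen_mode(payload):
    log_qemu("Redqueen iteration failed, restarting Qemu...", q.qemu_id)
    q.restart()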
Example #3
File: core.py Project: vient/kAFL
def start(config):

    prepare_working_dir(config)

    if not post_self_check(config):
        return -1

    # kAFL debug output is redirected to logs as part of -v mode. stdout will only print test/debug results.
    if config.argument_values['v']:
        enable_logging(config.argument_values["work_dir"])

    # Without -ip0, Qemu will not activate PT tracing and Redqueen will not
    # attempt to handle debug traps. This is a requirement for modes like gdb.
    if not config.argument_values['ip0']:
        print_warning("No trace region configured! Intel PT disabled!")

    max_execs = config.argument_values['n']

    try:
        # TODO: noise, benchmark, trace are working, others untested
        mode = config.argument_values['action']
        if (mode == "noise"):
            debug_non_det(config, max_execs)
        elif (mode == "benchmark"):
            benchmark(config)
        elif (mode == "gdb"):
            gdb_session(config, qemu_verbose=True)
        elif (mode == "single"):
            execute_once(config, max_execs)
        elif (mode == "trace"):
            debug_execution(config, max_execs)
        elif (mode == "trace-qemu"):
            debug_execution(config, max_execs, qemu_verbose=True)
        elif (mode == "printk"):
            debug_execution(config, 1, qemu_verbose=True, notifiers=False)
        elif (mode == "redqueen"):
            redqueen_dbg(config, qemu_verbose=False)
        elif (mode == "redqueen-qemu"):
            redqueen_dbg(config, qemu_verbose=True)
        elif (mode == "verify"):
            verify_dbg(config, qemu_verbose=True)
        else:
            print("Unknown debug mode. Exit")
    except Exception as e:
        raise
    finally:
        # cleanup
        #os.system("stty sane")
        for i in range(512):
            if os.path.exists("/tmp/kAFL_printf.txt." + str(i)):
                os.remove("/tmp/kAFL_printf.txt." + str(i))
            else:
                break

        print(
            "\nDone. Check logs for details.\nAny remaining qemu instances should be GC'ed on exit:"
        )
        os.system("pgrep qemu-system")
    return 0
Example #4
def start():
    config = FuzzerConfiguration()

    if not post_self_check(config):
        return -1

    if config.argument_values['v']:
        enable_logging(config.argument_values["work_dir"])

    num_processes = config.argument_values['p']

    if not config.argument_values['Purge']:
        if ask_for_permission("PURGE", " to wipe old workspace:"):
            print_warning("Wiping old workspace...")
            time.sleep(2)
        else:
            print_fail("Aborting...")
            return 0

    prepare_working_dir(config.argument_values['work_dir'])

    if not copy_seed_files(config.argument_values['work_dir'],
                           config.argument_values['seed_dir']):
        print_fail("Seed directory is empty...")
        return 1

    master = MasterProcess(config)

    slaves = []
    for i in range(num_processes):
        print("fuzzing process {}".format(i))
        slaves.append(
            multiprocessing.Process(name='SLAVE' + str(i),
                                    target=slave_loader,
                                    args=(i, )))
        slaves[i].start()

    try:
        master.loop()
    except KeyboardInterrupt:
        pass

    signal.signal(signal.SIGINT, signal.SIG_IGN)

    counter = 0
    # print_pre_exit_msg(counter, clrscr=True)
    for slave in slaves:
        while True:
            counter += 1
            # print_pre_exit_msg(counter)
            slave.join(timeout=0.25)
            if not slave.is_alive():
                break
    # print_exit_msg()
    return 0
Example #5
File: kafl_cov.py Project: vient/kAFL
def generate_traces(config, input_list):

    trace_dir = config.argument_values["input"] + "/traces/"

    # TODO What is the effect of not defining a trace region? will it trace?
    if not config.argument_values['ip0']:
        print_warning("No trace region configured!")
        return None

    start = time.time()

    if not os.path.exists(trace_dir):
        os.makedirs(trace_dir)

    input_files = list()
    for input_path, _, _ in input_list:
        trace_file = trace_dir + os.path.basename(input_path) + ".lz4"
        if os.path.exists(trace_file):
            print("Skip input with existing trace: %s" % input_path)
        else:
            input_files.append(input_path)

    nproc = os.cpu_count()
    chunksize = ceil(len(input_files) / nproc)
    offset = 0
    workers = list()

    try:
        for pid in range(nproc):
            sublist = input_files[offset:offset + chunksize]
            offset += chunksize
            if len(sublist) > 0:
                worker = mp.Process(target=generate_traces_worker,
                                    args=(config, pid, sublist))
                worker.start()
                workers.append(worker)

        for worker in workers:
            while worker.is_alive():
                time.sleep(2)
            if worker.exitcode != 0:
                return None

    except KeyboardInterrupt:
        print_note("Received Ctrl-C, killing slaves...")
        return None
    except Exception:
        return None
    finally:
        graceful_exit(workers)

    end = time.time()
    print("\n\nDone. Time taken: %.2fs\n" % (end - start))
    return trace_dir
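
The chunk size above splits the remaining inputs roughly evenly across the available CPUs; a small self-contained illustration of the same arithmetic with made-up numbers:

from math import ceil

inputs = ["input_%d" % i for i in range(10)]
nproc = 4
chunksize = ceil(len(inputs) / nproc)        # ceil(10/4) == 3
chunks = [inputs[o:o + chunksize] for o in range(0, len(inputs), chunksize)]
assert [len(c) for c in chunks] == [3, 3, 3, 1]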
Example #6
def start(config):

    if not post_self_check(config):
        return -1

    work_dir = config.argument_values["work_dir"]
    seed_dir = config.argument_values["seed_dir"]
    num_slaves = config.argument_values['p']

    if config.argument_values['v']:
        enable_logging(work_dir)

    if not prepare_working_dir(config):
        print_fail(
            "Refuse to operate on existing work directory. Use --purge to override."
        )
        return 1

    if seed_dir and not copy_seed_files(work_dir, seed_dir):
        print_fail("Error when importing seeds. Exit.")
        return 1

    # Without -ip0, Qemu will not activate PT tracing and we turn into a blind fuzzer
    if not config.argument_values['ip0']:
        print_warning("No trace region configured! PT feedback disabled!")

    master = MasterProcess(config)

    slaves = []
    for i in range(num_slaves):
        slaves.append(
            multiprocessing.Process(name="Slave " + str(i),
                                    target=slave_loader,
                                    args=(i, )))
        slaves[i].start()

    try:
        master.loop()
    except KeyboardInterrupt:
        print_note("Received Ctrl-C, killing slaves...")
    except:
        print_fail("Exception in Master. Exiting..")
        print(traceback.format_exc())
    finally:
        graceful_exit(slaves)

    time.sleep(0.2)
    qemu_sweep()
    sys.exit(0)
Example #7
File: slave.py Project: Bl1nnnk/kAFL-1
    def check_funkyness_and_store_trace(self, data):
        global num_funky
        exec_res = self.q.send_payload()
        hash = exec_res.hash()
        trace1 = read_binary_file(self.config.argument_values['work_dir'] + "/pt_trace_dump_%d" % self.slave_id)
        exec_res = self.q.send_payload()
        if hash != exec_res.hash():
            print_warning("Validation identified funky bits, dumping!")
            num_funky += 1
            trace_folder = self.config.argument_values['work_dir'] + "/traces/funky_%d_%d" % (num_funky, self.slave_id)
            os.makedirs(trace_folder)
            atomic_write(trace_folder + "/input", data)
            atomic_write(trace_folder + "/trace_a", trace1)
            trace2 = read_binary_file(self.config.argument_values["work_dir"] + "/pt_trace_dump_%d" % self.slave_id)
            atomic_write(trace_folder + "/trace_b", trace2)
        return exec_res
Example #8
    def __execute(self, data, retry=0):

        try:
            self.q.set_payload(data)
            return self.q.send_payload()
        except (ValueError, BrokenPipeError):
            if retry > 2:
                # TODO if it reliably kills qemu, perhaps log to master for harvesting..
                print_fail("Slave %d aborting due to repeated SHM/socket error. Check logs." % self.slave_id)
                log_slave("Aborting due to repeated SHM/socket error. Payload: %s" % repr(data), self.slave_id)
                raise
            print_warning("SHM/socket error on Slave %d (retry %d)" % (self.slave_id, retry))
            log_slave("SHM/socket error, trying to restart qemu...", self.slave_id)
            self.statistics.event_reload()
            if not self.q.restart():
                raise
        return self.__execute(data, retry=retry+1)
Example #9
    def send_irp(self, irp, retry=0):
        try:
            #log(f"iocode: {hex(irp.IoControlCode)}, payload: {bytes(irp.InBuffer[:0x10])}.., len: {hex(irp.InBufferLength)}", label='IRP')
            self.set_payload(irp)
            return self.send_payload()
        except (ValueError, BrokenPipeError):
            if retry > 2:
                # TODO if it reliably kills qemu, perhaps log to master for harvesting..
                print_fail(
                    "Process aborting due to repeated SHM/socket error. Check logs."
                )
                log_qemu("Aborting due to repeated SHM/socket error",
                         self.qemu_id)
                raise
            print_warning("SHM/socket error on Process (retry %d)" % retry)
            log_qemu("SHM/socket error, trying to restart qemu...",
                     self.qemu_id)
            if not self.restart():
                raise
        return self.send_irp(irp, retry=retry + 1)
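
Both retry helpers above follow the same pattern: attempt the I/O, restart Qemu on SHM/socket errors, and give up after a fixed number of attempts. An iterative sketch of that pattern in isolation; the function names are illustrative, not kAFL API:

def send_with_retries(send_once, restart, max_attempts=3):
    # Retry send_once() on SHM/socket errors, restarting Qemu in between;
    # re-raise once the attempt budget is exhausted or the restart fails.
    for attempt in range(max_attempts):
        try:
            return send_once()
        except (ValueError, BrokenPipeError):
            if attempt == max_attempts - 1 or not restart():
                raise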
Example #10
def main():
    signal.signal(signal.SIGUSR1, handle_pdb)
    print(os.getpid())
    time.sleep(1)

    config = FuzzerConfiguration()
    num_processes = config.argument_values['p']
    num_concolic = config.argument_values['concolic']
    reload = False

    if config.argument_values['Purge'] and check_if_old_state_exits(
            config.argument_values['work_dir']):
        print_warning("Old workspace found!")
        print_warning("Wiping old workspace...")
        prepare_working_dir(config.argument_values['work_dir'],
                            purge=config.argument_values['Purge'])
        time.sleep(2)

    if not check_if_old_state_exits(config.argument_values['work_dir']):
        if not prepare_working_dir(config.argument_values['work_dir'],
                                   purge=config.argument_values['Purge']):
            print_fail("Working directory is weired or corrupted...")
            return 1
        if not copy_seed_files(config.argument_values['work_dir'],
                               config.argument_values['seed_dir']):
            print_fail("Seed directory is empty...")
            return 1
        config.save_data()
    else:
        log_core("Old state exist -> loading...")
        config.load_data()
        reload = True

    DO_USE_UI = (USE_UI and not config.argument_values['verbose']
                 and config.argument_values['f'])
    comm = Communicator(num_processes=num_processes,
                        concolic_thread=num_concolic)
    master = MasterProcess(comm, reload=reload)
    mapserver_process = multiprocessing.Process(name='MAPSERVER',
                                                target=mapserver_loader,
                                                args=(comm, reload))
    modelserver_process = multiprocessing.Process(name='MODELSERVER',
                                                  target=modelserver_loader,
                                                  args=(comm, ))
    update_process = multiprocessing.Process(name='UPDATE',
                                             target=update_loader,
                                             args=(comm, DO_USE_UI))

    slaves = []
    for i in range(num_processes):
        slave = SlaveThread(comm, i, reload=reload)
        slaves.append(slave)
    concolic_models = []
    for i in range(num_concolic):
        controller = ConcolicController(comm, num_processes, i)
        slaves.append(controller)
        concolic_models.append(controller.model)

    concserv = ConcolicServerThread(comm, num_processes, num_concolic,
                                    concolic_models)

    comm.start()
    comm.create_shm()

    update_process.start()
    time.sleep(.1)

    mapserver_process.start()
    modelserver_process.start()
    concserv.start()

    for slave in slaves:
        slave.start()

    # print('Starting master loop')
    try:
        master.loop()
    except KeyboardInterrupt:
        master.stop()
        print('Saving data')
        # Wait for child processes to properly exit
        mapserver_process.join()
        update_process.join()
        concserv.stop()

        # Properly stop threads
        for slave in slaves:
            slave.stop()
        time.sleep(1)
        # Stop communicator last because Queues may be in used
        comm.stop()
        master.save_data()
        print('Data saved')
Example #11
def generate_traces(config, input_list):

    work_dir = config.argument_values['work_dir']
    data_dir = config.argument_values["input"]
    trace_dir = data_dir + "/traces/"

    if data_dir == work_dir:
        print_note("Workdir must be separate from input/data dir. Aborting.")
        return None

    prepare_working_dir(config)

    if os.path.exists(trace_dir):
        print_note(
            "Input data_dir already has a traces/ subdir. Skipping trace generation..\n"
        )
        return trace_dir

    # real deal. delete trace dir if it exists and (re-)create traces
    shutil.rmtree(trace_dir, ignore_errors=True)
    os.makedirs(trace_dir)

    # TODO What is the effect of not defining a trace region? will it trace?
    if not config.argument_values['ip0']:
        print_warning("No trace region configured!")

    if os.path.exists(work_dir + "/redqueen_workdir_1337"):
        print_fail(
            "Leftover files from 1337 instance. This should not happen.")
        return None

    q = qemu(1337, config, debug_mode=False)
    if not q.start():
        print_fail("Could not start Qemu. Exit.")
        return None

    start = time.time()

    try:
        for input_path, nid, timestamp in input_list:
            print("Processing: %s" % input_path)

            q.set_payload(read_binary_file(input_path))
            exec_res = q.execute_in_trace_mode(timeout_detection=False)

            if not exec_res:
                print_note("Failed to execute input %s. Continuing anyway..." %
                           input_path)
                assert (q.restart())
                continue

            # TODO: reboot by default, persistent by option
            if exec_res.is_crash():
                q.reload()

            with open(work_dir + "/redqueen_workdir_1337/pt_trace_results.txt",
                      'rb') as f_in:
                with lz4.LZ4FrameFile(
                        trace_dir + os.path.basename(input_path) + ".lz4",
                        'wb',
                        compression_level=lz4.COMPRESSIONLEVEL_MINHC) as f_out:
                    shutil.copyfileobj(f_in, f_out)

    except:
        raise
    finally:
        q.async_exit()

    end = time.time()
    print("Time taken: %.2fs" % (end - start))
    return trace_dir
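
The traces written above are LZ4 frame files; a short sketch of reading one back for inspection, assuming lz4.frame is imported as lz4 (as the snippet implies) and using an illustrative file name:

import lz4.frame as lz4

with lz4.LZ4FrameFile("traces/payload_0.lz4", 'rb') as f:
    raw_trace = f.read()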
Example #12
File: core.py Project: ufwt/kAFL
def qemu_sweep():
    pids = pgrep.pgrep("qemu")

    if (len(pids) > 0):
        print_warning("Detected potential qemu zombies, please kill -9: " + repr(pids))
Example #13
def start():
    config = FuzzerConfiguration()

    if not post_self_check(config):
        return -1

    if config.argument_values['v']:
        enable_logging()

    num_processes = config.argument_values['p']

    if config.argument_values['Purge'] and check_if_old_state_exits(
            config.argument_values['work_dir']):
        print_warning("Old workspace found!")
        if ask_for_permission("PURGE", " to wipe old workspace:"):
            print_warning("Wiping old workspace...")
            prepare_working_dir(config.argument_values['work_dir'], purge=True)
            time.sleep(2)
        else:
            print_fail("Aborting...")
            return 0

    if not check_if_old_state_exits(config.argument_values['work_dir']):
        if not prepare_working_dir(config.argument_values['work_dir'],
                                   purge=True):
            print_fail("Working directory is weired or corrupted...")
            return 1
        if not copy_seed_files(config.argument_values['work_dir'],
                               config.argument_values['seed_dir']):
            print_fail("Seed directory is empty...")
            return 1
        config.save_data()
    else:
        log_core("Old state exist -> loading...")
        config.load_data()

    comm = Communicator(num_processes=num_processes,
                        tasks_per_requests=config.argument_values['t'],
                        bitmap_size=config.config_values["BITMAP_SHM_SIZE"])
    comm.create_shm()

    qlookup = QemuLookupSet()

    master = MasterProcess(comm)

    update_process = multiprocessing.Process(name='UPDATE',
                                             target=update_loader,
                                             args=(comm, ))
    mapserver_process = multiprocessing.Process(name='MAPSERVER',
                                                target=mapserver_loader,
                                                args=(comm, ))

    slaves = []
    for i in range(num_processes):
        slaves.append(
            multiprocessing.Process(name='SLAVE' + str(i),
                                    target=slave_loader,
                                    args=(comm, i)))
        slaves[i].start()

    update_process.start()
    mapserver_process.start()

    try:
        master.loop()
    except KeyboardInterrupt:
        master.save_data()
        log_core("Date saved!")

    signal.signal(signal.SIGINT, signal.SIG_IGN)

    counter = 0
    print_pre_exit_msg(counter, clrscr=True)
    for slave in slaves:
        while True:
            counter += 1
            print_pre_exit_msg(counter)
            slave.join(timeout=0.25)
            if not slave.is_alive():
                break
    print_exit_msg()
    return 0