Example #1
0
    def send_payload(self):
        """Run the current payload in Qemu and collect the execution result.

        Loops on run_qemu() until the guest signals exec_done, servicing
        hprintf/abort events and guest page-fault page requests in between.
        Returns an ExecutionResult built from the shared-memory coverage
        bitmap and the measured wall-clock runtime.
        """

        # Honor a pending shutdown request before touching Qemu.
        if self.exiting:
            sys.exit(0)

        # for -R > 1, count and toggle reload_mode at runtime
        if self.config.reload > 1:
            self.persistent_runs += 1
            if self.persistent_runs == 1:
                self.qemu_aux_buffer.set_reload_mode(False)
            if self.persistent_runs >= self.config.reload:
                self.qemu_aux_buffer.set_reload_mode(True)
                self.persistent_runs = 0

        result = None
        old_address = 0  # last page-fault address, to detect repeated faults
        start_time = time.time()

        while True:
            self.run_qemu()

            result = self.qemu_aux_buffer.get_result()

            if result.pt_overflow:
                logger.warn("PT trashed!")

            if result.exec_code == RC.HPRINTF:
                self.handle_hprintf()
                continue

            if result.exec_code == RC.ABORT:
                self.handle_habort()

            if result.exec_done:
                break

            # Guest requested a page dump; a second fault on the same address
            # means the page could not be resolved - give up on this run.
            if result.page_fault:
                if result.page_fault_addr == old_address:
                    logger.error(
                        "%s Failed to resolve page after second execution! Qemu status:\n%s"
                        % (self, str(result._asdict())))
                    break
                old_address = result.page_fault_addr
                self.qemu_aux_buffer.dump_page(result.page_fault_addr)

        # record highest seen BBs
        self.bb_seen = max(self.bb_seen, result.bb_cov)

        #runtime = result.runtime_sec + result.runtime_usec/1000/1000
        res = ExecutionResult(self.c_bitmap, self.bitmap_size,
                              self.exit_reason(result),
                              time.time() - start_time)

        if result.exec_code == RC.STARVED:
            res.starved = True

        #self.audit(res.copy_to_array())
        #self.audit(bytearray(self.c_bitmap))

        return res
Example #2
0
    def handle_node(self, msg):
        """Process one queue-node task received from the Manager.

        Loads node metadata and payload, derives a dynamic timeout from the
        node's recorded performance, runs the stage logic, and reports the
        outcome back via the connection. On Qemu I/O failure the node is
        marked as crashing and the exception is re-raised.
        """
        nid = msg["task"]["nid"]
        meta = QueueNode.get_metadata(self.config.work_dir, nid)
        payload = QueueNode.get_payload(self.config.work_dir, meta)

        # fixme: determine globally based on all seen regulars
        dyn_timeout = self.t_soft + 1.2 * meta["info"]["performance"]
        self.q.set_timeout(min(self.t_hard, dyn_timeout))

        try:
            results, new_payload = self.logic.process_node(payload, meta)
        except QemuIOException:
            # mark node as crashing and free it before escalating
            logger.info("Qemu execution failed for node %d." % meta["id"])
            results = self.logic.create_update(meta["state"],
                                               {"crashing": True})
            self.conn.send_node_abort(meta["id"], results)
            raise

        if new_payload:
            default_info = {"method": "validate_bits", "parent": meta["id"]}
            if self.validate_bits(new_payload, meta, default_info):
                logger.debug(
                    "%s Stage %s found alternative payload for node %d" %
                    (self, meta["state"]["name"], meta["id"]))
            else:
                logger.warn(
                    "%s Provided alternative payload found invalid - bug in stage %s?"
                    % (self, meta["state"]["name"]))
        self.conn.send_node_done(meta["id"], results, new_payload)
Example #3
0
    def send_next_task(self, conn):
        """Hand the next work item to a Worker connection.

        Priority order: payloads dropped into imports/, then regular queue
        nodes, otherwise a busy notification. When all Workers report busy,
        sanity-check that the coverage bitmap is not still empty.
        """
        # Inputs placed to imports/ folder have priority.
        # This can also be used to inject additional seeds at runtime.
        pending = glob.glob(self.config.work_dir + "/imports/*")
        if pending:
            path = pending.pop()
            logger.debug("Importing payload from %s" % path)
            seed = read_binary_file(path)
            os.remove(path)
            task = {"type": "import", "payload": seed}
            return self.comm.send_import(conn, task)

        # Process items from queue..
        node = self.queue.get_next()
        if node:
            return self.comm.send_node(conn, {"type": "node",
                                              "nid": node.get_id()})

        # No work in queue. Tell Worker to wait a little or attempt blind fuzzing.
        # If all Workers are waiting, check if we are getting any coverage..
        self.comm.send_busy(conn)
        self.busy_events += 1
        if self.busy_events >= self.config.processes:
            self.busy_events = 0
            regular = self.bitmap_storage.get_bitmap_for_node_type("regular")
            if mmh3.hash(regular.c_bitmap) == self.empty_hash:
                logger.warn(
                    "Coverage bitmap is empty?! Check -ip0 or try better seeds."
                )
Example #4
0
def generate_traces(config, nproc, input_list):
    """Generate PT trace files for the given inputs using nproc workers.

    Splits input_list into nproc chunks and spawns one
    generate_traces_worker process per chunk. Returns the trace directory
    on success, or None on error/interrupt. Workers are always cleaned up
    via graceful_exit().
    """
    trace_dir = config.input + "/traces/"

    # TODO What is the effect of not defining a trace region? will it trace?
    if not config.ip0:
        logger.warn("No trace region configured!")
        return None

    os.makedirs(trace_dir, exist_ok=True)

    work_queue = list()
    for input_path, nid, _ in input_list:

        # FIXME: should fully separate decode step to decide more flexibly which
        # type of traces to decode all of these can be relevant: runtime 'fuzz',
        # 'cov' (separate kafl_cov) or noise (additional traces generated from
        # noisy targets)
        # pickup existing fuzz_NNNNN.bin or generate them here for decoding
        # (fix: the previous cov_NNNNN assignments were dead stores -
        #  they were unconditionally overwritten by the fuzz_NNNNN names)
        dump_file = "%s/fuzz_%05d.bin.lz4" % (trace_dir, nid)
        trace_file = "%s/fuzz_%05d.lst.lz4" % (trace_dir, nid)
        work_queue.append((input_path, dump_file, trace_file))

    chunksize = ceil(len(work_queue) / nproc)
    offset = 0
    workers = list()

    try:
        for pid in range(nproc):
            sublist = work_queue[offset:offset + chunksize]
            offset += chunksize
            if len(sublist) > 0:
                worker = mp.Process(target=generate_traces_worker,
                                    args=(config, pid, sublist))
                worker.start()
                workers.append(worker)

        # Poll workers; abort the whole run if any worker failed.
        for worker in workers:
            while worker.is_alive():
                time.sleep(2)
            if worker.exitcode != 0:
                return None

    except KeyboardInterrupt:
        logger.info("Received Ctrl-C, closing Workers...")
        return None
    except Exception as e:
        # fix: log the failure instead of silently swallowing it
        logger.error("Trace generation failed: %s" % repr(e))
        return None
    finally:
        graceful_exit(workers)

    return trace_dir
Example #5
0
def qemu_sweep(msg):
    """Warn about any leftover qemu processes owned by the current user."""
    def iter_qemu_pids():
        me = getpass.getuser()
        for proc in psutil.process_iter(['pid', 'name', 'username']):
            info = proc.info
            if info['username'] == me and 'qemu' in info['name']:
                yield info['pid']

    pids = list(iter_qemu_pids())

    if pids:
        logger.warn(msg + " " + repr(pids))
Example #6
0
 def handle_import(self, msg):
     """Execute an import payload received from the Manager.

     Uses the hard timeout since import inputs have no performance record.
     On Qemu I/O failure, notifies the Manager via send_node_abort() and
     re-raises; otherwise signals readiness for the next task.
     """
     meta_data = {"state": {"name": "import"}, "id": 0}
     payload = msg["task"]["payload"]
     self.q.set_timeout(self.t_hard)
     try:
         self.logic.process_import(payload, meta_data)
     except QemuIOException:
         # fix: the format string had a %s placeholder but no argument,
         # which would raise TypeError at log time - supply self
         logger.warn("%s: Execution failure on import." % self)
         self.conn.send_node_abort(None, None)
         raise
     self.conn.send_ready()
Example #7
0
    def _parse_with_config(self, parser):
        """Merge confuse-based config files into argparse defaults and parse.

        Precedence (last wins): package defaults, user config, ./kafl.yaml,
        $KAFL_CONFIG. Options found in the config become argparse defaults
        (and are no longer 'required'); options unknown to argparse are
        dropped. Returns the parsed argparse Namespace.
        """
        config = confuse.Configuration('kafl', modname='kafl_fuzzer')

        # check default config search paths
        config.read(defaults=True, user=True)

        # local / workdir config
        workdir_config = os.path.join(os.getcwd(), 'kafl.yaml')
        if os.path.exists(workdir_config):
            config.set_file(workdir_config, base_for_paths=True)

        # ENV based config
        if 'KAFL_CONFIG' in os.environ:
            config.set_file(os.environ['KAFL_CONFIG'], base_for_paths=True)

        # merge all configs into a flat dictionary, delimiter = ':'
        config_values = FlatDict(config.flatten())
        if 'KAFL_CONFIG_DEBUG' in os.environ:
            print("Options picked up from config: %s" % str(config_values))

        # adopt defaults into parser, fixup 'required' and file/path fields
        for action in parser._actions:
            #print("action: %s" % repr(action))
            if action.dest in config_values:
                if action.type == parse_is_file:
                    action.default = config[action.dest].as_filename()
                elif isinstance(action, argparse._AppendAction):
                    # fix: `assert "<msg>"` asserts a non-empty string and
                    # can never fire - warn explicitly instead
                    logger.warn(
                        "append options are not supported in yaml config - "
                        "ignoring '%s'." % action.dest)
                    #action.default = [config[action.dest].as_str()]
                else:
                    action.default = config[action.dest].get()
                action.required = False
                config_values.pop(action.dest)

        # remove options not defined in argparse (set_defaults() imports everything)
        # fix: iterate over a snapshot - popping entries while iterating the
        # mapping itself raises RuntimeError
        for option in list(config_values):
            if 'KAFL_CONFIG_DEBUG' in os.environ:
                logger.warn("Dropping unrecognized option '%s'." % option)
            config_values.pop(option)

        # allow unrecognized options?
        #parser.set_defaults(**config_values)

        args = parser.parse_args()

        if 'KAFL_CONFIG_DEBUG' in os.environ:
            print("Final parsed args: %s" % repr(args))
        return args
Example #8
0
    def parse_trace_file(trace_file):
        """Parse an lz4-compressed PT trace into basic blocks and edges.

        Returns a dict {'bbs': set of hex address strings, 'edges': dict
        mapping "src,dst" strings to 1}, or None if the file is missing.
        """
        if not os.path.isfile(trace_file):
            logger.warn("Could not find trace file %s, skipping.." %
                        trace_file)
            return None

        bbs = set()
        edges = dict()
        with lz4.LZ4FrameFile(trace_file, 'rb') as f:
            # fix: raw string for the regex - "\d" in a plain string literal
            # is an invalid escape sequence (DeprecationWarning, and a
            # SyntaxWarning/error in newer Python versions)
            for m in re.finditer(r"([\da-f]+),([\da-f]+)", f.read().decode()):
                edges["%s,%s" % (m.group(1), m.group(2))] = 1
                bbs.add(m.group(1))
                bbs.add(m.group(2))

        return {'bbs': bbs, 'edges': edges}
Example #9
0
    def __execute(self, data, retry=0):
        """Send a payload to Qemu, retrying across SHM/socket failures.

        On a communication error, restarts Qemu and recurses with an
        incremented retry counter; gives up after three retries by raising
        QemuIOException.
        """
        try:
            self.q.set_payload(data)
            return self.q.send_payload()
        except (ValueError, BrokenPipeError, ConnectionResetError) as err:
            out_of_retries = retry > 2
            if out_of_retries:
                # TODO if it reliably kills qemu, perhaps log to Manager for harvesting..
                logger.error("%s Aborting due to repeated SHM/socket error." %
                             self)
                raise QemuIOException("Qemu SHM/socket failure.") from err

            logger.warn("%s Qemu SHM/socket error (retry %d)" % (self, retry))
            self.statistics.event_reload("shm/socket error")
            restarted = self.q.restart()
            if not restarted:
                raise QemuIOException("Qemu restart failure.") from err
        # recurse outside the handler so the next attempt is a clean try
        return self.__execute(data, retry=retry + 1)
Example #10
0
def benchmark(config):
    """Measure raw execution throughput for the configured payload.

    Runs a one-second calibration pass, then loops forever printing
    execs/s until interrupted (Ctrl-C). Always shuts down Qemu and prints
    the overall average on exit. Returns 0.
    """
    logger.info("Starting benchmark...")
    payload_file = config.input
    payload = read_binary_file(payload_file)

    q = qemu(1337, config, debug_mode=False)
    q.start()

    # fix: pre-initialize counters so the finally block cannot raise
    # NameError/ZeroDivisionError when interrupted before/while calibrating
    iterations = 0
    rounds = 0
    total = 0
    try:
        q.set_payload(payload)
        res = q.send_payload()

        logger.info("Payload hash: " + str(res.hash()))
        logger.info("Payload exit: " + res.exit_reason)
        logger.info("Calibrating...")

        start = time.time()
        while (time.time() - start < 1):
            q.set_payload(payload)
            q.send_payload()
            iterations += 1

        #logger.info("Calibrate to run at %d execs/s..." % iterations)
        while True:
            start = time.time()
            for _ in range(int(REFRESH*iterations)):
                q.set_payload(payload)
                q.send_payload()
            rounds += 1
            runtime = time.time() - start
            total += runtime
            print(color.FLUSH_LINE + "Performance: %.2f execs/s" % (iterations / runtime), end='\r')
    except KeyboardInterrupt:
        pass
    except Exception as e:
        logger.warn(repr(e))
    finally:
        # only report if at least one measured round completed
        if total > 0:
            print("\nPerformance Average: %.2f execs/s\n" % (rounds*iterations/total))
        q.shutdown()
    return 0
Example #11
0
def vmx_pt_get_addrn(verbose=True):
    """Query KVM-PT for the number of supported PT address ranges.

    Returns 0 if /dev/kvm cannot be opened (KVM-PT not loaded), 1 if the
    kernel does not support multi-range tracing, otherwise the value
    reported by the KVM_VMX_PT_GET_ADDRN ioctl.
    """
    KVMIO = 0xAE
    KVM_VMX_PT_GET_ADDRN = KVMIO << (8) | 0xe9

    try:
        fd = open("/dev/kvm", "wb")
    except OSError:
        # fix: narrowed the bare `except:` - it also swallowed
        # KeyboardInterrupt/SystemExit; only file-open errors are expected
        logger.error("KVM-PT is not loaded!")
        return 0

    try:
        ret = ioctl(fd, KVM_VMX_PT_GET_ADDRN, 0)
    except IOError:
        logger.warn("Kernel does not support multi-range tracing!")
        ret = 1
    finally:
        fd.close()
    return ret
Example #12
0
def test_build():
    """Ensure the native bitmap shared library exists, building it if needed.

    Returns True if the library is present (or was built successfully),
    False if the build failed. Asserts that the library resolves after a
    successful build.
    """
    native_path = os.path.dirname(inspect.getfile(native_pkg))
    bitmap_paths = glob.glob(native_path + "/bitmap*so")

    if len(bitmap_paths) < 1:
        logger.warn("Attempting to build native/bitmap.so ...")

        p = subprocess.Popen(("make -C " + native_path).split(" "),
                             stdout=subprocess.PIPE,
                             stdin=subprocess.PIPE,
                             stderr=subprocess.PIPE)

        # fix: drain the pipes via communicate() - calling wait() with PIPEs
        # can deadlock if make fills the OS pipe buffers
        p.communicate()
        if p.returncode != 0:
            logger.error("Build failed, please check..")
            return False

    bitmap_paths = glob.glob(native_path + "/bitmap*so")
    assert len(bitmap_paths) > 0, "Failed to resolve native bitmap.so library."
    return True
Example #13
0
def start(config):
    """Entry point for debug mode: dispatch to the selected debug action.

    Prepares the work dir, runs the environment self-check, executes the
    action named by config.action, then sweeps for leftover qemu processes.
    Returns 0 on completion, -1 if the self-check fails.
    """
    assert prepare_working_dir(config), "Failed to create work_dir %s" % config.work_dir

    if not post_self_check(config):
        return -1

    work_dir = config.work_dir
    init_logger(config)

    # Without -ip0, Qemu will not active PT tracing and Redqueen will not
    # attempt to handle debug traps. This is a requirement for modes like gdb.
    if not config.ip0:
        logger.warn("No trace region configured! Intel PT disabled!")

    max_execs = config.iterations

    try:
        # TODO: noise, benchmark, trace are working, others untested
        mode = config.action
        if   (mode == "noise"):         debug_non_det(config, max_execs)
        elif (mode == "benchmark"):     benchmark(config)
        elif (mode == "gdb"):           gdb_session(config, qemu_verbose=True)
        elif (mode == "single"):        execute_once(config, qemu_verbose=False)
        elif (mode == "trace"):         debug_execution(config, max_execs)
        elif (mode == "trace-qemu"):    debug_execution(config, max_execs, qemu_verbose=True)
        elif (mode == "printk"):        debug_execution(config, 1, qemu_verbose=True, notifiers=False)
        elif (mode == "redqueen"):      redqueen_dbg(config, qemu_verbose=False)
        elif (mode == "redqueen-qemu"): redqueen_dbg(config, qemu_verbose=True)
        elif (mode == "verify"):        verify_dbg(config, qemu_verbose=True)
        else:
            logger.error("Unknown debug mode. Exit")
        logger.info("Done. Check logs for details.")
    except KeyboardInterrupt:
        logger.info("Received Ctrl-C, aborting...")
    except Exception:
        # fix: bare `raise` instead of `raise e` - preserves the original
        # traceback without appending an extra re-raise frame
        raise

    time.sleep(0.2) # Qemu can take a moment to exit
    qemu_sweep("Any remaining qemu instances should be GC'ed on exit:")

    return 0
Example #14
0
    def debug_payload(self):
        """Run the payload in an endless debug loop, servicing guest events.

        NOTE(review): the while-loop below has no break or return, so the
        trailing logger.info/return lines are unreachable - the loop only
        ends via an exception or a handler that exits the process. Confirm
        whether a break on result.exec_done was intended here.
        """
        # Disable the timeout so the guest can run indefinitely.
        self.set_timeout(0)
        #self.send_payload()
        while True:
            self.run_qemu()
            result = self.qemu_aux_buffer.get_result()
            if result.page_fault:
                logger.warn("Page fault encountered!")
            if result.pt_overflow:
                logger.warn("PT trashed!")
            if result.exec_code == RC.HPRINTF:
                self.handle_hprintf()
                continue
            if result.exec_code == RC.ABORT:
                self.handle_habort()

        logger.info("Result: %s\n" % self.exit_reason(result))
        #self.audit(result)
        return result
Example #15
0
def execute_once(config, qemu_verbose=False, notifiers=True):
    """Execute the configured payload once and report its feedback hash.

    Optionally captures a PT trace (config.trace) and archives it under
    the work dir. Returns 0.
    """
    payload_file = config.input
    null_hash = ExecutionResult.get_null_hash(config.bitmap_size)

    logger.info("Execute payload %s.. " % payload_file)

    q = qemu(1337, config, debug_mode=False, notifiers=notifiers,
             resume=config.resume)
    assert q.start(), "Failed to start Qemu?"


    store_traces = config.trace
    if store_traces:
        trace_out = config.work_dir + "/redqueen_workdir_1337/pt_trace_results.txt"
        trace_dir  = config.work_dir + "/traces/"

    # truncate the payload to what Qemu can accept
    payload = read_binary_file(payload_file)[:q.get_payload_limit()]

    q.set_payload(payload)
    #q.send_payload() ## XXX first run has different trace?!
    if store_traces:
        result = q.execute_in_trace_mode()
    else:
        result = q.send_payload()

    print("Exit reason: %s" % result.exit_reason)

    current_hash = result.hash()
    logger.info("Feedback Hash: " + current_hash)
    if null_hash == current_hash:
        logger.warn("Null hash returned!")

    if store_traces:
        shutil.copyfile(trace_out, trace_dir + "/trace_%s_%s.txt" % (os.path.basename(payload_file),current_hash))

    q.shutdown()
    return 0
Example #16
0
def debug_execution(config, execs, qemu_verbose=False, notifiers=True):
    """Execute one payload `execs` times, logging the feedback hash per run.

    Reloads Qemu on crashing results and reports overall throughput at the
    end. Returns 0.
    """
    logger.info("Starting debug execution...(%d rounds)" % execs)

    null_hash = ExecutionResult.get_null_hash(config.bitmap_size)

    q = qemu(1337, config, debug_mode=True, notifiers=notifiers,
             resume=config.resume)
    assert q.start(), "Failed to start Qemu?"

    payload = read_binary_file(config.input)
    limit = q.get_payload_limit()
    if len(payload) > limit:
        payload = payload[:limit]

    start = time.time()
    for i in range(execs):
        logger.info("Launching payload %d/%d.." % (i+1,execs))
        # re-send the payload buffer only on every third round
        if i % 3 == 0:
            q.set_payload(payload)
        # time.sleep(0.01 * rand.int(0, 9))
        # a = str(q.send_payload())
        # hexdump(a)
        result = q.send_payload()

        current_hash = result.hash()
        logger.info("Feedback Hash: " + current_hash)
        if null_hash == current_hash:
            logger.warn("Null hash returned!")

        if result.is_crash():
            q.reload()

    q.shutdown()
    end = time.time()
    logger.info("Performance: " + str(execs / (end - start)) + "t/s")

    return 0
Example #17
0
def verify_dbg(config, qemu_verbose=False):
    """Verify Redqueen hash patching against the configured input.

    Compares coverage bitmaps with and without checksum patches applied,
    then uses HashFixer to repair checksums in the input and checks the
    fixed payload reproduces the patched bitmap. Returns 0.
    """
    global thread_done

    logger.info("Starting...")

    rq_state = RedqueenState()
    workdir = RedqueenWorkdir(1337)

    # optional ./patches file: one candidate hash address (hex) per line
    if os.path.exists("patches"):
        with open("patches", "r") as f:
            for x in f.readlines():
                rq_state.add_candidate_hash_addr(int(x, 16))
    if not rq_state.get_candidate_hash_addrs():
        logger.warn("No patches configured\nMaybe add ./patches with addresses to patch.")
    else:
        logger.info("OK: got patches %s\n", rq_state.get_candidate_hash_addrs())
    q = qemu(1337, config, debug_mode=True)

    logger.info("using qemu command:\n%s\n" % q.cmd)

    q.start()

    orig_input = read_binary_file(config.input)
    q.set_payload(orig_input)

    # result = q.send_payload()

    # publish candidate addresses to Redqueen's whitelist and patch files
    with open(q.redqueen_workdir.whitelist(), "w") as w:
        with open(q.redqueen_workdir.patches(), "w") as p:
            for addr in rq_state.get_candidate_hash_addrs():
                addr = hex(addr).rstrip("L").lstrip("0x") + "\n"
                w.write(addr)
                p.write(addr)

    logger.info("RUN WITH PATCHING:")
    bmp1 = q.send_payload(apply_patches=True)

    logger.info("\nNOT PATCHING:")
    bmp2 = q.send_payload(apply_patches=False)

    if bmp1 == bmp2:
        logger.warn("Patches don't seem to change anything, are checksums present?")
    else:
        logger.info("OK: bitmaps are distinct")

    q.soft_reload()

    # fix: renamed local 'hash' - it shadowed the builtin hash()
    hash_fixer = HashFixer(q, rq_state)

    logger.info("fixing hashes")
    fixed_payload = hash_fixer.try_fix_data(orig_input)
    if fixed_payload:

        logger.info("%s\n", repr("".join(map(chr, fixed_payload))))

        q.set_payload(fixed_payload)

        bmp3 = q.send_payload(apply_patches=False)

        if bmp1 == bmp3:
            logger.info("CONGRATZ, BITMAPS ARE THE SAME, all cmps fixed\n")
        else:
            logger.warn("After fixing cmps, bitmaps differ\n")
    else:
        logger.error("couldn't fix payload\n")

    # fix: removed dead trailing 'start = time.time()' - value was never used
    return 0
Example #18
0
def debug_non_det(config, max_execs=0):
    """Repeatedly execute one payload and measure feedback-hash stability.

    Counts mismatches against the first observed hash to detect
    non-deterministic coverage. Runs until max_execs executions (0 = run
    forever) or Ctrl-C, printing per-round throughput and noise stats,
    then a summary of all distinct hashes seen. Returns 0.
    """
    logger.info("Starting non-deterministic...")

    delay = 0
    payload_file = config.input
    resume = config.resume
    null_hash = ExecutionResult.get_null_hash(config.bitmap_size)

    assert os.path.isfile(payload_file), "Provided -input argument must be a file."
    assert "ip0" in config, "Must set -ip0 range in order to obtain PT traces."
    payload = read_binary_file(payload_file)

    q = qemu(1337, config, debug_mode=False, resume=resume)
    assert q.start(), "Failed to launch Qemu."

    q.set_timeout(0)

    store_traces = config.trace
    if store_traces:
        trace_out = config.work_dir + "/redqueen_workdir_1337/pt_trace_results.txt"
        trace_dir  = config.work_dir + "/noise/"
        os.makedirs(trace_dir, exist_ok=True)

    payload_limit = q.get_payload_limit()

    if len(payload) > payload_limit:
        payload = payload[:payload_limit]

    hash_value = None
    first_hash = None
    hashes = dict()
    # fix: pre-initialize all stats referenced in the finally block - an
    # early exception/Ctrl-C used to raise NameError there, and total == 0
    # used to raise ZeroDivisionError
    total = 0
    iterations = 1
    hash_mismatch = 0
    noise = 0
    code = color.OKGREEN
    try:
        q.set_payload(payload)

        ## XXX first run has different trace?!
        #if store_traces:
        #    exec_res = q.execute_in_trace_mode()
        #else:
        #    exec_res = q.send_payload()

        time.sleep(delay)

        if store_traces:
            exec_res = q.execute_in_trace_mode()
        else:
            exec_res = q.send_payload()

        first_hash = exec_res.hash()
        hashes[first_hash] = 1

        logger.info("Null Hash:  " + null_hash)
        logger.info("First Hash: " + first_hash)

        if store_traces:
            shutil.copyfile(trace_out, trace_dir + "/trace_%s_%s.txt" % (os.path.basename(payload_file),first_hash))

        time.sleep(delay)
        while max_execs == 0 or iterations <= max_execs:
            start = time.time()
            execs = 0
            while (time.time() - start < REFRESH):
                # restart Qemu every time?
                #q.async_exit()
                #q = qemu(0, config, debug_mode=False, resume=resume)
                #assert q.start(), "Failed to launch Qemu."
                q.set_payload(payload)
                time.sleep(delay)
                if store_traces:
                    exec_res = q.execute_in_trace_mode()
                else:
                    exec_res = q.send_payload()

                if exec_res.is_crash():
                    logger.info("\nExit reason `%s` - restarting..." % exec_res.exit_reason)
                    q.reload()

                time.sleep(delay)
                hash_value = exec_res.hash()
                if hash_value in hashes:
                    hashes[hash_value] = hashes[hash_value] + 1
                else:
                    hashes[hash_value] = 1
                    # archive a trace for each newly-seen hash
                    if store_traces:
                        shutil.copyfile(trace_out, trace_dir + "/trace_%s_%s.txt" % (os.path.basename(payload_file), hash_value))
                if hash_value != first_hash:
                    hash_mismatch += 1
                execs += 1
            runtime = time.time() - start
            total += runtime
            iterations += execs
            noise = hash_mismatch*100/iterations
            code = color.FAIL if (len(hashes) != 1) else color.OKGREEN
            print(color.FLUSH_LINE +
                    "Perf: %7.2f execs/s, Execs: %7d, Mismatches: %s %4d %s, Noise %3d" %
                    (execs / runtime, iterations, code, hash_mismatch, color.ENDC, noise), end='\r')

    except Exception as e:
        logger.warn(repr(e))
    except KeyboardInterrupt:
        pass
    finally:
        # only report if at least one measured round completed
        if total > 0:
            print("\nOverall Perf: %7.2f execs/s, Execs: %7d, Mismatches: %s %4d %s, Noise %3d" %
                    (iterations / total, iterations, code, hash_mismatch, color.ENDC, noise))
        q.shutdown()

    for h in hashes.keys():
        if h == first_hash:
            logger.info("* %s: %03d" % (h, hashes[h]))
        else:
            logger.info("  %s: %03d" % (h, hashes[h]))

    return 0