Example #1
def kafl_workdir_iterator(work_dir):
    input_id_time = list()
    start_time = time.time()
    for stats_file in glob.glob(work_dir + "/worker_stats_*"):
        worker_stats = msgpack.unpackb(read_binary_file(stats_file),
                                       strict_map_key=False)
        start_time = min(start_time, worker_stats['start_time'])

    # enumerate inputs from corpus/ and match against metainfo in metadata/
    # TODO: Tracing crashes/timeouts has minimal overall improvement ~1-2%
    # Probably want to make this optional, and only trace a small sample
    # of non-regular payloads by default?
    for input_file in glob.glob(work_dir + "/corpus/[rck]*/*"):
        input_id = os.path.basename(input_file).replace("payload_", "")
        meta_file = work_dir + "/metadata/node_{}".format(input_id)
        metadata = msgpack.unpackb(read_binary_file(meta_file),
                                   strict_map_key=False)

        seconds = metadata["info"]["time"] - start_time
        nid = metadata["id"]

        input_id_time.append([input_file, nid, seconds])

    return input_id_time
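
All of these snippets come from the kAFL fuzzer and assume a shared helper. A minimal sketch of what read_binary_file presumably does (the real helper may differ in details):

def read_binary_file(filename):
    # read the whole file as raw bytes
    with open(filename, 'rb') as f:
        return f.read()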
Example #2
    def send_next_task(self, conn):
        # Inputs placed to imports/ folder have priority.
        # This can also be used to inject additional seeds at runtime.
        imports = glob.glob(self.config.work_dir + "/imports/*")
        if imports:
            path = imports.pop()
            logger.debug("Importing payload from %s" % path)
            seed = read_binary_file(path)
            os.remove(path)
            return self.comm.send_import(conn, {
                "type": "import",
                "payload": seed
            })
        # Process items from queue..
        node = self.queue.get_next()
        if node:
            return self.comm.send_node(conn, {
                "type": "node",
                "nid": node.get_id()
            })

        # No work in queue. Tell Worker to wait a little or attempt blind fuzzing.
        # If all Workers are waiting, check if we are getting any coverage..
        self.comm.send_busy(conn)
        self.busy_events += 1
        if self.busy_events >= self.config.processes:
            self.busy_events = 0
            main_bitmap = self.bitmap_storage.get_bitmap_for_node_type(
                "regular").c_bitmap
            if mmh3.hash(main_bitmap) == self.empty_hash:
                logger.warn(
                    "Coverage bitmap is empty?! Check -ip0 or try better seeds."
                )
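
As the comment in send_next_task notes, the imports/ folder doubles as a runtime injection point. A hypothetical way to feed an extra seed into a running campaign (work_dir is whatever working directory the fuzzer was started with):

import shutil
# the master picks this up with priority on its next send_next_task()
shutil.copy("extra_seed.bin", work_dir + "/imports/")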
Example #3
def perform_radamsa_round(data, func, num_inputs):
    global corpus_dir
    global input_dir
    global radamsa_path

    last_n = 10
    rand_n = 40
    files = sorted(glob.glob(corpus_dir + "/regular/payload_*"))
    samples = files[-last_n:] + random.sample(
        files[:-last_n], max(0, min(rand_n,
                                    len(files) - last_n)))

    if not samples:
        return

    radamsa_cmd = [
        radamsa_path, "-T",
        str(KAFL_MAX_FILE), "-o", input_dir + "input_%05n", "-n",
        str(num_inputs)
    ] + samples

    try:
        #logger.debug("Radamsa cmd: " + repr(radamsa_cmd))
        p = subprocess.Popen(radamsa_cmd, stdin=subprocess.PIPE, shell=False)

        while True:
            try:
                # repeatedly wait and process an item to update kAFL stats
                for path in os.listdir(input_dir):
                    #logger.debug("Radamsa input %s" % path)
                    func(read_binary_file(input_dir + path))
                    os.remove(input_dir + path)
                p.communicate(timeout=1)
                break
            except subprocess.SubprocessError:
                # timeout expired while radamsa is still running; keep draining
                pass
    except SystemExit:
        # be sure to cleanup on kill signal
        p.terminate()

    # actual processing of generated inputs
    for path in os.listdir(input_dir):
        #logger.debug("Radamsa input %s" % path)
        func(read_binary_file(input_dir + path))
        os.remove(input_dir + path)
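
The try block above drains radamsa's output directory while the child is still running, then does one final drain after it exits. A minimal, self-contained sketch of that pattern (names are illustrative, not from kAFL):

import os
import subprocess

def drain_while_running(proc, out_dir, consume):
    while True:
        # hand each generated file to the consumer, then delete it
        for name in os.listdir(out_dir):
            path = os.path.join(out_dir, name)
            with open(path, 'rb') as f:
                consume(f.read())
            os.remove(path)
        try:
            proc.communicate(timeout=1)  # returns once the child has exited
            return
        except subprocess.TimeoutExpired:
            pass  # child still producing files; keep draining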
Example #4
def benchmark(config):
    logger.info("Starting benchmark...")
    payload_file = config.input
    payload = read_binary_file(payload_file)

    q = qemu(1337, config, debug_mode=False)
    q.start()
    try:
        q.set_payload(payload)
        res = q.send_payload()

        logger.info("Payload hash: " + str(res.hash()))
        logger.info("Payload exit: " + res.exit_reason)
        logger.info("Calibrating...")

        start = time.time()
        iterations = 0
        while (time.time() - start < 1):
            q.set_payload(payload)
            q.send_payload()
            iterations += 1

        #logger.info("Calibrate to run at %d execs/s..." % iterations)
        rounds = 0
        runtime = 0
        total = 0
        while True:
            start = time.time()
            for _ in range(int(REFRESH*iterations)):
                q.set_payload(payload)
                q.send_payload()
            rounds += 1
            runtime = time.time() - start
            total += runtime
            print(color.FLUSH_LINE + "Performance: %.2f execs/s" % (iterations / runtime), end='\r')
    except Exception as e:
        logger.warn(repr(e))
    except KeyboardInterrupt:
        pass
    finally:
        print("\nPerformance Average: %.2f execs/s\n" % (rounds*iterations/total))
        q.shutdown()
    return 0
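
The benchmark calibrates first (counting how many executions fit into one second), then times fixed-size batches of that many executions. A sketch of the same idea, with run_once() standing in for the q.set_payload()/q.send_payload() pair:

import time

def measure(run_once, window=1.0):
    # calibrate: how many runs fit into one window
    start, iterations = time.perf_counter(), 0
    while time.perf_counter() - start < window:
        run_once()
        iterations += 1
    # measure: time one fixed batch and report execs/s
    start = time.perf_counter()
    for _ in range(iterations):
        run_once()
    return iterations / (time.perf_counter() - start)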
Example #5
def gdb_session(config, qemu_verbose=True, notifiers=True):

    #from pprint import pprint
    payload_file = config.input
    resume = config.resume

    config.gdbserver = True
    q = qemu(1337, config, notifiers=notifiers, resume=resume)

    logger.info("Starting Qemu + GDB with payload %s" % payload_file)
    logger.info("Connect with gdb to release guest from reset (localhost:1234)")
    try:
        if q.start():
            q.set_payload(read_binary_file(payload_file))
            result = q.debug_payload()
            logger.info("Thank you for playing.")
            #pprint(result._asdict())
    finally:
        logger.info("Shutting down..")
        q.async_exit()
Example #6
def execute_once(config, qemu_verbose=False, notifiers=True):
    payload_file = config.input
    resume = config.resume
    null_hash = ExecutionResult.get_null_hash(config.bitmap_size)

    logger.info("Execute payload %s.. " % payload_file)

    q = qemu(1337, config, debug_mode=False, notifiers=notifiers, resume=resume)
    assert q.start(), "Failed to start Qemu?"


    store_traces = config.trace
    if store_traces:
        trace_out = config.work_dir + "/redqueen_workdir_1337/pt_trace_results.txt"
        trace_dir  = config.work_dir + "/traces/"

    payload = read_binary_file(payload_file)

    payload_limit = q.get_payload_limit()
    if len(payload) > payload_limit:
        payload = payload[:payload_limit]

    q.set_payload(payload)
    #q.send_payload() ## XXX first run has different trace?!
    if store_traces:
        result = q.execute_in_trace_mode()
    else:
        result = q.send_payload()

    print("Exit reason: %s" % result.exit_reason)

    current_hash = result.hash()
    logger.info("Feedback Hash: " + current_hash)
    if null_hash == current_hash:
        logger.warn("Null hash returned!")

    if store_traces:
        shutil.copyfile(trace_out, trace_dir + "/trace_%s_%s.txt" % (os.path.basename(payload_file),current_hash))

    q.shutdown()
    return 0
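
The null hash flags executions that produced no coverage feedback. Judging from Example #2, which compares mmh3.hash() of the shared bitmap against an empty hash, it is presumably the hash of an all-zero bitmap; a sketch of that idea (the 64 KiB size is an assumption):

import mmh3
null_hash = mmh3.hash(bytes(64 << 10))  # hash of an all-zero 64 KiB bitmap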
Example #7
def havoc_splicing(data, files):
    if len(data) < 2 or files is None:
        return data

    rand.shuffle(files)
    retry_limit = 64

    for file in files[:retry_limit]:
        file_data = read_binary_file(file)
        if len(file_data) < 2:
            continue

        first_diff, last_diff = find_diffs(data, file_data)
        if last_diff < 2 or first_diff == last_diff:
            continue

        split_location = first_diff + rand.int(last_diff - first_diff)
        return data[:split_location] + file_data[split_location:]

    # none of the files are suitable
    return None
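
havoc_splicing relies on a find_diffs() helper that is not shown here. A plausible sketch, returning the first and last byte offsets at which two buffers differ, or (-1, -1) if they never do (an illustration, not the actual kAFL implementation):

def find_diffs(a, b):
    limit = min(len(a), len(b))
    first = next((i for i in range(limit) if a[i] != b[i]), -1)
    if first == -1:
        return -1, -1
    last = limit
    while last > first + 1 and a[last - 1] == b[last - 1]:
        last -= 1
    return first, last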
Example #8
def redqueen_dbg(config, qemu_verbose=False):
    global thread_done
    logger.info("Starting Redqueen debug...")

    q = qemu(1337, config, debug_mode=True)
    q.start()
    payload = read_binary_file(config.input)
    # q.set_payload(payload)

    if os.path.exists("patches"):
        shutil.copyfile("patches", "/tmp/redqueen_workdir_1337/redqueen_patches.txt")

    start = time.time()

    thread = Thread(target=lambda: redqueen_dbg_thread(q))
    thread.start()
    result = q.execute_in_redqueen_mode(payload, debug_mode=True)
    thread_done = True
    thread.join()
    requeen_print_state(q)
    end = time.time()

    if result:
        logger.info("Execution succeded!")
    else:
        logger.error("Execution failed!")

    logger.info("Time: " + str(end - start) + "t/s")

    results_path = "/tmp/kafl_debug_workdir/redqueen_workdir_1337/redqueen_results.txt"
    with open(results_path) as f:
        num_muts, muts = parser.parse_rq_data(f.read(), payload)
    count = 0
    for offset in muts:
        for lhs in muts[offset]:
            for rhs in muts[offset][lhs]:
                count += 1
                logger.info("%s %s %s", offset, lhs, rhs)
    logger.info("Total: %d", count)

    return 0
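
The triple loop above implies the structure parse_rq_data returns: a dict mapping offset -> {lhs: candidate rhs values}. A hypothetical instance, purely for illustration:

muts = {
    0x10: {b"MAGIC": [b"MAGIX", b"magic"]},  # lhs/rhs candidates seen at offset 0x10
}
for offset in muts:
    for lhs in muts[offset]:
        for rhs in muts[offset][lhs]:
            print(offset, lhs, rhs)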
Example #9
def debug_execution(config, execs, qemu_verbose=False, notifiers=True):
    logger.info("Starting debug execution...(%d rounds)" % execs)

    payload_file = config.input
    resume = config.resume
    null_hash = ExecutionResult.get_null_hash(config.bitmap_size)

    q = qemu(1337, config, debug_mode=True, notifiers=notifiers, resume=resume)
    assert q.start(), "Failed to start Qemu?"

    payload = read_binary_file(payload_file)
    payload_limit = q.get_payload_limit()

    if len(payload) > payload_limit:
        payload = payload[:payload_limit]

    start = time.time()
    for i in range(execs):
        logger.info("Launching payload %d/%d.." % (i+1,execs))
        if i % 3 == 0:
            q.set_payload(payload)
        # time.sleep(0.01 * rand.int(0, 9))
        # a = str(q.send_payload())
        # hexdump(a)
        result = q.send_payload()

        current_hash = result.hash()
        logger.info("Feedback Hash: " + current_hash)
        if null_hash == current_hash:
            logger.warn("Null hash returned!")

        if result.is_crash():
            q.reload()

    q.shutdown()
    end = time.time()
    logger.info("Performance: " + str(execs / (end - start)) + "t/s")

    return 0
Example #10
def funky_trace_run(q, input_path, retry=1):
    validations = 12
    confirmations = 0

    payload = read_binary_file(input_path)

    hashes = dict()
    for _ in range(validations):
        res = simple_trace_run(q, payload)
        if not res:
            return None

        # skip crashes and timeouts as they tend to be slow
        if res.is_crash():
            return res

        h = res.hash()
        if h == null_hash:
            continue

        if h in hashes:
            hashes[h] += 1
        else:
            hashes[h] = 1

        # break early if we have a winner, with trace stored to temp file
        if hashes[h] >= 0.5 * validations:
            return res

    #print("Failed to get majority trace (retry=%d)\nHashes: %s\n" % (retry, str(hashes)))

    if retry > 0:
        q.restart()
        time.sleep(1)
        return funky_trace_run(q, input_path, retry=retry - 1)

    return None
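
The voting logic in funky_trace_run can be expressed more compactly with collections.Counter; a sketch under the same assumptions (simple_trace_run and null_hash come from the surrounding module):

from collections import Counter

def majority_trace(q, payload, validations=12):
    counts = Counter()
    for _ in range(validations):
        res = simple_trace_run(q, payload)
        if not res or res.is_crash():
            return res  # None on failure, or the crashing result as-is
        h = res.hash()
        if h != null_hash:
            counts[h] += 1
            if counts[h] >= 0.5 * validations:
                return res  # stable majority found
    return None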
Example #11
    def shutdown(self):
        logger.info("%s Shutting down Qemu after %d execs.." %
                    (self, self.persistent_runs))

        if not self.process:
            # start() has never been called, all files/shm are closed.
            return 0

        # If the Qemu process still exists, try to gracefully read its I/O and SIGTERM it.
        # If still alive, attempt SIGKILL or loop-wait on kill -9.
        output = ""
        try:
            self.process.terminate()
            output = strdump(self.process.communicate(timeout=1)[0],
                             verbatim=True)
        except:
            pass

        if self.process.returncode is None:
            try:
                self.process.kill()
            except:
                pass

        logger.file_log(
            "INFO", "%s exit code: %s" % (self, str(self.process.returncode)))

        if len(output) > 0:
            header = "\n=================<%s Console Output>==================\n" % self
            footer = "====================</Console Output>======================\n"
            logger.file_log("INFO", header + output + footer)

        # on full debug, also include the serial log at point of Qemu exit
        serial_out = strdump(read_binary_file(self.serial_logfile),
                             verbatim=True)
        if len(serial_out) > 0:
            header = "\n=================<%s Serial Output>==================\n" % self
            footer = "====================</Serial Output>======================\n"
            logger.file_log("INFO", header + serial_out + footer)

        try:
            # TODO: exec_res keeps from_buffer() reference to kafl_shm
            self.kafl_shm.close()
        except BufferError as e:
            pass

        try:
            self.fs_shm.close()
        except:
            pass

        try:
            os.close(self.kafl_shm_f)
        except:
            pass

        try:
            os.close(self.fs_shm_f)
        except:
            pass

        for tmp_file in [
                self.qemu_aux_buffer_filename, self.payload_filename,
                self.control_filename, self.ijonmap_filename,
                self.bitmap_filename
        ]:
            try:
                os.remove(tmp_file)
            except:
                pass

        self.redqueen_workdir.rmtree()
        return self.process.returncode
Example #12
def get_payload(workdir, node_struct):
    return read_binary_file(
        QueueNode.__get_payload_filename(
            workdir, node_struct['info']['exit_reason'],
            node_struct['id']))
Example #13
def generate_traces_worker(config, pid, work_queue):

    dump_mode = True
    q = None  # defined before sigterm_handler can fire

    def sigterm_handler(signal, frame):
        if q:
            q.async_exit()
        sys.exit(0)

    pname = mp.current_process().name
    pnum = mp.current_process()._identity[0]

    ptdump_path = config.ptdump_path

    if config.resume:
        # spawn worker in same workdir, picking up snapshot + page_cache
        config.purge = False  # not needed?
        qemu_id = int(pnum)  # get unique qemu ID != {0,1337}
    else:
        # spawn worker in separate workdir, booting a new VM state
        config.work_dir += "_%s" % pname
        config.purge = True  # not needed?
        qemu_id = 1337  # debug instance

    prepare_working_dir(config)

    work_dir = config.work_dir
    trace_dir = config.input + "/traces/"

    signal.signal(signal.SIGTERM, sigterm_handler)
    os.setpgrp()

    # FIXME: really ugly switch between -trace and -dump_pt
    if dump_mode:
        print("Tracing in '-trace' mode..")
        # new dump_pt mode - translate to edge trace in separate step
        config.trace = True
        config.trace_cb = False
    else:
        # traditional -trace mode - more noisy and no bitmap to check
        print("Tracing in legacy '-trace_cb' mode..")
        config.trace = False
        config.trace_cb = True

    q = qemu(qemu_id, config, debug_mode=False)
    if not q.start():
        logger.error("%s: Could not start Qemu. Exit." % pname)
        return None

    pbar = tqdm(total=len(work_queue),
                desc=pname,
                dynamic_ncols=True,
                smoothing=0.1,
                position=pid + 1)

    f = tempfile.NamedTemporaryFile(delete=False)
    tmpfile = f.name
    f.close()

    try:
        for input_path, dump_file, trace_file in work_queue:
            print("\nProcessing %s.." % os.path.basename(input_path))

            if dump_mode:
                # -trace mode (pt dump)
                if not os.path.exists(dump_file):
                    qemu_file = work_dir + "/pt_trace_dump_%d" % qemu_id
                    if simple_trace_run(q, read_binary_file(input_path),
                                        q.send_payload):
                        with open(qemu_file, 'rb') as f_in:
                            with lz4.LZ4FrameFile(
                                    dump_file, 'wb',
                                    compression_level=lz4.COMPRESSIONLEVEL_MINHC) as f_out:
                                shutil.copyfileobj(f_in, f_out)

                if not os.path.exists(trace_file):
                    with tempfile.NamedTemporaryFile(delete=False) as pt_tmp:
                        with lz4.LZ4FrameFile(dump_file, 'rb') as pt_dump_lz4:
                            shutil.copyfileobj(pt_dump_lz4, pt_tmp)
                        pt_tmp.close()

                        cmd = [
                            ptdump_path, work_dir + "/page_cache", pt_tmp.name,
                            tmpfile
                        ]
                        for i in range(2):
                            key = "ip" + str(i)
                            if getattr(config, key, None):
                                range_a = hex(getattr(config, key)[0]).replace(
                                    "L", "")
                                range_b = hex(getattr(config, key)[1]).replace(
                                    "L", "")
                                cmd += [range_a, range_b]

                        try:
                            subprocess.run(cmd, timeout=180)
                            os.unlink(pt_tmp.name)
                        except subprocess.TimeoutExpired as e:
                            print(e)
                            os.unlink(pt_tmp.name)
                            continue

                        with open(tmpfile, 'rb') as f_in:
                            with lz4.LZ4FrameFile(
                                    trace_file, 'wb',
                                    compression_level=lz4.COMPRESSIONLEVEL_MINHC) as f_out:
                                shutil.copyfileobj(f_in, f_out)

            else:
                # -trace_cb mode (libxdc callback)
                if not os.path.exists(trace_file):
                    qemu_file = work_dir + "/redqueen_workdir_%d/pt_trace_results.txt" % qemu_id
                    if simple_trace_run(q, read_binary_file(input_path),
                                        q.send_payload):
                        with open(qemu_file, 'rb') as f_in:
                            with lz4.LZ4FrameFile(
                                    trace_file, 'wb',
                                    compression_level=lz4.COMPRESSIONLEVEL_MINHC) as f_out:
                                shutil.copyfileobj(f_in, f_out)
            pbar.update()
    except Exception:
        q.async_exit()
        raise
    finally:
        os.unlink(tmpfile)
    q.shutdown()
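
In this example lz4 is evidently bound to the lz4.frame module. The compress-to-file pattern repeated three times above could be factored into a helper; a sketch assuming that same binding:

import shutil
import lz4.frame as lz4

def lz4_compress_file(src, dst):
    # stream src into an LZ4 frame at dst using the fast HC level
    with open(src, 'rb') as f_in, \
         lz4.LZ4FrameFile(dst, 'wb',
                          compression_level=lz4.COMPRESSIONLEVEL_MINHC) as f_out:
        shutil.copyfileobj(f_in, f_out)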
Example #14
def __read_msgpack(self, name):
    return msgpack.unpackb(read_binary_file(name), strict_map_key=False)
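
Every unpackb() call in these examples passes strict_map_key=False, presumably because some of the serialized maps use non-string keys, which msgpack-python rejects by default. A minimal round trip demonstrating the flag:

import msgpack

blob = msgpack.packb({1: "node", "info": {"time": 0}})
meta = msgpack.unpackb(blob, strict_map_key=False)  # strict mode would raise on key 1
assert meta[1] == "node"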
Example #15
def __read_payload(self, node_id, exit_reason):
    payload_file = self.workdir + "/corpus/" + exit_reason + "/payload_%05d" % node_id
    return read_binary_file(payload_file)
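
For illustration, the naming convention this helper encodes: node 42 that exited with reason "regular" resolves as follows (hypothetical values):

payload_file = workdir + "/corpus/" + "regular" + "/payload_%05d" % 42
# -> "<workdir>/corpus/regular/payload_00042"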
Example #16
def debug_non_det(config, max_execs=0):
    logger.info("Starting non-deterministic...")

    delay = 0
    payload_file = config.input
    resume = config.resume
    null_hash = ExecutionResult.get_null_hash(config.bitmap_size)

    assert os.path.isfile(payload_file), "Provided -input argument must be a file."
    assert "ip0" in config, "Must set -ip0 range in order to obtain PT traces."
    payload = read_binary_file(payload_file)

    q = qemu(1337, config, debug_mode=False, resume=resume)
    assert q.start(), "Failed to launch Qemu."

    q.set_timeout(0)

    store_traces = config.trace
    if store_traces:
        trace_out = config.work_dir + "/redqueen_workdir_1337/pt_trace_results.txt"
        trace_dir  = config.work_dir + "/noise/"
        os.makedirs(trace_dir, exist_ok=True)

    payload_limit = q.get_payload_limit()

    if len(payload) > payload_limit:
        payload = payload[:payload_limit]

    hash_value = None
    first_hash = None
    hashes = dict()
    # initialize counters up front so the summary in `finally` is safe
    # even if we never complete a measurement round
    total = 0
    iterations = 1
    hash_mismatch = 0
    noise = 0
    code = color.OKGREEN
    try:
        q.set_payload(payload)

        ## XXX first run has different trace?!
        #if store_traces: 
        #    exec_res = q.execute_in_trace_mode()
        #else:
        #    exec_res = q.send_payload()

        time.sleep(delay)

        if store_traces: 
            exec_res = q.execute_in_trace_mode()
        else:
            exec_res = q.send_payload()

        first_hash = exec_res.hash()
        hashes[first_hash] = 1

        logger.info("Null Hash:  " + null_hash)
        logger.info("First Hash: " + first_hash)

        if store_traces:
            shutil.copyfile(trace_out, trace_dir + "/trace_%s_%s.txt" % (os.path.basename(payload_file),first_hash))

        time.sleep(delay)
        while max_execs == 0 or iterations <= max_execs:
            start = time.time()
            execs = 0
            while (time.time() - start < REFRESH):
                # restart Qemu every time?
                #q.async_exit()
                #q = qemu(0, config, debug_mode=False, resume=resume)
                #assert q.start(), "Failed to launch Qemu."
                q.set_payload(payload)
                time.sleep(delay)
                if store_traces: 
                    exec_res = q.execute_in_trace_mode()
                else:
                    exec_res = q.send_payload()

                if exec_res.is_crash():
                    logger.info("\nExit reason `%s` - restarting..." % exec_res.exit_reason)
                    q.reload()

                time.sleep(delay)
                hash_value = exec_res.hash()
                if hash_value in hashes:
                    hashes[hash_value] = hashes[hash_value] + 1
                else:
                    hashes[hash_value] = 1
                    if store_traces:
                        shutil.copyfile(trace_out, trace_dir + "/trace_%s_%s.txt" % (os.path.basename(payload_file), hash_value))
                if hash_value != first_hash:
                    hash_mismatch += 1
                execs += 1
            runtime = time.time() - start
            total += runtime
            iterations += execs
            noise = hash_mismatch*100/iterations
            code = color.FAIL if (len(hashes) != 1) else color.OKGREEN
            print(color.FLUSH_LINE +
                    "Perf: %7.2f execs/s, Execs: %7d, Mismatches: %s %4d %s, Noise %3d" %
                    (execs / runtime, iterations, code, hash_mismatch, color.ENDC, noise), end='\r')

    except Exception as e:
        logger.warn(repr(e))
    except KeyboardInterrupt:
        pass
    finally:
        print("\nOverall Perf: %7.2f execs/s, Execs: %7d, Mismatches: %s %4d %s, Noise %3d" %
                (iterations / total, iterations, code, hash_mismatch, color.ENDC, noise))
        q.shutdown()

    for h in hashes.keys():
        if h == first_hash:
            logger.info("* %s: %03d" % (h, hashes[h]))
        else:
            logger.info("  %s: %03d" % (h, hashes[h]))

    return 0
Example #17
def get_metadata(workdir, node_id):
    return msgpack.unpackb(
        read_binary_file(QueueNode.__get_metadata_filename(workdir, node_id)),
        strict_map_key=False)
Example #18
def verify_dbg(config, qemu_verbose=False):
    global thread_done

    logger.info("Starting...")

    rq_state = RedqueenState()
    workdir = RedqueenWorkdir(1337)

    if os.path.exists("patches"):
        with open("patches", "r") as f:
            for x in f.readlines():
                rq_state.add_candidate_hash_addr(int(x, 16))
    if not rq_state.get_candidate_hash_addrs():
        logger.warn("No patches configured\nMaybe add ./patches with addresses to patch.")
    else:
        logger.info("OK: got patches %s\n", rq_state.get_candidate_hash_addrs())
    q = qemu(1337, config, debug_mode=True)

    logger.info("using qemu command:\n%s\n" % q.cmd)

    q.start()

    orig_input = read_binary_file(config.input)
    q.set_payload(orig_input)

    # result = q.send_payload()

    with open(q.redqueen_workdir.whitelist(), "w") as w:
        with open(q.redqueen_workdir.patches(), "w") as p:
            for addr in rq_state.get_candidate_hash_addrs():
                addr = ("%x" % addr) + "\n"  # hex without "0x"; lstrip("0x") would strip too much
                w.write(addr)
                p.write(addr)

    logger.info("RUN WITH PATCHING:")
    bmp1 = q.send_payload(apply_patches=True)

    logger.info("\nNOT PATCHING:")
    bmp2 = q.send_payload(apply_patches=False)

    if bmp1 == bmp2:
        logger.warn("Patches don't seem to change anything, are checksums present?")
    else:
        logger.info("OK: bitmaps are distinct")

    q.soft_reload()

    hash_fixer = HashFixer(q, rq_state)  # avoid shadowing the builtin hash()

    logger.info("fixing hashes")
    fixed_payload = hash_fixer.try_fix_data(orig_input)
    if fixed_payload:

        logger.info("%s\n", repr("".join(map(chr, fixed_payload))))

        q.set_payload(fixed_payload)

        bmp3 = q.send_payload(apply_patches=False)

        if bmp1 == bmp3:
            logger.info("CONGRATZ, BITMAPS ARE THE SAME, all cmps fixed\n")
        else:
            logger.warn("After fixing cmps, bitmaps differ\n")
    else:
        logger.error("couldn't fix payload\n")

    return 0