Example #1
    def maybe_insert_node(self, payload, bitmap_array, node_struct):
        bitmap = ExecutionResult.bitmap_from_bytearray(
            bitmap_array, node_struct["info"]["exit_reason"],
            node_struct["info"]["performance"])
        bitmap.lut_applied = True  # since we received the bitmap from Worker, the lut was already applied
        backup_data = bitmap.copy_to_array()
        should_store, new_bytes, new_bits = self.bitmap_storage.should_store_in_queue(
            bitmap)
        new_data = bitmap.copy_to_array()
        trace_dump_tmp = node_struct["info"].get("pt_dump", None)
        if should_store:
            node = QueueNode(self.config,
                             payload,
                             bitmap_array,
                             node_struct,
                             write=False)
            node.set_new_bytes(new_bytes, write=False)
            node.set_new_bits(new_bits, write=False)
            self.queue.insert_input(node, bitmap)
            self.store_trace(node, trace_dump_tmp)
            return

        if trace_dump_tmp and os.path.exists(trace_dump_tmp):
            os.remove(trace_dump_tmp)

        if self.config.debug:
            logger.debug(
                "Received duplicate payload with exit=%s, discarding." %
                node_struct["info"]["exit_reason"])
            for i in range(len(bitmap_array)):
                if backup_data[i] != new_data[i]:
                    assert False, "Bitmap mangled at {} {} {}".format(
                        i, repr(backup_data[i]), repr(new_data[i]))
Example #2
    def __perform_redqueen(self, payload, metadata):
        self.stage_update_label("redq_color")

        orig_hash = self.__get_bitmap_hash_robust(payload)
        extension = bytes([207, 117, 130, 107, 183, 200, 143, 154])
        appended_hash = self.__get_bitmap_hash_robust(payload + extension)

        if orig_hash and orig_hash == appended_hash:
            logger.debug("%s Redqueen: Input can be extended" % self)
            payload_array = bytearray(payload + extension)
        else:
            payload_array = bytearray(payload)

        colored_alternatives = self.__perform_coloring(payload_array)
        if colored_alternatives:
            payload_array = colored_alternatives[0]
            assert isinstance(colored_alternatives[0], bytearray), \
                "!! ColoredAlternatives: %s (%s)" % (
                    repr(colored_alternatives[0]), type(colored_alternatives[0]))
        else:
            logger.debug("%s Redqueen: Input is not stable, skipping.." % self)
            return

        self.stage_update_label("redq_trace")
        rq_info = RedqueenInfoGatherer()
        rq_info.make_paths(RedqueenWorkdir(self.worker.pid, self.config))
        rq_info.verbose = False
        for pld in colored_alternatives:
            if self.execute_redqueen(pld):
                rq_info.get_info(pld)

        rq_info.get_proposals()
        self.stage_update_label("redq_mutate")
        rq_info.run_mutate_redqueen(payload_array, self.execute)
Example #3
    def funky_validate(self, data, old_res, trace=False):
        # Validate in persistent mode, tolerating occasional nondeterministic ("funky") results

        validations = 8
        confirmations = 0
        runtime_avg = 0
        trace_round = False

        for _ in range(validations):
            stable, runtime = self.quick_validate(data,
                                                  old_res,
                                                  trace=trace_round)
            if stable:
                confirmations += 1
                runtime_avg += runtime

            if confirmations >= 0.5 * validations:
                trace_round = trace

            if confirmations >= 0.75 * validations:
                # average runtime over the runs that actually confirmed
                return True, runtime_avg / confirmations

        logger.debug(
            "%s Funky input received %d/%d confirmations. Rejecting.." %
            (self, confirmations, validations))
        if self.config.debug:
            self.store_funky(data)
        return False, runtime_avg / max(confirmations, 1)
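The two thresholds above implement the stochastic acceptance: with validations = 8, tracing is switched on once half the runs (4) confirm the result, and the input is accepted early once three quarters (6) confirm. Below is a minimal self-contained sketch of the same accept/trace logic, with a stubbed validator standing in for quick_validate() (the stub and its ~80% stability rate are assumptions for illustration):

import random

def stochastic_validate(run_once, validations=8):
    # run_once(trace) stands in for quick_validate(); returns (stable, runtime)
    confirmations = 0
    runtime_sum = 0.0
    trace_round = False
    for _ in range(validations):
        stable, runtime = run_once(trace_round)
        if stable:
            confirmations += 1
            runtime_sum += runtime
        # enable (expensive) tracing only once the input looks promising
        if confirmations >= 0.5 * validations:
            trace_round = True
        # accept early at 75% confirmations
        if confirmations >= 0.75 * validations:
            return True, runtime_sum / confirmations
    return False, runtime_sum / max(confirmations, 1)

# toy "funky" validator: stable only ~80% of the time
print(stochastic_validate(lambda trace: (random.random() < 0.8, 0.01)))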
Example #4
    def send_next_task(self, conn):
        # Inputs placed to imports/ folder have priority.
        # This can also be used to inject additional seeds at runtime.
        imports = glob.glob(self.config.work_dir + "/imports/*")
        if imports:
            path = imports.pop()
            logger.debug("Importing payload from %s" % path)
            seed = read_binary_file(path)
            os.remove(path)
            return self.comm.send_import(conn, {
                "type": "import",
                "payload": seed
            })
        # Process items from queue..
        node = self.queue.get_next()
        if node:
            return self.comm.send_node(conn, {
                "type": "node",
                "nid": node.get_id()
            })

        # No work in queue. Tell Worker to wait a little or attempt blind fuzzing.
        # If all Workers are waiting, check if we are getting any coverage..
        self.comm.send_busy(conn)
        self.busy_events += 1
        if self.busy_events >= self.config.processes:
            self.busy_events = 0
            main_bitmap = self.bitmap_storage.get_bitmap_for_node_type(
                "regular").c_bitmap
            # use the same signedness as self.empty_hash (computed with signed=False)
            if mmh3.hash(main_bitmap, signed=False) == self.empty_hash:
                logger.warning(
                    "Coverage bitmap is empty?! Check -ip0 or try better seeds."
                )
Example #5
def worker_loader(pid, config):
    def sigterm_handler(signal, frame):
        if worker.q:
            worker.q.async_exit()
        sys.exit(0)

    logger.debug(("Worker-%02d PID: " % pid) + str(os.getpid()))
    # sys.stdout = open("worker_%d.out"%pid, "w")
    #config = FuzzerConfiguration()

    psutil.Process().cpu_affinity([pid + config.cpu_offset])

    connection = ClientConnection(pid, config)

    rand.reseed()

    worker = WorkerTask(pid, config, connection)

    signal.signal(signal.SIGTERM, sigterm_handler)
    os.setpgrp()

    try:
        worker.loop()
    except QemuIOException:
        # TODO: try to restart here if Qemu is really dead?
        pass
    finally:
        if worker.q:
            worker.q.async_exit()
        logger.info("Worker-%02d Exit." % pid)
Example #6
    def __qemu_connect(self):
        # Note: setblocking() disables the timeout! settimeout() will automatically set blocking!
        self.control = socket.socket(socket.AF_UNIX)
        self.control.settimeout(None)
        self.control.setblocking(1)

        # TODO: Don't try forever, set some timeout..
        while True:
            try:
                self.control.connect(self.control_filename)
                break
            except socket.error:
                if self.process.returncode is not None:
                    raise
            logger.debug("Waiting for Qemu connect..")

        self.ijon_shm_f = os.open(self.ijonmap_filename,
                                  os.O_RDWR | os.O_SYNC | os.O_CREAT)
        self.kafl_shm_f = os.open(self.bitmap_filename,
                                  os.O_RDWR | os.O_SYNC | os.O_CREAT)
        self.fs_shm_f = os.open(self.payload_filename,
                                os.O_RDWR | os.O_SYNC | os.O_CREAT)

        os.ftruncate(self.ijon_shm_f, self.ijonmap_size)
        os.ftruncate(self.kafl_shm_f, self.bitmap_size)
        os.ftruncate(self.fs_shm_f, self.payload_size)

        self.kafl_shm = mmap.mmap(self.kafl_shm_f, 0)
        self.c_bitmap = (ctypes.c_uint8 * self.bitmap_size).from_buffer(
            self.kafl_shm)
        self.fs_shm = mmap.mmap(self.fs_shm_f, 0)

        return True
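The open/ftruncate/mmap/from_buffer sequence above is the core mechanism for sharing the coverage bitmap with Qemu: the ctypes array is a zero-copy view into the shared mapping, so Qemu's writes become visible to Python without copying. Here is a minimal standalone sketch of the same pattern against a temporary file (the file and size are stand-ins for the Qemu shm files):

import ctypes
import mmap
import os
import tempfile

BITMAP_SIZE = 64 * 1024  # illustrative size, not kAFL's actual default

with tempfile.NamedTemporaryFile() as f:
    # size the backing file, as __qemu_connect() does via os.ftruncate()
    os.ftruncate(f.fileno(), BITMAP_SIZE)

    # map it and wrap the mapping in a zero-copy ctypes view
    shm = mmap.mmap(f.fileno(), 0)
    c_bitmap = (ctypes.c_uint8 * BITMAP_SIZE).from_buffer(shm)

    # writes through either view are visible in the other
    shm[0] = 0xAB
    assert c_bitmap[0] == 0xAB

    del c_bitmap  # release the exported buffer before closing the mapping
    shm.close()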
Example #7
    def handle_node(self, msg):
        meta_data = QueueNode.get_metadata(self.config.work_dir,
                                           msg["task"]["nid"])
        payload = QueueNode.get_payload(self.config.work_dir, meta_data)

        # fixme: determine globally based on all seen regulars
        t_dyn = self.t_soft + 1.2 * meta_data["info"]["performance"]
        self.q.set_timeout(min(self.t_hard, t_dyn))

        try:
            results, new_payload = self.logic.process_node(payload, meta_data)
        except QemuIOException:
            # mark node as crashing and free it before escalating
            logger.info("Qemu execution failed for node %d." % meta_data["id"])
            results = self.logic.create_update(meta_data["state"],
                                               {"crashing": True})
            self.conn.send_node_abort(meta_data["id"], results)
            raise

        if new_payload:
            default_info = {
                "method": "validate_bits",
                "parent": meta_data["id"]
            }
            if self.validate_bits(new_payload, meta_data, default_info):
                logger.debug(
                    "%s Stage %s found alternative payload for node %d" %
                    (self, meta_data["state"]["name"], meta_data["id"]))
        else:
            logger.warning(
                "%s Alternative payload failed validation - bug in stage %s?"
                % (self, meta_data["state"]["name"]))
        self.conn.send_node_done(meta_data["id"], results, new_payload)
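The dynamic timeout above is a simple linear model: t_dyn = t_soft + 1.2 * performance, capped at t_hard. With an assumed t_soft of 0.01s, t_hard of 4s, and a node averaging 0.05s per execution, the worker would run with min(4, 0.01 + 1.2 * 0.05) = 0.07s, so slow nodes get proportionally more headroom without ever exceeding the hard limit.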
Example #8
    def __qemu_handshake(self):

        self.run_qemu()

        self.qemu_aux_buffer = QemuAuxBuffer(self.qemu_aux_buffer_filename)
        if not self.qemu_aux_buffer.validate_header():
            logger.error("%s Invalid header in qemu_aux_buffer.py. Abort." %
                         self)
            self.async_exit()

        while self.qemu_aux_buffer.get_state() != 3:  # state 3: target entered fuzz mode
            logger.debug("%s Waiting for target to enter fuzz mode.." % self)
            self.run_qemu()
            result = self.qemu_aux_buffer.get_result()
            if result.exec_code == RC.ABORT:
                self.handle_habort()
            if result.exec_code == RC.HPRINTF:
                self.handle_hprintf()

        logger.debug("%s Handshake done." % self)

        # for -R = {0,1}, set reload_mode here just once
        if self.config.reload == 1:
            self.qemu_aux_buffer.set_reload_mode(True)
        else:
            self.qemu_aux_buffer.set_reload_mode(False)
        self.qemu_aux_buffer.set_timeout(self.config.timeout_hard)

        return
Example #9
def perform_extend(payload, old_node, send_handler, max_len):

    num_findings = 0

    # Skip if the payload crashes, is not starved, or is funky (test run yields is_new=True)
    old_res, is_new = send_handler(payload, label="stream_funky")
    if old_res.is_crash() or not old_res.is_starved() or is_new:
        return None

    padding = 128
    upper = max(0, max_len - len(payload))
    lower = 0
    for _ in range(2 * MAX_ROUNDS):
        new_res, is_new = send_handler(payload + bytes(padding),
                                       label="stream_zero")
        #logger.debug("Round: %d, lengths: %d + %d = %d, maxlen=%d, upper=%d, lower=%d" %(
        #    _, len(payload), padding, padding+len(payload), max_len, upper, lower))

        if is_new: num_findings += 1

        if new_res.is_starved():
            lower = padding
        else:
            upper = padding

        #print("stream_extend: upper=%d, lower=%d" % (upper, lower))
        padding = lower + abs(upper - lower) // 2
        if abs(upper - lower) <= 1:
            break

        if (len(payload) + padding > max_len):
            upper = max(0, max_len - len(payload))
            break

    pad_bytes = upper
    logger.debug("stream_extend: detected %d padding bytes" % (pad_bytes))

    if pad_bytes == 0:
        return None

    # run the payload with some colorized padding to potentially trigger the starved code
    for _ in range(MAX_ROUNDS):
        pad_buffer = rand.bytes(pad_bytes)
        _, is_new = send_handler(payload + pad_buffer, label="stream_color")
        if is_new: num_findings += 1

    # check if zero-padded payload is still valid, drop otherwise..
    new_res, is_new = send_handler(payload + bytes(pad_bytes),
                                   label="stream_zero")
    if is_new: num_findings += 1
    if check_trim_still_valid(old_node, old_res, new_res):
        return payload + bytes(pad_bytes)
    else:
        logger.debug(
            "stream_extend: dropped funky NUL padding (len=%d, other finds=%d)"
            % (pad_bytes, num_findings))
        return None
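The loop above is a bisection over the padding length: lower tracks the largest padding known to still leave the target starved, upper the smallest padding known not to (or the remaining-length cap). A minimal sketch of the same search against a synthetic oracle (find_padding and the toy threshold of 37 are illustrative, not kAFL code):

def find_padding(is_starved_with, upper, max_rounds=16):
    # is_starved_with(padding) stands in for executing payload + bytes(padding)
    lower = 0
    padding = min(128, upper)
    for _ in range(max_rounds):
        if is_starved_with(padding):
            lower = padding   # still starved: need more padding
        else:
            upper = padding   # no longer starved: boundary is below
        padding = lower + (upper - lower) // 2
        if upper - lower <= 1:
            break
    return upper

# toy target that stops reporting "starved" once it sees >= 37 extra bytes
print(find_padding(lambda pad: pad < 37, upper=1024))  # -> 37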
Example #10
    def create_limiter_map(self, payload):
        # 1 = byte may be mutated, 0 = byte must be skipped
        limiter_map = bytearray(b"\x01" * len(payload))
        if self.config.afl_skip_range:
            for ignores in self.config.afl_skip_range:
                # clamp each skip range to the payload length
                start = min(ignores[0], len(payload))
                end = min(ignores[1], len(payload))
                logger.debug("AFL ignore-range: %d-%d" % (start, end))
                for i in range(start, end):
                    limiter_map[i] = 0

        return limiter_map
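A quick usage sketch of the clamping behavior above: skip ranges are (start, end) byte offsets and are clipped to the payload length, so an out-of-range entry simply has no effect (make_limiter_map is a standalone stand-in for the method):

def make_limiter_map(length, skip_ranges):
    # 1 = byte may be mutated, 0 = byte is skipped (ranges clamped to length)
    limiter = bytearray(b"\x01" * length)
    for start, end in skip_ranges or []:
        for i in range(min(start, length), min(end, length)):
            limiter[i] = 0
    return limiter

print(make_limiter_map(8, [(2, 4), (6, 100)]).hex())  # -> 0101000001010000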
Example #11
def redqueen_global_config(redq_hammering, redq_do_simple, afl_arith_max):
    global HAMMER_LEA
    global SKIP_SIMPLE
    global AFL_ARITH_MAX

    HAMMER_LEA = redq_hammering
    SKIP_SIMPLE = not redq_do_simple
    AFL_ARITH_MAX = afl_arith_max

    logger.debug("Redqueen config: hammer=%s, skip_simple=%s, arith_max=%s" % (HAMMER_LEA, SKIP_SIMPLE, AFL_ARITH_MAX))
Example #12
def mutate_seq_radamsa_array(data, func, num_inputs):
    # avoid large amounts of temp files in radamsa (use socket I/O option?)
    max_round_inputs = 512
    rounds = math.ceil(num_inputs / max_round_inputs)

    logger.debug("Radamsa: %d inputs in %d rounds.." % (num_inputs, rounds))

    for _ in range(rounds):
        perform_radamsa_round(data, func, min(max_round_inputs, num_inputs))
        num_inputs -= max_round_inputs
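The batching arithmetic is a plain ceiling division: for example, num_inputs = 1300 with max_round_inputs = 512 gives ceil(1300 / 512) = 3 rounds, which process 512, 512 and 276 inputs respectively.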
Example #13
    def handle_busy(self):
        busy_timeout = 4
        kickstart = self.config.kickstart

        if kickstart:
            logger.debug("%s No inputs in queue, attempting kickstart(%d).." %
                         (self, kickstart))
            self.q.set_timeout(self.t_hard)
            self.logic.process_kickstart(kickstart)
        else:
            logger.info("%s No inputs in queue, sleeping %ds.." %
                        (self, busy_timeout))
            time.sleep(busy_timeout)
        self.conn.send_ready()
Example #14
    def handle_import(self, payload, metadata):
        # for funky targets, retry the seed a few times to avoid false negatives
        retries = 1
        if self.config.funky:
            retries = 8

        for _ in range(retries):
            _, is_new = self.execute(payload, label="import")
            if is_new: break

        # Inform user if seed yields no new coverage. This may happen if -ip0 is
        # wrong or the harness is buggy.
        if not is_new:
            logger.debug("%s Imported payload produced no new coverage, skipping.." % self)
Example #15
    def handle_havoc(self, payload, metadata):
        havoc_afl = True
        havoc_splice = True
        havoc_radamsa = self.config.radamsa
        havoc_grimoire = self.config.grimoire
        havoc_redqueen = self.config.redqueen

        # Dict based on RQ learned tokens
        # TODO: AFL only has deterministic dict stage for manual dictionary.
        # However RQ dict and auto-dict actually grow over time. Perhaps
        # create multiple dicts over time and store progress in metadata?
        if havoc_redqueen:
            self.__perform_rq_dict(payload, metadata)

        if havoc_grimoire:
            grimoire_start_time = time.time()
            self.__perform_grimoire(payload, metadata)
            self.grimoire_time += time.time() - grimoire_start_time

        if havoc_radamsa:
            radamsa_start_time = time.time()
            self.__perform_radamsa(payload, metadata)
            self.radamsa_time += time.time() - radamsa_start_time

        if havoc_afl:
            havoc_start_time = time.time()
            self.__perform_havoc(payload, metadata, use_splicing=False)
            self.havoc_time += time.time() - havoc_start_time

        if havoc_splice:
            splice_start_time = time.time()
            self.__perform_havoc(payload, metadata, use_splicing=True)
            self.splice_time += time.time() - splice_start_time

        logger.debug("%s HAVOC times: afl: %.1f, splice: %.1f, grim: %.1f, rdmsa: %.1f"
                     % (self, self.havoc_time, self.splice_time, self.grimoire_time, self.radamsa_time))
Example #16
    def __perform_coloring(self, payload_array):
        logger.debug("%s Redqueen: Initial colorize..." % self)
        orig_hash = self.__get_bitmap_hash_robust(payload_array)
        if orig_hash is None:
            return None

        colored_arrays = []
        for _ in range(FuzzingStateLogic.COLORIZATION_COUNT):
            tmpdata = bytearray(payload_array)
            self.__colorize_payload(orig_hash, tmpdata)
            new_hash = self.__get_bitmap_hash(tmpdata)
            if new_hash is not None and new_hash == orig_hash:
                colored_arrays.append(tmpdata)
            else:
                return None

        colored_arrays.append(payload_array)
        return colored_arrays
Example #17
    def __perform_rq_dict(self, payload_array, metadata):
        rq_dict = havoc.get_redqueen_dict()
        counter = 0
        seen_addr_to_value = havoc.get_redqueen_seen_addr_to_value()
        if len(payload_array) < 256:
            for addr in rq_dict:
                for repl in rq_dict[addr]:
                    if addr in seen_addr_to_value and (
                            len(seen_addr_to_value[addr]) > 32 or repl in seen_addr_to_value[addr]):
                        continue
                    if addr not in seen_addr_to_value:
                        seen_addr_to_value[addr] = set()
                    seen_addr_to_value[addr].add(repl)
                    logger.debug("%s RQ-Dict: attempting %s" % (self, repr(repl)))
                    for apply_dict in [havoc.dict_insert_sequence, havoc.dict_replace_sequence]:
                        for i in range(len(payload_array) - len(repl)):
                            counter += 1
                            mutated = apply_dict(payload_array, repl, i)
                            self.execute(mutated, label="redq_dict")
        logger.debug("%s RedQ-Dict: Have performed %d iters" % (self, counter))
Example #18
    def handle_grimoire_inference(self, payload, metadata):
        grimoire_info = {}

        if not self.config.grimoire:
            return grimoire_info
        if len(metadata["new_bytes"]) <= 0 or len(payload) >= 16384:
            return grimoire_info

        self.stage_update_label("grim_infer")
        start_time = time.time()

        generalized_input = self.grimoire.generalize_input(payload, metadata)

        if generalized_input is None:
            return grimoire_info

        grimoire_info["generalized_input"] = generalized_input

        self.grimoire_inference_time = time.time() - start_time
        logger.debug("%s Grimoire generalization took %d seconds" % (self, self.grimoire_inference_time))
        logger.debug("%s Number of unique generalized inputs: %d" % (self, len(list(self.grimoire.generalized_inputs.keys()))))
        return grimoire_info
Example #19
    def __init__(self, config):
        self.config = config
        self.comm = ServerConnection(self.config)

        self.busy_events = 0
        # hash of an all-zero bitmap, used to detect "no coverage at all"
        self.empty_hash = mmh3.hash(("\x00" * config.bitmap_size),
                                    signed=False)

        self.statistics = ManagerStatistics(config)
        self.queue = InputQueue(self.config, self.statistics)
        self.bitmap_storage = BitmapStorage(config, "main", read_only=False)

        helper_init()

        redqueen_global_config(
            redq_hammering=self.config.redqueen_hammer,
            redq_do_simple=self.config.redqueen_simple,
            afl_arith_max=self.config.afl_arith_max,
        )

        logger.debug("Starting (pid: %d)" % os.getpid())
        with open(self.config.work_dir + "/config", 'wb') as fd:
            fd.write(msgpack.packb(vars(self.config)))
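For reference, the empty_hash computed here is what send_next_task() (Example #4) compares against when all workers report busy. A tiny self-contained sketch of that check, with BITMAP_SIZE as an assumed placeholder for config.bitmap_size; note that both sides must hash with the same signedness:

import mmh3

BITMAP_SIZE = 64 * 1024  # placeholder for config.bitmap_size

empty_hash = mmh3.hash(bytes(BITMAP_SIZE), signed=False)

live_bitmap = bytearray(BITMAP_SIZE)   # all zero: no coverage yet
assert mmh3.hash(bytes(live_bitmap), signed=False) == empty_hash

live_bitmap[42] = 1                    # a single edge was hit
assert mmh3.hash(bytes(live_bitmap), signed=False) != empty_hash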
Example #20
    def loop(self):
        while True:
            for conn, msg in self.comm.wait(self.statistics.plot_thres):
                if msg["type"] == MSG_NODE_DONE:
                    # Worker execution done, update queue item + send new task
                    if msg["node_id"]:
                        self.queue.update_node_results(msg["node_id"],
                                                       msg["results"],
                                                       msg["new_payload"])
                    self.send_next_task(conn)
                elif msg["type"] == MSG_NODE_ABORT:
                    # Worker execution aborted, update queue item + DONT send new task
                    if msg["node_id"]:
                        self.queue.update_node_results(msg["node_id"],
                                                       msg["results"], None)
                elif msg["type"] == MSG_NEW_INPUT:
                    # Worker reports new interesting input
                    if self.config.debug:
                        logger.debug("Received new input (exit=%s): %s" %
                                     (msg["input"]["info"]["exit_reason"],
                                      repr(msg["input"]["payload"][:24])))
                    node_struct = {
                        "info": msg["input"]["info"],
                        "state": {
                            "name": "initial"
                        }
                    }
                    self.maybe_insert_node(msg["input"]["payload"],
                                           msg["input"]["bitmap"], node_struct)
                elif msg["type"] == MSG_READY:
                    # Initial Worker hello, send first task...
                    # logger.debug("Worker is ready..")
                    self.send_next_task(conn)
                else:
                    raise ValueError("unknown message type {}".format(msg))
            self.statistics.maybe_write_stats()
            self.check_abort_condition()
Example #21
    def handle_initial(self, payload, metadata):
        time_initial_start = time.time()

        if self.config.trace_cb:
            self.stage_update_label("trace")
            self.worker.trace_payload(payload, metadata)

        self.stage_update_label("calibrate")
        # Update input performance using multiple randomized executions
        # Scheduler will de-prioritize execution of very slow nodes..
        num_execs = 10
        timer_start = time.time()
        havoc.mutate_seq_havoc_array(payload, self.execute, num_execs)
        timer_end = time.time()
        self.performance = (timer_end-timer_start) / num_execs

        # Trimming only for stable + non-crashing inputs
        if metadata["info"]["exit_reason"] != "regular": #  or metadata["info"]["stable"]:
            logger.debug("%s Validate: Skip trimming.." % self)
            return None

        if metadata['info']['starved']:
            return trim.perform_extend(payload, metadata, self.execute, self.worker.payload_limit)

        new_payload = trim.perform_trim(payload, metadata, self.execute)

        center_trim = True
        if center_trim:
            new_payload = trim.perform_center_trim(new_payload, metadata, self.execute)

        self.initial_time += time.time() - time_initial_start
        if new_payload == payload:
            return None
        #logger.debug("before trim:\t\t{}".format(repr(payload)), self)
        #logger.debug("after trim:\t\t{}".format(repr(new_payload)), self)
        return new_payload
Example #22
    def execute(self, data, info, hard_timeout=False):

        if len(data) > self.payload_limit:
            data = data[:self.payload_limit]

        exec_res = self.__execute(data)
        self.statistics.event_exec(bb_cov=self.q.bb_seen)

        is_new_input = self.bitmap_storage.should_send_to_manager(
            exec_res, exec_res.exit_reason)
        crash = exec_res.is_crash()
        stable = False

        # -trace_cb causes slower execution and a different bitmap computation.
        # If both -trace and -trace_cb are provided, we must delay tracing to the calibration stage.
        trace_pt = self.config.trace and not self.config.trace_cb

        # store crashes and any validated new behavior
        # do not validate timeouts and crashes at this point as they tend to be nondeterministic
        if is_new_input:
            if not crash:
                assert exec_res.is_lut_applied()

                if self.config.funky:
                    stable, runtime = self.funky_validate(data,
                                                          exec_res,
                                                          trace=trace_pt)
                    exec_res.performance = runtime
                else:
                    stable, runtime = self.quick_validate(data,
                                                          exec_res,
                                                          trace=trace_pt)
                    exec_res.performance = (exec_res.performance + runtime) / 2

                if trace_pt and stable:
                    trace_in = "%s/pt_trace_dump_%d" % (self.config.work_dir,
                                                        self.pid)
                    if os.path.exists(trace_in):
                        with tempfile.NamedTemporaryFile(
                                delete=False,
                                dir=self.config.work_dir + "/traces") as f:
                            shutil.move(trace_in, f.name)
                            info['pt_dump'] = f.name
                if not stable:
                    # TODO: auto-throttle persistent runs based on funky rate?
                    logger.debug(
                        "%s Input validation failed! Target funky?.." % self)
                    self.statistics.event_funky()
            if exec_res.exit_reason == "timeout" and not hard_timeout:
                # Re-run the payload with the max timeout. This can be quite slow,
                # so we only do it if the prior run found new edges or t_check=True.
                # t_dyn grows over time and eventually admits slower inputs up to the max timeout.
                maybe_new_regular = self.bitmap_storage.should_send_to_manager(
                    exec_res, "regular")
                if self.t_check or maybe_new_regular:
                    dyn_timeout = self.q.get_timeout()
                    self.q.set_timeout(self.t_hard)
                    # if still new, register the payload as regular or (true) timeout
                    exec_res, is_new = self.execute(data,
                                                    info,
                                                    hard_timeout=True)
                    self.q.set_timeout(dyn_timeout)
                    if is_new and exec_res.exit_reason != "timeout":
                        logger.debug(
                            "Timeout checker found non-timeout with runtime %f >= %f!"
                            % (exec_res.performance, dyn_timeout))
                    else:
                        # we uselessly spent time re-validating a soft-timeout;
                        # log it so the user can adjust soft-timeout handling
                        self.statistics.event_reload("slow")
                    # sub-call to execute() has submitted the payload if relevant, so we can just return its result here
                    return exec_res, is_new

            if crash and self.config.log_crashes:
                self.__store_crashlogs(exec_res.exit_reason)

            if crash or stable:
                self.__send_to_manager(data, exec_res, info)

        # restart Qemu on crash
        if crash:
            self.statistics.event_reload(exec_res.exit_reason)
            self.q.reload()

        return exec_res, is_new_input
Example #23
    def handle_deterministic(self, payload, metadata):
        if self.config.afl_dumb_mode:
            return False, {}

        skip_zero = self.config.afl_skip_zero
        arith_max = self.config.afl_arith_max
        use_effector_map = not self.config.afl_no_effector and len(payload) > 128
        limiter_map = self.create_limiter_map(payload)
        effector_map = None

        # Mutable payload allows faster bitwise manipulations
        payload_array = bytearray(payload)
        
        default_info = {"stage": "flip_1"}
        det_info = metadata.get("afl_det_info", default_info)

        # Walking bitflips
        if det_info["stage"] == "flip_1":
            bitflip.mutate_seq_walking_bits(payload_array,      self.execute, skip_null=skip_zero, effector_map=limiter_map)
            bitflip.mutate_seq_two_walking_bits(payload_array,  self.execute, skip_null=skip_zero, effector_map=limiter_map)
            bitflip.mutate_seq_four_walking_bits(payload_array, self.execute, skip_null=skip_zero, effector_map=limiter_map)

            det_info["stage"] = "flip_8"
            if self.stage_timeout_reached():
                return True, det_info

        # Walking byte sets..
        if det_info["stage"] == "flip_8":
            # Generate AFL-style effector map based on walking_bytes()
            if use_effector_map:
                logger.debug("%s Preparing effector map.." % self)
                effector_map = bytearray(limiter_map)

            bitflip.mutate_seq_walking_byte(payload_array, self.execute, skip_null=skip_zero, limiter_map=limiter_map, effector_map=effector_map)

            if use_effector_map:
                self.dilate_effector_map(effector_map, limiter_map)
            else:
                effector_map = limiter_map

            bitflip.mutate_seq_two_walking_bytes(payload_array,  self.execute, effector_map=effector_map)
            bitflip.mutate_seq_four_walking_bytes(payload_array, self.execute, effector_map=effector_map)

            det_info["stage"] = "arith"
            if effector_map:
                det_info["eff_map"] = bytearray(effector_map)
            if self.stage_timeout_reached():
                return True, det_info

        # Arithmetic mutations..
        if det_info["stage"] == "arith":
            effector_map = det_info.get("eff_map", None)
            arithmetic.mutate_seq_8_bit_arithmetic(payload_array,  self.execute, skip_null=skip_zero, effector_map=effector_map, arith_max=arith_max)
            arithmetic.mutate_seq_16_bit_arithmetic(payload_array, self.execute, skip_null=skip_zero, effector_map=effector_map, arith_max=arith_max)
            arithmetic.mutate_seq_32_bit_arithmetic(payload_array, self.execute, skip_null=skip_zero, effector_map=effector_map, arith_max=arith_max)

            det_info["stage"] = "intr"
            if self.stage_timeout_reached():
                return True, det_info

        # Interesting value mutations..
        if det_info["stage"] == "intr":
            effector_map = det_info.get("eff_map", None)
            interesting_values.mutate_seq_8_bit_interesting(payload_array, self.execute, skip_null=skip_zero, effector_map=effector_map)
            interesting_values.mutate_seq_16_bit_interesting(payload_array, self.execute, skip_null=skip_zero, effector_map=effector_map, arith_max=arith_max)
            interesting_values.mutate_seq_32_bit_interesting(payload_array, self.execute, skip_null=skip_zero, effector_map=effector_map, arith_max=arith_max)

            det_info["stage"] = "done"

        return False, det_info
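The det_info["stage"] marker is what makes this pipeline resumable across scheduling rounds: a return of (True, det_info) means "time budget exhausted, persist this and continue later", while (False, det_info) means the deterministic stages ran to completion. A minimal sketch of a caller driving such a resumable stage machine (the driver and its names are illustrative, not kAFL's actual scheduler):

def run_deterministic_until_done(handler, payload, metadata):
    # re-invoke the staged handler until it reports completion, persisting
    # the returned det_info in the node metadata between rounds
    while True:
        interrupted, det_info = handler(payload, metadata)
        metadata["afl_det_info"] = det_info
        if not interrupted:  # reached stage "done" (or dumb mode)
            return det_info
        # ... in a real scheduler, yield here and resume the node later ...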