Example #1
    def load_data(self):
        """
        Method to load an entire master state from JSON file...
        """
        with open(FuzzerConfiguration().argument_values['work_dir'] + "/mapserver.json", 'r') as infile:
            dump = json.load(infile)
            for key, value in dump.iteritems():
                if key == "hash_list" or key == "shadow_map":
                    tmp = set()
                    for e in value:
                        tmp.add(tuple(e))
                    setattr(self, key, tmp)
                elif key == "mapserver_state_obj":
                    tmp = MapserverState()
                    tmp.load_data(value)
                    setattr(self, key, tmp)
                else:
                    setattr(self, key, value)
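
For context, a minimal save-side sketch that this loader implies (not taken from the project; the method name save_data and the assumption that MapserverState can serialize itself to a dict are hypothetical, and a real implementation would restrict the dumped keys to JSON-serializable state):

    def save_data(self):
        """
        Hypothetical counterpart to load_data() above -- a sketch only.
        """
        dump = {}
        for key, value in self.__dict__.items():
            if key == "hash_list" or key == "shadow_map":
                dump[key] = [list(e) for e in value]   # sets of tuples -> lists of lists
            elif key == "mapserver_state_obj":
                dump[key] = value.save_data()          # assumes this returns a JSON-serializable dict
            else:
                dump[key] = value
        with open(FuzzerConfiguration().argument_values['work_dir'] + "/mapserver.json", 'w') as outfile:
            json.dump(dump, outfile)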
Example #2
def main():
    cfg = FuzzerConfiguration(IRPT_CONFIG)
    q = qemu(0, cfg, debug_mode=0)

    if not q.start():
        return

    interface_manager.load(cfg.argument_values['interface'])
    iocode_list = list(interface_manager.get_all_codes())

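    # Send an empty IRP for the first IoControlCode once, then compare every other code's output buffer against that baseline.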
    exec_res = q.send_irp(IRP(iocode_list[0], 0, 0))
    for iocode in iocode_list[1:]:
        q.reload_driver()
        exec_res2 = q.send_irp(IRP(iocode, 0, 0))
        if exec_res.copy_to_array() == exec_res2.copy_to_array():
            print("IoControlCode(%x) == IoControlCode(%x)" %
                  (iocode_list[0], iocode))
        else:
            print("IoControlCode(%x) != IoControlCode(%x)" %
                  (iocode_list[0], iocode))

    q.shutdown()
Example #3
File: master.py Project: buszk/Drifuzz
    def __init__(self, comm, reload=False):
        self.comm = comm
        self.kafl_state = State()
        self.payload = ""

        self.counter = 0
        self.round_counter = 0
        self.start = time.time()
        self.benchmark_time = time.time()
        self.counter_offset = 0
        self.payload_buffer = []
        self.byte_map = []
        self.stage_abortion = False
        self.abortion_counter = 0

        self.mapserver_status_pending = False

        self.skip_zero = False
        self.config = FuzzerConfiguration()
        # self.skip_zero = self.config.argument_values['s']
        # self.refresh_rate = self.config.config_values['UI_REFRESH_RATE']
        # self.use_effector_map = self.config.argument_values['d']
        # self.arith_max = FuzzerConfiguration().config_values["ARITHMETIC_MAX"]
        self.use_effector_map = False
        self.arith_max = 35
        self.refresh_rate = 0.25
        if not self.config.argument_values['D']:
            self.use_effector_map = False

        # self.global_model = GlobalModel(self.config)
        self.concolic_payloads = []

        self.load_old_state = False
        if reload:
            self.load_old_state = True
            self.load_data()

        self._stop_event = threading.Event()
Example #4
File: havoc.py Project: zhang-li/kAFL
def load_dict(file_name):
    f = open(file_name)
    dict_entries = []
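    # Non-comment lines are expected to look like name="value"; keep the unescaped quoted value.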
    for line in f:
        if not line.startswith("#"):
            try:
                dict_entries.append((line.split("=\"")[1].split("\"\n")[0]
                                     ).decode("string_escape"))
            except:
                pass
    f.close()
    return dict_entries


if FuzzerConfiguration().argument_values["I"]:
    set_dict(load_dict(FuzzerConfiguration().argument_values["I"]))
    append_handler(havoc_dict)
    append_handler(havoc_dict)

location_findings = FuzzerConfiguration(
).argument_values['work_dir'] + "/findings/"
location_corpus = FuzzerConfiguration(
).argument_values['work_dir'] + "/corpus/"


def havoc_range(perf_score):

    max_iterations = int(perf_score * 2.5)

    if max_iterations < AFL_HAVOC_MIN:
Example #5
    def __init__(self, level, payload, bitmap, methode, sequence=None,
                 node_state=None, node_type=None, current=False, write_data=True, performance=None, new_byte_count=0, new_bit_count=0):
        global KaflNodeID, KaflCrashID, KaflKASanID, KaflTimeoutID, KaflPreliminaryID

        if methode:
            self.bb_delta = methode.bb_delta
        else:
            self.bb_delta = 0
        self.new_byte_count = new_byte_count
        self.new_bit_count = new_bit_count
        self.level = level
        self.current = current
        self.performance = performance

        self.fav_bits = 0

        if node_state:
            self.node_state = node_state
        else:
            self.node_state = KaflNodeState.untouched
        if node_type:
            self.node_type = node_type
        else:
            self.node_type = KaflNodeType.regular

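        # Pick the node id from the global counter that matches the node type (regular/favorite, crash, kasan, timeout or preliminary).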
        if self.node_type == KaflNodeType.regular or self.node_type == KaflNodeType.favorite:
            self.node_id = KaflNodeID
            KaflNodeID += 1
        elif self.node_type == KaflNodeType.crash:
            self.node_id = KaflCrashID
            KaflCrashID += 1
        elif self.node_type == KaflNodeType.kasan:
            self.node_id = KaflKASanID
            KaflKASanID += 1
        elif self.node_type == KaflNodeType.timeout:
            self.node_id = KaflTimeoutID
            KaflTimeoutID += 1
        elif self.node_type == KaflNodeType.preliminary:
            self.node_id = KaflPreliminaryID
            KaflPreliminaryID += 1
            
        if methode:
            if self.node_type == KaflNodeType.preliminary:
                methode.save_to_file(FuzzerConfiguration().argument_values['work_dir'], self.node_id, preliminary=True)
            elif self.node_type == KaflNodeType.regular or self.node_type == KaflNodeType.favorite:
                methode.save_to_file(FuzzerConfiguration().argument_values['work_dir'], self.node_id, preliminary=False)
            else:
                pass

        self.payload_hash = mmh3.hash(payload)

        self.bits = {}
        if write_data:
            self.__save_payload(payload)
            self.__write_eval_results()
            self.__process_bitmap(bitmap)
            if sequence:
                self.__save_payload_sequence(sequence)

        FILTER = ''.join([(len(repr(chr(x))) == 3) and chr(x) or '.' for x in range(256)])
        self.identifier = ''.join(["%s" % ((ord(x) <= 127 and FILTER[ord(x)]) or '.') for x in payload])
        if len(self.identifier) > 10:
            self.identifier = self.identifier[:10] + " (...)"
        self.payload_len = len(payload)


        if performance is None:
            self.performance = 0.0
            self.fav_factor = 10000000
        else:
            self.fav_factor = self.performance * self.payload_len
Example #6
File: node.py Project: vient/kAFL
    def __get_payload_filename(exit_reason, id):
        workdir = FuzzerConfiguration().argument_values['work_dir']
        filename = "/corpus/%s/payload_%05d" % (exit_reason, id)
        return workdir + filename
Example #7
File: node.py Project: vient/kAFL
    def __get_metadata_filename(id):
        workdir = FuzzerConfiguration().argument_values['work_dir']
        return workdir + "/metadata/node_%05d" % id
Example #8
    return max_iterations


def execute(cmd):
    logger("CMD: " + cmd)
    proc = subprocess.Popen(cmd,
                            stdout=subprocess.PIPE,
                            stderr=None,
                            shell=True)
    return proc


# proc.wait()

location_corpus = FuzzerConfiguration(
).argument_values['work_dir'] + "/corpus/"


def mutate_seq_radamsa_array(data, func, default_info, max_iterations):
    logger("FILES: " + str(len(os.listdir(location_corpus))))
    default_info["method"] = "radamsa"
    files = sorted(glob.glob(location_corpus + "/*/payload_*"))
    last_n = 5
    rand_n = 5
    samples = files[-last_n:] + random.sample(
        files[:-last_n], max(0, min(rand_n,
                                    len(files) - last_n)))
    try:
        if samples:
            proc = execute("./fuzzer/technique/radamsa -o :21337 -n inf " +
                           " ".join(samples))
Example #9
File: node.py Project: vient/kAFL
    def __get_bitmap_filename(self):
        workdir = FuzzerConfiguration().argument_values['work_dir']
        filename = "/bitmaps/payload_%05d.lz4" % (self.get_id())
        return workdir + filename
Example #10
    print "bitmap:", len(bitmap), pop
    print "crash %d, timeout %d, kasan %d" % (q.crashed, q.timeout, q.kasan)
    print
    sys.stdout.flush()
    time.sleep(0.2)


def testFile(fn):
    print fn
    dat = file(fn, 'rb').read()
    return test(dat)


# -----------

config = FuzzerConfiguration()
enable_logging()

start = time.time()
q = qemu(0, config)
q.start()
timediff("start qemu", start)

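# Exercise the harness with canned payloads, soft-reloading the VM after the panic and exit cases.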
testFile('tests/ok')
testFile('tests/ok')
testFile('tests/panic')
q.soft_reload()
testFile('tests/ok')
testFile('tests/exit')
q.soft_reload()
testFile('tests/timeout')
Example #11
File: update.py Project: buszk/Drifuzz
    def __init__(self, comm, use_ui):
        self.comm = comm
        self.config = FuzzerConfiguration()
        self.timeout = self.config.config_values['UI_REFRESH_RATE']
        self.use_ui = use_ui
Example #12
    def __init__(self, seed, enable_graphviz=False, flush=True):
        global KaflNodeID

        self.fuzz_yield = fuzz_yield()

        self.qemu_lookup = QemuLookupSet()

        self.level = 0
        self.max_level = 0
        self.cycles = -1
        self.all_nodes = []
        self.references = {}
        self.current = self.MASTER_NODE_ID
        self.random_shuffled = False

        self.favorite_buf = []
        self.favorite_unfinished_buf = []
        self.regular_buf = []
        self.regular_unfinished_buf = []
        self.finished_buf = []

        self.bitmap_size = FuzzerConfiguration().config_values['BITMAP_SHM_SIZE']
        self.depth_search_first = FuzzerConfiguration().config_values["DEPTH-FIRST-SEARCH"]

        self.buckets = [0x0, 0x1, 0x2, 0x4, 0x8, 0x10, 0x20, 0x40, 0x80]

        self.fav_bitmap = []
        self.fav_bitmap_updated = False

        if FuzzerConfiguration().config_values['MAX_MIN_BUCKETS']:
            self.max_bucket_values = []
            self.backup_max_bucket_values = []
            self.max_bucket_ref = []
            self.next_max_bucket = []
            self.old_pending_node = None
            self.max_min_bucketing_enabled = True
        else:
            self.max_min_bucketing_enabled = False


        self.preliminary_mode = False
        self.preliminary_mode_queue = []

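        # Back each bitmap (coverage, crash, kasan, timeout and their backups) with a file under <work_dir>/bitmaps/, truncate it to BITMAP_SHM_SIZE and mmap it read/write.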
        self.bitmap_fd = os.open(FuzzerConfiguration().argument_values['work_dir'] + "/bitmaps/bitmap", os.O_RDWR | os.O_SYNC | os.O_CREAT)
        self.crash_bitmap_fd = os.open(FuzzerConfiguration().argument_values['work_dir'] + "/bitmaps/crash_bitmap", os.O_RDWR | os.O_SYNC | os.O_CREAT)
        self.kasan_bitmap_fd = os.open(FuzzerConfiguration().argument_values['work_dir'] + "/bitmaps/kasan_bitmap", os.O_RDWR | os.O_SYNC | os.O_CREAT)
        self.timeout_bitmap_fd = os.open(FuzzerConfiguration().argument_values['work_dir'] + "/bitmaps/timeout_bitmap", os.O_RDWR | os.O_SYNC | os.O_CREAT)
        self.backup_bitmap_fd = os.open(FuzzerConfiguration().argument_values['work_dir'] + "/bitmaps/backup_bitmap", os.O_RDWR | os.O_SYNC | os.O_CREAT)
        self.backup_crash_bitmap_fd = os.open(FuzzerConfiguration().argument_values['work_dir'] + "/bitmaps/backup_crash_bitmap", os.O_RDWR | os.O_SYNC | os.O_CREAT)
        self.backup_kasan_bitmap_fd = os.open(FuzzerConfiguration().argument_values['work_dir'] + "/bitmaps/backup_kasan_bitmap", os.O_RDWR | os.O_SYNC | os.O_CREAT)
        self.backup_timeout_bitmap_fd = os.open(FuzzerConfiguration().argument_values['work_dir'] + "/bitmaps/backup_timeout_bitmap", os.O_RDWR | os.O_SYNC | os.O_CREAT)

        os.ftruncate(self.bitmap_fd, FuzzerConfiguration().config_values['BITMAP_SHM_SIZE'])
        os.ftruncate(self.crash_bitmap_fd, FuzzerConfiguration().config_values['BITMAP_SHM_SIZE'])
        os.ftruncate(self.kasan_bitmap_fd, FuzzerConfiguration().config_values['BITMAP_SHM_SIZE'])
        os.ftruncate(self.timeout_bitmap_fd, FuzzerConfiguration().config_values['BITMAP_SHM_SIZE'])
        os.ftruncate(self.backup_bitmap_fd, FuzzerConfiguration().config_values['BITMAP_SHM_SIZE'])
        os.ftruncate(self.backup_crash_bitmap_fd, FuzzerConfiguration().config_values['BITMAP_SHM_SIZE'])
        os.ftruncate(self.backup_kasan_bitmap_fd, FuzzerConfiguration().config_values['BITMAP_SHM_SIZE'])
        os.ftruncate(self.backup_timeout_bitmap_fd, FuzzerConfiguration().config_values['BITMAP_SHM_SIZE'])

        self.bitmap = mmap.mmap(self.bitmap_fd, self.bitmap_size, mmap.MAP_SHARED, mmap.PROT_WRITE | mmap.PROT_READ)
        self.crash_bitmap = mmap.mmap(self.crash_bitmap_fd, self.bitmap_size, mmap.MAP_SHARED, mmap.PROT_WRITE | mmap.PROT_READ)
        self.kasan_bitmap = mmap.mmap(self.kasan_bitmap_fd, self.bitmap_size, mmap.MAP_SHARED, mmap.PROT_WRITE | mmap.PROT_READ)
        self.timeout_bitmap = mmap.mmap(self.timeout_bitmap_fd, self.bitmap_size, mmap.MAP_SHARED, mmap.PROT_WRITE | mmap.PROT_READ)
        self.backup_bitmap = mmap.mmap(self.backup_bitmap_fd, self.bitmap_size, mmap.MAP_SHARED, mmap.PROT_WRITE | mmap.PROT_READ)
        self.backup_crash_bitmap = mmap.mmap(self.backup_crash_bitmap_fd, self.bitmap_size, mmap.MAP_SHARED, mmap.PROT_WRITE | mmap.PROT_READ)
        self.backup_kasan_bitmap = mmap.mmap(self.backup_kasan_bitmap_fd, self.bitmap_size, mmap.MAP_SHARED, mmap.PROT_WRITE | mmap.PROT_READ)
        self.backup_timeout_bitmap = mmap.mmap(self.backup_timeout_bitmap_fd, self.bitmap_size, mmap.MAP_SHARED, mmap.PROT_WRITE | mmap.PROT_READ)

        if flush:
            for i in range(self.bitmap_size):
                self.bitmap[i] = '\x00'
                self.crash_bitmap[i] = '\x00'
                self.kasan_bitmap[i] = '\x00'
                self.timeout_bitmap[i] = '\x00'
        for i in range(self.bitmap_size):
            self.fav_bitmap.append(None)

        if self.max_min_bucketing_enabled:
            for i in range(self.bitmap_size):
                self.max_bucket_values.append(None)
                self.backup_max_bucket_values.append(None)
                self.max_bucket_ref.append(None)

        self.c_bitmap = (c_uint8 * self.bitmap_size).from_buffer(self.bitmap)
        self.c_crash_bitmap = (c_uint8 * self.bitmap_size).from_buffer(self.crash_bitmap)
        self.c_kasan_bitmap = (c_uint8 * self.bitmap_size).from_buffer(self.kasan_bitmap)
        self.c_timeout_bitmap = (c_uint8 * self.bitmap_size).from_buffer(self.timeout_bitmap)

        self.graph = KaflGraph([], enabled=enable_graphviz)
        self.favorites = 0
        self.favorites_in_progress = 0
        self.favorites_finished = 0
        self.paths = 0
        self.paths = len(seed)
        self.paths_in_progress = 0
        self.paths_finished = 0

        self.score_changed = False

        self.payload_hashes = {}

        self.bitmap_native_so = CDLL(os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) + '/native/bitmap.so')

        for payload, bitmap in seed:
            node = KaflNode(self.level, payload, bitmap, None, node_type=KaflNodeType.favorite)
            self.__append_to_level(node)
            self.favorites += 1

        if self.all_nodes:
            self.current = 0 
        self.__restore_state()
        self.__restore_graph()

        self.sort_default = True
        self.ignore_bit_counts = False
Example #13
    def __init__(self, comm):
        self.comm = comm
        self.config = FuzzerConfiguration()
        self.global_model = GlobalModel(self.config)
Example #14
    def __write_eval_results(self):
        with open(FuzzerConfiguration().argument_values['work_dir'] + "/evaluation/findings.csv", 'ab') as f:
            f.write("%s\n" % json.dumps([time.time() - GlobalState().values["inittime"], self.__get_filename()]))
Example #15
    def __check_hash(self, new_hash, bitmap, payload, crash, timeout, kasan,
                     slave_id, reloaded, performance, qid, pos, methode):
        self.ring_buffers[slave_id].append(str(payload))

        if self.preliminary_mode:
            hash_was_new = True

        else:
            hash_was_new = False
            if new_hash != self.last_hash:
                if len(self.hash_list) == 0:
                    hash_was_new = True
                if new_hash not in self.hash_list and new_hash not in self.shadow_map:
                    hash_was_new = True

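        # Crash, kasan and timeout results are deduplicated through the tree map; unique ones also dump the slave's recent-payload ring buffer, while duplicates are still written to findings/non_uniq and logged to findings.csv.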
        if crash or kasan or timeout:
            if crash:
                state_str = "crash"
                node_type = KaflNodeType.crash
            elif kasan:
                state_str = "kasan"
                node_type = KaflNodeType.kasan
            elif timeout:
                state_str = "timeout"
                node_type = KaflNodeType.timeout

            if self.treemap.append(payload,
                                   bitmap,
                                   methode,
                                   node_type=node_type):
                if not self.preliminary_mode:
                    log_mapserver("Unique " + state_str +
                                  " submited by slave #" + str(slave_id) +
                                  " ...")
                    self.__save_ring_buffer(
                        slave_id, self.config.argument_values['work_dir'] +
                        "/rbuf/" + state_str + "_" +
                        str(self.state[state_str + "_unique"]) + ".rbuf")
                    self.state[state_str] += 1
                    self.state[state_str + "_unique"] += 1
                else:
                    self.state["preliminary"] += 1
                    log_mapserver("Unique " + state_str +
                                  " submited by slave #" + str(slave_id) +
                                  " [preliminary]...")
            else:
                if not self.preliminary_mode:
                    self.state[state_str] += 1
                    path = FuzzerConfiguration().argument_values[
                        'work_dir'] + "/findings/non_uniq/" + state_str + "_non_uniq_" + str(
                            self.state[state_str])
                    with open(path, "w") as f:
                        f.write(payload)
                    with open(
                            FuzzerConfiguration().argument_values['work_dir'] +
                            "/evaluation/findings.csv", 'ab') as f:
                        f.write("%s\n" % json.dumps([
                            time.time() - GlobalState().values["inittime"],
                            path
                        ]))

        elif hash_was_new:
            if self.treemap.append(payload,
                                   bitmap,
                                   methode,
                                   performance=performance):
                if not self.preliminary_mode:
                    if methode.get_type() == METHODE_IMPORT:
                        self.state["imports"] += 1
                    self.hash_list.add(new_hash)
                    self.new_findings += 1
                    self.state["last_hash_time"] = time.time()
                    self.__update_state()
                else:
                    self.state["preliminary"] += 1
            else:
                if not self.preliminary_mode:
                    self.shadow_map.add(new_hash)

        if reloaded:
            self.ring_buffers[slave_id].clear()
Example #16
File: tree.py Project: buszk/Drifuzz
    def __init__(self, seed, enable_graphviz=False, flush=True):
        global KaflNodeID

        self.level = 0
        self.max_level = 0
        self.cycles = -1
        self.all_nodes = []
        self.references = {}
        self.current = self.MASTER_NODE_ID
        self.random_shuffled = False

        self.favorite_buf = []
        self.favorite_unfinished_buf = []
        self.regular_buf = []
        self.regular_unfinished_buf = []
        self.finished_buf = []

        self.bitmap_size = FuzzerConfiguration(
        ).config_values['BITMAP_SHM_SIZE']

        self.buckets = [0x0, 0x1, 0x2, 0x4, 0x8, 0x10, 0x20, 0x40, 0x80]

        self.fav_bitmap = []
        self.fav_bitmap_updated = False

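        # Back the coverage, crash, kasan and timeout bitmaps with files directly under work_dir, truncated to BITMAP_SHM_SIZE and mmap'ed read/write.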
        self.bitmap_fd = os.open(
            FuzzerConfiguration().argument_values['work_dir'] + "/bitmap",
            os.O_RDWR | os.O_SYNC | os.O_CREAT)
        self.crash_bitmap_fd = os.open(
            FuzzerConfiguration().argument_values['work_dir'] +
            "/crash_bitmap", os.O_RDWR | os.O_SYNC | os.O_CREAT)
        self.kasan_bitmap_fd = os.open(
            FuzzerConfiguration().argument_values['work_dir'] +
            "/kasan_bitmap", os.O_RDWR | os.O_SYNC | os.O_CREAT)
        self.timeout_bitmap_fd = os.open(
            FuzzerConfiguration().argument_values['work_dir'] +
            "/timeout_bitmap", os.O_RDWR | os.O_SYNC | os.O_CREAT)

        os.ftruncate(self.bitmap_fd,
                     FuzzerConfiguration().config_values['BITMAP_SHM_SIZE'])
        os.ftruncate(self.crash_bitmap_fd,
                     FuzzerConfiguration().config_values['BITMAP_SHM_SIZE'])
        os.ftruncate(self.kasan_bitmap_fd,
                     FuzzerConfiguration().config_values['BITMAP_SHM_SIZE'])
        os.ftruncate(self.timeout_bitmap_fd,
                     FuzzerConfiguration().config_values['BITMAP_SHM_SIZE'])

        self.bitmap = mmap.mmap(self.bitmap_fd, self.bitmap_size,
                                mmap.MAP_SHARED,
                                mmap.PROT_WRITE | mmap.PROT_READ)
        self.crash_bitmap = mmap.mmap(self.crash_bitmap_fd, self.bitmap_size,
                                      mmap.MAP_SHARED,
                                      mmap.PROT_WRITE | mmap.PROT_READ)
        self.kasan_bitmap = mmap.mmap(self.kasan_bitmap_fd, self.bitmap_size,
                                      mmap.MAP_SHARED,
                                      mmap.PROT_WRITE | mmap.PROT_READ)
        self.timeout_bitmap = mmap.mmap(self.timeout_bitmap_fd,
                                        self.bitmap_size, mmap.MAP_SHARED,
                                        mmap.PROT_WRITE | mmap.PROT_READ)

        if flush:
            for i in range(self.bitmap_size):
                self.bitmap[i] = 0
                self.crash_bitmap[i] = 0
                self.kasan_bitmap[i] = 0
                self.timeout_bitmap[i] = 0
        for i in range(self.bitmap_size):
            self.fav_bitmap.append(None)

        self.graph = KaflGraph([], enabled=enable_graphviz)
        self.favorites = 0
        self.favorites_in_progress = 0
        self.favorites_finished = 0
        self.paths = 0
        self.paths = len(seed)
        self.paths_in_progress = 0
        self.paths_finished = 0

        self.score_changed = False

        self.payload_hashes = {}

        for payload, bitmap in seed:
            node = KaflNode(self.level,
                            payload,
                            bitmap,
                            node_type=KaflNodeType.favorite)
            self.__append_to_level(node)
            self.favorites += 1

        if self.all_nodes:
            self.current = 0  #self.__get_ref(self.all_nodes[0])
        self.__restore_state()
        self.__restore_graph()
Example #17
def start():
    config = FuzzerConfiguration()

    if not post_self_check(config):
        return -1

    if config.argument_values['v']:
        enable_logging()

    num_processes = config.argument_values['p']

    if config.argument_values['Purge'] and check_if_old_state_exits(
            config.argument_values['work_dir']):
        print_warning("Old workspace found!")
        if ask_for_permission("PURGE", " to wipe old workspace:"):
            print_warning("Wiping old workspace...")
            prepare_working_dir(config.argument_values['work_dir'], purge=True)
            time.sleep(2)
        else:
            print_fail("Aborting...")
            return 0

    if not check_if_old_state_exits(config.argument_values['work_dir']):
        if not prepare_working_dir(config.argument_values['work_dir'],
                                   purge=True):
            print_fail("Working directory is weired or corrupted...")
            return 1
        if not copy_seed_files(config.argument_values['work_dir'],
                               config.argument_values['seed_dir']):
            print_fail("Seed directory is empty...")
            return 1
        config.save_data()
    else:
        log_core("Old state exist -> loading...")
        config.load_data()

    comm = Communicator(num_processes=num_processes,
                        tasks_per_requests=config.argument_values['t'],
                        bitmap_size=config.config_values["BITMAP_SHM_SIZE"])
    comm.create_shm()

    qlookup = QemuLookupSet()

    master = MasterProcess(comm)

    update_process = multiprocessing.Process(name='UPDATE',
                                             target=update_loader,
                                             args=(comm, ))
    mapserver_process = multiprocessing.Process(name='MAPSERVER',
                                                target=mapserver_loader,
                                                args=(comm, ))

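    # Spawn one slave process per configured worker; the update and mapserver helpers run as separate processes.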
    slaves = []
    for i in range(num_processes):
        slaves.append(
            multiprocessing.Process(name='SLAVE' + str(i),
                                    target=slave_loader,
                                    args=(comm, i)))
        slaves[i].start()

    update_process.start()
    mapserver_process.start()

    try:
        master.loop()
    except KeyboardInterrupt:
        master.save_data()
        log_core("Date saved!")

    signal.signal(signal.SIGINT, signal.SIG_IGN)

    counter = 0
    print_pre_exit_msg(counter, clrscr=True)
    for slave in slaves:
        while True:
            counter += 1
            print_pre_exit_msg(counter)
            slave.join(timeout=0.25)
            if not slave.is_alive():
                break
    print_exit_msg()
    return 0
Example #18
    def save_to_file(self, label):
        workdir = FuzzerConfiguration().argument_values['work_dir']
        filename = "/corpus/%s/payload_%05d" % (label, Program.PayloadCount)
        atomic_write(workdir + filename, self.serialize())
        Program.PayloadCount += 1
Example #19
def main():
    signal.signal(signal.SIGUSR1, handle_pdb)
    print(os.getpid())
    time.sleep(1)

    config = FuzzerConfiguration()
    num_processes = config.argument_values['p']
    num_concolic = config.argument_values['concolic']
    reload = False

    if config.argument_values['Purge'] and check_if_old_state_exits(
            config.argument_values['work_dir']):
        print_warning("Old workspace found!")
        print_warning("Wiping old workspace...")
        prepare_working_dir(config.argument_values['work_dir'],
                            purge=config.argument_values['Purge'])
        time.sleep(2)

    if not check_if_old_state_exits(config.argument_values['work_dir']):
        if not prepare_working_dir(config.argument_values['work_dir'],
                                   purge=config.argument_values['Purge']):
            print_fail("Working directory is weired or corrupted...")
            return 1
        if not copy_seed_files(config.argument_values['work_dir'],
                               config.argument_values['seed_dir']):
            print_fail("Seed directory is empty...")
            return 1
        config.save_data()
    else:
        log_core("Old state exist -> loading...")
        config.load_data()
        reload = True

    DO_USE_UI = (USE_UI and not config.argument_values['verbose']
                 and config.argument_values['f'])
    comm = Communicator(num_processes=num_processes,
                        concolic_thread=num_concolic)
    master = MasterProcess(comm, reload=reload)
    mapserver_process = multiprocessing.Process(name='MAPSERVER',
                                                target=mapserver_loader,
                                                args=(comm, reload))
    modelserver_process = multiprocessing.Process(name='MODELSERVER',
                                                  target=modelserver_loader,
                                                  args=(comm, ))
    update_process = multiprocessing.Process(name='UPDATE',
                                             target=update_loader,
                                             args=(comm, DO_USE_UI))

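    # Fuzzing slaves run as threads; each concolic controller is added alongside them and its model is handed to the concolic server.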
    slaves = []
    for i in range(num_processes):
        slave = SlaveThread(comm, i, reload=reload)
        slaves.append(slave)
    concolic_models = []
    for i in range(num_concolic):
        controller = ConcolicController(comm, num_processes, i)
        slaves.append(controller)
        concolic_models.append(controller.model)

    concserv = ConcolicServerThread(comm, num_processes, num_concolic,
                                    concolic_models)

    comm.start()
    comm.create_shm()

    update_process.start()
    time.sleep(.1)

    mapserver_process.start()
    modelserver_process.start()
    concserv.start()

    for slave in slaves:
        slave.start()

    # print('Starting master loop')
    try:
        master.loop()
    except KeyboardInterrupt:
        master.stop()
        print('Saving data')
        # Wait for child processes to properly exit
        mapserver_process.join()
        update_process.join()
        concserv.stop()

        # Properly stop threads
        for slave in slaves:
            slave.stop()
        time.sleep(1)
        # Stop communicator last because Queues may be in used
        comm.stop()
        master.save_data()
        print('Data saved')
Example #20
    def append(self, payload, bitmap, methode, node_state=None, node_type=None, performance=0.0):
        accepted = False

        new_byte_count = 0
        new_bit_count = 0

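        # In preliminary mode, accepted nodes are only queued; otherwise crashes/kasan/timeouts must have unique bitmaps and regular inputs must contribute new bits without duplicating an existing payload.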
        if self.preliminary_mode:
            found = False
            if node_type >= KaflNodeType.crash:
                if node_type == KaflNodeType.crash:
                    accepted = self.__is_unique_crash(bitmap)
                elif node_type == KaflNodeType.kasan:
                    accepted = self.__is_unique_kasan(bitmap)
                elif node_type == KaflNodeType.timeout:
                    accepted = self.__is_unique_timeout(bitmap)
            else:
                new_byte_count, new_bit_count = self.__are_new_bits_present(bitmap)
                if new_bit_count != 0 and not self.__check_if_duplicate(payload):
                    accepted = True

            if accepted:
                log_tree("new preliminary input found: " + repr(payload[:32]))
                new_node = KaflNode((self.level + 1), payload, bitmap, methode, node_state=KaflNodeState.untouched, node_type=KaflNodeType.preliminary, performance=performance)
                self.preliminary_mode_queue.append(new_node)
                self.graph.append(self.__get_from_ref(self.current), new_node)
                self.draw()
                return True
            return False

        if node_type:
            if node_type >= KaflNodeType.crash:
                if node_type == KaflNodeType.crash:
                    accepted = self.__is_unique_crash(bitmap)
                elif node_type == KaflNodeType.kasan:
                    accepted = self.__is_unique_kasan(bitmap)
                elif node_type == KaflNodeType.timeout:
                    accepted = self.__is_unique_timeout(bitmap)
                if not accepted:
                    return False

        if not accepted:
            new_byte_count, new_bit_count = self.__are_new_bits_present(bitmap)
            found = (new_bit_count != 0)
            if self.__check_if_duplicate(payload):
                return False
            if found:
                if not node_type >= KaflNodeType.crash:
                    self.paths += 1
                accepted = True

        if accepted:
            if self.__check_if_duplicate(payload):
                return False

            log_tree("new input found: " + repr(payload[:32]))
            new_node = KaflNode((self.level + 1), payload, bitmap, methode, node_state=node_state, node_type=node_type, performance=performance, new_byte_count=new_byte_count, new_bit_count=new_bit_count)
            if methode:
                self.fuzz_yield.append_result(methode)
                self.fuzz_yield.write_result(FuzzerConfiguration().argument_values['work_dir'] + "/yield.txt")
            self.__append_to_level(new_node)
            self.graph.append(self.__get_from_ref(self.current), new_node)
            if not node_type >= KaflNodeType.crash:
                self.__check_if_favorite(new_node)
                self.__is_favorite(new_node)
                if self.max_min_bucketing_enabled and self.old_pending_node:
                    self.__is_favorite(self.old_pending_node)
                    self.old_pending_node = None
            self.draw()
            return True
        else:
            return False