def fetch_payload(self):
    """Block until a fuzzing payload is available and return it.

    Returns None when the slave is being stopped. On the first call after
    the VM came up, the master is notified via KAFL_TAG_START. In
    reproduce mode the payload is read from the reproduce file instead of
    the payload buffer.
    """
    if self.stopped():
        return None
    if not self.vm_ready:
        # First fetch after VM boot: announce readiness to the master.
        self.vm_ready = True
        send_msg(KAFL_TAG_START, self.q.qemu_id, self.comm.to_master_queue, source=self.slave_id)
    if self.reproduce and self.reproduce != "":
        # Reproduce mode: always replay the recorded payload file.
        with open(self.reproduce, 'rb') as infile:
            return infile.read()
    # Poll the payload semaphore with a short timeout so a stop request
    # is noticed promptly instead of blocking forever.
    while not self.stopped():
        if self.payload_sem.acquire(timeout=0.1):
            break
    else:
        # Loop exited without acquiring the semaphore -> we were stopped.
        return None
    payload = self.payload
    # print(len(payload))
    assert (self.state != SlaveState.WAITING)
    self.start_time = time.time()
    log_slave("fetch_payload", self.slave_id)
    return payload
def __add_new_hash(self, new_hash, bitmap, payload, performance):
    """Register a payload whose bitmap hash has not been seen before.

    Tries to append the payload to the treemap; on success the hash is
    remembered and the payload is forwarded to the concolic server for
    further input generation. Rejected hashes go to the shadow map so
    they are not re-evaluated later.

    Args:
        new_hash: bitmap hash of the new execution.
        bitmap: coverage bitmap of the execution.
        payload: the input bytes that produced the bitmap.
        performance: execution performance metric passed to the treemap.

    Returns:
        True if the treemap accepted the payload, False otherwise.
    """
    # NOTE: a disabled sampling/stability re-check (re-running the payload
    # in sampling mode before appending) used to be commented out here via
    # a dead string literal; it has been removed as dead code.
    accepted = self.treemap.append(payload, bitmap, performance=performance)
    if accepted:
        self.hash_list.add(new_hash)
        # Hand interesting inputs to the concolic execution server.
        send_msg(DRIFUZZ_NEW_INPUT, payload, self.comm.to_concolicserver_queue)
    else:
        self.shadow_map.add(new_hash)
    return accepted
def __effector_sync_handler(self):
    """Deliver the effector map to the master once the sync round completes.

    Returns True when the map was sent (round complete or stage already
    aborted), False while results are still outstanding.
    """
    log_mapserver("__effector_sync_handler: " + str(self.round_counter_effector_sync) + " / " + str(self.round_counter))
    round_complete = self.round_counter_effector_sync == self.round_counter
    if not (round_complete or self.abortion_alredy_sent):
        return False
    send_msg(KAFL_TAG_GET_EFFECTOR, self.effector_map, self.comm.to_master_from_mapserver_queue)
    return True
def __pre_sync_handler(self):
    """Report the number of untouched tree nodes once the round is complete.

    Returns True when the count was sent to the master, False otherwise.
    """
    log_mapserver("__pre_sync_handler: " + str(self.round_counter) + " / " + str(self.round_counter_master_pre))
    if self.round_counter != self.round_counter_master_pre:  # or self.abortion_alredy_sent:
        return False
    untouched = self.treemap.get_num_of_untouched_nodes()
    send_msg(KAFL_TAG_UNTOUCHED_NODES, untouched, self.comm.to_master_from_mapserver_queue)
    return True
def req_dma_idx(self, key, size, cnt):
    """Ask the model server for the DMA index of (key, size, cnt).

    Waits up to five seconds for the reply (signalled via idx_sem);
    returns 0 on timeout.
    """
    request = (key, size, cnt)
    send_msg(DRIFUZZ_REQ_DMA_IDX, request,
             self.comm.to_modelserver_queue, source=self.slave_id)
    if not self.idx_sem.acquire(timeout=5):
        # Model server did not answer in time; fall back to index 0.
        log_concolicserver('Req dma index: timeout')
        # self.stop()
        return 0
    return self.idx
def loop(self):
    """Model-server main loop: answer read/DMA index queries forever.

    Each request carries (key, size, cnt); the reply echoes the request
    tag together with (key, resolved_index) back to the asking slave.
    """
    known_tags = (DRIFUZZ_REQ_READ_IDX, DRIFUZZ_REQ_DMA_IDX)
    while True:
        msg = recv_msg(self.comm.to_modelserver_queue)
        if msg.tag not in known_tags:
            # Unknown tag: drop the message and keep serving.
            continue
        if msg.tag == DRIFUZZ_REQ_READ_IDX:
            resolved = self.global_model.get_read_idx(*msg.data)
        else:
            resolved = self.global_model.get_dma_idx(*msg.data)
        key = msg.data[0]
        send_msg(msg.tag, (key, resolved), self.comm.to_slave_queues[msg.source])
def loop(self):
    """Slave main loop: boot the VM, request work, then service IPC messages."""
    self.start_vm()
    # Announce readiness and ask the master for the first payload.
    send_msg(KAFL_TAG_REQ, self.q.qemu_id, self.comm.to_master_queue, source=self.slave_id)
    # Dispatch inter-process protocol messages until asked to stop.
    while not self.stopped():
        self.interprocess_proto_handler()
def req_read_idx(self, key, size, cnt):
    """Look up the read index for (key, size, cnt).

    Answers locally when an in-process global model is attached;
    otherwise queries the model server and waits up to five seconds for
    the reply. Returns 0 on timeout.
    """
    if self.globalmodel:
        # Fast path: resolve from the local model, no IPC round-trip.
        return self.globalmodel.get_read_idx(key, size, cnt)
    send_msg(DRIFUZZ_REQ_READ_IDX, (key, size, cnt),
             self.comm.to_modelserver_queue, source=self.slave_id)
    got_reply = self.idx_sem.acquire(timeout=5)
    if got_reply:
        return self.idx
    log_slave('Req read index: timeout', self.slave_id)
    print(key, " ", size, " ", cnt)
    # self.stop()
    return 0
def restart_vm(self, reuse=False):
    """Request a restart of this slave's QEMU instance.

    Args:
        reuse: when True, keep the current payload armed so it is
            executed again after the restart.

    Returns:
        True when the restart was requested, False when the slave is
        terminating (or exiting due to reproduce mode).
    """
    log_slave(f"restarting vm reuse={reuse}", self.slave_id)
    if not reuse and self.exit_if_reproduce():
        # Fixed: was a bare `return` (None); return False so every path
        # yields a bool (both are falsy, so callers are unaffected).
        return False
    # Consume the idx_sem if it is released
    self.idx_sem.acquire(blocking=False)
    # Avoid blocking the socket thread: let the slave thread start the VM.
    send_msg(DRIFUZZ_START_QEMU, None, self.comm.to_slave_queues[self.slave_id])
    if self.comm.slave_termination.value:
        return False
    # Reuse self.payload
    if reuse:
        log_slave(f"release payload in restart_vm", self.slave_id)
        # Re-arm the payload semaphore: drain first so it is released at most once.
        self.payload_sem.acquire(blocking=False)
        self.payload_sem.release()
    return True
def __post_sync_handler(self):
    """Once every slave has reported, pick the next tree node for the master.

    Returns True after the next node was dispatched, False while results
    are still outstanding.
    """
    if self.round_counter_master_post == self.round_counter:  #or self.abortion_alredy_sent:
        # Master's tag selects whether unfinished nodes may be returned.
        if self.post_sync_master_tag == KAFL_TAG_NXT_UNFIN:
            data = self.treemap.get_next(self.performance, finished=False)
        else:
            data = self.treemap.get_next(self.performance, finished=True)
        self.__update_state()
        self.mapserver_state_obj.level = data.level + 1
        state = data.node_state
        #data = data.load_payload()
        #data = state
        # NOTE(review): both in_progress and finished nodes are echoed under
        # KAFL_TAG_NXT_UNFIN here — confirm this tag choice is intentional.
        if state == KaflNodeState.in_progress or state == KaflNodeState.finished:
            send_msg(KAFL_TAG_NXT_UNFIN, data, self.comm.to_master_from_mapserver_queue)
        else:
            send_msg(KAFL_TAG_NXT_FIN, data, self.comm.to_master_from_mapserver_queue)
        #if len(self.shadow_map) > 1024:
        #    self.shadow_map = set()
        return True
    return False
def run(self):
    """Run one concolic-execution job on the current payload.

    Waits for this thread's concolic lock, writes the payload to a temp
    file, launches the external concolic.py script pinned to two CPUs,
    and forwards every generated input to the master. The subprocess is
    killed when the thread is stopped or after roughly 500 seconds.
    """
    # Wait for our turn; poll with a timeout so a stop request is noticed.
    while not self.stopped():
        if self.comm.concolic_locks[self.concolic_id].acquire(timeout=0.1):
            break
    if self.stopped():
        return
    # Publish the payload and reset per-run counters on the model object.
    self.model.payload = self.payload
    self.model.payload_len = len(self.payload)
    self.model.read_cnt = {}
    self.model.dma_cnt = {}
    log_concolicserver("concolic locked " + str(self.concolic_id))
    fname = join(self.work_dir, f'tmp_conc_payload_{self.concolic_id}')
    outdir = join(self.work_dir, f'tmp_conc_out_{self.concolic_id}')
    drifuzz_path = dirname(dirname(dirname(realpath(__file__))))
    # Preparation
    if os.path.exists(outdir):
        shutil.rmtree(outdir)
    with open(fname, 'wb') as f:
        f.write(self.payload)
    # Run concolic script
    cmd = [
        'taskset', '-c', f'{2*self.concolic_id},{2*self.concolic_id+1}',
        'python3', '-u', f'{drifuzz_path}/../drifuzz-concolic/concolic.py',
        self.target, fname,
        '--outdir', outdir,
        '--tempdir',
        '--id', str(self.concolic_id),
        # '--pincpu', f'{2*self.concolic_id},{2*self.concolic_id+1}',
        '--socket', self.comm.qemu_socket_prefix + str(self.slave_id)
    ]
    p = subprocess.Popen(cmd, stdin=subprocess.DEVNULL, stdout=self.log, stderr=self.log)
    total_timeout = 500
    # Poll the child once per second so both stop requests and the overall
    # timeout budget are honored.
    while not self.stopped():
        try:
            p.wait(timeout=1)
        except subprocess.TimeoutExpired:
            total_timeout -= 1
            if total_timeout == 0:
                # Concolic timedout
                p.kill()
                log_concolicserver("thread timedout. Killing concolic process")
                self.comm.concolic_locks[self.concolic_id].release()
                return
            continue
        break
    else:
        # Loop exited without break: the thread was asked to stop.
        log_concolicserver("thread stopped. Killing concolic process")
        p.kill()
    # Postprocessing
    payloads = []
    if os.path.exists(outdir):
        for filename in os.listdir(outdir):
            file_path = os.path.join(outdir, filename)
            with open(file_path, 'rb') as f:
                payloads.append(f.read())
    log_concolicserver(f"Concolic generate {len(payloads)} inputs " + str(self.concolic_id))
    # If no inputs were generated, redo the analysis
    if len(payloads) <= 1:
        send_msg(DRIFUZZ_NEW_INPUT, self.payload, self.comm.to_concolicserver_queue)
    # Send to master
    for pl in payloads:
        send_msg(DRIFUZZ_NEW_INPUT, pl, self.comm.to_master_queue)
    self.comm.concolic_locks[self.concolic_id].release()
    log_concolicserver("concolic unlocked " + str(self.concolic_id))
def send_bitmap(self, perf=10, kasan=False, timeout=False, payload=None):
    """Read the coverage bitmap from shared memory and report it.

    In PROC_BITMAP state the raw bitmap is returned to the master. In
    PROC_TASK / PROC_IMPORT state a FuzzingResult is sent to the
    mapserver (copying the payload into the mapserver shm when new bits
    were seen) and a new payload is then requested from the master.
    """
    if self.exit_if_reproduce():
        return
    log_slave(f"Execution time: {time.time()-self.start_time}", self.slave_id)
    self.unlock_concolic_thread()
    if self.state == SlaveState.PROC_BITMAP:
        self.state = SlaveState.WAITING
        bitmap_shm = self.comm.get_bitmap_shm(self.slave_id)
        bitmap_shm.seek(0)
        bitmap = bitmap_shm.read(self.bitmap_size)
        self.lock_concolic_thread()
        # Reply master's BITMAP cmd
        send_msg(KAFL_TAG_REQ_BITMAP, bitmap, self.comm.to_master_queue, source=self.slave_id)
        # Ask master for new payloads once
        # Transit from PROC_BITMAP to regular fuzzing
        # if not self.requested_input:
        #     self.requested_input = True
        #     send_msg(KAFL_TAG_REQ, str(self.slave_id), self.comm.to_master_queue, source = self.slave_id)
    elif self.state == SlaveState.PROC_TASK or \
            self.state == SlaveState.PROC_IMPORT:
        # KAFL_TAG_RESULT for regular tasks, DRIFUZZ_CONC_BITMAP for
        # imported (concolic-generated) inputs.
        tag = KAFL_TAG_RESULT if self.state == SlaveState.PROC_TASK else DRIFUZZ_CONC_BITMAP
        self.state = SlaveState.WAITING
        bitmap_shm = self.comm.get_bitmap_shm(self.slave_id)
        bitmap_shm.seek(0)
        bitmap = bitmap_shm.read(self.bitmap_size)
        bitmap_shm.flush()
        # Process bitmap results
        hnb = self.check_for_unseen_bits(bitmap)
        if hnb:
            # Update mapserver with the payload (length-prefixed, little-endian).
            mapserver_payload_shm = self.comm.get_mapserver_payload_shm(self.slave_id)
            mapserver_payload_shm.seek(0)
            mapserver_payload_shm.write(struct.pack('<I', len(payload)))
            mapserver_payload_shm.write(payload)
        result = FuzzingResult(0, False, timeout, kasan, self.affected_bytes[0], self.slave_id,
                               perf, reloaded=False, new_bits=hnb, qid=self.slave_id)
        # Notify mapserver the result
        send_msg(tag, [result], self.comm.to_mapserver_queue, source=self.slave_id)
        # Wait for mapserver to finish
        self.comm.slave_locks_bitmap[self.slave_id].acquire()
        # Acquire concolic lock before asking master for payload
        # Prevent master from sending out a payload that is never processed,
        # May cause starvation because concolic thread is busy
        self.lock_concolic_thread()
        # Ask master for new payloads
        send_msg(KAFL_TAG_REQ, str(self.slave_id), self.comm.to_master_queue, source=self.slave_id)
    else:
        log_slave("Error: slave thread in wrong state", self.slave_id)
def __map_info_tag_handler(self, request):
    """Reply to a map-info query with the current mapserver state object."""
    current_state = self.mapserver_state_obj
    send_msg(KAFL_TAG_MAP_INFO, current_state, self.comm.to_master_queue)
def __result_tag_handler(self, request, imported=False):
    """Process a batch of FuzzingResults reported by one slave.

    For every result with new coverage, the payload (length-prefixed) and
    bitmap are copied out of the slave's shared memory and fed to the
    hash check; round counters, effector map and abortion bookkeeping are
    updated. When `imported` is True, round/abortion accounting is skipped.
    """
    # self.comm.slave_locks_B[request.source].acquire()
    results = request.data
    payloads = []
    bitmaps = []
    payload_shm = self.comm.get_mapserver_payload_shm(request.source)
    bitmap_shm = self.comm.get_bitmap_shm(request.source)
    # Phase 1: copy payloads/bitmaps out of shm before releasing the slave.
    for result in results:
        if result.new_bits:
            bitmap_shm.flush()
            # bitmap_shm.seek(result.pos * self.comm.get_bitmap_shm_size())
            bitmap_shm.seek(0)
            # Payload shm entry: 4-byte little-endian length, then the data.
            payload_shm.seek(result.pos * self.comm.get_mapserver_payload_shm_size())
            length = payload_shm.read(4)
            data_len = struct.unpack('<I', length)[0]
            payloads.append(payload_shm.read(data_len))
            # bitmaps.append(bitmap_shm.read(self.comm.get_bitmap_shm_size()))
            bitmap = bitmap_shm.read(self.comm.get_bitmap_shm_size())
            bitmaps.append(bitmap)
            # NOTE(review): `b` counts non-0xFF bytes but is never used —
            # leftover debug counter.
            b = 0
            for i in range(len(bitmap)):
                if bitmap[i] != 255:
                    b += 1
        else:
            # Keep index alignment with `results` for the processing loop below.
            payloads.append(None)
            bitmaps.append(None)
            #log_mapserver("[MAPS]\t\ SKIP")
    # self.comm.slave_locks_A[request.source].release()
    # The shm copies are done; let the slave continue.
    self.comm.slave_locks_bitmap[request.source].release()
    # Phase 2: bookkeeping on the copied data.
    for i in range(len(results)):
        if results[i].reloaded:
            if not imported:
                self.abortion_counter += 1
        if results[i].new_bits:
            if results[i].timeout:
                self.mapserver_state_obj.timeout += 1
            new_hash = mmh3.hash64(bitmaps[i])
            self.__check_hash(new_hash, bitmaps[i], payloads[i], results[i].crash,
                              results[i].timeout, results[i].kasan, results[i].slave_id,
                              results[i].reloaded, results[i].performance,
                              results[i].qid, results[i].pos)
            self.last_hash = new_hash
            if not imported:
                self.round_counter += 1
            if self.effector_initial_bitmap:
                # Mark bytes whose mutation changed coverage vs. the baseline.
                if self.effector_initial_bitmap != new_hash:
                    for j in results[i].affected_bytes:
                        if not self.effector_map[j]:
                            self.effector_map[j] = True
        else:
            if not imported:
                self.round_counter += 1
    if imported:
        return
    # TODO: Replace const value by performance*(1/50)s
    if self.abortion_counter >= self.abortion_threshold:
        if not self.abortion_alredy_sent:
            log_mapserver("Stage abortion limit (" + str(self.abortion_threshold) + ") reached!")
            send_msg(KAFL_TAG_ABORT_REQ, self.mapserver_state_obj, self.comm.to_master_queue)
            self.abortion_alredy_sent = True
            self.comm.stage_abortion_notifier.value = True