def generate_concat_test_case(package, options):
    test_case_list = []
    for _ in range(config.get('NR_CONCAT')):
        state = FsState()
        for _ in range(config.get('NR_SEGMENT')):
            state.rand_gen(config.get('LENGTH_PER_SEGMENT') // 2)
            state.kmeans_gen(config.get('LENGTH_PER_SEGMENT') // 2)
        state.expand_param()
        test_case_list.append(package.new_concat_test_case(state))
    #
    # Merge all test cases into the first one
    #
    main_test_case = test_case_list[0]
    for i in range(1, config.get('NR_CONCAT')):
        main_test_case.concat_test_case(test_case_list[i])
    main_test_case.save()
    M4.print_red(f'Test Case: {main_test_case.path_}')
    return main_test_case
def binary_reduce(self):
    left, right = 0, len(self.test_case_.seq_) - 1
    #
    # The bounds do not need to be exact.
    #
    while left + 3 < right:
        mid = left + (right - left) // 2
        M4.print_cyan(f'Reduce {left}--{right}: {mid}')
        new_case = self.test_case_.truncate_seq(mid - 1)
        M4.print_blue('Save new C file', no_newline=True)
        new_case.save()
        M4.print_blue('... ending')
        if self._reproduce():
            M4.print_green(f'Yes [{left},{right}] => [{left},{mid}]')
            right = mid
        else:
            M4.print_red(f'No [{left},{right}] => [{mid},{right}]')
            left = mid
    new_case = self.test_case_.truncate_seq(right)
    new_case.save()
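#
# The loop in binary_reduce() is a loose bisection over the syscall
# sequence: it preserves the invariant that the prefix ending at `right`
# still reproduces the bug, and it may stop early once the window shrinks
# to three commands. A self-contained toy sketch of the same invariant
# follows; `reproduces` stands in for _reproduce(), and nothing here is
# part of the fuzzer's API.
#
def toy_binary_reduce(seq, reproduces):
    ''' Shrink `seq` to a short prefix that still satisfies `reproduces`.
        Invariant: reproduces(seq[:right + 1]) is always True, so stopping
        early (window <= 3) never loses the bug. '''
    left, right = 0, len(seq) - 1
    while left + 3 < right:
        mid = left + (right - left) // 2
        if reproduces(seq[:mid + 1]):
            right = mid      # the shorter prefix still reproduces
        else:
            left = mid       # the bug needs commands beyond `mid`
    return seq[:right + 1]

# E.g., if only the first five commands matter:
# toy_binary_reduce(list(range(100)), lambda s: len(s) >= 5)
# => [0, 1, 2, 3, 4, 5, 6]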
def _reproduce(self):
    #
    # Prepare qemu
    #
    self.qemu_ = Qemu()
    self.qemu_.start()
    #
    # Run trigger.sh
    #
    trigger = Trigger(os.path.join(self.case_dir_, 'trigger.sh'))
    trigger.start()
    #
    # Check the log
    #
    result = self._check_result()
    #
    # If a bug occurs, qemu will restart; wait for it
    #
    if result:
        M4.wait_for(13, 'Wait for qemu restarting')
    self.qemu_.stop()
    trigger.stop()
    #
    # Wait for qemu to release the socket
    #
    M4.wait_for(5)
    return result
def _build_mknod_param(self):
    # Permission bits and file-type bits; both are OR-ed into the mode
    # argument of mknod(2).
    modes = ('S_IRWXU', 'S_IRWXG', 'S_IROTH', 'S_IXOTH')
    devs = ('S_IFREG', 'S_IFCHR', 'S_IFBLK', 'S_IFIFO', 'S_IFSOCK')
    m = M4.rand_sample(modes)
    d = M4.rand_sample(devs)
    return '|'.join(m + d), random.randint(0, 10000000)  # mode, dev
def stop(self):
    M4.print_red('Stop qemu daemon', no_newline=True)
    subprocess.Popen(['./ctrl', 'stop']).communicate()
    if self.qemu_thread.is_alive():
        self.qemu_thread.terminate()
    M4.print_red('... ending')
def log_contain(self, keywords):
    M4.exec(['sync', './vm.log'])
    with open('./vm.log', 'r') as fd:
        for line in fd:
            for k in keywords:
                if re.search(k, line.strip()):
                    # print(k, line)
                    return True
    return False
def _add_node(self, path, node):
    parent_path = M4.parent_dir(path)
    name = M4.basename(path)
    parent_node = self._must_lookup_node(parent_path)
    parent_node.add_child(name, node)
    node.update_atime(self.time_count_)
    parent_node.update_atime(self.time_count_)
    self.node_count_ += 1
def _build_write_xattr(self):
    nodes = self.picker_.pick_any(self.fs_state_.tree_)
    namespace_prefix = random.choice(
        ('security', 'system', 'trusted', 'user'))
    key = M4.rand_str(10)
    value = M4.rand_str(10)
    return [
        WriteXattr(nd.get_path(), f'{namespace_prefix}.{key}', value)
        for nd in nodes
    ]
def start(self):
    def target():
        command_process = subprocess.Popen([self.script_path_])
        command_process.communicate()

    M4.print_green(f'Start trigger {self.script_path_}')
    self.trigger_thread_ = multiprocessing.Process(target=target)
    self.trigger_thread_.start()
    M4.wait_for(20, 'Running triggering script')
def _fetch_command_use(self, cmd):
    if cmd.type_ in (Command.MKDIR, Command.CREATE, Command.MKNOD):
        return [M4.parent_dir(cmd.path_)]
    elif cmd.type_ in (Command.SYMLINK, Command.HARDLINK, Command.RENAME):
        return [cmd.old_path_, M4.parent_dir(cmd.new_path_)]
    elif cmd.type_ in (Command.CLOSE, Command.READ, Command.WRITE,
                       Command.FSYNC):
        return [cmd.fd_]
    elif cmd.type_ in (Command.SYNC, Command.REMOUNT_ROOT):
        return []
    else:
        return [cmd.path_]
def _build_rename(self):
    old_nodes = self.picker_.pick_any(self.fs_state_.tree_)
    cmd_list = []
    for old_nd in old_nodes:
        if old_nd == self.fs_state_.tree_.root_:
            continue
        nodes = self.picker_.pick_dir(self.fs_state_.tree_)
        for nd in nodes:
            if not nd.get_children() and old_nd.type_ == Jnode.DIR:
                if M4.true_with_prob(50):
                    new_path = nd.new_child_path(self.fs_state_.namespace_)
                else:
                    #
                    # Replace the empty dir
                    #
                    new_path = nd.get_path()
            else:
                new_path = nd.new_child_path(self.fs_state_.namespace_)
            old_path = old_nd.get_path()
            #
            # Renaming '/a/b/c' to '/a' or '/a/b/c/d' is forbidden.
            #
            if (new_path.startswith(old_path)
                    or old_path.startswith(new_path)):
                continue
            cmd_list.append(Rename(new_path, old_path))
    return cmd_list
def _del_node(self, path):
    ''' Delete only the node itself, not its subtree. Called by the move
        operation. '''
    if path == '/':
        print('*** WARNING: attempt to remove root ***')
        return
    parent_path = M4.parent_dir(path)
    name = M4.basename(path)
    parent_node = self._must_lookup_node(parent_path)
    if name in parent_node.get_children_names():
        parent_node.update_atime(self.time_count_)
        removed_child = parent_node.del_child(name)
        return removed_child
    else:
        return None
def new_concat_test_case(self, state):
    file_path = M4.path_join(self.package_path_, f'{self.file_id_}.c')
    self.file_id_ += 1
    return ConcatTestCase(file_path, state.seq_,
                          random.randint(1, 100000000))
def _build_open_param(self):
    params = ['O_RDWR']
    for p in ParamBuilder._open_params:
        if M4.true_with_prob(10):
            params.append(p)
    return ' | '.join(params)
def get_path(self):
    if not self.parents_:
        return '/'
    #
    # Randomly generate a valid path to the node
    #
    parent = random.choice(list(self.parents_.values()))
    return M4.path_join(parent.get_path(), parent.child_name(self))
def _take_reduce(self, cmd):
    node = self._must_lookup_node(cmd.path_)
    if node.type_ == Jnode.DIR:
        for name in node.get_children_names():
            self._del_subtree(M4.path_join(cmd.path_, name))
        node.clear_children()
    node.update_atime(self.time_count_)
def generate_test_case(package, options):
    state = FsState()
    #
    # We use kmeans generation and random generation alternately.
    #
    for _ in range(config.get('NR_SEGMENT')):
        state.rand_gen(config.get('LENGTH_PER_SEGMENT') // 2)
        state.kmeans_gen(config.get('LENGTH_PER_SEGMENT') // 2)
    #
    # Fill in the parameters
    #
    state.expand_param()
    test_case = package.new_test_case(state)
    test_case.save()
    M4.print_red(f'Test Case: {test_case.path_}')
    return test_case
def main(options):
    ''' There are some implementation optimizations over the algorithm
    presented in the paper:

        Algorithm:
        while not terminate():
            s, w <- prio_pick(Q)
            fill parameters into w and execute w
            if length(w) > MAX:
                drop s, w
            s', w' <- generate_new_from(s)

    First, we set a pre-defined maximum for the total number of workloads
    (i.e. test cases), and we group workloads into test packages. For each
    package, we create a new disk to manipulate.

    Second, since the prio_pick procedure favours the longest workload,
    the queue Q is not necessary.

    Third, since checking each workload w incurs high overhead, we check
    workloads in batches. I.e., if workload w is <call1, call2> and its
    following workload w' is <call1, call2, call3>, there is no need to
    check them separately: when we execute w', w is executed as well.

    Fourth, since kmeans incurs overhead, we use kmeans generation and
    random generation alternately.
    '''
    fs = options.fs
    for cnt in range(config.get('NR_TEST_PACKAGE')):
        package = TestPackage()
        if options.test:
            result = M4.exec(['./ctrl', 'disk', fs, f'{fs}-{cnt}'])
            M4.print_result(result)
        for _ in range(config.get('NR_TESTCASE_PER_PACKAGE')):
            test_case = generate_test_case(package, options)
            # test_case = generate_concat_test_case(package, options)
            if options.test:
                M4.process_exec(
                    ['./ctrl', 'run-case', fs, f'{fs}-{cnt}', test_case.path_],
                    timeout=20)
                # M4.print_result(result)
                M4.process_exec(['./ctrl', 'kill'])
        if options.test:
            result = M4.exec(['rm', '-rf', f'{fs}-{cnt}'])
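#
# A minimal sketch of how main(options) could be driven from the command
# line. The --fs and --test flags mirror the only two attributes main()
# reads (options.fs and options.test); the fuzzer's real CLI may differ.
#
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(description='filesystem fuzzing driver')
    parser.add_argument('--fs', default='ext4',
                        help='target filesystem; also names the disk images')
    parser.add_argument('--test', action='store_true',
                        help='actually create disks and run the test cases')
    main(parser.parse_args())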
def get_all_path(self):
    #
    # Only files can be hardlinked
    #
    assert self.type_ == Jnode.FILE
    all_paths = []
    for parent in self.parents_.values():
        parent_path = parent.get_path()
        name = parent.child_name(self)
        all_paths.append(M4.path_join(parent_path, name))
    return all_paths
def start(self):
    M4.print_blue('Start qemu daemon')

    def target():
        command_process = subprocess.Popen(['./ctrl', 'start'],
                                           stdout=subprocess.PIPE,
                                           stderr=subprocess.PIPE)
        command_process.communicate()

    self.qemu_thread = multiprocessing.Process(target=target)
    self.qemu_thread.start()
    M4.wait_for(25, 'Starting qemu')
    if not self.log_contain(['syzkaller login:']):
        M4.print_red('Qemu starting failure')
        subprocess.Popen(['./ctrl', 'stop']).communicate()
        return
    M4.print_blue('=================')
    subprocess.Popen(['./ctrl', 'init']).communicate()
    M4.print_blue('... ending')
def single_reduce(self):
    idx = 0
    while idx < len(self.test_case_.seq_):
        M4.print_cyan(f'Reduce {idx} --{str(self.test_case_.seq_[idx])}')
        new_case = self.test_case_.filter_out_seq(idx)
        M4.print_blue('Save new C file', no_newline=True)
        new_case.save()
        M4.print_blue('... ending')
        if self._reproduce():
            M4.print_green(f'Yes [{idx}]')
            #
            # idx should not change, because removing the command moves
            # the next command forward into the current `idx` position.
            #
            self.test_case_ = new_case
        else:
            M4.print_red(f'No [{idx}]')
            idx += 1
    self.test_case_.save()
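#
# The same pass in isolation: try dropping each element once, keeping the
# drop only when the bug still reproduces. `idx` stays put after a
# successful drop because the next element has shifted into its slot.
# Toy code; `keeps_bug` stands in for _reproduce().
#
def toy_single_reduce(seq, keeps_bug):
    idx = 0
    while idx < len(seq):
        candidate = seq[:idx] + seq[idx + 1:]  # drop element idx
        if keeps_bug(candidate):
            seq = candidate    # keep idx: the next element moved here
        else:
            idx += 1           # element idx is essential; move on
    return seq

# Keeps only the elements the predicate actually needs:
# toy_single_reduce(list(range(8)), lambda s: {2, 5} <= set(s))
# => [2, 5]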
def _add_dir_recursive(self, path):
    path_to_add = [path]
    while True:
        parent_path = M4.parent_dir(path_to_add[-1])
        if not self._lookup_node(parent_path):
            path_to_add.append(parent_path)
        else:
            break
    path_to_add.reverse()
    for path in path_to_add:
        self._add_dir(path)
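#
# _add_dir_recursive() mirrors os.makedirs: walk up until an existing
# ancestor is found, then create the missing directories top-down. The
# same pattern over a plain set of paths (toy names, not the tree API):
#
import posixpath

def makedirs_sim(existing, path):
    ''' Add `path` and any missing ancestors to the `existing` set. '''
    to_add = [path]
    while posixpath.dirname(to_add[-1]) not in existing:
        to_add.append(posixpath.dirname(to_add[-1]))
    for p in reversed(to_add):
        existing.add(p)

# makedirs_sim({'/', '/a'}, '/a/b/c/d') fills in '/a/b' and '/a/b/c'
# before adding '/a/b/c/d', just like the method above.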
def _del_subtree(self, path):
    ''' Note the difference with `_del_node` '''
    node = self._must_lookup_node(path)
    if node.type_ == Jnode.DIR:
        for name in node.get_children_names():
            self._del_subtree(M4.path_join(path, name))
    elif node.type_ == Jnode.SYMLINK:
        link_target = self._lookup_node(node.target_path_)
        if link_target:
            if link_target.num_symlinked_ > 0:
                link_target.dec_num_symlinked()
    parent_path = M4.parent_dir(path)
    parent_node = self._must_lookup_node(parent_path)
    parent_node.update_atime(self.time_count_)
    name = M4.basename(path)
    parent_node.del_child(name)
    assert self.node_count_ > 0
    self.node_count_ -= 1
def _random_available_types(self):
    all_types = [
        ty for ty in Command.all_types()
        if M4.true_with_prob(config.get_command_prob(ty))
    ]
    random.shuffle(all_types)
    if self.fs_state_.tree_.node_count_ >= config.get('TREE_MAX_SIZE'):
        for ty in (Command.MKDIR, Command.CREATE, Command.SYMLINK,
                   Command.HARDLINK):
            if ty in all_types:
                all_types.remove(ty)
    return all_types
def modules():
    print("----M1------")
    exp = text_re.get("1.0", "end-1c")
    pos_exp = M1.postfixNotation(exp)
    print(pos_exp)
    print("\n")
    print("----M2------")
    alf, matriz_Trans, init_st, final_st = M2.regularExpressionToNFA_e(pos_exp)
    M2.printTransTable(matriz_Trans, alf)
    print("\n")
    print("----M3------")
    matriz_Trans, final_st, alf = M3.NFAconverter(alf, matriz_Trans,
                                                  init_st, final_st)
    M2.printTransTable(matriz_Trans, alf)
    print("\n")
    print("----M4------")
    matriz_Trans, final_st = M4.afdConverter(matriz_Trans, final_st)
    M2.printTransTable(matriz_Trans, alf)
    print("\n")
    print("----M5------")
    matriz_Trans, final_st = M5.Minimize(matriz_Trans, final_st)
    M2.printTransTable(matriz_Trans, alf)
    print("\n")
    print("----M6------")
    f = text_input.get("1.0", END)
    result = M6.parse(matriz_Trans, final_st, f, alf)
    text_output.config(state=NORMAL)
    text_output.delete('1.0', END)
    text_output.insert(INSERT, result)
    text_output.config(state=DISABLED)
    print("\n")
def unique_name(self):
    self.foo_id_ += 1
    prefix = M4.rand_str(random.randint(1, 50))
    return f'{prefix}foo_{self.foo_id_}'
def __hash__(self):
    h = M4.hash_int(Jnode.DIR)
    for (name, child) in self.children_.items():
        h_child = M4.hash_concat(M4.hash_str(name), hash(child))
        h = M4.hash_concat(h, h_child)
    return h

def __hash__(self):
    return M4.hash_int(Jnode.FILE)

def __hash__(self):
    h1 = M4.hash_int(Jnode.SYMLINK)
    h2 = M4.hash_str(self.target_path_)
    return M4.hash_concat(h1, h2)

def __hash__(self):
    return M4.hash_int(Jnode.SPECIAL)
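#
# The four __hash__ methods above fold the node type, child names, and
# symlink targets into a single digest, Merkle-tree style: a directory's
# hash is built from its children's hashes. The M4.hash_int / hash_str /
# hash_concat helpers are not shown in this section; the stand-ins below
# are only an assumption about their contract (deterministic, chainable,
# int-valued hashes), not the real implementation.
#
import hashlib

def hash_str(s):
    # Stable across runs, unlike Python's built-in hash() for str.
    return int.from_bytes(hashlib.sha256(s.encode()).digest()[:8], 'little')

def hash_int(i):
    return hash_str(str(i))

def hash_concat(h1, h2):
    # Order-sensitive combination of two hashes.
    return hash_str(f'{h1}:{h2}')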