import itertools
import os
import time

# DSReplayer, defaultfs, defaultnet, paceconfig, MultiThreadedChecker and the
# replay/prefix helper routines used below are provided by the surrounding
# PACE codebase.


def check_corr_crash_vuls(pace_configs, sock_config, threads=1, replay=False):
    print 'Parsing traces to determine logical operations ...'

    # Initialize the replayer.
    replayer = DSReplayer(pace_configs, sock_config)

    # Set the environment - which file system (defaultfs)? which network (defaultnet)?
    replayer.set_environment(defaultfs('count', 1), defaultnet(), load_cross_deps=True)

    # Did we parse and understand the traces? If yes, print the operations.
    replayer.print_ops(show_io_ops=True)
    print 'Successfully parsed logical operations!'

    if replay == False:
        return

    # Spawn the worker threads that will run the crash-state checkers.
    assert threads > 0
    for i in range(0, threads):
        t = MultiThreadedChecker(MultiThreadedChecker.queue, i)
        t.setDaemon(True)
        t.start()

    # Compute the globally reachable prefixes, with and without fsync dependencies.
    (reachable_prefix_fsync_deps, reachable_prefix_no_deps) = \
        compute_reachable_global_prefixes(replayer)
    grps_0_1_no_deps = unique_grp(reachable_prefix_no_deps,
                                  replayer.conceptual_machines(), [0, 1])
    grps_0_1_fsync_deps = unique_grp(reachable_prefix_fsync_deps,
                                     replayer.conceptual_machines(), [0, 1])

    # Replay the correlated crash scenarios, resetting the checker between phases.
    MultiThreadedChecker.reset()
    replay_correlated_global_prefix(replayer, grps_0_1_no_deps, True)

    MultiThreadedChecker.reset()
    replay_correlated_reordering(replayer, grps_0_1_fsync_deps,
                                 replayer.client_index, True)

    MultiThreadedChecker.reset()
    replay_correlated_atomicity_reordering(replayer, grps_0_1_no_deps,
                                           replayer.client_index, True)

    MultiThreadedChecker.reset()
    replay_correlated_atomicity_prefix(replayer, grps_0_1_no_deps,
                                       replayer.client_index, True)

    # Copy the cached micro-ops next to the scratchpad for later inspection.
    uppath = lambda _path, n: os.sep.join(_path.split(os.sep)[:-n])
    os.system('cp ' +
              os.path.join(uppath(paceconfig(0).cached_prefix_states_file, 1),
                           'micro_ops') + ' ' + paceconfig(0).scratchpad_dir)
# Variant of check_corr_crash_vuls that drives the prefix-state checkers from a
# precomputed pace_conf file (produced by the prot tool).
def check_corr_crash_vuls(pace_configs, sock_config, threads=1, replay=False):
    print 'Parsing traces to determine logical operations ...'
    uppath = lambda _path, n: os.sep.join(_path.split(os.sep)[:-n])

    # Initialize the replayer and its environment.
    replayer = DSReplayer(pace_configs, sock_config)
    replayer.set_environment(defaultfs('count', 1), defaultnet(), load_cross_deps=True)
    replayer.print_ops(show_io_ops=True)
    client_index = replayer.client_index

    # The pace_conf file must have been produced beforehand.
    pace_conf_file = os.path.join(
        uppath(paceconfig(0).cached_prefix_states_file, 1), 'pace_conf')
    assert os.path.exists(pace_conf_file), \
        'Hint: Run the prot tool to produce the pace_conf file!'

    if replay == False:
        return

    # Spawn the worker threads that will run the crash-state checkers.
    assert threads > 0
    for i in range(0, threads):
        t = MultiThreadedChecker(MultiThreadedChecker.queue, i)
        t.setDaemon(True)
        t.start()

    # Compute the interesting prefix states and the workload range to explore.
    (interesting_prefix_states_reorder, interesting_prefix_states_other) = \
        __get_interesting_prefixes(replayer)
    workload_range = __get_workload_range(pace_configs, pace_conf_file,
                                          interesting_prefix_states_reorder)

    # Replay the correlated crash scenarios, resetting the checker between phases.
    MultiThreadedChecker.reset()
    globally_valid_prefix(replayer, interesting_prefix_states_other, True)

    MultiThreadedChecker.reset()
    atomicity_prefix_correlated(replayer, interesting_prefix_states_other,
                                client_index, True)

    MultiThreadedChecker.reset()
    reordering_correlated(replayer, interesting_prefix_states_reorder,
                          client_index, True)

    MultiThreadedChecker.reset()
    atomicity_reordering_correlated(replayer, interesting_prefix_states_other,
                                    client_index, True)

    # Copy the cached micro-ops next to the scratchpad for later inspection.
    os.system('cp ' +
              os.path.join(uppath(paceconfig(0).cached_prefix_states_file, 1),
                           'micro_ops') + ' ' + paceconfig(0).scratchpad_dir)
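# Minimal usage sketch (not part of the original source). It assumes the
# pace_configs and sock_config objects have already been constructed by the
# surrounding workload scripts; the parameter names below are hypothetical
# placeholders for that setup.
def _example_check_corr_crash_vuls(example_pace_configs, example_sock_config):
    # Parse the traces only (no crash-state replay):
    check_corr_crash_vuls(example_pace_configs, example_sock_config)
    # Parse and replay correlated crash states using 4 checker threads:
    check_corr_crash_vuls(example_pace_configs, example_sock_config,
                          threads=4, replay=True)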
def replay_correlated_atomicity_reordering(replayer, interesting_prefix_states,
                                           client_index, replay=True):
    machines = replayer.conceptual_machines()
    fs_ops = replayer.fs_ops_indexes()
    can_omit_ops = {}

    # The client machine does not crash; only the servers do.
    server_machines = machines[:]
    server_machines.remove(client_index)
    server_count = len(server_machines)
    majority_count = int(len(server_machines) / 2) + 1
    assert server_count == 3 and majority_count == 2

    atomicity_reordering_count = 0
    pick_server_count = majority_count
    how_many_majorities = 1
    replay_start = time.time()

    replayer.set_environment(defaultfs('count', 3), defaultnet(), load_cross_deps=False)

    # Impose the atomicity/reordering model on one majority subset of the servers.
    apm_imposed_subset_machineset = list(
        itertools.combinations(server_machines, pick_server_count))
    assert len(apm_imposed_subset_machineset) == nCr(server_count, majority_count)
    apm_imposed_subset_machineset = \
        apm_imposed_subset_machineset[0:how_many_majorities]

    for machine in machines:
        replayer.load(machine, 0)

    for apm_imposed_machines in apm_imposed_subset_machineset:
        for crash_point in interesting_prefix_states:
            append_trunc_indexes = append_or_trunc_ops(replayer, server_machines,
                                                       crash_point)
            if any(append_trunc_indexes.values()):
                # First, end all machines at the GRP (globally reachable prefix) point.
                for machine in machines:
                    replayer.iops_end_at(
                        machine,
                        (crash_point[machine],
                         replayer.iops_len(machine, crash_point[machine]) - 1))

                # Next, we have to omit the sub (io or disk) ops, as we call it.
                atomic_omits = {}
                atomic_ro_currs = {}
                machine = 0
                for end_point in crash_point:
                    atomic_ro_currs[machine] = 0
                    if machine in apm_imposed_machines:
                        if append_trunc_indexes[machine]:
                            # If it is an append or a truncate, break it into
                            # pieces and check for the absence of each piece.
                            atomic_omits[machine] = range(
                                0, replayer.iops_len(machine, end_point))
                        else:
                            # If not an append, just put a marker. We will
                            # exclude this marker later.
                            atomic_omits[machine] = [
                                str(replayer.iops_len(machine, end_point) - 1)
                            ]
                    machine += 1

                atomic_omit_list = []
                while atleast_one_present(apm_imposed_machines, atomic_ro_currs,
                                          atomic_omits):
                    atomic_omit = {}
                    for machine in apm_imposed_machines:
                        if atomic_ro_currs[machine] < len(atomic_omits[machine]):
                            atomic_omit[machine] = \
                                atomic_omits[machine][atomic_ro_currs[machine]]
                        else:
                            atomic_omit[machine] = None
                        atomic_ro_currs[machine] += 1
                    atomic_omit_list.append(atomic_omit)

                for atomic_omit_x in atomic_omit_list:
                    atomic_omit = atomic_omit_x.copy()
                    base_name_prep = atomic_omit_x.copy()
                    for mac in apm_imposed_machines:
                        iop_index = atomic_omit[mac]
                        if type(iop_index) == str or iop_index == None:
                            # Marker or exhausted list: nothing to omit here.
                            del atomic_omit[mac]
                        else:
                            atomic_omit[mac] = (crash_point[mac], iop_index)
                            base_name_prep[mac] = (crash_point[mac], iop_index)

                    replayer.iops_omit_group(atomic_omit)
                    base_name = replay_dir_base_name_ARO(crash_point, base_name_prep)
                    atomicity_reordering_count += 1

                    if replay:
                        (base_path, dirnames, stdout_files) = \
                            get_replay_dirs(machines, base_name)
                        replayer.construct_crashed_dirs(dirnames, stdout_files)
                        MultiThreadedChecker.check_later(base_path, dirnames,
                                                         stdout_files[machines[-1]],
                                                         base_name)

                    replayer.iops_include_group(atomic_omit)

    if replay:
        MultiThreadedChecker.wait_and_get_outputs()

    replay_end = time.time()
    print 'Atomicity reordering correlated states : ' + \
        str(atomicity_reordering_count)
    print 'Atomicity reordering correlated replay took approx ' + \
        str(replay_end - replay_start) + ' seconds...'
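# The replay functions above rely on two small helpers defined elsewhere in the
# PACE codebase: nCr and atleast_one_present. Their definitions are not shown
# in this section; the sketches below only capture what the call sites assume,
# namely that nCr(n, r) is the binomial coefficient and that atleast_one_present
# reports whether any of the given machines still has an unconsumed entry in
# its candidate list.
import math


def nCr_sketch(n, r):
    # Binomial coefficient n! / (r! * (n - r)!), as assumed by the assertion
    # on len(apm_imposed_subset_machineset) above.
    return math.factorial(n) // (math.factorial(r) * math.factorial(n - r))


def atleast_one_present_sketch(machines, currs, lists):
    # True while at least one machine's cursor has not yet walked past the end
    # of its list, as assumed by the enumeration loops above.
    return any(currs[m] < len(lists[m]) for m in machines)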
def replay_correlated_atomicity_prefix(replayer, interesting_prefix_states,
                                       client_index, replay=True):
    machines = replayer.conceptual_machines()
    fs_ops = replayer.fs_ops_indexes()

    # The client machine does not crash; only the servers do.
    server_machines = machines[:]
    server_machines.remove(client_index)
    server_count = len(server_machines)
    majority_count = int(len(server_machines) / 2) + 1
    assert server_count == 3 and majority_count == 2

    count = 0
    how_many_majorities = 1
    pick_server_count = majority_count
    replay_start = time.time()

    replayer.set_environment(defaultfs('count', 3), defaultnet(), load_cross_deps=False)

    # Impose the atomicity model on one majority subset of the servers.
    apm_imposed_subset_machineset = list(
        itertools.combinations(server_machines, pick_server_count))
    assert len(apm_imposed_subset_machineset) == nCr(server_count, majority_count)
    apm_imposed_subset_machineset = \
        apm_imposed_subset_machineset[0:how_many_majorities]
    assert len(apm_imposed_subset_machineset) == 1
    apm_imposed_machines = apm_imposed_subset_machineset[0]

    for machine in machines:
        replayer.load(machine, 0)

    for crash_point in interesting_prefix_states:
        # For each APM-imposed machine, enumerate the prefixes of the i/o ops
        # that make up its crash-point operation.
        atomic_ends = {}
        atomic_currs = {}
        machine = 0
        for end_point in crash_point:
            if machine in apm_imposed_machines:
                atomic_ends[machine] = range(0, replayer.iops_len(machine, end_point))
                atomic_currs[machine] = 0
            machine += 1

        atomic_end_list = []
        while atleast_one_present(apm_imposed_machines, atomic_currs, atomic_ends):
            atomic_end = {}
            for machine in apm_imposed_machines:
                if atomic_currs[machine] < len(atomic_ends[machine]):
                    atomic_end[machine] = atomic_ends[machine][atomic_currs[machine]]
                else:
                    atomic_end[machine] = \
                        atomic_ends[machine][len(atomic_ends[machine]) - 1]
                atomic_currs[machine] += 1
            atomic_end_list.append(atomic_end)

        for atomic_end in atomic_end_list:
            # APM-imposed machines crash mid-operation; the other servers and
            # the client end exactly at their crash-point operation.
            for machine in server_machines:
                if machine in apm_imposed_machines:
                    replayer.iops_end_at(machine,
                                         (crash_point[machine], atomic_end[machine]))
                else:
                    replayer.iops_end_at(
                        machine,
                        (crash_point[machine],
                         replayer.iops_len(machine, crash_point[machine]) - 1))
            replayer.iops_end_at(
                client_index,
                (crash_point[client_index],
                 replayer.iops_len(client_index, crash_point[client_index]) - 1))

            base_name = replay_dir_base_name_AP(crash_point, atomic_end)
            count += 1
            if replay:
                (base_path, dirnames, stdout_files) = \
                    get_replay_dirs(machines, base_name)
                replayer.construct_crashed_dirs(dirnames, stdout_files)
                MultiThreadedChecker.check_later(base_path, dirnames,
                                                 stdout_files[machines[-1]],
                                                 base_name)

    if replay:
        MultiThreadedChecker.wait_and_get_outputs()

    replay_end = time.time()
    print 'Atomicity Prefix correlated states : ' + str(count)
    print 'Atomicity Prefix correlated replay took approx ' + \
        str(replay_end - replay_start) + ' seconds...'