def events_strain_visualization(path_to_data_dir, input_param):
    """Run the per-event strain visualization over every event in the selected tests.

    path_to_data_dir: str, root directory holding the test/event data
    input_param: dict with keys "list_of_test_id" (tests to process) and
        "num_of_proc" (number of worker processes)
    """
    test_ids = input_param["list_of_test_id"]
    proc_count = input_param["num_of_proc"]

    def per_event(event):
        return single_event_strain_visualization(path_to_data_dir, event)

    operation_on_events(path_to_data_dir, test_ids, per_event, proc_count)
def all_events_local_atoms_finder(path_to_data_dir, input_param, residual_threshold=0.5):
    """Find and persist the local (outlier) atom indexes for every selected event.

    Develops a correlation model between feature and target for all events
    available in the tests listed in input_param["list_of_test_id"], finds the
    outlier atom indexes and saves them to a file in each event's directory.

    path_to_data_dir: str, root directory holding the test/event data
    input_param: dict with keys:
        "list_of_test_id": tests to process
        "model": str, "linear_model" or "LinearSVR" (default used when None)
        "feature": str, currently only "displacement"
        "target": str, currently only "shear_strain"
        "num_of_proc": number of worker processes
        "re_calc": bool, force recalculation of cached results
    residual_threshold: float, residual cut-off used to flag outlier atoms
    """
    list_of_test_id = input_param["list_of_test_id"]
    model = input_param["model"]
    feature = input_param["feature"]
    target = input_param["target"]
    num_of_proc = input_param["num_of_proc"]
    re_calc = input_param["re_calc"]
    # single-argument print() works identically under Python 2 and 3
    print("current residual_threshold: {}".format(residual_threshold))
    # perform the finder on all events in all tests in list_of_test_id,
    # distributed over num_of_proc worker processes
    result_list = operation_on_events(
        path_to_data_dir, list_of_test_id,
        lambda x: single_event_local_atoms_index(
            x, path_to_data_dir, model, feature, target,
            residual_threshold, True, re_calc=re_calc),
        num_of_proc)
    print("done finding all local atoms index for all final selected events in interested tests!")
def run_all_tests_voronoi_calculator(path_to_data_dir, input_param, return_volume=False):
    """Run the voronoi cell calculation for every event in the selected tests.

    path_to_data_dir: str, root directory holding the test/event data
    input_param: dict with keys "list_of_test_id", "num_of_proc", "box_range",
        "cut_off", "atom_list", "periodic", "re_calc"
    return_volume: bool, forwarded to the per-event calculator
    """
    test_ids = input_param["list_of_test_id"]
    proc_count = input_param["num_of_proc"]
    box_range = input_param["box_range"]
    cut_off = input_param["cut_off"]
    atom_list = input_param["atom_list"]
    periodic = input_param["periodic"]
    re_calc = input_param["re_calc"]

    def per_event(event):
        return single_event_voronoi_calculator(
            event, path_to_data_dir, box_range, cut_off,
            atom_list=atom_list, periodic=periodic,
            re_calc=re_calc, return_volume=return_volume)

    operation_on_events(path_to_data_dir, test_ids, per_event,
                        num_of_proc=proc_count)
    print("done voronoi cell calculations for all interested tests!")
def pn_calculator_run_all_tests_mp(path_to_data_dir, input_param, save_results=True):
    """Run the pn calculation for every event in the selected tests.

    path_to_data_dir: str, root directory holding the test/event data
    input_param: dict with keys "list_of_test_id", "num_of_proc", "re_calc"
    save_results: bool, kept for interface compatibility (currently unused here;
        presumably consumed by callers or a future revision — TODO confirm)

    Returns the list of per-event results from operation_on_events.
    """
    test_ids = input_param["list_of_test_id"]
    proc_count = input_param["num_of_proc"]
    re_calc = input_param["re_calc"]

    def per_event(event):
        return single_event_pn_calculator(event, path_to_data_dir, re_calc=re_calc)

    results = operation_on_events(path_to_data_dir, test_ids, per_event,
                                  num_of_proc=proc_count)
    print("done pn calculations for all interested tests!")
    return results
def events_local_atoms(path_to_data_dir, input_param, residual_threshold=0.5):
    """Correlate feature vs. target per event and plot local-atom statistics.

    Develops a correlation model between feature and target for all events
    available in the tests in input_param["list_of_test_id"], then plots
    histograms of the number of local atoms and of the fitted slope for the
    three stages (init->sad, sad->fin, init->fin).

    input_param: dict with keys:
        "list_of_test_id": tests to process
        "model": str, currently only "linear_model" (also used when None)
        "feature": str, currently only "displacement"
        "target": str, currently only "shear_strain"
        "num_of_proc": number of worker processes
    residual_threshold: float, residual cut-off used to flag local atoms

    Returns (ave_num_local_atoms, ave_slope), both averaged over the
    init->sad stage of all events.
    """
    list_of_test_id = input_param["list_of_test_id"]
    model = input_param["model"]
    feature = input_param["feature"]
    target = input_param["target"]
    num_of_proc = input_param["num_of_proc"]
    # single-argument print() works identically under Python 2 and 3
    print("current residual_threshold: {}".format(residual_threshold))
    # perform the analysis on all events in all tests, over num_of_proc workers
    result_list = operation_on_events(
        path_to_data_dir, list_of_test_id,
        lambda x: single_event_local_atoms(
            x, path_to_data_dir, model, feature, target, residual_threshold),
        num_of_proc)
    # each event result is indexed [stage][0]=num_local_atoms, [stage][1]=slope
    # with stage 0: init->sad, 1: sad->fin, 2: init->fin
    init_sad_num = [res[0][0] for res in result_list]
    sad_fin_num = [res[1][0] for res in result_list]
    init_fin_num = [res[2][0] for res in result_list]
    init_sad_k = [res[0][1] for res in result_list]
    sad_fin_k = [res[1][1] for res in result_list]
    init_fin_k = [res[2][1] for res in result_list]
    path_to_image_1 = path_to_data_dir + "/num_local_atoms.png"
    plot_histogram_3(path_to_image_1, [init_sad_num, sad_fin_num, init_fin_num])
    ave_num_local_atoms = np.mean(init_sad_num)
    print("the average number of local atoms: {}".format(ave_num_local_atoms))
    path_to_image_2 = path_to_data_dir + "/slope.png"
    plot_histogram_3(path_to_image_2, [init_sad_k, sad_fin_k, init_fin_k])
    ave_slope = np.mean(init_sad_k)
    print("the average number of slope: {}".format(ave_slope))
    print("done plotting for number of local atoms for all final selected events in interested tests")
    return ave_num_local_atoms, ave_slope
def generate_correlation_table_mp(path_to_data_dir, input_param):
    """Extract per-event data for atom_list and export it all to a csv table.

    Asks the user to confirm interactively (via prompt_yes_no) that the cached
    voronoi_index_results.json matches the specified atom_list before running.

    input_param: dict with keys "list_of_test_id", "num_of_proc", "atom_list"

    Raises:
        Exception: when the user declines the confirmation prompt.
    """
    list_of_test_id = input_param["list_of_test_id"]
    num_of_proc = input_param["num_of_proc"]
    atom_list = input_param["atom_list"]
    # single-argument print() works identically under Python 2 and 3
    print("if atom_list is local or pn, voronoi volume calculation only act on atom_list from init to sad")
    print("confirm if voronoi_index_results.json is corresponding to the atom_list you just specified: {}".format(atom_list))
    if not prompt_yes_no():
        raise Exception(
            "quitting, re_calc the voronoi indexes for your specified atom_list by --voro --calc --re_calc --local if atom_list is local")
    operation = lambda x: single_event_data_extractor(x, path_to_data_dir, atom_list)
    result_list = operation_on_events(path_to_data_dir, list_of_test_id,
                                      operation, num_of_proc=num_of_proc)
    convert_to_csv(path_to_data_dir, result_list)
    print("All done!")
def strain_events_stats_visualization(path_to_data_dir, input_param):
    """Plot histograms of per-event strain statistics for the selected tests.

    Use this new version of strain_events_stats_visualization: the new
    strain_calc.py must be re-run first to overwrite events_stats.pkl with the
    added event_state.

    For each of the quantities (vol, shear, disp) and statistics (ave, std,
    max), plots one histogram figure containing the three stage distributions
    (init->sad, sad->fin, init->fin) across all events.

    input_param: dict with keys "list_of_test_id" and "num_of_proc"
    """
    list_of_test_id = input_param["list_of_test_id"]
    num_of_proc = input_param["num_of_proc"]
    all_events_results = operation_on_events(
        path_to_data_dir, list_of_test_id,
        lambda x: single_event_strain_stats(path_to_data_dir, x),
        num_of_proc)
    # Each event result is (init_sad, sad_fin, init_fin); each stage is a dict
    # with keys "ave"/"std"/"max" whose values are indexed 0: vol, 1: shear,
    # 2: disp. Accumulate stats[quantity][stat][stage] -> one value per event.
    stat_keys = ("ave", "std", "max")
    quantity_col = {"vol": 0, "shear": 1, "disp": 2}
    stats = {q: {s: [[], [], []] for s in stat_keys} for q in quantity_col}
    for event_res in all_events_results:
        for stage in range(3):  # 0: init->sad, 1: sad->fin, 2: init->fin
            for s in stat_keys:
                row = event_res[stage][s]
                for q, col in quantity_col.items():
                    stats[q][s][stage].append(row[col])
    # one figure per (quantity, statistic), three stage histograms per figure
    for q in ("disp", "shear", "vol"):
        for s in stat_keys:
            path_to_image = path_to_data_dir + "/{}_{}.png".format(q, s)
            plot_histogram_3(path_to_image, stats[q][s])
    print("done plotting strain statistics for all interested tests!")
def run_all_tests_voronoi_classifier(path_to_data_dir, input_param):
    """Classify voronoi cells and plot the init->sad dynamic transition matrix.

    NOTE(review): this definition is shadowed by a later function of the same
    name in this module; only the later one is reachable at import time.
    Consider removing or renaming one of the two.

    Class encoding throughout: 0 = ICO, 1 = ICO-like, 2 = GUM.

    input_param: dict with keys "list_of_test_id" and "num_of_proc"
    """
    list_of_test_id = input_param["list_of_test_id"]
    num_of_proc = input_param["num_of_proc"]
    operation = lambda x: single_event_voronoi_classifier(x, path_to_data_dir)
    result_list = operation_on_events(path_to_data_dir, list_of_test_id,
                                      operation, num_of_proc=num_of_proc)
    # transition[i][j]: number of atoms classified i at init and j at sad
    transition = np.zeros((3, 3))
    # per-class atom totals at each of the three states
    state_totals = {"init": np.zeros(3), "sad": np.zeros(3), "fin": np.zeros(3)}
    for event_result in result_list:
        if event_result is None:
            continue
        init_cls = event_result["init"]
        sad_cls = event_result["sad"]
        fin_cls = event_result["fin"]
        for i_cls, s_cls in zip(init_cls, sad_cls):
            # only count recognized class codes, matching the original
            # if/elif chains which ignored any other values
            if 0 <= i_cls <= 2 and 0 <= s_cls <= 2:
                transition[i_cls][s_cls] += 1
        for state, classes in (("init", init_cls), ("sad", sad_cls), ("fin", fin_cls)):
            counts = Counter(classes)
            for c in range(3):
                state_totals[state][c] += counts[c]
    # begin calculating the probability for dynamic transition from init to sad;
    # guard against empty classes (was a ZeroDivisionError in the original)
    init_totals = state_totals["init"]
    if np.any(init_totals == 0):
        print("total ICO, ICO-LIKE, GUM at init: {}".format(init_totals))
        print("Can not calculate the dynamic transition matrix since some init class count is zero!")
        return
    # row-normalize counts into probabilities; p_0 is the uniform 1/3 reference
    p = transition / init_totals[:, np.newaxis]
    p_0 = np.full((3, 3), 1.0 / 3)
    c_matrix = p / p_0 - 1
    print(p)
    print(c_matrix)
    path_to_image = path_to_data_dir + "/dynamic_transition_matrix_all_events.png"
    plot_dynamic_transition_matrix(path_to_image, c_matrix)
    print("done voronoi index classification for all interested tests!")
def run_all_tests_voronoi_classifier(path_to_data_dir, input_param):
    """Classify voronoi cells for all events; report class fractions and the
    init->sad dynamic transition probability matrix.

    Aggregates the per-atom voronoi classes over all filtered events in the
    selected tests, prints the ICO/ICO-like/GUM fractions at the init, saddle
    and final states, plots the class histograms, and plots the init->sad
    transition probability matrix.

    Class encoding throughout: 0 = ICO, 1 = ICO-like, 2 = GUM.

    input_param: dict with keys "list_of_test_id" and "num_of_proc"
    """
    list_of_test_id = input_param["list_of_test_id"]
    num_of_proc = input_param["num_of_proc"]
    operation = lambda x: single_event_voronoi_classifier(x, path_to_data_dir)
    result_list = operation_on_events(path_to_data_dir, list_of_test_id,
                                      operation, num_of_proc=num_of_proc)
    # transition[i][j]: number of atoms classified i at init and j at sad
    transition = np.zeros((3, 3))
    # per-class atom totals at each state, and the pooled class lists used
    # for the histogram plot
    state_totals = {"init": np.zeros(3), "sad": np.zeros(3), "fin": np.zeros(3)}
    all_classes = {"init": [], "sad": [], "fin": []}
    for event_result in result_list:
        if event_result is None:
            continue
        for state in ("init", "sad", "fin"):
            classes = event_result[state]
            all_classes[state].extend(classes)
            counts = Counter(classes)
            for c in range(3):
                state_totals[state][c] += counts[c]
        # only count recognized class codes, matching the original if/elif
        # chains which ignored any other values
        for i_cls, s_cls in zip(event_result["init"], event_result["sad"]):
            if 0 <= i_cls <= 2 and 0 <= s_cls <= 2:
                transition[i_cls][s_cls] += 1
    # class fractions per state (plain floats for clean printing)
    fractions = {}
    for state in ("init", "sad", "fin"):
        fractions[state] = (state_totals[state] / state_totals[state].sum()).tolist()
    path_to_voro_class_pt = path_to_data_dir + "/voronoi_class_fraction_all_events.png"
    print("All filtered events in list_of_test_id:")
    print("initial state ICO, ICO-like, GUM fraction is: {}".format(fractions["init"]))
    print("saddle state ICO, ICO-like, GUM fraction is: {}".format(fractions["sad"]))
    print("final state ICO, ICO-like, GUM fraction is: {}".format(fractions["fin"]))
    plot_voronoi_histogram_3(path_to_voro_class_pt,
                             [all_classes["init"], all_classes["sad"], all_classes["fin"]])
    # begin calculating the probability for dynamic transition from init to sad;
    # bail out when any init class is empty (division would be undefined)
    init_totals = state_totals["init"]
    if np.any(init_totals == 0):
        print("total number of ICO is: {}".format(int(init_totals[0])))
        print("total number of ICO-LIKE is: {}".format(int(init_totals[1])))
        print("total number of GUM is: {}".format(int(init_totals[2])))
        print("Can not calculate the dynamic transition probability matrix since either total number of ICO or ICO-LIKE or GUM is zero!")
        return
    # row-normalize counts into probabilities; p_0 is the uniform 1/3 reference
    p = transition / init_totals[:, np.newaxis]
    p_0 = np.full((3, 3), 1.0 / 3)
    c_matrix = p / p_0 - 1
    print("Probability matrix: {}".format(p))
    print("Normalized probability matrix: {}".format(c_matrix))
    path_to_image = path_to_data_dir + "/dynamic_transition_probability_matrix_all_events.png"
    plot_dynamic_transition_matrix(path_to_image, p)
    print("done voronoi index classification for all interested tests!")