def change_cwd_results(self):
    """Switch into this run's results directory and return the previous working directory."""
    cwd = os.getcwd()
    os.chdir(RESULT_PATH.format(reference=self.__reference_distribution,
                                data=self.full_data_label,
                                classes=self.full_classes_label))
    return cwd
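# Sketch (assumption, not part of the original source): a typical call pattern for
# change_cwd_results(). Because it returns the previous working directory, the caller can
# always restore it; `experiment` is a hypothetical instance of the surrounding class.
def with_results_dir(experiment):
    previous_cwd = experiment.change_cwd_results()
    try:
        pass  # read or write files relative to the results directory here
    finally:
        os.chdir(previous_cwd)  # return to the original directory even if an error occurs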
def load_data() -> pd.DataFrame:
    """Collect every per-run CSV, derive the remaining metrics, and join the frames column-wise."""
    all_files = []
    for ref in SELECTED_REFERENCES:
        for data_count_var in ['All Data', 'Partial Data']:
            for class_count_var in ['Full Classes', 'Limited Classes']:
                # Ignore any CSV whose name contains '[' and make sure nothing is collected twice.
                glob_files = list(filter(lambda z: '[' not in z,
                                         glob.glob(RESULT_PATH.format(reference=ref,
                                                                      data=data_count_var,
                                                                      classes=class_count_var) + '*.csv')))
                for glob_file in glob_files:
                    assert glob_file not in all_files
                all_files += glob_files

    oa_df_list = []
    for file in all_files:
        ref_style, dim, data_style, hidden_layer, size, class_style = name_columns(file)
        if dim == '3':
            continue
        temp = pd.read_csv(file)
        if 'False Negatives' in temp.columns:
            # Derive the remaining metrics from the confusion-matrix counts, then drop the raw counts.
            temp = dm.false_positive_rate(temp)
            temp = dm.f1_score(temp)
            temp = dm.specificity(temp)
            temp = dm.false_omission_rate(temp)
            temp.drop(columns=['True Positives', 'True Negatives', 'False Positives', 'False Negatives'],
                      inplace=True)
        # Encode the run configuration into every column name so the frames can be joined side by side.
        columns = [ref_style + ' D ' + dim + ' ' + data_style + ' ' + col + ' HL ' + hidden_layer
                   + ' ' + size + ' ' + class_style for col in temp.columns]
        oa_df_list.append(temp.set_axis(columns, axis=1))
    return pd.concat(oa_df_list, axis=1)
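# Sketch (assumption, not part of the original source): a quick sanity check of the frame
# returned by load_data(). Each column label encodes the run configuration built above
# (reference style, dimension, data/class variants, metric name, hidden-layer count, size).
def preview_loaded_data():
    oa_df = load_data()
    print(oa_df.shape)                        # one column per metric/configuration pair
    print(oa_df.columns[:5].tolist())         # inspect a few of the generated labels
    print(oa_df.filter(like=' D 2 ').head())  # substring match on the labels, e.g. the 2-D runs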
incomplete_knn_sizes = []
find_svm = False
incomplete_svm_sizes = []

for dim in [1, 2]:
    print('Dimension:', dim)
    for data in ['All Data', 'Partial Data']:
        print('\t' + data)
        for classes in ['Full Classes', 'Limited Classes']:
            if classes == 'Limited Classes' and dim == 1:
                continue  # this combination is not processed for the 1-D runs
            print('\t\t' + classes)
            for reference in REFERENCE_LIST:
                print('\t\t\t' + reference)
                os.chdir(RESULT_PATH.format(dim=dim, reference=reference, data=data, classes=classes))
                for size_var in SIZE_SET:
                    detail_size(size_var)

for dim in [1, 2]:
    print('Dimension:', dim)
    for reference in REFERENCE_LIST:
        print('\t' + reference)
        for data in ['All Data']:
            print('\t\t' + data)
            for classes in ['Full Classes', 'Limited Classes']:
                print('\t\t\t' + classes + '\n\t\t\t\tAll Sizes', end='\t\t')
                os.chdir(RESULT_PATH.format(dim=dim, reference=reference, data=data,
                                            classes=classes))
elif class_count is None:
    plt.title(data_count + ' ' + current_metric, fontsize=10)
    plt.savefig('Same Data\\' + current_metric + '\\' + data_count + ' ' + current_title + '.png')
elif data_count is None:
    plt.title(class_count + ' ' + current_metric, fontsize=10)
    plt.savefig('Same Classes\\' + current_metric + '\\' + class_count + ' ' + current_title + '.png')
else:
    plt.title(current_metric, fontsize=10)
    plt.savefig(data_count + '\\' + class_count + '\\' + current_metric + '\\' + current_title + '.png')
plt.close('all')


x = range(1, NUM_EPOCHS + 1)
for class_count_var in ['Full Classes', 'Limited Classes']:
    for data_count_var in ['All Data', 'Partial Data']:
        os.chdir(RESULT_PATH.format(data=data_count_var, classes=class_count_var))
        print(os.getcwd())

        # Build DataFrame
        oa_hidden_layer_list = []
        for hidden_layer in NUM_HIDDEN_LAYERS:
            hidden_layer_files = list(filter(lambda name: data_count_var not in name,
                                             glob.glob('PD*Hidden Layer ' + str(hidden_layer) + '*.csv')))
            if len(hidden_layer_files) == 0:
                continue
            hidden_layer_files.sort(key=sort)
            for file in hidden_layer_files:
                # Left-pad sizes below 100 with a zero so the size part of the column label stays sortable.
                size = file.split(',')[1].split(' ')[-1]
                if int(size) < 100:
                    size = '0' + size
                temp = pd.read_csv(file)
                columns = [col + ' HL ' + str(hidden_layer) + ' S ' + size + '.' for col in temp.columns]
                oa_hidden_layer_list.append(temp.set_axis(columns, axis=1))
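# Sketch (assumption, not part of the original source): one way to combine the per-hidden-layer
# frames gathered above and plot every metric column against the epoch axis, mirroring the
# pd.concat(..., axis=1) pattern used in load_data(). The function name is hypothetical,
# e.g. sketch_plot_hidden_layer_curves(oa_hidden_layer_list, x).
def sketch_plot_hidden_layer_curves(frames, epochs):
    combined = pd.concat(frames, axis=1)  # align the per-configuration columns side by side
    for column in combined.columns:
        plt.plot(epochs, combined[column], label=column)
    plt.legend(fontsize=6)
    plt.show()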