def get_pruning_results(project_name, pruning_methods, y_label):
    """Plot metric-vs-edge-dim curves, one figure per minhash-function count.

    NOTE(review): a second ``get_pruning_results`` is defined later in this
    file with a different body; at import time the later definition shadows
    this one — consider renaming one of the two.

    Args:
        project_name: ClearML project whose tasks are compared.
        pruning_methods: name of the 'Args' hyperparameter used to separate
            the plot legends (forwarded to ``compare_clearml_project``).
        y_label: name of the 'General' hyperparameter plotted on the y-axis.
    """
    print("Retrieving tasks...")
    tasks = Task.get_tasks(project_name=project_name)
    print('Done')
    print(f"Generating graph comparison for {y_label}")
    x_values, y_values, labels, num_funcs = compare_clearml_project(
        pruning_methods, project_name=project_name, tasks=tasks,
        x_name='dim_edges', y_name=y_label)
    # seperate_labels (4-arg form) groups points by minhash-function count and
    # method; presumably {n_funcs: {method: {'x': [...], 'y': [...]}}} —
    # TODO confirm against its definition (not visible in this chunk).
    data = seperate_labels(x_values, y_values, labels, num_funcs)
    for n_funcs, n_funcs_data in data.items():
        plt.figure()
        for method, m_data in n_funcs_data.items():
            x = m_data['x']
            y = m_data['y']
            # Sort the points by x so the line plot runs left-to-right.
            order = np.argsort(x)
            x = [x[i] for i in order]
            y = [y[i] for i in order]
            plt.plot(x, y, label=method)
        plt.title(f"Prunning using {n_funcs} minhash functions")
        plt.xlabel("Edge dim")
        # BUG FIX: the y-axis label was hard-coded to "Accuracy" even though
        # the metric actually plotted is the `y_label` hyperparameter.
        plt.ylabel(y_label)
        plt.legend()
        plt.show()
def summary_clearml_project(project_name, x_label, seperate_legends):
    """Report a ClearML 2D scatter of every 'General' hyperparameter vs ``x_label``.

    Args:
        project_name: ClearML project whose tasks are summarized.
        x_label: hyperparameter name used for the x-axis (removed from the
            set of y-axis candidates; raises KeyError if it is not a
            'General' hyperparameter of the first task).
        seperate_legends: name of the 'Args' hyperparameter whose values
            split the points into separate scatter series.
    """
    print("Retrieving tasks...")
    tasks = Task.get_tasks(project_name=project_name)
    print('Done')
    # Every 'General' hyperparameter except the x-axis becomes a y-axis.
    y_labels = set(tasks[0].data.hyperparams['General'])
    y_labels.remove(x_label)
    logger = get_clearml_logger(project_name, 'Summary').logger
    for y_label in y_labels:
        print(f"Generating graph comparison for {y_label}")
        # BUG FIX: compare_clearml_project returns FOUR sequences (it also
        # returns the num_minhash_funcs labels); the original three-name
        # unpack raised "too many values to unpack". The fourth sequence is
        # unused here.
        x_values, y_values, labels, _ = compare_clearml_project(
            seperate_legends, project_name=project_name, tasks=tasks,
            x_name=x_label, y_name=y_label)
        labeled_x, labeled_y, unique_labels = seperate_labels(
            x_values, y_values, labels)
        for label in unique_labels:
            # Stack the per-label x/y lists into an (n, 2) array of points.
            scatter2d = np.vstack((labeled_x[label], labeled_y[label])).T
            logger.report_scatter2d(title=y_label, series=label, iteration=0,
                                    scatter=scatter2d, xaxis=x_label,
                                    yaxis=y_label)
def compare_clearml_project(seperate_legends, project_name=None, tasks=None,
                            x_name=None, y_name=None):
    """Collect comparison points from the tasks of a ClearML project.

    For each task, reads ``x_name`` from the 'Args' hyperparameters,
    ``y_name`` from the 'General' hyperparameters, the legend value from
    ``seperate_legends`` ('Args'), and the 'num_minhash_funcs' value ('Args').
    Tasks missing any of these are skipped (best-effort).

    Args:
        seperate_legends: 'Args' hyperparameter name whose value labels each point.
        project_name: project to query when ``tasks`` is not supplied.
        tasks: optional pre-fetched task list; when None, tasks are fetched
            via ``Task.get_tasks(project_name=...)``.
        x_name: 'Args' hyperparameter used for x values (parsed as float).
        y_name: 'General' hyperparameter used for y values (parsed as float).

    Returns:
        Four parallel lists: x values, y values, legend labels, and
        num_minhash_funcs values (floats).
    """
    if tasks is None:
        tasks = Task.get_tasks(project_name=project_name)
    records = []
    for task in tasks:
        try:
            # Hoist the two dict lookups; each is used more than once.
            args = task.data.hyperparams['Args']
            general = task.data.hyperparams['General']
            records.append((
                float(args[x_name].value),
                float(general[y_name].value),
                args[seperate_legends].value,
                float(args['num_minhash_funcs'].value)))
        except Exception as e:
            # Deliberately broad: tasks missing any hyperparameter (or with
            # non-numeric values) are reported and skipped, not fatal.
            # (Removed the redundant `pass` that followed this print.)
            print(e)
    if not records:
        return [], [], [], []
    # Single transpose instead of four separate passes over the records.
    x_values, y_values, labels1, labels2 = (list(col) for col in zip(*records))
    return x_values, y_values, labels1, labels2
def get_pruning_results(project_name, pruning_methods, y_label):
    """Print sorted (keep-edges, metric) pairs for each pruning method.

    NOTE(review): this redefines ``get_pruning_results`` — the earlier
    plotting version in this file is shadowed by this one; consider renaming
    one of the two.

    Args:
        project_name: ClearML project whose tasks are compared.
        pruning_methods: name of the 'Args' hyperparameter used to separate
            results per pruning method.
        y_label: name of the 'General' hyperparameter printed as y values.
    """
    print("Retrieving tasks...")
    tasks = Task.get_tasks(project_name=project_name)
    print('Done')
    print(f"Generating graph comparison for {y_label}")
    # BUG FIX: compare_clearml_project returns FOUR sequences; the original
    # three-name unpack raised "too many values to unpack". The fourth
    # (num_minhash_funcs labels) is unused here.
    x_values, y_values, labels, _ = compare_clearml_project(
        pruning_methods, project_name=project_name, tasks=tasks,
        x_name='keep edges', y_name=y_label)
    labeled_x, labeled_y, unique_labels = seperate_labels(
        x_values, y_values, labels)
    for label in unique_labels:
        print(f'======================= {label} =======================')
        x = np.array(labeled_x[label])
        y = np.array(labeled_y[label])
        # Print the points in ascending x (keep-edges) order.
        order = np.argsort(x)
        x = x[order]
        y = y[order]
        for x_, y_ in zip(x, y):
            print(f'({x_}, {y_})')
        print()