def __kernel_knn_cv_median(dataset_all, ds_name, knn_options, mpg_options,
                           kernel_options, mge_options, ged_options,
                           gram_matrix_unnorm, time_precompute_gm,
                           train_examples, save_results, dir_save,
                           fn_output_detail, fn_output_summary):
    Gn = dataset_all.graphs
    y_all = dataset_all.targets
    n_neighbors, n_splits, test_size = knn_options['n_neighbors'], knn_options['n_splits'], knn_options['test_size']

    # Get shuffles.
    train_indices, test_indices, train_nums, y_app = __get_shuffles(y_all, n_splits, test_size)

    accuracies = [[], [], []]
    for trial in range(len(train_indices)):
        print('\ntrial =', trial)

        train_index = train_indices[trial]
        test_index = test_indices[trial]
        G_app = [Gn[i] for i in train_index]
        G_test = [Gn[i] for i in test_index]
        y_test = [y_all[i] for i in test_index]
        gm_unnorm_trial = gram_matrix_unnorm[train_index, :][:, train_index].copy()

        # Compute pre-images for each class.
        medians = [[], [], []]
        train_nums_tmp = [0] + train_nums
        print('\ncomputing pre-image for each class...\n')
        for i_class in range(len(train_nums_tmp) - 1):
            print(i_class + 1, 'of', len(train_nums_tmp) - 1, 'classes:')
            i_start = int(np.sum(train_nums_tmp[0:i_class + 1]))
            i_end = i_start + train_nums_tmp[i_class + 1]
            median_set = G_app[i_start:i_end]

            dataset = dataset_all.copy()
            dataset.load_graphs([g.copy() for g in median_set], targets=None)
            mge_options['update_order'] = True
            mpg_options['gram_matrix_unnorm'] = gm_unnorm_trial[i_start:i_end, i_start:i_end].copy()
            mpg_options['runtime_precompute_gm'] = 0
            set_median, gen_median_uo = __generate_median_preimages(dataset, mpg_options, kernel_options, ged_options, mge_options)
            mge_options['update_order'] = False
            mpg_options['gram_matrix_unnorm'] = gm_unnorm_trial[i_start:i_end, i_start:i_end].copy()
            mpg_options['runtime_precompute_gm'] = 0
            _, gen_median = __generate_median_preimages(dataset, mpg_options, kernel_options, ged_options, mge_options)
            medians[0].append(set_median)
            medians[1].append(gen_median)
            medians[2].append(gen_median_uo)

        # Perform k-nn for each set of medians.
        print('\nperforming k-nn...')
        for i_app, G_medians in enumerate(medians):  # renamed from G_app to avoid shadowing the training graphs.
            # Compute the unnormalized Gram matrix between the medians.
            dataset = dataset_all.copy()
            dataset.load_graphs([g.copy() for g in G_medians], targets=None)
            gm_app_unnorm, _ = __compute_gram_matrix_unnorm(dataset, kernel_options.copy())

            # Compute the entire Gram matrix (medians + test graphs).
            graph_kernel = __get_graph_kernel(dataset.copy(), kernel_options.copy())
            kernels_to_medians = []
            for g in G_medians:
                kernels_to_median, _ = graph_kernel.compute(g, G_test, **kernel_options.copy())
                kernels_to_medians.append(kernels_to_median)
            kernels_to_medians = np.array(kernels_to_medians)
            gm_all = np.concatenate((gm_app_unnorm, kernels_to_medians), axis=1)
            gm_all = np.concatenate((gm_all, np.concatenate((kernels_to_medians.T, gram_matrix_unnorm[test_index, :][:, test_index].copy()), axis=1)), axis=0)

            gm_all = normalize_gram_matrix(gm_all.copy())
            dis_mat, _, _, _ = compute_distance_matrix(gm_all)

            N = len(G_medians)
            d_app = dis_mat[range(N), :][:, range(N)].copy()

            # Distances from each median (row i) to each test graph (column N + j in dis_mat).
            d_test = np.zeros((N, len(test_index)))
            for i in range(N):
                for j in range(len(test_index)):
                    d_test[i, j] = dis_mat[i, N + j]

            accuracies[i_app].append(knn_classification(d_app, d_test, y_app, y_test, n_neighbors, verbose=True, text=train_examples))

        # Write result detail.
        if save_results:
            f_detail = open(dir_save + fn_output_detail, 'a')
            print('writing results to files...')
            for i, median_type in enumerate(['set-median', 'gen median', 'gen median uo']):
                csv.writer(f_detail).writerow([ds_name, kernel_options['name'],
                    train_examples + ': ' + median_type, trial,
                    knn_options['n_neighbors'], len(gm_all), knn_options['test_size'],
                    accuracies[i][-1][0], accuracies[i][-1][1]])
            f_detail.close()

    results = {}
    results['ave_perf_train'] = [np.mean([i[0] for i in j], axis=0) for j in accuracies]
    results['std_perf_train'] = [np.std([i[0] for i in j], axis=0, ddof=1) for j in accuracies]
    results['ave_perf_test'] = [np.mean([i[1] for i in j], axis=0) for j in accuracies]
    results['std_perf_test'] = [np.std([i[1] for i in j], axis=0, ddof=1) for j in accuracies]

    # Write result summary for each median type.
    if save_results:
        f_summary = open(dir_save + fn_output_summary, 'a')
        for i, median_type in enumerate(['set-median', 'gen median', 'gen median uo']):
            csv.writer(f_summary).writerow([ds_name, kernel_options['name'],
                train_examples + ': ' + median_type, knn_options['n_neighbors'],
                knn_options['test_size'], results['ave_perf_train'][i],
                results['ave_perf_test'][i], results['std_perf_train'][i],
                results['std_perf_test'][i], time_precompute_gm])
        f_summary.close()
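
# The helpers in this module rely on `__get_shuffles` returning, for every trial,
# training/test indices that are grouped contiguously by class (so that
# `G_app[i_start:i_end]` slices out one class), together with the per-class
# training counts `train_nums` and the class-sorted training labels `y_app`.
# The sketch below is NOT the library's implementation; it is a minimal,
# hypothetical `_get_shuffles_sketch` built on sklearn's ShuffleSplit that
# satisfies the same contract, shown only to document the assumed interface.
def _get_shuffles_sketch(y_all, n_splits, test_size):
    import numpy as np
    from sklearn.model_selection import ShuffleSplit

    y_all = np.asarray(y_all)
    classes = sorted(set(y_all.tolist()))
    train_indices = [[] for _ in range(n_splits)]
    test_indices = [[] for _ in range(n_splits)]
    train_nums, y_app = [], []
    for c in classes:
        idx_c = np.where(y_all == c)[0]  # indices of all graphs in class c.
        splitter = ShuffleSplit(n_splits=n_splits, test_size=test_size, random_state=0)  # fixed seed only for reproducibility of the sketch.
        splits_c = list(splitter.split(idx_c))
        n_train_c = len(splits_c[0][0])  # per-class training count (identical for every trial).
        train_nums.append(n_train_c)
        y_app += [c] * n_train_c
        for trial, (tr, te) in enumerate(splits_c):
            train_indices[trial] += list(idx_c[tr])
            test_indices[trial] += list(idx_c[te])
    return train_indices, test_indices, train_nums, y_app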
def __kernel_knn_cv_best_ds(dataset_all, ds_name, knn_options, kernel_options,
                            gram_matrix_unnorm, time_precompute_gm,
                            train_examples, save_results, dir_save,
                            fn_output_detail, fn_output_summary):
    Gn = dataset_all.graphs
    y_all = dataset_all.targets
    n_neighbors, n_splits, test_size = knn_options['n_neighbors'], knn_options['n_splits'], knn_options['test_size']

    # Get shuffles.
    train_indices, test_indices, train_nums, y_app = __get_shuffles(y_all, n_splits, test_size)

    accuracies = []
    for trial in range(len(train_indices)):
        print('\ntrial =', trial)

        train_index = train_indices[trial]
        test_index = test_indices[trial]
        G_app = [Gn[i] for i in train_index]
        G_test = [Gn[i] for i in test_index]
        y_test = [y_all[i] for i in test_index]
        gm_unnorm_trial = gram_matrix_unnorm[train_index, :][:, train_index].copy()

        # For each class, pick the best graph from the training set according to its distance in kernel space.
        best_graphs = []
        train_nums_tmp = [0] + train_nums
        print('\ngetting best graph from trainset for each class...')
        for i_class in range(len(train_nums_tmp) - 1):
            print(i_class + 1, 'of', len(train_nums_tmp) - 1, 'classes.')
            i_start = int(np.sum(train_nums_tmp[0:i_class + 1]))
            i_end = i_start + train_nums_tmp[i_class + 1]
            G_class = G_app[i_start:i_end]
            gm_unnorm_class = gm_unnorm_trial[i_start:i_end, i_start:i_end]
            gm_class = normalize_gram_matrix(gm_unnorm_class.copy())

            k_dis_list = []
            for idx in range(len(G_class)):
                k_dis_list.append(compute_k_dis(idx, range(0, len(G_class)), [1 / len(G_class)] * len(G_class), gm_class, withterm3=False))
            idx_k_dis_min = np.argmin(k_dis_list)
            best_graphs.append(G_class[idx_k_dis_min].copy())

        # Perform k-nn.
        print('\nperforming k-nn...')
        # Compute the unnormalized Gram matrix between the best graphs.
        dataset = dataset_all.copy()
        dataset.load_graphs([g.copy() for g in best_graphs], targets=None)
        gm_app_unnorm, _ = __compute_gram_matrix_unnorm(dataset, kernel_options.copy())

        # Compute the entire Gram matrix (best graphs + test graphs).
        graph_kernel = __get_graph_kernel(dataset.copy(), kernel_options.copy())
        kernels_to_best_graphs = []
        for g in best_graphs:
            kernels_to_best_graph, _ = graph_kernel.compute(g, G_test, **kernel_options.copy())
            kernels_to_best_graphs.append(kernels_to_best_graph)
        kernels_to_best_graphs = np.array(kernels_to_best_graphs)
        gm_all = np.concatenate((gm_app_unnorm, kernels_to_best_graphs), axis=1)
        gm_all = np.concatenate((gm_all, np.concatenate((kernels_to_best_graphs.T, gram_matrix_unnorm[test_index, :][:, test_index].copy()), axis=1)), axis=0)

        gm_all = normalize_gram_matrix(gm_all.copy())
        dis_mat, _, _, _ = compute_distance_matrix(gm_all)

        N = len(best_graphs)
        d_app = dis_mat[range(N), :][:, range(N)].copy()

        # Distances from each best graph (row i) to each test graph (column N + j in dis_mat).
        d_test = np.zeros((N, len(test_index)))
        for i in range(N):
            for j in range(len(test_index)):
                d_test[i, j] = dis_mat[i, N + j]

        accuracies.append(knn_classification(d_app, d_test, y_app, y_test, n_neighbors, verbose=True, text=train_examples))

        # Write result detail.
        if save_results:
            f_detail = open(dir_save + fn_output_detail, 'a')
            print('writing results to files...')
            csv.writer(f_detail).writerow([ds_name, kernel_options['name'],
                train_examples, trial, knn_options['n_neighbors'], len(gm_all),
                knn_options['test_size'], accuracies[-1][0], accuracies[-1][1]])
            f_detail.close()

    results = {}
    results['ave_perf_train'] = np.mean([i[0] for i in accuracies], axis=0)
    results['std_perf_train'] = np.std([i[0] for i in accuracies], axis=0, ddof=1)
    results['ave_perf_test'] = np.mean([i[1] for i in accuracies], axis=0)
    results['std_perf_test'] = np.std([i[1] for i in accuracies], axis=0, ddof=1)

    # Write result summary.
    if save_results:
        f_summary = open(dir_save + fn_output_summary, 'a')
        csv.writer(f_summary).writerow([ds_name, kernel_options['name'],
            train_examples, knn_options['n_neighbors'], knn_options['test_size'],
            results['ave_perf_train'], results['ave_perf_test'],
            results['std_perf_train'], results['std_perf_test'],
            time_precompute_gm])
        f_summary.close()
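
# The functions above lean on gklearn's `normalize_gram_matrix` and
# `compute_distance_matrix`. The standard definitions they are assumed to
# follow are cosine normalization, k'(x, y) = k(x, y) / sqrt(k(x, x) k(y, y)),
# and the kernel-induced distance, d(x, y) = sqrt(k(x, x) + k(y, y) - 2 k(x, y)).
# The NumPy-only sketch below reproduces those formulas for reference; it is
# not a drop-in replacement for the gklearn helpers (the extra return values
# of `compute_distance_matrix` are assumed to be max/min/mean statistics).
def _normalize_gram_matrix_sketch(gram):
    import numpy as np
    diag = np.sqrt(np.diag(gram))
    return gram / np.outer(diag, diag)  # k'(x, y) = k(x, y) / sqrt(k(x, x) k(y, y)).


def _compute_distance_matrix_sketch(gram):
    import numpy as np
    diag = np.diag(gram)
    sq = diag[:, None] + diag[None, :] - 2 * gram  # d^2(x, y) = k(x, x) + k(y, y) - 2 k(x, y).
    dis_mat = np.sqrt(np.maximum(sq, 0))  # clip tiny negatives caused by round-off.
    off_diag = dis_mat[~np.eye(len(dis_mat), dtype=bool)]
    return dis_mat, np.max(dis_mat), np.min(off_diag), np.mean(off_diag)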
def __kernel_knn_cv_trainset(dataset_all, ds_name, knn_options, kernel_options,
                             gram_matrix_unnorm, time_precompute_gm,
                             train_examples, save_results, dir_save,
                             fn_output_detail, fn_output_summary):
    y_all = dataset_all.targets
    n_neighbors, n_splits, test_size = knn_options['n_neighbors'], knn_options['n_splits'], knn_options['test_size']

    # Compute the distance matrix from the normalized Gram matrix.
    gram_matrix = normalize_gram_matrix(gram_matrix_unnorm.copy())
    dis_mat, _, _, _ = compute_distance_matrix(gram_matrix)

    # Get shuffles.
    train_indices, test_indices, _, _ = __get_shuffles(y_all, n_splits, test_size)

    accuracies = []
    for trial in range(len(train_indices)):
        print('\ntrial =', trial)

        train_index = train_indices[trial]
        test_index = test_indices[trial]
        y_app = [y_all[i] for i in train_index]
        y_test = [y_all[i] for i in test_index]

        N = len(train_index)
        d_app = dis_mat[train_index, :][:, train_index].copy()

        d_test = np.zeros((N, len(test_index)))
        for i in range(N):
            for j in range(len(test_index)):
                d_test[i, j] = dis_mat[train_index[i], test_index[j]]

        accuracies.append(knn_classification(d_app, d_test, y_app, y_test, n_neighbors, verbose=True, text=train_examples))

        # Write result detail.
        if save_results:
            print('writing results to files...')
            f_detail = open(dir_save + fn_output_detail, 'a')
            csv.writer(f_detail).writerow([ds_name, kernel_options['name'],
                train_examples, trial, knn_options['n_neighbors'], len(gram_matrix),
                knn_options['test_size'], accuracies[-1][0], accuracies[-1][1]])
            f_detail.close()

    results = {}
    results['ave_perf_train'] = np.mean([i[0] for i in accuracies], axis=0)
    results['std_perf_train'] = np.std([i[0] for i in accuracies], axis=0, ddof=1)
    results['ave_perf_test'] = np.mean([i[1] for i in accuracies], axis=0)
    results['std_perf_test'] = np.std([i[1] for i in accuracies], axis=0, ddof=1)

    # Write result summary.
    if save_results:
        f_summary = open(dir_save + fn_output_summary, 'a')
        csv.writer(f_summary).writerow([ds_name, kernel_options['name'],
            train_examples, knn_options['n_neighbors'], knn_options['test_size'],
            results['ave_perf_train'], results['ave_perf_test'],
            results['std_perf_train'], results['std_perf_test'],
            time_precompute_gm])
        f_summary.close()
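
# `knn_classification` is called above with a precomputed train/train distance
# matrix `d_app`, a (train x test) distance matrix `d_test`, and is expected to
# return a (train accuracy, test accuracy) pair. The following is a minimal,
# hypothetical stand-in (not gklearn's implementation) built on sklearn's
# KNeighborsClassifier with metric='precomputed', shown to document the
# assumed behaviour.
def _knn_classification_sketch(d_app, d_test, y_app, y_test, n_neighbors, verbose=True, text=''):
    from sklearn.neighbors import KNeighborsClassifier
    from sklearn.metrics import accuracy_score

    clf = KNeighborsClassifier(n_neighbors=n_neighbors, metric='precomputed')
    clf.fit(d_app, y_app)  # rows/columns of d_app index the training graphs.
    acc_train = accuracy_score(y_app, clf.predict(d_app))
    acc_test = accuracy_score(y_test, clf.predict(d_test.T))  # predict() expects one row per query sample.
    if verbose:
        print(text, 'train accuracy:', acc_train, 'test accuracy:', acc_test)
    return acc_train, acc_test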
def xp_simple_preimage():
    import numpy as np

    """**1. Get dataset.**"""

    from gklearn.utils import Dataset, split_dataset_by_target

    # Predefined dataset name, use dataset "MAO".
    ds_name = 'MAO'
    # The node/edge labels that will not be used in the computation.
    irrelevant_labels = {'node_attrs': ['x', 'y', 'z'], 'edge_labels': ['bond_stereo']}

    # Initialize a Dataset.
    dataset_all = Dataset()
    # Load predefined dataset "MAO".
    dataset_all.load_predefined_dataset(ds_name)
    # Remove irrelevant labels.
    dataset_all.remove_labels(**irrelevant_labels)
    # Split the whole dataset according to the classification targets.
    datasets = split_dataset_by_target(dataset_all)
    # Get the first class of graphs, whose median preimage will be computed.
    dataset = datasets[0]
    print(len(dataset.graphs))  # number of graphs in the first class.

    """**2. Set parameters.**"""

    import multiprocessing

    # Parameters for MedianPreimageGenerator (our method).
    mpg_options = {'fit_method': 'k-graphs',  # how to fit edit costs. "k-graphs" means use all graphs in the median set when fitting.
                   'init_ecc': [4, 4, 2, 1, 1, 1],  # initial edit costs.
                   'ds_name': ds_name,  # name of the dataset.
                   'parallel': True,  # whether the parallel scheme is to be used.
                   'time_limit_in_sec': 0,  # maximum time limit to compute the preimage. If set to 0, there is no limit.
                   'max_itrs': 10,  # maximum iteration limit to optimize edit costs. If set to 0, there is no limit.
                   'max_itrs_without_update': 3,  # if the edit costs are not updated more than this many times, the optimization stops.
                   'epsilon_residual': 0.01,  # in optimization, the residual is only considered changed if the change is bigger than this number.
                   'epsilon_ec': 0.1,  # in optimization, the edit costs are only considered changed if the changes are bigger than this number.
                   'verbose': 2  # whether to print out results.
                   }
    # Parameters for graph kernel computation.
    kernel_options = {'name': 'PathUpToH',  # use path kernel up to length h.
                      'depth': 9,
                      'k_func': 'MinMax',
                      'compute_method': 'trie',
                      'parallel': 'imap_unordered',  # or None.
                      'n_jobs': multiprocessing.cpu_count(),
                      'normalize': True,  # whether to use the normalized Gram matrix to optimize edit costs.
                      'verbose': 2  # whether to print out results.
                      }
    # Parameters for GED computation.
    ged_options = {'method': 'IPFP',  # use the IPFP heuristic.
                   'initialization_method': 'RANDOM',  # or 'NODE', etc.
                   'initial_solutions': 10,  # when bigger than 1, the method is considered mIPFP.
                   'edit_cost': 'CONSTANT',  # use CONSTANT cost.
                   'attr_distance': 'euclidean',  # the distance between non-symbolic node/edge labels is computed by euclidean distance.
                   'ratio_runs_from_initial_solutions': 1,
                   'threads': multiprocessing.cpu_count(),  # parallel threads. Does not work if mpg_options['parallel'] = False.
                   'init_option': 'EAGER_WITHOUT_SHUFFLED_COPIES'
                   }
    # Parameters for MedianGraphEstimator (Boria's method).
    mge_options = {'init_type': 'MEDOID',  # how to initialize the median (compute the set-median). "MEDOID" uses the graph with the smallest SOD.
                   'random_inits': 10,  # number of random initializations when 'init_type' = 'RANDOM'.
                   'time_limit': 600,  # maximum time limit to compute the generalized median. If set to 0, there is no limit.
                   'verbose': 2,  # whether to print out results.
                   'refine': False  # whether to refine the final SODs or not.
                   }
    print('done.')

    """**3. Compute the Gram matrix and distance matrix.**"""

    from gklearn.utils.utils import get_graph_kernel_by_name

    # Get a graph kernel instance.
    graph_kernel = get_graph_kernel_by_name(kernel_options['name'],
                                            node_labels=dataset.node_labels,
                                            edge_labels=dataset.edge_labels,
                                            node_attrs=dataset.node_attrs,
                                            edge_attrs=dataset.edge_attrs,
                                            ds_infos=dataset.get_dataset_infos(keys=['directed']),
                                            kernel_options=kernel_options)
    # Compute the Gram matrix.
    gram_matrix, run_time = graph_kernel.compute(dataset.graphs, **kernel_options)
    # Compute the distance matrix.
    from gklearn.utils import compute_distance_matrix
    dis_mat, _, _, _ = compute_distance_matrix(gram_matrix)

    print('done.')

    """**4. Find the candidate graph.**"""

    from gklearn.preimage.utils import compute_k_dis

    # Number of the nearest neighbors.
    k_neighbors = 10

    # For each graph G in the dataset, compute the distance between its image \Phi(G) and the mean of its neighbors' images.
    dis_min = np.inf  # the minimum distance between a possible \Phi(G) and the mean of its neighbors' images.
    for idx, G in enumerate(dataset.graphs):
        # Find the k nearest neighbors of G.
        dis_list = dis_mat[idx]  # distances between \Phi(G) and the image of each graph.
        idx_sort = np.argsort(dis_list)  # sort distances and get the sorted indices.
        idx_nearest = idx_sort[1:k_neighbors + 1]  # indices of the k nearest neighbors.
        dis_k_nearest = [dis_list[i] for i in idx_nearest]  # k nearest distances, excluding the zero distance of G to itself.
        G_k_nearest = [dataset.graphs[i] for i in idx_nearest]  # k nearest neighbors.

        # Compute the distance between \Phi(G) and the mean of its neighbors' images.
        dis_tmp = compute_k_dis(idx,  # the index of G in the Gram matrix.
                                idx_nearest,  # the indices of the neighbors.
                                [1 / k_neighbors] * k_neighbors,  # coefficients for the neighbors.
                                gram_matrix,
                                withterm3=False)
        # Check if the new distance is smaller.
        if dis_tmp < dis_min:
            dis_min = dis_tmp
            G_cand = G
            G_neighbors = G_k_nearest

    print('The minimum distance is', dis_min)

    """**5. Run the median preimage generator.**"""

    from gklearn.preimage import MedianPreimageGenerator

    # Set the dataset as the k nearest neighbors.
    dataset.load_graphs(G_neighbors)

    # Create a median preimage generator instance.
    mpg = MedianPreimageGenerator()
    # Add the dataset.
    mpg.dataset = dataset
    # Set parameters.
    mpg.set_options(**mpg_options.copy())
    mpg.kernel_options = kernel_options.copy()
    mpg.ged_options = ged_options.copy()
    mpg.mge_options = mge_options.copy()
    # Run.
    mpg.run()

    """**6. Get results.**"""

    import pprint
    pp = pprint.PrettyPrinter(indent=4)  # pretty printer.
    results = mpg.get_results()
    pp.pprint(results)

    draw_graph(mpg.set_median)
    draw_graph(mpg.gen_median)
    draw_graph(G_cand)
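
# `draw_graph` is called at the end of `xp_simple_preimage` but is not defined
# or imported in this section. The stand-in below is an assumption (not
# gklearn's own plotting helper): it renders a NetworkX graph with matplotlib,
# labelling nodes by a hypothetical attribute name and falling back to node
# ids when that attribute is absent.
def _draw_graph_sketch(graph, node_label='atom_symbol'):
    import matplotlib.pyplot as plt
    import networkx as nx

    pos = nx.spring_layout(graph, seed=0)  # fixed seed for a reproducible layout.
    labels = nx.get_node_attributes(graph, node_label) or None  # 'atom_symbol' is an assumed attribute name.
    nx.draw(graph, pos, with_labels=True, labels=labels, node_color='lightblue')
    plt.show()


if __name__ == '__main__':
    # Example entry point: run the simple pre-image experiment end to end.
    xp_simple_preimage()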