Example #1
	def __do_trial(self, g_init, fdgs, term3, trial):
		# add and delete edges.
		gtemp = g_init.copy()
		seed = (trial + int(time.time())) % (2 ** 32 - 1)
		rdm_state = np.random.RandomState(seed=seed)
		# which edges to change.
		# @todo: should we use just half of the adjacency matrix for undirected graphs?
		nb_vpairs = nx.number_of_nodes(g_init) * (nx.number_of_nodes(g_init) - 1)
		# @todo: what if fdgs is bigger than nb_vpairs?
		idx_change = rdm_state.randint(0, high=nb_vpairs, size=min(fdgs, nb_vpairs))
		for item in idx_change:
			node1 = item // (nx.number_of_nodes(g_init) - 1)
			node2 = item - node1 * (nx.number_of_nodes(g_init) - 1)
			if node2 >= node1: # skip the self pair.
				node2 += 1
			# @todo: is the randomness correct?
			if not gtemp.has_edge(node1, node2):
				gtemp.add_edge(node1, node2)
			else:
				gtemp.remove_edge(node1, node2)
				
		# compute new distances.
		kernels_to_gtmp, _ = self._graph_kernel.compute(gtemp, self._dataset.graphs, **self._kernel_options)
		kernel_gtmp, _ = self._graph_kernel.compute(gtemp, gtemp, **self._kernel_options)
		if self._kernel_options['normalize']:
			kernels_to_gtmp = [kernels_to_gtmp[i] / np.sqrt(self.__gram_matrix_unnorm[i, i] * kernel_gtmp) for i in range(len(kernels_to_gtmp))] # normalize 
			kernel_gtmp = 1
		# @todo: not correct kernel value
		gram_with_gtmp = np.concatenate((np.array([kernels_to_gtmp]), np.copy(self._graph_kernel.gram_matrix)), axis=0)
		gram_with_gtmp = np.concatenate((np.array([[kernel_gtmp] + kernels_to_gtmp]).T, gram_with_gtmp), axis=1)
		dnew = compute_k_dis(0, range(1, 1 + len(self._dataset.graphs)), self.__alphas, gram_with_gtmp, term3=term3, withterm3=True)
		
		return gtemp, dnew
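# The index arithmetic in __do_trial maps a flat index in [0, n*(n-1)) to an
# ordered pair of distinct nodes. A minimal standalone sketch of that mapping
# (illustrative names, not part of the library), handy for sanity-checking the
# node1/node2 computation:
def _index_to_node_pair(item, n):
	"""Map a flat index in [0, n*(n-1)) to an ordered pair of distinct nodes."""
	node1 = item // (n - 1)
	node2 = item - node1 * (n - 1)
	if node2 >= node1: # skip the self pair (node1, node1).
		node2 += 1
	return node1, node2

# Quick check: every ordered pair of distinct nodes appears exactly once.
_n = 4
_pairs = [_index_to_node_pair(i, _n) for i in range(_n * (_n - 1))]
assert len(set(_pairs)) == _n * (_n - 1) and all(a != b for a, b in _pairs)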
def _get_best_graph(Gn, gram_matrix):
	k_dis_list = []
	for idx in range(len(Gn)):
		k_dis_list.append(compute_k_dis(idx, range(0, len(Gn)), [1 / len(Gn)] * len(Gn), gram_matrix, withterm3=False))
	best_index = np.argmin(k_dis_list)
	best_dis = k_dis_list[best_index]
	best_graph = Gn[best_index].copy()
	return best_index, best_dis, best_graph 
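# _get_best_graph relies on compute_k_dis from gklearn.preimage.utils. As it is
# called here, it appears to evaluate the kernel-space distance between the
# image of graph `idx` and the weighted mean of the images of the graphs in
# `idx_range`:
#     d = sqrt(K[idx, idx] - 2 * sum_i a_i * K[idx, r_i]
#              + sum_{i,j} a_i * a_j * K[r_i, r_j]),
# where the last term can be precomputed and passed as `term3`. A minimal
# re-implementation sketch under that assumption (not the library's code):
import numpy as np

def _k_dis_sketch(idx, idx_range, alphas, K, term3=None):
	idx_range = list(idx_range)
	term1 = K[idx, idx]
	term2 = 2 * sum(a * K[idx, j] for a, j in zip(alphas, idx_range))
	if term3 is None: # corresponds to withterm3=False in the calls above.
		term3 = sum(a1 * a2 * K[j1, j2]
					for a1, j1 in zip(alphas, idx_range)
					for a2, j2 in zip(alphas, idx_range))
	return np.sqrt(max(term1 - term2 + term3, 0)) # clip rounding negatives.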
Example #3
	def __compute_distances_to_true_median(self):		
		# compute distance in kernel space for set median.
		kernels_to_sm, _ = self._graph_kernel.compute(self.__set_median, self._dataset.graphs, **self._kernel_options)
		kernel_sm, _ = self._graph_kernel.compute(self.__set_median, self.__set_median, **self._kernel_options)
		if self._kernel_options['normalize']:
			kernels_to_sm = [kernels_to_sm[i] / np.sqrt(self.__gram_matrix_unnorm[i, i] * kernel_sm) for i in range(len(kernels_to_sm))] # normalize 
			kernel_sm = 1
		# @todo: not correct kernel value
		gram_with_sm = np.concatenate((np.array([kernels_to_sm]), np.copy(self._graph_kernel.gram_matrix)), axis=0)
		gram_with_sm = np.concatenate((np.array([[kernel_sm] + kernels_to_sm]).T, gram_with_sm), axis=1)
		self.__k_dis_set_median = compute_k_dis(0, range(1, 1+len(self._dataset.graphs)), 
										  [1 / len(self._dataset.graphs)] * len(self._dataset.graphs),
										  gram_with_sm, withterm3=False)
		
		# compute distance in kernel space for generalized median.
		kernels_to_gm, _ = self._graph_kernel.compute(self.__gen_median, self._dataset.graphs, **self._kernel_options)
		kernel_gm, _ = self._graph_kernel.compute(self.__gen_median, self.__gen_median, **self._kernel_options)
		if self._kernel_options['normalize']:
			kernels_to_gm = [kernels_to_gm[i] / np.sqrt(self.__gram_matrix_unnorm[i, i] * kernel_gm) for i in range(len(kernels_to_gm))] # normalize
			kernel_gm = 1
		gram_with_gm = np.concatenate((np.array([kernels_to_gm]), np.copy(self._graph_kernel.gram_matrix)), axis=0)
		gram_with_gm = np.concatenate((np.array([[kernel_gm] + kernels_to_gm]).T, gram_with_gm), axis=1)
		self.__k_dis_gen_median = compute_k_dis(0, range(1, 1+len(self._dataset.graphs)), 
										  [1 / len(self._dataset.graphs)] * len(self._dataset.graphs),
										  gram_with_gm, withterm3=False)
				
		# compute distance in kernel space for each graph in median set.
		k_dis_median_set = []
		for idx in range(len(self._dataset.graphs)):
			k_dis_median_set.append(compute_k_dis(idx+1, range(1, 1+len(self._dataset.graphs)), 
								 [1 / len(self._dataset.graphs)] * len(self._dataset.graphs), 
								 gram_with_gm, withterm3=False))
		idx_k_dis_median_set_min = np.argmin(k_dis_median_set)
		self.__k_dis_dataset = k_dis_median_set[idx_k_dis_median_set_min]
		self.__best_from_dataset = self._dataset.graphs[idx_k_dis_median_set_min].copy()
			
		if self._verbose >= 2:
			print()
			print('distance in kernel space for set median:', self.__k_dis_set_median)
			print('distance in kernel space for generalized median:', self.__k_dis_gen_median)
			print('minimum distance in kernel space over all graphs in the median set:', self.__k_dis_dataset)
			print('distance in kernel space for each graph in median set:', k_dis_median_set)	
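# Both normalize-and-border blocks above follow the same pattern: extend an
# existing n x n Gram matrix with one out-of-sample graph, whose self-kernel
# occupies the top-left corner and whose kernels to the dataset fill the first
# row and column, so that index 0 can be passed to compute_k_dis. A standalone
# numpy sketch of that bordering step (illustrative names only):
import numpy as np

def _border_gram_matrix(gram, kernels_to_new, kernel_new):
	"""Return the (n+1) x (n+1) Gram matrix with the new graph at index 0."""
	n = gram.shape[0]
	bordered = np.empty((n + 1, n + 1))
	bordered[0, 0] = kernel_new
	bordered[0, 1:] = kernels_to_new
	bordered[1:, 0] = kernels_to_new
	bordered[1:, 1:] = gram
	return bordered

# With the cosine-style normalization used above,
# kernels_to_new[i] = k(g_new, g_i) / sqrt(k(g_new, g_new) * k(g_i, g_i))
# and kernel_new = 1.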
Example #4
def __kernel_knn_cv_best_ds(dataset_all, ds_name, knn_options, kernel_options,
                            gram_matrix_unnorm, time_precompute_gm,
                            train_examples, save_results, dir_save,
                            fn_output_detail, fn_output_summary):
    Gn = dataset_all.graphs
    y_all = dataset_all.targets
    n_neighbors, n_splits, test_size = knn_options['n_neighbors'], knn_options[
        'n_splits'], knn_options['test_size']

    # get shuffles.
    train_indices, test_indices, train_nums, y_app = __get_shuffles(
        y_all, n_splits, test_size)

    accuracies = []
    for trial in range(len(train_indices)):
        print('\ntrial =', trial)

        train_index = train_indices[trial]
        test_index = test_indices[trial]
        G_app = [Gn[i] for i in train_index]
        G_test = [Gn[i] for i in test_index]
        y_test = [y_all[i] for i in test_index]
        gm_unnorm_trial = gram_matrix_unnorm[
            train_index, :][:, train_index].copy()

        # get best graph from trainset according to distance in kernel space for each class.
        best_graphs = []
        train_nums_tmp = [0] + train_nums
        print('\ngetting best graph from trainset for each class...')
        for i_class in range(len(train_nums_tmp) - 1):
            print(i_class + 1, 'of', len(train_nums_tmp) - 1, 'classes.')
            i_start = int(np.sum(train_nums_tmp[0:i_class + 1]))
            i_end = i_start + train_nums_tmp[i_class + 1]
            G_class = G_app[i_start:i_end]
            gm_unnorm_class = gm_unnorm_trial[i_start:i_end, i_start:i_end]
            gm_class = normalize_gram_matrix(gm_unnorm_class.copy())

            k_dis_list = []
            for idx in range(len(G_class)):
                k_dis_list.append(
                    compute_k_dis(idx,
                                  range(0, len(G_class)),
                                  [1 / len(G_class)] * len(G_class),
                                  gm_class,
                                  withterm3=False))
            idx_k_dis_min = np.argmin(k_dis_list)
            best_graphs.append(G_class[idx_k_dis_min].copy())

        # perform k-nn.
        print('\nperforming k-nn...')
        # compute dis_mat between medians.
        dataset = dataset_all.copy()
        dataset.load_graphs([g.copy() for g in best_graphs], targets=None)
        gm_app_unnorm, _ = __compute_gram_matrix_unnorm(
            dataset, kernel_options.copy())

        # compute the entire Gram matrix.
        graph_kernel = __get_graph_kernel(dataset.copy(),
                                          kernel_options.copy())
        kernels_to_best_graphs = []
        for g in best_graphs:
            kernels_to_best_graph, _ = graph_kernel.compute(
                g, G_test, **kernel_options.copy())
            kernels_to_best_graphs.append(kernels_to_best_graph)
        kernels_to_best_graphs = np.array(kernels_to_best_graphs)
        gm_all = np.concatenate((gm_app_unnorm, kernels_to_best_graphs),
                                axis=1)
        gm_all = np.concatenate(
            (gm_all,
             np.concatenate(
                 (kernels_to_best_graphs.T,
                  gram_matrix_unnorm[test_index, :][:, test_index].copy()),
                 axis=1)),
            axis=0)

        gm_all = normalize_gram_matrix(gm_all.copy())
        dis_mat, _, _, _ = compute_distance_matrix(gm_all)

        N = len(best_graphs)

        d_app = dis_mat[0:N, 0:N].copy()

        d_test = np.zeros((N, len(test_index)))
        for i in range(N):
            for j in range(len(test_index)):
                d_test[i, j] = dis_mat[i, j + N]  # test columns start at offset N.

        accuracies.append(
            knn_classification(d_app,
                               d_test,
                               y_app,
                               y_test,
                               n_neighbors,
                               verbose=True,
                               text=train_examples))

        # write result detail.
        if save_results:
            f_detail = open(dir_save + fn_output_detail, 'a')
            print('writing results to files...')
            csv.writer(f_detail).writerow([
                ds_name, kernel_options['name'], train_examples, trial,
                knn_options['n_neighbors'],
                len(gm_all), knn_options['test_size'], accuracies[-1][0],
                accuracies[-1][1]
            ])
            f_detail.close()

    results = {}
    results['ave_perf_train'] = np.mean([i[0] for i in accuracies], axis=0)
    results['std_perf_train'] = np.std([i[0] for i in accuracies],
                                       axis=0,
                                       ddof=1)
    results['ave_perf_test'] = np.mean([i[1] for i in accuracies], axis=0)
    results['std_perf_test'] = np.std([i[1] for i in accuracies],
                                      axis=0,
                                      ddof=1)

    # write result summary for each letter.
    if save_results:
        f_summary = open(dir_save + fn_output_summary, 'a')
        csv.writer(f_summary).writerow([
            ds_name, kernel_options['name'], train_examples,
            knn_options['n_neighbors'], knn_options['test_size'],
            results['ave_perf_train'], results['ave_perf_test'],
            results['std_perf_train'], results['std_perf_test'],
            time_precompute_gm
        ])
        f_summary.close()
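# `knn_classification` is defined elsewhere in the library; a plausible minimal
# equivalent built on scikit-learn's precomputed-metric k-NN is sketched below.
# This is an assumption about its behavior, not the actual implementation.
import numpy as np
from sklearn.neighbors import KNeighborsClassifier

def _knn_classification_sketch(d_app, d_test, y_app, y_test, n_neighbors):
    """d_app: (n_app, n_app) distances; d_test: (n_app, n_test) distances."""
    clf = KNeighborsClassifier(n_neighbors=n_neighbors, metric='precomputed')
    clf.fit(d_app, y_app)
    acc_train = clf.score(d_app, y_app)     # each sample is its own nearest neighbor.
    acc_test = clf.score(d_test.T, y_test)  # rows of the query matrix = test samples.
    return acc_train, acc_test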
Example #5
	def run(self):
		self._graph_kernel = get_graph_kernel_by_name(self._kernel_options['name'], 
						  node_labels=self._dataset.node_labels,
						  edge_labels=self._dataset.edge_labels, 
						  node_attrs=self._dataset.node_attrs,
						  edge_attrs=self._dataset.edge_attrs,
						  ds_infos=self._dataset.get_dataset_infos(keys=['directed']),
						  kernel_options=self._kernel_options)
		
		# record start time.
		start = time.time()
		
		# 1. precompute gram matrix.
		if self.__gram_matrix_unnorm is None:
			gram_matrix, run_time = self._graph_kernel.compute(self._dataset.graphs, **self._kernel_options)
			self.__gram_matrix_unnorm = self._graph_kernel.gram_matrix_unnorm
			end_precompute_gm = time.time()
			self.__runtime_precompute_gm = end_precompute_gm - start
		else:
			if self.__runtime_precompute_gm is None:
				raise Exception('Parameter "runtime_precompute_gm" must be given when using pre-computed Gram matrix.')
			self._graph_kernel.gram_matrix_unnorm = self.__gram_matrix_unnorm
			if self._kernel_options['normalize']:
				self._graph_kernel.gram_matrix = self._graph_kernel.normalize_gm(np.copy(self.__gram_matrix_unnorm))
			else:
				self._graph_kernel.gram_matrix = np.copy(self.__gram_matrix_unnorm)
			end_precompute_gm = time.time()
			start -= self.__runtime_precompute_gm
			
		# 2. compute k nearest neighbors of phi in D_N.
		if self._verbose >= 2:
			print('\nstart computing k nearest neighbors of phi in D_N...\n')
		D_N = self._dataset.graphs
		if self.__alphas is None:
			self.__alphas = [1 / len(D_N)] * len(D_N)
		k_dis_list = [] # distance between g_star and each graph.
		term3 = 0
		for i1, a1 in enumerate(self.__alphas):
			for i2, a2 in enumerate(self.__alphas):
				term3 += a1 * a2 * self._graph_kernel.gram_matrix[i1, i2]
		for idx in range(len(D_N)):
			k_dis_list.append(compute_k_dis(idx, range(0, len(D_N)), self.__alphas, self._graph_kernel.gram_matrix, term3=term3, withterm3=True))
			
		# sort.
		sort_idx = np.argsort(k_dis_list)
		dis_gs = [k_dis_list[idis] for idis in sort_idx[0:self.__k]] # the k shortest distances.
		nb_best = len(np.argwhere(np.array(dis_gs) == dis_gs[0]).flatten().tolist())
		g0hat_list = [D_N[idx].copy() for idx in sort_idx[0:nb_best]] # the nearest neighbors of phi in D_N
		self.__best_from_dataset = g0hat_list[0] # get the first best graph if there are multiple.
		self.__k_dis_dataset = dis_gs[0]
		
		if self.__k_dis_dataset == 0: # get the exact pre-image.
			end_generate_preimage = time.time()
			self.__runtime_generate_preimage = end_generate_preimage - end_precompute_gm
			self.__runtime_total = end_generate_preimage - start
			self.__preimage = self.__best_from_dataset.copy()	
			self.__k_dis_preimage = self.__k_dis_dataset
			if self._verbose:
				print()
				print('=============================================================================')
				print('The exact pre-image is found from the input dataset.')
				print('-----------------------------------------------------------------------------')
				print('Distance in kernel space for the best graph from dataset and for preimage:', self.__k_dis_dataset)
				print('Time to pre-compute Gram matrix:', self.__runtime_precompute_gm)
				print('Time to generate pre-images:', self.__runtime_generate_preimage)
				print('Total time:', self.__runtime_total)
				print('=============================================================================')
				print()
			return
		
		dhat = dis_gs[0] # the nearest distance
		Gk = [D_N[ig].copy() for ig in sort_idx[0:self.__k]] # the k nearest neighbors
		Gs_nearest = [nx.convert_node_labels_to_integers(g) for g in Gk] # [g.copy() for g in Gk]
		
		# 3. start iterations.
		if self._verbose >= 2:
			print('starting iterations...')
		gihat_list = []
		dihat_list = []
		r = 0
		dis_of_each_itr = [dhat]
		if self.__parallel:
			self._kernel_options['parallel'] = None
		self.__itrs = 0
		self.__num_updates = 0
		timer = Timer(self.__time_limit_in_sec)
		while not self.__termination_criterion_met(timer, self.__itrs, r):
			print('\n- r =', r)
			found = False
			dis_bests = dis_gs + dihat_list
			
			# compute numbers of edges to be inserted/deleted.
			# @todo what if the log is negative? how to choose alpha (scalar)?
			fdgs_list = np.array(dis_bests)
			if np.min(fdgs_list) < 1: # in case the log is negative.
				fdgs_list /= np.min(fdgs_list)
			fdgs_list = [int(item) for item in np.ceil(np.log(fdgs_list))]
			if np.min(fdgs_list) < 1: # in case the log is smaller than 1.
				fdgs_list = np.array(fdgs_list) + 1
			# expand the number of modifications to increase the possibility.
			nb_vpairs_list = [nx.number_of_nodes(g) * (nx.number_of_nodes(g) - 1) for g in (Gs_nearest + gihat_list)]
			nb_vpairs_min = np.min(nb_vpairs_list)
			idx_fdgs_max = np.argmax(fdgs_list)
			fdgs_max_old = fdgs_list[idx_fdgs_max]
			fdgs_max = fdgs_max_old
			nb_modif = 1
			for idx, nb in enumerate(range(nb_vpairs_min, nb_vpairs_min - fdgs_max, -1)):
				nb_modif *= nb / (fdgs_max - idx)
			while fdgs_max < nb_vpairs_min and nb_modif < self.__l:
				fdgs_max += 1
				nb_modif *= (nb_vpairs_min - fdgs_max + 1) / fdgs_max
			nb_increase = int(fdgs_max - fdgs_max_old)
			if nb_increase > 0:
				fdgs_list = np.array(fdgs_list) + nb_increase

			for ig, gs in enumerate(Gs_nearest + gihat_list):
				if self._verbose >= 2:
					print('-- computing', ig + 1, 'graphs out of', len(Gs_nearest) + len(gihat_list))
				gnew, dhat, found = self.__generate_l_graphs(gs, fdgs_list[ig], dhat, ig, found, term3)
						  
			if found:
				r = 0
				gihat_list = [gnew]
				dihat_list = [dhat]
			else:
				r += 1
				
			dis_of_each_itr.append(dhat)
			self.__itrs += 1
			if self._verbose >= 2:
				print('Total number of iterations is', self.__itrs, '.')
				print('The preimage is updated', self.__num_updates, 'times.')
				print('The shortest distances for previous iterations are', dis_of_each_itr, '.')
			
			
		# get results and print.
		end_generate_preimage = time.time()
		self.__runtime_generate_preimage = end_generate_preimage - end_precompute_gm
		self.__runtime_total = end_generate_preimage - start
		self.__preimage = (g0hat_list[0] if len(gihat_list) == 0 else gihat_list[0])
		self.__k_dis_preimage = dhat
		if self._verbose:
			print()
			print('=============================================================================')
			print('Finished generation of preimages.')
			print('-----------------------------------------------------------------------------')
			print('Distance in kernel space for the best graph from dataset:', self.__k_dis_dataset)
			print('Distance in kernel space for the preimage:', self.__k_dis_preimage)
			print('Total number of iterations for optimizing:', self.__itrs)
			print('Total number of preimage updates:', self.__num_updates)
			print('Time to pre-compute Gram matrix:', self.__runtime_precompute_gm)
			print('Time to generate pre-images:', self.__runtime_generate_preimage)
			print('Total time:', self.__runtime_total)
			print('=============================================================================')
			print()	
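# The fdgs block inside `run` sets the number of edge edits for each candidate
# to roughly ceil(log(d)) of its current distance d, floored at 1, then grows
# the largest count until the number of distinct edit sets C(nb_vpairs, fdgs)
# reaches the sample budget l. A compact standalone sketch of that heuristic
# (illustrative names; the method's actual incremental bookkeeping is above):
import numpy as np
from math import comb

def _num_edge_edits_sketch(dis_bests, nb_vpairs_min, l):
    d = np.array(dis_bests, dtype=float)
    if d.min() < 1:  # rescale so every log is non-negative.
        d /= d.min()
    fdgs = np.ceil(np.log(d)).astype(int)
    fdgs[fdgs < 1] = 1  # at least one edit per candidate.
    fdgs_max_old = fdgs.max()
    fdgs_max = fdgs_max_old
    # enlarge until there are at least l distinct sets of edited vertex pairs.
    while fdgs_max < nb_vpairs_min and comb(nb_vpairs_min, fdgs_max) < l:
        fdgs_max += 1
    return fdgs + (fdgs_max - fdgs_max_old)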
def xp_simple_preimage():
    import numpy as np
    """**1.   Get dataset.**"""

    from gklearn.utils import Dataset, split_dataset_by_target

    # Predefined dataset name, use dataset "MAO".
    ds_name = 'MAO'
    # The node/edge labels that will not be used in the computation.
    irrelevant_labels = {
        'node_attrs': ['x', 'y', 'z'],
        'edge_labels': ['bond_stereo']
    }

    # Initialize a Dataset.
    dataset_all = Dataset()
    # Load predefined dataset "MAO".
    dataset_all.load_predefined_dataset(ds_name)
    # Remove irrelevant labels.
    dataset_all.remove_labels(**irrelevant_labels)
    # Split the whole dataset according to the classification targets.
    datasets = split_dataset_by_target(dataset_all)
    # Get the first class of graphs, whose median preimage will be computed.
    dataset = datasets[0]
    print('Number of graphs in the class:', len(dataset.graphs))
    """**2.  Set parameters.**"""

    import multiprocessing

    # Parameters for MedianPreimageGenerator (our method).
    mpg_options = {
        'fit_method':
        'k-graphs',  # how to fit edit costs. "k-graphs" means use all graphs in median set when fitting.
        'init_ecc': [4, 4, 2, 1, 1, 1],  # initial edit costs.
        'ds_name': ds_name,  # name of the dataset.
        'parallel': True,  # whether the parallel scheme is to be used.
        'time_limit_in_sec':
        0,  # maximum time limit to compute the preimage. If set to 0 then no limit.
        'max_itrs':
        10,  # maximum iteration limit to optimize edit costs. If set to 0 then no limit.
        'max_itrs_without_update':
        3,  # stop the optimization if edit costs have not been updated for more than this number of iterations.
        'epsilon_residual':
        0.01,  # In optimization, the residual is only considered changed if the change is bigger than this number.
        'epsilon_ec':
        0.1,  # In optimization, the edit costs are only considered changed if the changes are bigger than this number.
        'verbose': 2  # whether to print out results.
    }
    # Parameters for graph kernel computation.
    kernel_options = {
        'name': 'PathUpToH',  # use path kernel up to length h.
        'depth': 9,
        'k_func': 'MinMax',
        'compute_method': 'trie',
        'parallel': 'imap_unordered',  # or None
        'n_jobs': multiprocessing.cpu_count(),
        'normalize':
        True,  # whether to use normalized Gram matrix to optimize edit costs.
        'verbose': 2  # whether to print out results.
    }
    # Parameters for GED computation.
    ged_options = {
        'method': 'IPFP',  # use the IPFP heuristic.
        'initialization_method': 'RANDOM',  # or 'NODE', etc.
        'initial_solutions':
        10,  # when bigger than 1, then the method is considered mIPFP.
        'edit_cost': 'CONSTANT',  # use CONSTANT cost.
        'attr_distance':
        'euclidean',  # the distance between non-symbolic node/edge labels is computed by euclidean distance.
        'ratio_runs_from_initial_solutions': 1,
        'threads': multiprocessing.cpu_count(),  # number of parallel threads. Has no effect if mpg_options['parallel'] = False.
        'init_option': 'EAGER_WITHOUT_SHUFFLED_COPIES'
    }
    # Parameters for MedianGraphEstimator (Boria's method).
    mge_options = {
        'init_type':
        'MEDOID',  # how to initialize the median (compute the set-median). "MEDOID" uses the graph with the smallest SOD.
        'random_inits':
        10,  # number of random initializations when 'init_type' = 'RANDOM'.
        'time_limit':
        600,  # maximum time limit to compute the generalized median. If set to 0 then no limit.
        'verbose': 2,  # whether to print out results.
        'refine': False  # whether to refine the final SODs or not.
    }
    print('done.')
    """**3.   Compute the Gram matrix and distance matrix.**"""

    from gklearn.utils.utils import get_graph_kernel_by_name

    # Get a graph kernel instance.
    graph_kernel = get_graph_kernel_by_name(
        kernel_options['name'],
        node_labels=dataset.node_labels,
        edge_labels=dataset.edge_labels,
        node_attrs=dataset.node_attrs,
        edge_attrs=dataset.edge_attrs,
        ds_infos=dataset.get_dataset_infos(keys=['directed']),
        kernel_options=kernel_options)
    # Compute Gram matrix.
    gram_matrix, run_time = graph_kernel.compute(dataset.graphs,
                                                 **kernel_options)

    # Compute distance matrix.
    from gklearn.utils import compute_distance_matrix
    dis_mat, _, _, _ = compute_distance_matrix(gram_matrix)

    print('done.')
    """**4.   Find the candidate graph.**"""

    from gklearn.preimage.utils import compute_k_dis

    # Number of the nearest neighbors.
    k_neighbors = 10

    # For each graph G in dataset, compute the distance between its image \Phi(G) and the mean of its neighbors' images.
    dis_min = np.inf  # the minimum distance between possible \Phi(G) and the mean of its neighbors.
    for idx, G in enumerate(dataset.graphs):
        # Find the k nearest neighbors of G.
        dis_list = dis_mat[idx]  # distances between \Phi(G) and the image of each graph.
        idx_sort = np.argsort(dis_list)  # sort distances and get the sorted indices.
        idx_nearest = idx_sort[1:k_neighbors + 1]  # indices of the k-nearest neighbors.
        dis_k_nearest = [dis_list[i] for i in idx_nearest]  # k-nearest distances, excluding the zero self-distance.
        G_k_nearest = [dataset.graphs[i] for i in idx_nearest]  # the k-nearest neighbors.

        # Compute the distance between \Phi(G) and the mean of its neighbors.
        dis_tmp = compute_k_dis(
            idx,  # the index of G in Gram matrix.
            idx_nearest,  # the indices of the neighbors
            [1 / k_neighbors] * k_neighbors,  # coefficients for neighbors. 
            gram_matrix,
            withterm3=False)
        # Check if the new distance is smaller.
        if dis_tmp < dis_min:
            dis_min = dis_tmp
            G_cand = G
            G_neighbors = G_k_nearest

    print('The minimum distance is', dis_min)
    """**5.   Run median preimage generator.**"""

    from gklearn.preimage import MedianPreimageGenerator

    # Set the dataset as the k-nearest neighbors.
    dataset.load_graphs(G_neighbors)

    # Create median preimage generator instance.
    mpg = MedianPreimageGenerator()
    # Add dataset.
    mpg.dataset = dataset
    # Set parameters.
    mpg.set_options(**mpg_options.copy())
    mpg.kernel_options = kernel_options.copy()
    mpg.ged_options = ged_options.copy()
    mpg.mge_options = mge_options.copy()
    # Run.
    mpg.run()
    """**4. Get results.**"""

    # Get results.
    import pprint
    pp = pprint.PrettyPrinter(indent=4)  # pretty print
    results = mpg.get_results()
    pp.pprint(results)

    draw_graph(mpg.set_median)
    draw_graph(mpg.gen_median)
    draw_graph(G_cand)
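# `draw_graph` is called inside xp_simple_preimage without an import; a minimal
# stand-in using networkx and matplotlib, assuming it simply renders one graph
# (a hypothetical helper, not necessarily the library's own implementation):
import matplotlib.pyplot as plt
import networkx as nx

def draw_graph(graph):
    nx.draw(graph, with_labels=True, node_color='lightblue')
    plt.show()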
Example #7
# Compute Gram matrix.
gram_matrix, run_time = graph_kernel.compute(
    dataset.graphs,
    parallel='imap_unordered',  # or None.
    n_jobs=multiprocessing.cpu_count(),  # number of parallel jobs.
    normalize=True,  # whether to return normalized Gram matrix.
    verbose=2  # whether to print out results.
)
"""**3.   Compute distance in kernel space.**

Given a dataset $\mathcal{G}_N$, compute the distance in kernel space between the image of $G_1 \in \mathcal{G}_N$ and the mean of images of $\mathcal{G}_k \subset \mathcal{G}_N$.
"""

from gklearn.preimage.utils import compute_k_dis

# Index of $G_1$.
idx_1 = 10
# Indices of graphs in $\mathcal{G}_k$.
idx_graphs = range(0, 10)

# Compute the distance in kernel space.
dis_k = compute_k_dis(
    idx_1,
    idx_graphs,
    [1 / len(idx_graphs)] * len(idx_graphs),  # weights for images of graphs in $\mathcal{G}_k$; all equal when computing the mean.
    gram_matrix,  # Gram matrix of all graphs.
    withterm3=False)
print(dis_k)