def _compute(self, data_1, data_2):
    """Graphlet kernel: Gram matrix of per-graph graphlet count vectors.

    Each graph collection is converted to adjacency lists, every graph
    is mapped to its graphlet-count feature vector, and the kernel is
    the matrix of inner products between the two feature sets.
    """
    adj_1 = basic.graphs_to_adjacency_lists(data_1)
    adj_2 = basic.graphs_to_adjacency_lists(data_2)
    n_graphlets = _number_of_graphlets(self.k)
    counts_1 = np.zeros((adj_1.shape[0], n_graphlets))
    counts_2 = np.zeros((adj_2.shape[0], n_graphlets))
    for idx, graph in enumerate(adj_1):
        counts_1[idx] = _count_graphlets(graph, self.k, self.graphlet_array)
    for idx, graph in enumerate(adj_2):
        counts_2[idx] = _count_graphlets(graph, self.k, self.graphlet_array)
    return counts_1.dot(counts_2.T)
def _compute(self, data_1, data_2):
    """Shortest-path kernel matrix between two graph collections.

    Builds shortest-path histograms (optionally refined by node labels
    when self.labeled is set) over a common maximum path length, then
    returns the dense matrix of their inner products.
    """
    adj_1 = basic.graphs_to_adjacency_lists(data_1)
    adj_2 = basic.graphs_to_adjacency_lists(data_2)
    paths_1, longest_1 = _apply_floyd_warshall(np.array(adj_1))
    paths_2, longest_2 = _apply_floyd_warshall(np.array(adj_2))
    # Both sides must be histogrammed over the same path-length range.
    longest = max(longest_1, longest_2)
    if self.labeled:
        labels_1, labels_2, n_labels = basic.relabel(
            [G.node_labels for G in data_1],
            [G.node_labels for G in data_2])
        hist_1 = self._create_accum_list_labeled(paths_1, longest,
                                                 labels_1, n_labels)
        hist_2 = self._create_accum_list_labeled(paths_2, longest,
                                                 labels_2, n_labels)
    else:
        hist_1 = self._create_accum_list(paths_1, longest)
        hist_2 = self._create_accum_list(paths_2, longest)
    return np.asarray(hist_1.dot(hist_2.T).todense())
def _compute(self, data_1, data_2):
    """Compute the shortest-path kernel Gram matrix for data_1 vs data_2.

    Shortest paths come from Floyd-Warshall; feature vectors are path
    -length accumulators, label-aware when self.labeled is true.
    """
    ams_1 = basic.graphs_to_adjacency_lists(data_1)
    ams_2 = basic.graphs_to_adjacency_lists(data_2)
    sp_1, diam_1 = _apply_floyd_warshall(np.array(ams_1))
    sp_2, diam_2 = _apply_floyd_warshall(np.array(ams_2))
    # Accumulators on both sides share one path-length range.
    maxpath = max(diam_1, diam_2)
    if not self.labeled:
        feat_1 = self._create_accum_list(sp_1, maxpath)
        feat_2 = self._create_accum_list(sp_2, maxpath)
    else:
        labels_1, labels_2, numlabels = basic.relabel(
            [g.node_labels for g in data_1],
            [g.node_labels for g in data_2])
        feat_1 = self._create_accum_list_labeled(
            sp_1, maxpath, labels_1, numlabels)
        feat_2 = self._create_accum_list_labeled(
            sp_2, maxpath, labels_2, numlabels)
    return np.asarray(feat_1.dot(feat_2.T).todense())
def _compute(self, data_1, data_2):
    """Fill self.res with pairwise kernel values using a grid of threads.

    The result matrix is partitioned into a fixed grid of tiles and one
    CompareThread worker is started per tile; workers write into
    self.res, which is returned after all of them have joined.  When
    data_1 is data_2 (training), preprocessing is done once and shared.
    """
    self.is_training = data_1 is data_2
    self.res = np.zeros((len(data_1), len(data_2)))
    data_1 = basic.graphs_to_adjacency_lists(data_1)
    if self.is_training:
        data_2 = data_1
    else:
        data_2 = basic.graphs_to_adjacency_lists(data_2)
    data_1, max1 = self._apply_floyd_warshall(np.array(data_1))
    if self.is_training:
        data_2, max2 = data_1, max1
    else:
        data_2, max2 = self._apply_floyd_warshall(np.array(data_2))
    n_chunks = 20
    step_i = len(data_1) // n_chunks
    step_j = len(data_2) // n_chunks
    workers = []
    for bi in range(n_chunks):
        row_from = bi * step_i
        # The final row/column of tiles absorbs any remainder.
        row_to = len(data_1) if bi == n_chunks - 1 else row_from + step_i
        for bj in range(n_chunks):
            col_from = bj * step_j
            col_to = len(data_2) if bj == n_chunks - 1 else col_from + step_j
            worker = self.CompareThread(bi + bj, data_1, data_2,
                                        row_from, row_to,
                                        col_from, col_to, self)
            worker.start()
            workers.append(worker)
    for worker in workers:
        worker.join()
    return self.res
def _compute(self, data_1, data_2):
    """Random-walk kernel between two graph collections.

    For each pair (graph1, graph2), W_prod is the Kronecker product of
    the normalized adjacency matrices; the kernel value is p^T x where
    x solves (I - lambda * W_prod) x = p and p is the uniform
    start/stop probability vector.

    Fix: _norm() was recomputed inside the inner loop for both graphs
    on every pair — norm1 is loop-invariant w.r.t. j and the norms of
    data_2 are reused for every i, so both are now computed exactly
    once per graph.  Also removed the unused local N and dead
    commented-out progress print.
    """
    data_1 = basic.graphs_to_adjacency_lists(data_1)
    data_2 = basic.graphs_to_adjacency_lists(data_2)
    res = np.zeros((len(data_1), len(data_2)))
    # Precompute normalized adjacency matrices of data_2 once; they are
    # reused for every graph in data_1.
    norms_2 = [lil_matrix(_norm(graph2)) for graph2 in data_2]
    for i, graph1 in enumerate(data_1):
        # Loop-invariant w.r.t. j: hoisted out of the inner loop.
        norm1 = lil_matrix(_norm(graph1))
        for j, norm2 in enumerate(norms_2):
            # If the graphs are unweighted, W_prod = kron(norm1, norm2).
            w_prod = kron(norm1, norm2)
            starting_prob = np.ones(w_prod.shape[0]) / (w_prod.shape[0])
            stop_prob = starting_prob
            # Solve (I - lambda * W_prod) x = starting_prob.
            A = identity(w_prod.shape[0]) - (w_prod * self._lmb)
            x = lsqr(A, starting_prob)
            res[i, j] = stop_prob.T.dot(x[0])
    return res
def _compute(self, data_1, data_2):
    """Compute the kernel matrix in parallel with a grid of MyThread
    workers, each responsible for one tile of the result.

    When data_1 is data_2 (training), the second collection reuses the
    first's converted form instead of being converted twice.  Workers
    write into self.res, which is returned after all have joined.

    Fixes: renamed the misspelled local 'devided' to 'divided'
    (matching the sibling threaded _compute), replaced int(x / y) with
    integer division, and removed the dead commented-out serial loop.
    """
    self.is_training = data_1 is data_2
    data_1 = basic.graphs_to_adjacency_lists(data_1)
    data_2 = data_1 if self.is_training else basic.graphs_to_adjacency_lists(
        data_2)
    self.res = np.zeros((len(data_1), len(data_2)))
    # Tile the result matrix into a divided x divided grid.
    divided = 20
    jump_step_i = len(data_1) // divided
    jump_step_j = len(data_2) // divided
    threads = []
    for i in range(divided):
        for j in range(divided):
            i_from = i * jump_step_i
            j_from = j * jump_step_j
            # The last row/column of tiles absorbs the remainder.
            i_to = len(data_1) if i == divided - 1 else i_from + jump_step_i
            j_to = len(data_2) if j == divided - 1 else j_from + jump_step_j
            thread = self.MyThread(i + j, data_1, data_2,
                                   i_from, i_to, j_from, j_to, self)
            thread.start()
            threads.append(thread)
    for t in threads:
        t.join()
    return self.res
def _compute(self, data_1, data_2):
    """Shortest-path kernel with a training-mode fast path.

    When data_1 is data_2, conversion, Floyd-Warshall, and the
    accumulator list are each computed once and shared by both sides;
    otherwise both inputs are processed independently.  Returns the
    dense Gram matrix of accumulator inner products.
    """
    self.is_training = data_1 is data_2
    adj_1 = basic.graphs_to_adjacency_lists(data_1)
    if self.is_training:
        adj_2 = adj_1
    else:
        adj_2 = basic.graphs_to_adjacency_lists(data_2)
    sp_1, max1 = self._apply_floyd_warshall(np.array(adj_1))
    if self.is_training:
        sp_2, max2 = sp_1, max1
    else:
        sp_2, max2 = self._apply_floyd_warshall(np.array(adj_2))
    # Both accumulators must share one path-length range.
    max_path = max(max1, max2)
    accum_1 = self._create_accum_list(sp_1, max_path)
    if self.is_training:
        accum_2 = accum_1
    else:
        accum_2 = self._create_accum_list(sp_2, max_path)
    return np.asarray(accum_1.dot(accum_2.T).todense())