def _call_one_graph(self, input):
    """Run one graph-convolution pass over a single graph.

    Args:
        input: sequence whose slots are (node features, list of graph
            Laplacians, num_features_nonzero) -- assumed layout, matches
            how the slots are unpacked below; confirm against caller.

    Returns:
        Activated output: act(sum_i L_i @ (x @ W_i) [+ bias]).
    """
    x = input[0]
    self.laplacians = input[1]
    num_features_nonzero = input[2]

    # Dropout.
    # NOTE(review): tf.nn.dropout's second positional argument is
    # `keep_prob` in TF1 but `rate` in TF2; `1 - self.dropout` matches
    # TF1 keep_prob semantics -- confirm the TF version in use.
    if self.sparse_inputs:
        x = sparse_dropout(x, 1 - self.dropout, num_features_nonzero)
    else:
        x = tf.nn.dropout(x, 1 - self.dropout)

    # Convolve: one support term per Laplacian, summed at the end.
    support_list = []
    for i, laplacian in enumerate(self.laplacians):
        if not self.featureless:
            pre_sup = dot(x, self.vars['weights_' + str(i)],
                          sparse=self.sparse_inputs)
        else:
            # Featureless: x is implicitly the identity, so x @ W_i == W_i.
            pre_sup = self.vars['weights_' + str(i)]
        support = dot(laplacian, pre_sup, sparse=True)
        support_list.append(support)
    output = tf.add_n(support_list)

    # Bias.
    if self.bias:
        output += self.vars['bias']

    return self.act(output)
def _gcn(self, input):
    """Graph-convolution pass with optional row-wise L2 normalization.

    Args:
        input: sequence whose slots are (node features, list of graph
            Laplacians, num_features_nonzero) -- assumed layout, matches
            how the slots are unpacked below; confirm against caller.

    Returns:
        Activated (and, if FLAGS.node_embs_norm, row-normalized) output:
        act(sum_i L_i @ (x @ W_i) [+ bias]).
    """
    x = input[0]
    self.laplacians = input[1]
    num_features_nonzero = input[2]

    # Dropout.
    # NOTE(review): tf.nn.dropout's second positional argument is
    # `keep_prob` in TF1 but `rate` in TF2; `1 - self.dropout` matches
    # TF1 keep_prob semantics -- confirm the TF version in use.
    if self.sparse_inputs:
        x = sparse_dropout(x, 1 - self.dropout, num_features_nonzero)
    else:
        x = tf.nn.dropout(x, 1 - self.dropout)

    # Convolve: one support term per Laplacian, summed at the end.
    support_list = []
    for i, laplacian in enumerate(self.laplacians):
        if not self.featureless:
            pre_sup = dot(x, self.vars['weights_' + str(i)],
                          sparse=self.sparse_inputs)
        else:
            # Featureless: x is implicitly the identity, so x @ W_i == W_i.
            pre_sup = self.vars['weights_' + str(i)]
        support = dot(laplacian, pre_sup, sparse=True)
        support_list.append(support)
    output = tf.add_n(support_list)

    # Bias.
    if self.bias:
        output += self.vars['bias']

    # Normalize each node embedding (row) to unit L2 norm when enabled.
    if FLAGS.node_embs_norm:
        output = tf.nn.l2_normalize(output, axis=1)  # along each row

    return self.act(output)
def _gmn(self, g1, g2):
    """One graph-matching-network propagation step for a pair of graphs.

    Args:
        g1, g2: per-graph tuples; slot 0 holds node features, slot 3 a
            sparse edge-index matrix (transposed to 2 x num_edges here),
            slot 4 an incidence matrix -- assumed layout; confirm against
            caller.

    Returns:
        Tuple (h1_prime, h2_prime) of updated node representations after
        within-graph message passing plus cross-graph matching.
    """
    if logging_enabled:
        print("- Entered layers::GraphConvolution::_gmn Private Method")

    g1x = g1[0]
    g1_edge_index = self._to_dense(tf.sparse_transpose(g1[3]))
    incidence_mat1 = tf.cast(g1[4], tf.float32)
    g2x = g2[0]
    g2_edge_index = self._to_dense(tf.sparse_transpose(g2[3]))
    incidence_mat2 = tf.cast(g2[4], tf.float32)

    if self.sparse_inputs:
        g1x = self._to_dense(g1x)
        g2x = self._to_dense(g2x)

    # Per-edge endpoint ids: row 0 of the edge index holds one endpoint,
    # row 1 the other (presumably source/target -- verify against caller).
    row1 = tf.gather(g1_edge_index, tf.constant(0))
    col1 = tf.gather(g1_edge_index, tf.constant(1))
    row2 = tf.gather(g2_edge_index, tf.constant(0))
    col2 = tf.gather(g2_edge_index, tf.constant(1))

    # Endpoint feature vectors for every edge.
    h1_i = tf.gather(g1x, row1)
    h1_j = tf.gather(g1x, col1)
    h2_i = tf.gather(g2x, row2)
    h2_j = tf.gather(g2x, col2)

    # Edge messages from concatenated endpoint features, transformed by
    # the shared f_message stack.
    m1 = tf.concat([h1_i, h1_j], 1)
    m2 = tf.concat([h2_i, h2_j], 1)
    for message_layer in self.f_message:
        m1 = message_layer(m1)
        m2 = message_layer(m2)

    # Aggregate edge messages per node via the incidence matrices.
    m1_sum = dot(incidence_mat1, m1, sparse=True)
    m2_sum = dot(incidence_mat2, m2, sparse=True)

    # Cross-graph matching signals (each graph attends to the other).
    u1_sum = self._gmn_f_match(g1x, g2x)
    u2_sum = self._gmn_f_match(g2x, g1x)

    # Node update from [own features, aggregated messages, match signal],
    # transformed by the shared f_node stack.
    h1_prime = tf.concat([g1x, m1_sum, u1_sum], axis=1)
    h2_prime = tf.concat([g2x, m2_sum, u2_sum], axis=1)
    for node_layer in self.f_node:
        h1_prime = node_layer(h1_prime)
        h2_prime = node_layer(h2_prime)

    return (h1_prime, h2_prime)
def _gcn(self, input):
    """Graph-convolution pass with optional row-wise L2 normalization.

    Args:
        input: sequence whose slots are (node features, list of graph
            Laplacians, num_features_nonzero) -- assumed layout, matches
            how the slots are unpacked below; confirm against caller.

    Returns:
        Activated (and, if ec.node_embs_norm, row-normalized) output:
        act(sum_i L_i @ (x @ W_i) [+ bias]).
    """
    if logging_enabled:
        print("- Entered layers::GraphConvolution::_gcn Private Method")

    x = input[0]
    self.laplacians = input[1]
    num_features_nonzero = input[2]

    # Dropout.
    # NOTE(review): the sparse branch passes `1 - self.dropout` (TF1
    # keep_prob style) while the dense branch uses rate=ec.dropout_rate;
    # confirm these two encode the same dropout probability.
    if self.sparse_inputs:
        x = sparse_dropout(x, 1 - self.dropout, num_features_nonzero)
    else:
        x = tf.nn.dropout(x, rate=ec.dropout_rate)

    # Convolve: one support term per Laplacian, summed at the end.
    support_list = []
    for i, laplacian in enumerate(self.laplacians):
        if not self.featureless:
            pre_sup = dot(x, self.vars['weights_' + str(i)],
                          sparse=self.sparse_inputs)
        else:
            # Featureless: x is implicitly the identity, so x @ W_i == W_i.
            pre_sup = self.vars['weights_' + str(i)]
        support = dot(laplacian, pre_sup, sparse=True)
        support_list.append(support)
    output = tf.add_n(support_list)

    # Bias.
    if self.bias:
        output += self.vars['bias']

    # Normalize each node embedding (row) to unit L2 norm when enabled.
    if ec.node_embs_norm:
        output = tf.nn.l2_normalize(output, axis=1)  # along each row

    return self.act(output)
def _call_one_mat(self, inputs):
    """Dense transform of a single matrix: act(dropout(x) @ W [+ bias])."""
    # Apply dropout, then the learned linear transform.
    dropped = tf.nn.dropout(inputs, 1 - self.dropout)
    output = dot(dropped, self.vars['weights'])

    # Optional learned bias term.
    if self.bias:
        output = output + self.vars['bias']

    return self.act(output)
def _call_one_mat(self, inputs):
    """Dense transform of a single matrix: act(dropout(x) @ W [+ bias])."""
    if logging_enabled:
        print("- Entered layers::Layer::_call_one_mat Private Method")

    x = inputs

    # Dropout, then the learned linear transform.
    x = tf.nn.dropout(x, rate=ec.dropout_rate)
    output = dot(x, self.vars['weights'])

    # Optional learned bias term.
    if self.bias:
        output += self.vars['bias']

    return self.act(output)