def call(self, inputs):
    # Dynamic batch size so the layer also works when the batch dimension is unknown.
    batch_size = tf.shape(inputs)[0]
    # Sample nb_num neighbors per input node and fetch dense features
    # for both the nodes and their neighbors.
    neighbors = euler_ops.sample_neighbor(inputs, [self.edge_type],
                                          self.nb_num)[0]
    node_feats = euler_ops.get_dense_feature(
        tf.reshape(inputs, [-1]), [self.feature_idx], [self.feature_dim])[0]
    neighbor_feats = euler_ops.get_dense_feature(
        tf.reshape(neighbors, [-1]), [self.feature_idx],
        [self.feature_dim])[0]
    node_feats = tf.reshape(node_feats, [batch_size, 1, self.feature_dim])
    neighbor_feats = tf.reshape(
        neighbor_feats, [batch_size, self.nb_num, self.feature_dim])
    # Prepend each node to its own neighborhood: [batch_size, nb_num + 1, feature_dim].
    seq = tf.concat([node_feats, neighbor_feats], 1)

    # First multi-head attention layer: head outputs are concatenated.
    hidden = []
    for _ in range(self.head_num):
        hidden_val = self.att_head(seq, self.hidden_dim, tf.nn.elu)
        hidden_val = tf.reshape(
            hidden_val, [batch_size, self.nb_num + 1, self.hidden_dim])
        hidden.append(hidden_val)
    h_1 = tf.concat(hidden, -1)

    # Output attention layer: head outputs are averaged instead of concatenated.
    out = []
    for _ in range(self.head_num):
        out_val = self.att_head(h_1, self.out_dim, tf.nn.elu)
        out_val = tf.reshape(out_val,
                             [batch_size, self.nb_num + 1, self.out_dim])
        out.append(out_val)
    out = tf.add_n(out) / self.head_num
    out = tf.reshape(out, [batch_size, self.nb_num + 1, self.out_dim])
    # Keep only the embedding of the center node (position 0).
    out = tf.slice(out, [0, 0, 0], [batch_size, 1, self.out_dim])
    return tf.reshape(out, [batch_size, self.out_dim])
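# The att_head helper used above is not defined in this snippet. The
# following is only a minimal sketch of a single graph-attention head in
# the style of the GAT reference implementation (Velickovic et al.); the
# name att_head_sketch, the argument order, and the absence of dropout or
# residual connections are assumptions, not this project's actual code.
# Assumes TF 1.x imported as tf, consistent with the snippets above.
def att_head_sketch(seq, out_dim, activation):
    """seq: [batch_size, nb_num + 1, feature_dim] dense node features."""
    # Shared linear transform of every position, as a 1x1 convolution.
    seq_fts = tf.layers.conv1d(seq, out_dim, 1, use_bias=False)
    # Attention logits for the "source" and "target" roles of each position.
    f_1 = tf.layers.conv1d(seq_fts, 1, 1)               # [bz, n, 1]
    f_2 = tf.layers.conv1d(seq_fts, 1, 1)               # [bz, n, 1]
    logits = f_1 + tf.transpose(f_2, [0, 2, 1])         # [bz, n, n]
    coefs = tf.nn.softmax(tf.nn.leaky_relu(logits))     # attention weights
    # Attention-weighted sum of the transformed features.
    vals = tf.matmul(coefs, seq_fts)                    # [bz, n, out_dim]
    return activation(vals)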
def call(self, inputs):
    batch_size = tf.shape(inputs)[0]
    neighbors = euler_ops.sample_neighbor(
        inputs, self.edge_type, self.nb_num)[0]
    node_feats = euler_ops.get_dense_feature(
        tf.reshape(inputs, [-1]), [self.feature_idx], [self.feature_dim])[0]
    neighbor_feats = euler_ops.get_dense_feature(
        tf.reshape(neighbors, [-1]), [self.feature_idx],
        [self.feature_dim])[0]
    node_feats = tf.reshape(node_feats, [batch_size, 1, self.feature_dim])
    neighbor_feats = tf.reshape(
        neighbor_feats, [batch_size, self.nb_num, self.feature_dim])
    # For every feature channel, keep the k largest values across the
    # neighbors: [batch_size, nb_num, feature_dim] -> [batch_size, k, feature_dim].
    topk, _ = tf.nn.top_k(tf.transpose(neighbor_feats, [0, 2, 1]), k=self.k)
    topk = tf.transpose(topk, [0, 2, 1])
    # Prepend the center node: [batch_size, k + 1, feature_dim].
    topk = tf.concat([node_feats, topk], 1)
    # Two 1-D convolutions over the (node, top-k neighbors) sequence.
    hidden = tf.layers.conv1d(topk, self.hidden_dim, self.k // 2 + 1,
                              use_bias=True)
    out = tf.layers.conv1d(hidden, self.out_dim, self.k // 2 + 1,
                           use_bias=True)
    # Keep only the output at position 0, i.e. the center node's embedding.
    out = tf.slice(out, [0, 0, 0], [batch_size, 1, self.out_dim])
    return tf.reshape(out, [batch_size, self.out_dim])
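# Standalone sketch of the top-k trick used above (the helper name and the
# toy shapes are assumptions for illustration): the transpose / top_k /
# transpose sequence selects, independently for every feature channel, the
# k largest values over the neighbor axis, similar to the pooling step in
# LGCN-style aggregators. Assumes TF 1.x imported as tf.
def topk_over_neighbors(neighbor_feats, k):
    """[batch_size, nb_num, feature_dim] -> [batch_size, k, feature_dim]."""
    # tf.nn.top_k operates on the last axis, so move the neighbor axis there.
    topk, _ = tf.nn.top_k(tf.transpose(neighbor_feats, [0, 2, 1]), k=k)
    return tf.transpose(topk, [0, 2, 1])

# Example: a [2, 5, 3] tensor of neighbor features reduced to [2, 3, 3].
# feats = tf.random_uniform([2, 5, 3])
# pooled = topk_over_neighbors(feats, k=3)   # shape (2, 3, 3)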
def to_sample(self, inputs, edge_type):
    batch_size = tf.size(inputs)
    src = tf.expand_dims(inputs, -1)
    pos = euler_ops.sample_neighbor(inputs, edge_type, 1, self.max_id + 1)[0]
    negs = euler_ops.sample_node(batch_size * self.num_negs, self.node_type)
    negs = tf.reshape(negs, [batch_size, self.num_negs])
    return src, pos, negs
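# Sketch of one common way to consume the (src, pos, negs) ids returned
# above: score pairs by embedding dot products and train with a
# sigmoid-cross-entropy negative-sampling loss. The embed_fn callback and
# this loss are an assumed downstream usage, not code from this snippet.
# Assumes TF 1.x imported as tf.
def negative_sampling_loss(embed_fn, src, pos, negs):
    """src, pos: [batch_size, 1] ids; negs: [batch_size, num_negs] ids."""
    src_emb = embed_fn(src)     # [bz, 1, dim]
    pos_emb = embed_fn(pos)     # [bz, 1, dim]
    neg_emb = embed_fn(negs)    # [bz, num_negs, dim]
    pos_logit = tf.matmul(src_emb, pos_emb, transpose_b=True)  # [bz, 1, 1]
    neg_logit = tf.matmul(src_emb, neg_emb, transpose_b=True)  # [bz, 1, num_negs]
    pos_loss = tf.nn.sigmoid_cross_entropy_with_logits(
        labels=tf.ones_like(pos_logit), logits=pos_logit)
    neg_loss = tf.nn.sigmoid_cross_entropy_with_logits(
        labels=tf.zeros_like(neg_logit), logits=neg_logit)
    return tf.reduce_mean(pos_loss) + tf.reduce_mean(neg_loss)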
def sample_sim_cor_fanout(nodes, edge_types, counts, default_node=-1):
    """Multi-hop fanout that samples two edge types (sim and cor) at every
    hop and concatenates the two neighbor sets."""
    neighbors_list = [tf.reshape(nodes, [-1])]
    for hop_edge_types, count in zip(edge_types, counts):
        if len(hop_edge_types) == 2:
            sim_edge_type, cor_edge_type = hop_edge_types
        elif len(hop_edge_types) == 1:
            sim_edge_type = cor_edge_type = hop_edge_types[0]
        else:
            raise ValueError(
                'Each hop must specify one or two edge types, got %r'
                % (hop_edge_types,))
        sim_neighbors, _, _ = euler_ops.sample_neighbor(
            neighbors_list[-1], [sim_edge_type], count,
            default_node=default_node)
        cor_neighbors, _, _ = euler_ops.sample_neighbor(
            neighbors_list[-1], [cor_edge_type], count,
            default_node=default_node)
        sim_neighbors = tf.reshape(sim_neighbors, [-1])
        cor_neighbors = tf.reshape(cor_neighbors, [-1])
        # Each hop contributes 2 * count neighbors per input node.
        neighbors = tf.concat([sim_neighbors, cor_neighbors], axis=-1)
        neighbors_list.append(neighbors)
    return [neighbors_list]
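# Hypothetical usage of sample_sim_cor_fanout (the edge-type ids 0 and 1
# are illustrative, not taken from any real graph schema): a two-hop
# fanout sampling 10 neighbors of each edge type at hop one and 5 at hop
# two.
#
#   fanout = sample_sim_cor_fanout(
#       nodes, edge_types=[[0, 1], [0, 1]], counts=[10, 5])
#
# fanout[0][0] holds the flattened input nodes, fanout[0][1] the 2 * 10
# first-hop neighbors per node (the sim block followed by the cor block),
# and fanout[0][2] the 2 * 5 second-hop neighbors per first-hop neighbor.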
def sample_patterned_metapaths(nodes, patterns, count_per_path,
                               default_node=-1):
    all_neighbors = []
    for meta_pattern in patterns:
        pattern_neighbors = nodes
        last_neighbors = tf.reshape(nodes, [-1])
        # Spread count_per_path over the pattern: the first hop samples
        # count_per_path // len(meta_pattern) neighbors per node, every
        # later hop samples one neighbor per previously sampled node.
        counts = [int(count_per_path // len(meta_pattern))
                  ] + [1] * (len(meta_pattern) - 1)
        for hop_edge_types, count in zip(meta_pattern, counts):
            neighbors, _, _ = euler_ops.sample_neighbor(
                last_neighbors, [hop_edge_types], count,
                default_node=default_node)
            last_neighbors = tf.reshape(neighbors, [-1])
            # Every hop therefore yields counts[0] ids per original node.
            neighbors = tf.reshape(neighbors, [-1, counts[0]])
            pattern_neighbors = tf.concat([pattern_neighbors, neighbors],
                                          axis=-1)
        all_neighbors.append(pattern_neighbors)
    all_nodes = [nodes] * len(patterns)
    return all_nodes, all_neighbors
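# Hypothetical usage of sample_patterned_metapaths (the edge-type ids are
# illustrative): two 2-hop metapath patterns, one following edge types
# [0, 1] and one following [2, 3], each expanded to roughly
# count_per_path neighbors per source node.
#
#   nodes_list, neighbors_list = sample_patterned_metapaths(
#       nodes, patterns=[[0, 1], [2, 3]], count_per_path=10)
#
# For each pattern, the first hop samples count_per_path // len(pattern)
# neighbors per node and every later hop samples one neighbor per
# previously sampled node, so each entry of neighbors_list carries the
# source node ids followed by count_per_path // len(pattern) ids per hop.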
def sample_positives(self, inputs):
    src = tf.expand_dims(inputs, -1)
    pos = euler_ops.sample_neighbor(inputs, self.edge_type, 1,
                                    self.max_id + 1)[0]
    return src, pos