def _build(self):
    with tf.variable_scope('Encoder', reuse=None):
        self.hidden1 = GraphConvolutionSparse(
            input_dim=self.input_dim, output_dim=FLAGS.hidden1, adj=self.adj,
            features_nonzero=self.features_nonzero, act=tf.nn.relu,
            dropout=self.dropout, logging=self.logging,
            name='e_dense_1')(self.inputs)
        self.noise = gaussian_noise_layer(self.hidden1, 0.1)
        self.embeddings = GraphConvolution(
            input_dim=FLAGS.hidden1, output_dim=FLAGS.hidden2, adj=self.adj,
            act=lambda x: x, dropout=self.dropout, logging=self.logging,
            name='e_dense_2')(self.noise)
        self.z_mean = self.embeddings
        self.reconstructions = InnerProductDecoder(
            input_dim=FLAGS.hidden2, act=lambda x: x,
            logging=self.logging)(self.embeddings)
def _build(self):
    self.hidden1 = GraphConvolutionSparse(
        input_dim=self.input_dim, output_dim=FLAGS.hidden1, adj=self.adj,
        features_nonzero=self.features_nonzero, act=tf.nn.relu,
        dropout=self.dropout, logging=self.logging)(self.inputs)
    self.z_mean = GraphConvolution(
        input_dim=FLAGS.hidden1, output_dim=FLAGS.hidden2, adj=self.adj,
        act=lambda x: x, dropout=self.dropout, logging=self.logging)(self.hidden1)
    self.z_log_std = GraphConvolution(
        input_dim=FLAGS.hidden1, output_dim=FLAGS.hidden2, adj=self.adj,
        act=lambda x: x, dropout=self.dropout, logging=self.logging)(self.hidden1)
    # TODO: output the hidden vector z, which is the node embedding vector
    self.z = self.z_mean + tf.random_normal(
        [self.n_samples, FLAGS.hidden2]) * tf.exp(self.z_log_std)
    logging.info('Finished calculating the latent vector.')
    self.reconstructions = InnerProductDecoder(
        input_dim=FLAGS.hidden2, act=lambda x: x, logging=self.logging)(self.z)
def _build(self):
    self.mylayer1 = GraphConvolutionSparse(
        input_dim=self.input_dim, output_dim=FLAGS.hidden1, adj=self.adj,
        features_nonzero=self.features_nonzero, act=tf.nn.relu,
        dropout=self.dropout, logging=self.logging)
    self.hidden1 = self.mylayer1(self.inputs)
    self.mylayer2 = GraphConvolution(
        input_dim=FLAGS.hidden1, output_dim=FLAGS.hidden2, adj=self.adj,
        act=lambda x: x, dropout=self.dropout, logging=self.logging)
    self.embeddings = self.mylayer2(self.hidden1)
    self.W1 = self.mylayer1.get_weight()
    self.W2 = self.mylayer2.get_weight()
    self.z_mean = self.embeddings
    self.reconstructions = InnerProductDecoder(
        input_dim=FLAGS.hidden2, act=lambda x: x,
        logging=self.logging)(self.embeddings)
def build(self):
    self.adj = dropout_sparse(self.adj, 1 - self.adjdp, self.adj_nonzero)
    self.hidden1 = GraphConvolutionSparse(
        name='gcn_sparse_layer', input_dim=self.input_dim,
        output_dim=self.emb_dim, adj=self.adj,
        features_nonzero=self.features_nonzero, dropout=self.dropout,
        act=self.act)(self.inputs)
    self.hidden2 = GraphConvolution(
        name='gcn_dense_layer', input_dim=self.emb_dim, output_dim=self.emb_dim,
        adj=self.adj, dropout=self.dropout, act=self.act)(self.hidden1)
    self.emb = GraphConvolution(
        name='gcn_dense_layer2', input_dim=self.emb_dim, output_dim=self.emb_dim,
        adj=self.adj, dropout=self.dropout, act=self.act)(self.hidden2)
    # Weighted combination of the three GCN outputs
    self.embeddings = (self.hidden1 * self.att[0]
                       + self.hidden2 * self.att[1]
                       + self.emb * self.att[2])
    self.reconstructions = InnerProductDecoder(
        name='gcn_decoder', input_dim=self.emb_dim, num_r=self.num_r,
        act=tf.nn.sigmoid)(self.embeddings)
def __init__(self, input_dim, features_nonezero, adj, hidden1=32, embedding_dim=16):
    super(GAE, self).__init__()
    self.input_dim = input_dim
    self.hidden1 = hidden1
    self.embedding_dim = embedding_dim
    self.features_nonezero = features_nonezero
    # Rebuild the sparse adjacency tensor from (indices, values, dense_shape)
    indices = np.array(adj[0])
    values = np.array(adj[1])
    dense_shape = np.array(adj[2])
    sparse_adj = tf.SparseTensor(indices=indices, values=values,
                                 dense_shape=dense_shape)
    self.adj = tf.cast(sparse_adj, dtype=tf.float32)
    # GAE encoder: one sparse graph-convolution layer, one graph-convolution layer
    self.conv1 = ConvolutionSparseLayer(self.input_dim, self.hidden1, self.adj,
                                        self.features_nonezero, dropout=0,
                                        activation='relu',
                                        input_shape=(None, self.input_dim))
    self.conv2 = ConvolutionLayer(self.hidden1, self.embedding_dim, self.adj,
                                  dropout=0, activation='relu')
    # GAE decoder
    self.reconstruct = InnerProductDecoder(self.embedding_dim, dropout=0,
                                           act=tf.nn.sigmoid)
def _build(self):
    with tf.variable_scope('Encoder'):
        self.hidden1 = GraphConvolutionSparse(
            input_dim=self.input_dim, output_dim=FLAGS.hidden1, adj=self.adj,
            features_nonzero=self.features_nonzero, act=tf.nn.relu,
            dropout=self.dropout, logging=self.logging,
            name='e_dense_1')(self.inputs)
        self.z_mean = GraphConvolution(
            input_dim=FLAGS.hidden1, output_dim=FLAGS.hidden2, adj=self.adj,
            act=lambda x: x, dropout=self.dropout, logging=self.logging,
            name='e_dense_2')(self.hidden1)
        self.z_log_std = GraphConvolution(
            input_dim=FLAGS.hidden1, output_dim=FLAGS.hidden2, adj=self.adj,
            act=lambda x: x, dropout=self.dropout, logging=self.logging,
            name='e_dense_3')(self.hidden1)
        self.z = self.z_mean + tf.random_normal(
            [self.n_samples, FLAGS.hidden2]) * tf.exp(self.z_log_std)
        self.reconstructions = InnerProductDecoder(
            input_dim=FLAGS.hidden2, act=lambda x: x, logging=self.logging)(self.z)
        self.embeddings = self.z
def _build(self):
    self.hidden1 = GraphConvolutionSparse(
        input_dim=self.input_dim, output_dim=self.hidden1_dim, adj=self.adj,
        features_nonzero=self.features_nonzero, act=tf.nn.relu,
        dropout=self.dropout, logging=self.logging)(self.inputs)
    self.z_mean = GraphConvolution(
        input_dim=self.hidden1_dim, output_dim=self.hidden2_dim, adj=self.adj,
        act=lambda x: x, dropout=self.dropout, logging=self.logging)(self.hidden1)
    self.z_log_std = GraphConvolution(
        input_dim=self.hidden1_dim, output_dim=self.hidden2_dim, adj=self.adj,
        act=lambda x: x, dropout=self.dropout, logging=self.logging)(self.hidden1)
    self.z = self.z_mean + tf.random_normal(
        [self.n_samples, self.hidden2_dim], dtype=tf.float64) * tf.exp(self.z_log_std)
    self.reconstructions = InnerProductDecoder(
        input_dim=self.hidden2_dim, act=lambda x: x, logging=self.logging)(self.z)
def _build(self):
    # First GCN Layer: (A, X) --> H (hidden layer features)
    self.hidden1 = GraphConvolution(
        input_dim=self.input_dim, output_dim=self.hidden1_dim, adj=self.adj,
        # features_nonzero=self.features_nonzero,
        act=tf.nn.relu, dropout=self.dropout, logging=self.logging)(self.inputs)
    # Second GCN Layer: (A, H) --> Z (node embeddings)
    self.embeddings = GraphConvolution(
        input_dim=self.hidden1_dim, output_dim=self.hidden2_dim, adj=self.adj,
        act=lambda x: x, dropout=self.dropout, logging=self.logging)(self.hidden1)
    # Z_mean for the AE. No noise added (because this is not a VAE)
    self.z_mean = self.embeddings
    # Inner-Product Decoder: Z (embeddings) --> A (reconstructed adj.)
    self.reconstructions = InnerProductDecoder(
        input_dim=self.hidden2_dim, act=lambda x: x,
        logging=self.logging)(self.embeddings)
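Every snippet in this listing calls an InnerProductDecoder whose definition is not shown. For reference, the sketch below shows what such a decoder conventionally computes (pairwise inner products of the embeddings, flattened, passed through the activation); the class name, arguments, and body are assumptions mirroring the calls above, not the exact implementation used by any of the quoted repositories.

# Hedged sketch of a conventional inner-product decoder: logits = Z Z^T, flattened.
# Names and defaults are illustrative only.
import tensorflow as tf

class InnerProductDecoderSketch(object):
    def __init__(self, input_dim, act=tf.nn.sigmoid, dropout=0., **kwargs):
        self.act = act
        self.dropout = dropout

    def __call__(self, z):
        z = tf.nn.dropout(z, 1 - self.dropout)        # TF1-style keep_prob argument
        logits = tf.matmul(z, z, transpose_b=True)    # pairwise scores z_i . z_j
        return self.act(tf.reshape(logits, [-1]))     # flatten to match the labels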
def _build(self):
    self.hidden1 = GraphConvolutionSparse(
        input_dim=self.input_dim, output_dim=FLAGS.hidden1, adj=self.adj,
        features_nonzero=self.features_nonzero, act=tf.nn.relu,
        dropout=self.dropout, logging=self.logging)(self.inputs)
    # Note: hidden2 is built from the same inputs but is not consumed below.
    self.hidden2 = GraphConvolutionSparse(
        input_dim=self.input_dim, output_dim=FLAGS.hidden1, adj=self.adj,
        features_nonzero=self.features_nonzero, act=tf.nn.relu,
        dropout=self.dropout, logging=self.logging)(self.inputs)
    self.embeddings = GraphConvolution(
        input_dim=FLAGS.hidden1, output_dim=FLAGS.hidden2, adj=self.adj,
        act=tf.nn.relu, dropout=self.dropout, logging=self.logging)(self.hidden1)
    # self.z_mean = self.embeddings

    # Decoder 1: reconstruct node attributes
    self.attribute_decoder_layer1 = GraphConvolution(
        input_dim=FLAGS.hidden2, output_dim=FLAGS.hidden1, adj=self.adj,
        act=tf.nn.relu, dropout=self.dropout, logging=self.logging)(self.embeddings)
    self.attribute_decoder_layer2 = GraphConvolution(
        input_dim=FLAGS.hidden1, output_dim=self.input_dim, adj=self.adj,
        act=tf.nn.relu, dropout=self.dropout,
        logging=self.logging)(self.attribute_decoder_layer1)

    # Decoder 2: reconstruct graph structure
    self.structure_decoder_layer1 = GraphConvolution(
        input_dim=FLAGS.hidden2, output_dim=FLAGS.hidden1, adj=self.adj,
        act=tf.nn.relu, dropout=self.dropout, logging=self.logging)(self.embeddings)
    self.structure_decoder_layer2 = InnerProductDecoder(
        input_dim=FLAGS.hidden1, act=tf.nn.sigmoid,
        logging=self.logging)(self.structure_decoder_layer1)

    self.attribute_reconstructions = self.attribute_decoder_layer2
    self.structure_reconstructions = self.structure_decoder_layer2
def _build(self):
    with tf.variable_scope('Encoder', reuse=None):
        self.embeddings = []
        for v in range(self.numView):
            self.hidden1 = GraphConvolution(
                input_dim=self.num_features, output_dim=FLAGS.hidden1,
                adj=self.adjs[v], act=tf.nn.relu, dropout=self.dropout,
                logging=self.logging, name='e_dense_1_' + str(v))(self.inputs)
            self.noise = gaussian_noise_layer(self.hidden1, 0.1)
            embeddings = GraphConvolution(
                input_dim=FLAGS.hidden1, output_dim=FLAGS.hidden2,
                adj=self.adjs[v], act=lambda x: x, dropout=self.dropout,
                logging=self.logging, name='e_dense_2_' + str(v))(self.noise)
            self.embeddings.append(embeddings)
        print('embeddings', self.embeddings)

        # Fuse layer
        self.cluster_layer = ClusteringLayer(input_dim=FLAGS.hidden2,
                                             n_clusters=self.num_clusters,
                                             name='clustering')
        self.cluster_layer_q = self.cluster_layer(self.embeddings[FLAGS.input_view])

        self.reconstructions = []
        for v in range(self.numView):
            view_reconstruction = InnerProductDecoder(
                input_dim=FLAGS.hidden2, name='e_weight_single_', v=v,
                act=lambda x: x, logging=self.logging)(self.embeddings[v])
            self.reconstructions.append(view_reconstruction)

        self.reconstructions_fuze = []
        for v in range(self.numView):
            view_reconstruction = InnerProductDecoder(
                input_dim=FLAGS.hidden2, name='e_weight_multi_', v=v,
                act=lambda x: x,
                logging=self.logging)(self.embeddings[FLAGS.input_view])
            self.reconstructions_fuze.append(view_reconstruction)
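The ClusteringLayer used above is not defined in this listing. A common choice for such a layer over node embeddings is a DEC-style soft assignment with a Student's t kernel; the sketch below shows that formulation under the assumption that this is what the layer computes. The function name and signature are illustrative, not the repository's API.

# Illustrative DEC-style soft assignment (assumed, not the repository's ClusteringLayer):
# q[i, k] is proportional to (1 + ||z_i - mu_k||^2 / alpha) ** (-(alpha + 1) / 2).
import tensorflow as tf

def soft_cluster_assignment(z, centers, alpha=1.0):
    dist = tf.reduce_sum(tf.square(tf.expand_dims(z, 1) - centers), axis=2)
    q = tf.pow(1.0 + dist / alpha, -(alpha + 1.0) / 2.0)
    return q / tf.reduce_sum(q, axis=1, keepdims=True)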
def decoder(self, z):
    reconstructions = InnerProductDecoder(input_dim=FLAGS.hidden2, act=lambda x: x,
                                          dropout=0., logging=self.logging)(z)
    reconstructions = tf.reshape(reconstructions, [-1])
    return reconstructions
def _build(self):
    attns = []
    for _ in range(FLAGS.num_heads):
        attns.append(AttentiveGraphConvolutionSparse(
            input_dim=self.input_dim, output_dim=FLAGS.head_dim, adj=self.adj,
            features_nonzero=self.features_nonzero, act=tf.nn.relu,
            in_drop=self.in_drop, attn_drop=self.attn_drop,
            feat_drop=self.feat_drop, logging=self.logging)(self.inputs))

    if FLAGS.average_attn:
        # Average the first-layer heads, so layer 2 consumes head_dim features.
        self.hidden1 = tf.add_n(attns) / FLAGS.num_heads
        attns = []
        for _ in range(FLAGS.num_heads_layer2):
            attns.append(AttentiveGraphConvolution(
                input_dim=FLAGS.head_dim, output_dim=FLAGS.hidden2, adj=self.adj,
                act=lambda x: x, in_drop=self.in_drop, attn_drop=self.attn_drop,
                feat_drop=self.feat_drop, logging=self.logging)(self.hidden1))
    else:
        # Concatenate the first-layer heads, so layer 2 consumes
        # num_heads * head_dim features.
        self.hidden1 = tf.concat(attns, axis=1)
        attns = []
        for _ in range(FLAGS.num_heads_layer2):
            attns.append(AttentiveGraphConvolution(
                input_dim=FLAGS.num_heads * FLAGS.head_dim,
                output_dim=FLAGS.hidden2, adj=self.adj, act=lambda x: x,
                in_drop=self.in_drop, attn_drop=self.attn_drop,
                feat_drop=self.feat_drop, logging=self.logging)(self.hidden1))

    self.embeddings = tf.add_n(attns) / FLAGS.num_heads_layer2
    self.z_mean = self.embeddings
    self.reconstructions = InnerProductDecoder(
        input_dim=FLAGS.hidden2, act=lambda x: x,
        logging=self.logging)(self.embeddings)
def _build(self):
    with tf.variable_scope('Encoder', reuse=None):
        self.embeddings = []
        self.reconstructions = []
        for ts, (struct_adj_norm, struct_feature) in enumerate(
                zip(self.struct_adj_norms, self.struct_features)):
            features_nonzero = self.features_nonzeros[ts]
            self.hidden1 = GraphConvolutionSparse(
                input_dim=self.feature_dim, output_dim=FLAGS.hidden1,
                adj=struct_adj_norm, features_nonzero=features_nonzero,
                act=tf.nn.relu, dropout=self.dropout, logging=self.logging,
                name='e_dense_1_{}'.format(ts))(struct_feature)
            self.noise = gaussian_noise_layer(self.hidden1, 0.1)
            embeddings = GraphConvolution(
                input_dim=FLAGS.hidden1, output_dim=FLAGS.hidden2,
                adj=struct_adj_norm, act=lambda x: x, dropout=self.dropout,
                logging=self.logging, name='e_dense_2_{}'.format(ts))(self.noise)
            # For the auxiliary loss
            reconstructions = InnerProductDecoder(
                input_dim=FLAGS.hidden2, logging=self.logging)(embeddings)
            self.embeddings.append(
                tf.reshape(embeddings, [self.num_node, 1, FLAGS.hidden2]))
            self.reconstructions.append(reconstructions)

        # TCN over the per-timestep embeddings
        sequence = tf.concat(self.embeddings, axis=1, name='concat_embedding')
        self.sequence_out = TCN(num_channels=FLAGS.hidden3,
                                sequence_length=self.seq_len)(sequence)

        # Dense readout per timestep
        self.reconstructions_tss = []
        for ts in range(self.seq_len):
            reconstructions_ts = Dense(input_dim=FLAGS.hidden3[-1],
                                       classes=self.num_node)(
                                           self.sequence_out[:, ts, :])
            reconstructions_ts = tf.reshape(reconstructions_ts, [-1])
            self.reconstructions_tss.append(reconstructions_ts)
def make_decoder(self):
    self.l0 = Dense(input_dim=self.input_dim, output_dim=FLAGS.hidden3,
                    act=tf.nn.elu, dropout=0., bias=True, logging=self.logging)
    self.l1 = Dense(input_dim=FLAGS.hidden2, output_dim=FLAGS.hidden3,
                    act=tf.nn.elu, dropout=0., bias=True, logging=self.logging)
    self.l2 = Dense(input_dim=FLAGS.hidden3, output_dim=FLAGS.hidden2,
                    act=lambda x: x, dropout=self.dropout, bias=True,
                    logging=self.logging)
    self.l3 = Dense(input_dim=2 * FLAGS.hidden2, output_dim=FLAGS.hidden3,
                    act=tf.nn.elu, dropout=self.dropout, bias=True,
                    logging=self.logging)
    self.l3p5 = Dense(input_dim=FLAGS.hidden3, output_dim=FLAGS.hidden3,
                      act=tf.nn.elu, dropout=self.dropout, bias=True,
                      logging=self.logging)
    self.l4 = Dense(input_dim=FLAGS.hidden3, output_dim=1, act=lambda x: x,
                    dropout=self.dropout, bias=True, logging=self.logging)
    self.l5 = InnerProductDecoder(input_dim=FLAGS.hidden2, act=lambda x: x,
                                  logging=self.logging)
def _build(self):
    with tf.variable_scope('Encoder', reuse=None):
        self.hidden1 = GraphConvolutionSparse(
            input_dim=self.input_dim, output_dim=FLAGS.hidden1, adj=self.adj,
            features_nonzero=self.features_nonzero, act=tf.nn.relu,
            dropout=self.dropout, logging=self.logging,
            name='e_dense_1')(self.inputs)
        self.noise = gaussian_noise_layer(self.hidden1, 0.1)
        self.embeddings = GraphConvolution(
            input_dim=FLAGS.hidden1, output_dim=FLAGS.hidden2, adj=self.adj,
            # act=lambda x: x,
            act=tf.nn.relu, dropout=self.dropout, logging=self.logging,
            name='e_dense_2')(self.noise)
        # e_dense_2 outputs FLAGS.hidden2 features, so that is the input size here.
        self.embeddings = GraphConvolution(
            input_dim=FLAGS.hidden2, output_dim=FLAGS.hidden2, adj=self.adj,
            act=tf.nn.softsign, dropout=self.dropout, logging=self.logging,
            name='e_dense_3')(self.embeddings)
        self.a = tf.sign(self.embeddings)
        self.z_mean = self.a

        # add softmax
        # self.binary_embeddings = Binarize(input_dim=FLAGS.hidden2,
        #                                   output_dim=FLAGS.hidden2,
        #                                   dropout=self.dropout,
        #                                   logging=self.logging,
        #                                   name='binary_dense_3')(self.embeddings)
        # self.binary_embeddings = tf.layers.dense(self.embeddings, FLAGS.hidden2,
        #                                          tf.nn.softsign)
        # self.reconstructions = InnerProductDecoder(
        #     input_dim=FLAGS.hidden2, act=lambda x: x,
        #     logging=self.logging)(self.binary_embeddings)
        self.reconstructions = InnerProductDecoder(
            input_dim=FLAGS.hidden2, act=lambda x: x,
            logging=self.logging)(self.embeddings)
def _build(self):
    # First GCN Layer: (A, X) --> H (hidden layer features)
    self.hidden1 = GraphConvolution(
        input_dim=self.input_dim, output_dim=self.hidden1_dim, adj=self.adj,
        # features_nonzero=self.features_nonzero,
        act=tf.nn.relu, dropout=self.dropout, dtype=self.dtype,
        logging=self.logging)(self.inputs)
    # Second GCN Layer: (A, H) --> Z_mean (node embeddings)
    self.z_mean = GraphConvolution(
        input_dim=self.hidden1_dim, output_dim=self.hidden2_dim, adj=self.adj,
        act=lambda x: x, dropout=self.dropout, dtype=self.dtype,
        logging=self.logging)(self.hidden1)
    # Also second GCN Layer: (A, H) --> Z_log_stddev (for VAE noise)
    self.z_log_std = GraphConvolution(
        input_dim=self.hidden1_dim, output_dim=self.hidden2_dim, adj=self.adj,
        act=lambda x: x, dropout=self.dropout, dtype=self.dtype,
        logging=self.logging)(self.hidden1)
    # Sampling operation: z = z_mean + (random_noise_factor) * z_stddev
    self.z = self.z_mean + tf.random_normal(
        [self.n_samples, self.hidden2_dim], dtype=self.dtype) * tf.exp(self.z_log_std)
    # Inner-Product Decoder: Z (embeddings) --> A (reconstructed adj.)
    self.reconstructions = InnerProductDecoder(
        input_dim=self.hidden2_dim, act=lambda x: x,
        flatten=self.flatten_output, logging=self.logging)(self.z)
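The z_mean / z_log_std pair above is typically trained with a reconstruction term plus a KL penalty against a standard normal prior. The sketch below writes out that standard VGAE objective; pos_weight, norm, and n_nodes are assumed inputs (see the weighting sketch after the training snippet further down), and none of these names are taken from the quoted code.

# Hedged sketch of the standard VGAE objective for a z_mean / z_log_std encoder.
# labels is the flattened adjacency, logits the flattened decoder output.
import tensorflow as tf

def vgae_loss(logits, labels, z_mean, z_log_std, n_nodes, pos_weight, norm):
    recon = norm * tf.reduce_mean(
        tf.nn.weighted_cross_entropy_with_logits(
            logits=logits, targets=labels, pos_weight=pos_weight))
    # KL(q(z | X, A) || N(0, I)), averaged over nodes.
    kl = -(0.5 / n_nodes) * tf.reduce_mean(
        tf.reduce_sum(
            1 + 2 * z_log_std - tf.square(z_mean) - tf.square(tf.exp(z_log_std)),
            axis=1))
    return recon + kl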
def _build(self):
    self.hidden1 = SparseLayer(
        input_dim=self.input_dim, output_dim=FLAGS.hidden1,
        features_nonzero=self.features_nonzero, act=tf.nn.relu,
        dropout=self.dropout, logging=self.logging)(self.inputs)
    self.embeddings = DenseLayer(
        input_dim=FLAGS.hidden1, output_dim=FLAGS.hidden2, act=lambda x: x,
        dropout=self.dropout, logging=self.logging)(self.hidden1)
    self.z_mean = self.embeddings
    self.reconstruction_adjacency = InnerProductDecoder(
        input_dim=FLAGS.hidden2, act=lambda x: x,
        logging=self.logging)(self.embeddings)
    self.reconstructions = tf.reshape(self.reconstruction_adjacency, [-1])
def _build(self, args):
    self.hidden1 = GraphConvolution(
        batch_size=self.batch_size, input_dim=self.input_dim,
        output_dim=args.hidden_dim_1, adj=self.adj, act=tf.nn.relu,
        dropout=self.dropout, logging=self.logging)(self.inputs)
    self.z_mean = GraphConvolution(
        batch_size=self.batch_size, input_dim=args.hidden_dim_1,
        output_dim=args.hidden_dim_2, adj=self.adj, act=lambda x: x,
        dropout=self.dropout, logging=self.logging)(self.hidden1)
    self.reconstructions = InnerProductDecoder(
        input_dim=args.hidden_dim_2, act=tf.nn.tanh,
        logging=self.logging)(self.z_mean)
def _build(self):
    self.hidden1 = GraphConvolutionSparse(
        input_dim=self.input_dim, output_dim=self.hidden1_dim, adj=self.adj,
        features_nonzero=self.features_nonzero, act=tf.nn.relu,
        dropout=self.dropout, logging=self.logging)(self.inputs)
    self.embeddings = GraphConvolution(
        input_dim=self.hidden1_dim, output_dim=self.hidden2_dim, adj=self.adj,
        act=lambda x: x, dropout=self.dropout, logging=self.logging)(self.hidden1)
    self.z_mean = self.embeddings
    self.reconstructions = InnerProductDecoder(
        input_dim=self.hidden2_dim, act=lambda x: x,
        logging=self.logging)(self.embeddings)
def _build(self):
    with tf.variable_scope('Encoder', reuse=None):
        self.hidden1 = GraphConvolutionSparse(
            input_dim=self.input_dim, output_dim=FLAGS.hidden1, adj=self.adj,
            features_nonzero=self.features_nonzero, act=tf.nn.relu,
            dropout=self.dropout, logging=self.logging,
            name='e_dense_1')(self.inputs)
        # self.noise = gaussian_noise_layer(self.hidden1, 0.1)
        self.embeddings = GraphConvolution(
            input_dim=FLAGS.hidden1, output_dim=FLAGS.hidden2, adj=self.adj,
            act=lambda x: x, dropout=self.dropout, logging=self.logging,
            name='e_dense_2')(self.hidden1)
        self.z_mean = self.embeddings
        self.embeddings = tf.identity(self.embeddings, name="emb")
        self.embeddings_long = tf.layers.dense(inputs=self.embeddings, units=64,
                                               activation=tf.nn.relu)
        self.embeddings_concat = tf.concat(
            [self.privacy_attr, self.embeddings_long], 1)
        self.reconstructions = InnerProductDecoder(
            input_dim=FLAGS.hidden2, act=lambda x: x,
            logging=self.logging)(self.embeddings_concat)
        self.attr_logits = tf.layers.dense(inputs=self.embeddings_concat,
                                           units=self.dim_attr[0])
        self.pri_logits = dense(self.embeddings_long, 64, self.dim_attr[1],
                                name='pri_den')
def _build(self, args):
    self.hidden1 = GraphConvolution(
        batch_size=self.batch_size, input_dim=self.input_dim,
        output_dim=args.hidden_dim_1, adj=self.adj, act=tf.nn.relu,
        dropout=self.dropout, logging=self.logging)(self.inputs)
    self.hidden2 = GraphConvolution(
        batch_size=self.batch_size, input_dim=args.hidden_dim_1,
        output_dim=args.hidden_dim_2, adj=self.adj, act=tf.nn.relu,
        dropout=self.dropout, logging=self.logging)(self.hidden1)
    self.z_mean = GraphConvolution(
        batch_size=self.batch_size, input_dim=args.hidden_dim_2,
        output_dim=args.hidden_dim_3, adj=self.adj, act=lambda x: x,
        dropout=self.dropout, logging=self.logging)(self.hidden2)
    self.z_log_std = GraphConvolution(
        batch_size=self.batch_size, input_dim=args.hidden_dim_2,
        output_dim=args.hidden_dim_3, adj=self.adj, act=lambda x: x,
        dropout=self.dropout, logging=self.logging)(self.hidden2)
    self.z = self.z_mean + tf.random_normal(
        [self.n_samples, args.hidden_dim_3]) * tf.exp(self.z_log_std / 2.)
    self.reconstructions = InnerProductDecoder(
        input_dim=args.hidden_dim_3, act=tf.nn.tanh, logging=self.logging)(self.z)
def __init__(self, modes, num_features, encode_features, gcn_batch_norm,
             gcn_hiddens, gcn_aggs, gcn_relus, z_dim, z_agg, graphite_relu,
             graphite_layers, dropout, autoregressive):
    super(MultiGCNModelFeedback, self).__init__(
        modes, num_features, encode_features, gcn_batch_norm, gcn_hiddens,
        gcn_aggs, gcn_relus, z_dim, z_agg, dropout)
    self.graphites = []
    for i in range(graphite_layers):
        self.graphites.append(
            MyMultiGraphite(modes, num_features, z_dim, z_dim,
                            relu=i != graphite_layers - 1 and graphite_relu))
    self.inner = InnerProductDecoder()
    for i, graphite in enumerate(self.graphites):
        self.add_module('graphite{}'.format(i), graphite)
    self.autoregressive = autoregressive
    self.modes = modes
def __init__(self, adj, input_dim, output_dim, dropout, hidden_dim,
             features_nonzero):
    """GCN network constructor.

    Parameters:
        adj (sparse torch FloatTensor): sparse adjacency matrix
        dropout (float): dropout rate
        input_dim (int): number of features
        output_dim (int): output feature dimension
        hidden_dim (int): hidden feature dimension
        features_nonzero (int): number of non-zero features
    """
    super(GCNModel, self).__init__()
    self.input_dim = input_dim
    self.output_dim = output_dim
    self.hidden_dim = hidden_dim
    self.features_nonzero = features_nonzero
    self.adj = adj
    self.dropout = dropout
    self.graph_conv_sparse = GraphConvolutionSparse(
        input_dim=self.input_dim, output_dim=self.hidden_dim, adj=self.adj,
        features_nonzero=self.features_nonzero, act=nn.ReLU(),
        dropout=self.dropout)
    self.graph_conv = GraphConvolution(
        input_dim=self.hidden_dim, output_dim=self.output_dim, adj=self.adj,
        act=dummy_func, dropout=self.dropout)
    self.inner_prod = InnerProductDecoder(act=dummy_func)
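The forward pass for this PyTorch module is not shown. A minimal sketch consistent with the constructor above would simply chain the two convolutions and the decoder; the method below is an assumption about how the module is wired, not code from the repository.

# Assumed forward pass for GCNModel (illustrative only; the real module may differ).
def forward(self, x):
    hidden = self.graph_conv_sparse(x)             # sparse features -> hidden_dim
    embeddings = self.graph_conv(hidden)           # hidden_dim -> output_dim embeddings
    reconstructions = self.inner_prod(embeddings)  # inner-product adjacency logits
    return reconstructions, embeddings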
def make_decoder(self):
    self.l0 = GraphiteSparse(input_dim=self.input_dim, output_dim=FLAGS.hidden3,
                             act=tf.nn.relu, dropout=0., logging=self.logging)
    self.l1 = Graphite(input_dim=FLAGS.hidden2, output_dim=FLAGS.hidden3,
                       act=tf.nn.relu, dropout=0., logging=self.logging)
    self.l2 = Graphite(input_dim=FLAGS.hidden3, output_dim=FLAGS.hidden2,
                       act=lambda x: x, dropout=self.dropout, logging=self.logging)
    self.l3 = InnerProductDecoder(input_dim=FLAGS.hidden2, act=lambda x: x,
                                  logging=self.logging)
    self.l4 = Scale(input_dim=FLAGS.hidden2, logging=self.logging)
def __init__(self, ninput, nhid, nout, dropout):
    super(GCIM, self).__init__()
    # Mapping table for cluster centers: a list of tuples holding each cluster
    # center's feature vector and its corresponding class label
    self.class_centers_map = None
    # List of all cluster centers
    self.clusting_center_list = None
    # Dimension of the input vectors
    self.ninput = ninput
    # Dimension of the hidden vectors
    self.nhid = nhid
    # Dimension of the output vectors; equals the number of classes because a
    # softmax is applied
    self.nout = nout
    # Decoder
    self.decoder = InnerProductDecoder()
    # Dropout rate
    self.dropout = dropout
    # Encoder
    self.encoder = GCN(ninput=self.ninput, nhid=2 * self.nhid, nout=self.nhid,
                       dropout=self.dropout)
    # Fully connected layer -> to be removed
    self.fc = nn.Linear(self.nhid, self.nout)
def build_generative_network(self, z):
    self.x_reconstructed = InnerProductDecoder(input_dim=self.latent_dim,
                                               act=lambda x: x)(z)
    return self.x_reconstructed
                                      output_dim=FLAGS.hidden1,
                                      adj=placeholders['adj_norm'],
                                      features_nonzero=features_nonzero,
                                      act=tf.nn.relu,
                                      dropout=placeholders['dropout'])(placeholders['feature'])
noise = gaussian_noise_layer(hidden1, 0.1)
embeddings = GraphConvolution(input_dim=FLAGS.hidden1, output_dim=FLAGS.hidden2,
                              adj=placeholders['adj_norm'], act=lambda x: x,
                              dropout=placeholders['dropout'])(noise)
reconstructions = InnerProductDecoder(input_dim=FLAGS.hidden2,
                                      act=lambda x: x)(embeddings)
label = tf.reshape(
    tf.sparse_tensor_to_dense(placeholders['adj_orig'], validate_indices=False),
    [-1])

# Loss
cost = norm * tf.reduce_mean(
    tf.nn.weighted_cross_entropy_with_logits(
        logits=reconstructions, targets=label, pos_weight=pos_weight))

# Optimizer
optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate)  # Adam Optimizer
opt_op = optimizer.minimize(cost)

# Initialize session
sess = tf.Session()
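The cost above depends on pos_weight and norm, which in the usual GAE setup are computed from the adjacency before training. A hedged sketch of that computation follows; adj is assumed to be the unnormalized scipy sparse adjacency, and the names are illustrative rather than taken from this snippet.

# Hedged sketch: class-imbalance weights for the weighted cross-entropy above,
# computed from a scipy sparse adjacency matrix (names are illustrative).
import numpy as np

def edge_weights(adj):
    n = adj.shape[0]
    num_edges = adj.sum()
    pos_weight = float(n * n - num_edges) / num_edges   # up-weight the rare positives
    norm = n * n / float(2 * (n * n - num_edges))       # rescale the mean loss
    return pos_weight, norm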
with tf.variable_scope("encoder"): H = Xrt first_layer, attention_weights_0 = encoder(Art, H, weight_layer_0, weight_atten_00, weight_atten_01, 0) z_mean, attention_weights_1 = encoder(Art, first_layer, weight_layer_1, weight_atten_10, weight_atten_11, 1) z_log_stf, attention_weights_1s = encoder(Art, first_layer, weight_layer_1, weight_atten_10, weight_atten_11, 2) z = z_mean + tf.random_normal([node_no, 512]) * tf.exp(z_log_stf) reconstructions = InnerProductDecoder(weight_layer_1)(z) opt = OptimizerVAEt( reconstructions, tf.reshape(tf.sparse_tensor_to_dense(adj_origr, False), [-1]), num_nodes, pos_weight, norm, z_log_stf, z_mean) import time sess = tf.Session() sess.run(tf.global_variables_initializer()) cost_val = [] acc_val = []
def _build(self):
    self.hidden1 = defaultdict(list)
    for i, j in self.edge_types:
        self.hidden1[i].append(GraphConvolutionSparseMulti(
            input_dim=self.input_dim, output_dim=FLAGS.hidden1,
            edge_type=(i, j), num_types=self.edge_types[i, j],
            adj_mats=self.adj_mats, nonzero_feat=self.nonzero_feat,
            act=lambda x: x, dropout=self.dropout,
            logging=self.logging)(self.inputs[j]))
    for i, hid1 in self.hidden1.items():
        self.hidden1[i] = tf.nn.relu(tf.add_n(hid1))

    self.embeddings = defaultdict(list)
    for i, j in self.edge_types:
        self.embeddings[i].append(GraphConvolutionMulti(
            input_dim=FLAGS.hidden1, output_dim=FLAGS.hidden2,
            edge_type=(i, j), num_types=self.edge_types[i, j],
            adj_mats=self.adj_mats, act=lambda x: x, dropout=self.dropout,
            logging=self.logging)(self.hidden1[j]))
    for i, embeds in self.embeddings.items():
        # self.embeddings[i] = tf.nn.relu(tf.add_n(embeds))
        self.embeddings[i] = tf.add_n(embeds)

    self.row_embeds = [None] * self.num_row_types
    self.col_embeds = [None] * self.num_col_types
    for i, j in self.edge_types:
        self.row_embeds[i] = self.embeddings[i]
        self.col_embeds[j] = self.embeddings[j]

    self.edge_type2decoder = {}
    for i, j in self.edge_types:
        decoder = self.decoders[i, j]
        if decoder == 'innerproduct':
            self.edge_type2decoder[i, j] = InnerProductDecoder(
                input_dim=FLAGS.hidden2, logging=self.logging, edge_type=(i, j),
                num_types=self.edge_types[i, j], act=lambda x: x,
                dropout=self.dropout)
        elif decoder == 'distmult':
            self.edge_type2decoder[i, j] = DistMultDecoder(
                input_dim=FLAGS.hidden2, logging=self.logging, edge_type=(i, j),
                num_types=self.edge_types[i, j], act=lambda x: x,
                dropout=self.dropout)
        elif decoder == 'bilinear':
            self.edge_type2decoder[i, j] = BilinearDecoder(
                input_dim=FLAGS.hidden2, logging=self.logging, edge_type=(i, j),
                num_types=self.edge_types[i, j], act=lambda x: x,
                dropout=self.dropout)
        elif decoder == 'dedicom':
            self.edge_type2decoder[i, j] = DEDICOMDecoder(
                input_dim=FLAGS.hidden2, logging=self.logging, edge_type=(i, j),
                num_types=self.edge_types[i, j], act=lambda x: x,
                dropout=self.dropout)
        else:
            raise ValueError('Unknown decoder type')

    self.latent_inters = []
    self.latent_varies = []
    for edge_type in self.edge_types:
        decoder = self.decoders[edge_type]
        for k in range(self.edge_types[edge_type]):
            if decoder == 'innerproduct':
                glb = tf.eye(FLAGS.hidden2, FLAGS.hidden2)
                loc = tf.eye(FLAGS.hidden2, FLAGS.hidden2)
            elif decoder == 'distmult':
                glb = tf.diag(
                    self.edge_type2decoder[edge_type].vars['relation_%d' % k])
                loc = tf.eye(FLAGS.hidden2, FLAGS.hidden2)
            elif decoder == 'bilinear':
                glb = self.edge_type2decoder[edge_type].vars['relation_%d' % k]
                loc = tf.eye(FLAGS.hidden2, FLAGS.hidden2)
            elif decoder == 'dedicom':
                glb = self.edge_type2decoder[edge_type].vars['global_interaction']
                loc = tf.diag(
                    self.edge_type2decoder[edge_type].vars['local_variation_%d' % k])
            else:
                raise ValueError('Unknown decoder type')
            self.latent_inters.append(glb)
            self.latent_varies.append(loc)
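The four decoder names selected above correspond to different bilinear scoring functions over pairs of embeddings. For reference, the sketch below writes out the forms these names conventionally denote; the relation parameters r_k, R_k, D_k stand in for the 'relation_%d', 'global_interaction', and 'local_variation_%d' variables referenced in the loop, and the exact parameterization in this repository may differ.

# Hedged reference for the scoring functions these decoder names usually denote,
# for a single relation k between embeddings zi and zj (parameter names illustrative):
#   innerproduct: zi^T zj
#   distmult:     zi^T diag(r_k) zj
#   bilinear:     zi^T R_k zj
#   dedicom:      zi^T D_k R D_k zj
import tensorflow as tf

def score_pair(zi, zj, kind, r_k=None, R_k=None, D_k=None, R=None):
    if kind == 'innerproduct':
        return tf.reduce_sum(zi * zj, axis=-1)
    if kind == 'distmult':
        return tf.reduce_sum(zi * r_k * zj, axis=-1)
    if kind == 'bilinear':
        return tf.reduce_sum(tf.matmul(zi, R_k) * zj, axis=-1)
    if kind == 'dedicom':
        left = tf.matmul(zi * D_k, R) * D_k
        return tf.reduce_sum(left * zj, axis=-1)
    raise ValueError('Unknown decoder type: %s' % kind)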
def _build(self):
    # Total bias, not used for now
    entity_bias = self.e
    entity_bias_matrix = tf.tile(entity_bias, [1, self.n_samples])
    entity_bias_matrix += tf.transpose(entity_bias_matrix)
    self.total_bias = entity_bias_matrix + tf.tile(
        self.c, [self.n_samples, self.n_samples])

    for idx, hidden_layer in enumerate(self.hidden):
        if idx == 0:
            if self.num_hidden_layers == 1:
                activ = lambda x: x
            else:
                activ = lambda x: tf.nn.leaky_relu(x, alpha=0.2)
            h = GraphConvolutionSparse(
                input_dim=self.input_dim, output_dim=hidden_layer, adj=self.adj,
                features_nonzero=self.features_nonzero, act=activ,
                dropout=self.dropout, logging=self.logging)(self.inputs)
        elif idx == self.num_hidden_layers - 1:
            h = GraphConvolution(
                input_dim=self.hidden[idx - 1], output_dim=hidden_layer,
                adj=self.adj, act=lambda x: x, dropout=self.dropout,
                logging=self.logging)(h)
        else:
            h = GraphConvolution(
                input_dim=self.hidden[idx - 1], output_dim=hidden_layer,
                adj=self.adj, act=lambda x: tf.nn.leaky_relu(x, alpha=0.2),
                dropout=self.dropout, logging=self.logging)(h)
    self.pi_logit = h

    # The 0.01 offset keeps the Kumaraswamy/Beta parameters strictly positive
    beta_a = tf.nn.softplus(self.a) + 0.01
    beta_b = tf.nn.softplus(self.b) + 0.01
    beta_a = tf.expand_dims(beta_a, 0)
    beta_b = tf.expand_dims(beta_b, 0)
    self.beta_a = tf.tile(beta_a, [self.n_samples, 1])
    self.beta_b = tf.tile(beta_b, [self.n_samples, 1])

    self.v = kumaraswamy_sample(self.beta_a, self.beta_b)
    v_term = tf.log(self.v + SMALL)
    self.log_prior = tf.cumsum(v_term, axis=1)
    self.logit_post = self.pi_logit + logit(tf.exp(self.log_prior))

    # Note: logsample is just logit(z_discrete), unless we've rounded
    self.z, _, _, self.y_sample = sample(None, None, self.logit_post, None, None,
                                         FLAGS.temp_post, calc_v=False,
                                         calc_real=False)
    self.z = tf.cond(tf.equal(self.training, tf.constant(False)),
                     lambda: tf.round(self.z), lambda: self.z)

    if FLAGS.deep_decoder:
        f = tf.nn.leaky_relu(tf.matmul(self.z, self.w_gen_1) + self.b_gen_1,
                             alpha=0.2)
        f = tf.matmul(f, self.w_gen_2) + self.b_gen_2
        self.reconstructions = InnerProductDecoder(act=lambda x: x,
                                                   logging=self.logging)(f)
    else:
        f = tf.matmul(self.z, self.w_gen_1) + self.b_gen_1
        self.reconstructions = InnerProductDecoder(act=lambda x: x,
                                                   logging=self.logging)(f)

    self.x_hat = tf.reshape(tf.matmul(self.z, self.w_gen_x) + self.b_gen_x, [-1])