Code Example #1
    def unlabel_vae_loss(self, x_word, decoder_word_mean, _z_mean, _z_log_var):
        """VAE loss for unlabeled data: reconstruction cross-entropy plus KL divergence."""
        # Reconstruction term: sparse categorical cross-entropy over the word sequence,
        # scaled by the entity sequence length.
        self.word_loss = K.mean(
            self.entity_sequence_length
            * metrics.sparse_categorical_crossentropy(x_word, decoder_word_mean))

        # KL divergence between the approximate posterior N(z_mean, exp(z_log_var)) and N(0, I).
        self.kl_loss = K.mean(
            -0.5 * K.sum(1 + _z_log_var - K.square(_z_mean) - K.exp(_z_log_var), axis=-1))

        return self.kl_loss + self.word_loss
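A minimal usage sketch (not from the original project) of how a loss like this can be wired into Keras as a closure over the latent tensors; the names make_unlabel_vae_loss, z_mean, z_log_var, seq_len, and vae_model are hypothetical:

from keras import backend as K
from keras import metrics

def make_unlabel_vae_loss(z_mean, z_log_var, entity_sequence_length):
    # Returns a standard (y_true, y_pred) Keras loss; the KL term uses the
    # latent tensors captured via closure.
    def loss(x_word, decoder_word_mean):
        word_loss = K.mean(
            entity_sequence_length
            * metrics.sparse_categorical_crossentropy(x_word, decoder_word_mean))
        kl_loss = K.mean(
            -0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1))
        return word_loss + kl_loss
    return loss

# vae_model.compile(optimizer='rmsprop',
#                   loss=make_unlabel_vae_loss(z_mean, z_log_var, seq_len))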
Code Example #2
File: vae_bsg.py  Project: edwinlima/ull
# Decoder: a dense hidden layer followed by a softmax layer over the vocabulary
decoder_h = Dense(original_dim, name="decoder")
decoder_mean = Dense(original_dim, activation='softmax')
h_decoded = decoder_h(z)
probs = decoder_mean(h_decoded)
# need to recover the corpus size here
print('probs=', probs.shape)
#probs = Lambda(lambda y: K.repeat_elements(y, context_sz, axis=0))(x_decoded_mean )

vae = Model(inputs=[x_con, x_tar], outputs=probs)

x_tar_2 = K.reshape(x_tar, (-1,))  # flatten target word indices to a 1-D tensor

# VAE loss = xent_loss + kl_loss
# this will reintroduce batch size
probs = K.repeat_elements(probs, context_sz, axis=0)
negloglikelihood = metrics.sparse_categorical_crossentropy(x_con, probs)
print("neg_log=", negloglikelihood.shape)#]
negloglikelihood=K.reshape(negloglikelihood, (-1,context_sz))
print("neg_log=", negloglikelihood.shape)
# sum the NLL over the context window, average over the batch, negate to get the log-likelihood
loglikelihood = -K.mean(K.sum(negloglikelihood, axis=1), axis=0)

print("neg_log=", negloglikelihood.shape)

# Embedding tables parameterizing the word-specific prior:
# L gives the prior location; S feeds the softplus layer that gives the prior scale
L = Embedding(input_dim=original_dim, output_dim=emb_sz, name="embedding_means")
S = Embedding(input_dim=original_dim, output_dim=emb_sz)
# Prior for target word
prior_loc = L(x_tar_2)
prior_scale = Dense(emb_sz, activation='softplus')(S(x_tar_2))

print("z_log_var=",z_log_var.shape)
print("z_mean=",z_mean.shape)