Example #1
File: vae.py  Project: YuanKQ/DDI-VAE
    def _objective(self):

        ############
        ''' Cost '''
        ############

        # Encode x into a latent sample plus the Gaussian posterior parameters
        # (mean and log-variances), then decode the sample back into x-space.
        self.z_sample, self.z_mu, self.z_lsgms = self._generate_zx(self.x)
        self.x_recon, self.x_recon_logits = self._generate_xz(self.z_sample)

        if self.distributions['p_z'] == 'gaussian_marg':
            # E_q[log p(z)] under a standard-normal prior, in closed form.
            prior_z = tf.reduce_sum(
                utils.tf_gaussian_marg(self.z_mu, self.z_lsgms), 1)

        if self.distributions['q_z'] == 'gaussian_marg':
            # E_q[log q(z|x)], the negative entropy of the Gaussian posterior.
            post_z = tf.reduce_sum(utils.tf_gaussian_ent(self.z_lsgms), 1)

        if self.distributions['p_x'] == 'bernoulli':
            # Reconstruction log-likelihood under a Bernoulli decoder.
            self.log_lik = -tf.reduce_sum(
                utils.tf_binary_xentropy(self.x, self.x_recon), 1)

        # L2 weight decay over all trainable variables.
        l2 = tf.add_n([tf.nn.l2_loss(v) for v in tf.trainable_variables()])

        # Negative ELBO (KL(q||p) = post_z - prior_z, minus the reconstruction
        # log-likelihood) plus weight decay.
        self.cost = tf.reduce_mean(post_z - prior_z - self.log_lik) \
            + self.l2_loss * l2

        ##################
        ''' Evaluation '''
        ##################

        # Re-run encoder and decoder in test phase, reusing trained weights.
        self.z_sample_eval, _, _ = self._generate_zx(
            self.x, phase=pt.Phase.test, reuse=True)
        self.x_recon_eval, _ = self._generate_xz(
            self.z_sample_eval, phase=pt.Phase.test, reuse=True)

        self.eval_log_lik = -tf.reduce_mean(tf.reduce_sum(
            utils.tf_binary_xentropy(self.x, self.x_recon_eval), 1))
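These examples lean on a small `utils` module that is not shown here. A minimal sketch of what the helpers plausibly compute, assuming the standard closed-form Gaussian ELBO terms and an element-wise binary cross-entropy (the clipping epsilon is an assumption; the actual DDI-VAE implementation may differ):

import numpy as np
import tensorflow as tf

def tf_gaussian_marg(mu, log_sigma_sq):
    # E_q[log p(z)] for p(z) = N(0, I): -0.5 * (log(2*pi) + mu^2 + sigma^2).
    return -0.5 * (tf.log(2.0 * np.pi) + tf.square(mu) + tf.exp(log_sigma_sq))

def tf_gaussian_ent(log_sigma_sq):
    # E_q[log q(z|x)] (negative entropy) of a diagonal Gaussian:
    # -0.5 * (log(2*pi) + 1 + log(sigma^2)).
    return -0.5 * (tf.log(2.0 * np.pi) + 1.0 + log_sigma_sq)

def tf_binary_xentropy(x, x_recon, eps=1e-8):
    # Element-wise binary cross-entropy, with x_recon clipped away from
    # {0, 1} for numerical stability.
    x_recon = tf.clip_by_value(x_recon, eps, 1.0 - eps)
    return -(x * tf.log(x_recon) + (1.0 - x) * tf.log(1.0 - x_recon))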
Example #3
    def _objective(self):

        ############
        ''' Cost '''
        ############

        # Encode x, decode the latent sample into a reconstruction x_hat, then
        # re-encode x_hat (z_tau) for the correlation term computed below.
        self.z_sample, self.z_mu, self.z_lsgms = self._generate_zx(self.x)

        self.x_hat = self._generate_xz(self.z_sample)

        self.z_tau, _, _ = self._generate_zx(self.x_hat, reuse=True)

        # These ELBO terms are still computed but do not enter the active
        # cost assigned below.
        if self.distributions['p_z'] == 'gaussian_marg':
            prior_z = tf.reduce_sum(
                utils.tf_gaussian_marg(self.z_mu, self.z_lsgms), 1)

        if self.distributions['q_z'] == 'gaussian_marg':
            post_z = tf.reduce_sum(utils.tf_gaussian_ent(self.z_lsgms), 1)

        if self.distributions['p_x'] == 'bernoulli':
            self.log_lik = -tf.reduce_sum(
                utils.tf_binary_xentropy(self.x, self.x_hat), 1)

        # L2 weight penalty (likewise unused in the active cost).
        l2 = tf.add_n([tf.nn.l2_loss(v) for v in tf.trainable_variables()])

        # Closed-form KL( N(mu, sigma^2) || N(0, I) ) per example, then
        # averaged over the batch.
        latent_cost = -0.5 * tf.reduce_sum(
            1 + self.z_lsgms - tf.square(self.z_mu) - tf.exp(self.z_lsgms),
            axis=1)
        latent_loss = tf.reduce_mean(latent_cost)

        # Batch moments of z and of its re-encoding z_tau.
        z_mean, z_var = tf.nn.moments(self.z_sample, axes=[0], keep_dims=True)
        z_tau_mean, z_tau_var = tf.nn.moments(self.z_tau,
                                              axes=[0],
                                              keep_dims=True)

        # Covariance-style numerator between centered z and z_tau; the
        # element-wise product requires matching [batch, latent] shapes, so no
        # transpose is applied (consistent with Example #4 below).
        num = tf.reduce_mean(tf.multiply(self.z_sample - z_mean,
                                         self.z_tau - z_tau_mean),
                             axis=[0, 1])
        den = tf.reduce_mean(tf.multiply(z_var, tf.transpose(z_tau_var)))

        # Negative correlation-style score: minimizing it pushes z and z_tau
        # to agree (cycle consistency through the decoder).
        self.corr_loss = -num / (den + 1e-6)

        self.mse_loss = tf.losses.mean_squared_error(labels=self.y,
                                                     predictions=self.x_hat)

        # self.cost = tf.reduce_mean(post_z - prior_z) + self.corr_loss + self.mse_loss + self.l2_loss * l2

        # Active objective: reconstruction MSE plus the KL regularizer.
        self.cost = self.mse_loss + latent_loss

        ##################
        ''' Evaluation '''
        ##################

        # Evaluation-mode pass, reusing trained weights.
        self.z_sample_eval, _, _ = self._generate_zx(self.x, reuse=True)
        self.x_hat_eval = self._generate_xz(self.z_sample_eval, reuse=True)

        self.eval_log_lik = -tf.reduce_mean(
            tf.reduce_sum(utils.tf_binary_xentropy(self.x, self.x_hat_eval),
                          1))
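The `latent_cost` expression above is the closed-form KL divergence between the encoder's diagonal Gaussian q(z|x) = N(mu, sigma^2) and the unit-Gaussian prior, with `z_lsgms` holding log(sigma^2):

    KL( N(mu, sigma^2) || N(0, I) ) = -1/2 * sum_d ( 1 + log(sigma_d^2) - mu_d^2 - sigma_d^2 )

`tf.reduce_sum(..., axis=1)` evaluates this per example and `tf.reduce_mean` averages it over the batch; analytically it equals the `post_z - prior_z` combination built from the `utils` helpers in Example #1.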
Example #4
    def _objective(self):

        ############
        ''' Cost '''
        ############

        # As in Example #3: encode x, decode to x_hat, and re-encode x_hat to
        # obtain z_tau for the correlation term.
        self.z_sample, self.z_mu, self.z_lsgms = self._generate_zx(self.x)

        self.x_hat = self._generate_xz(self.z_sample)

        self.z_tau, _, _ = self._generate_zx(self.x_hat, reuse=True)

        # if self.distributions['p_z'] == 'gaussian_marg':
        #     prior_z = tf.reduce_sum(utils.tf_gaussian_marg(self.z_mu, self.z_lsgms), 1)
        #
        # if self.distributions['q_z'] == 'gaussian_marg':
        #     post_z = tf.reduce_sum(utils.tf_gaussian_ent(self.z_lsgms), 1)
        #
        # if self.distributions['p_x'] == 'bernoulli':
        #     self.log_lik = - tf.reduce_sum(utils.tf_binary_xentropy(self.x, self.x_hat), 1)

        # L2 weight penalty (not included in the cost assigned below).
        l2 = tf.add_n([tf.nn.l2_loss(v) for v in tf.trainable_variables()])

        # Closed-form KL( N(mu, sigma^2) || N(0, I) ), averaged over the batch.
        latent_cost = -0.5 * tf.reduce_sum(
            1 + self.z_lsgms - tf.square(self.z_mu) - tf.exp(self.z_lsgms),
            axis=1)
        latent_loss = tf.reduce_mean(latent_cost)

        # Batch moments of z and of its re-encoding z_tau.
        z_mean, z_var = tf.nn.moments(self.z_sample, axes=[0], keep_dims=True)
        z_tau_mean, z_tau_var = tf.nn.moments(self.z_tau,
                                              axes=[0],
                                              keep_dims=True)

        # Covariance-style numerator and variance-product denominator for the
        # correlation term.
        num = tf.reduce_mean(tf.multiply((self.z_sample - z_mean),
                                         (self.z_tau - z_tau_mean)),
                             axis=[0, 1])
        den = tf.reduce_mean(tf.multiply(z_var, tf.transpose(z_tau_var)))

        self.y_pred = self._generate_yz(self.z_sample)  # shape: (batch, T, C)

        # # TODO format code into metric
        # eval_metric_ops = {
        #     "accuracy": tf.metrics.accuracy(labels=self.y_one_hot, predictions=self.predictions["classes"])
        # }
        #
        # self.predictions = {
        #     "classes": tf.argmax(input=self.y_pred, axis=2),
        #     "class_target": tf.argmax(input=self.y_one_hot, axis=2),
        #     "probabilities": tf.nn.softmax(self.y_pred, name="softmax")
        # }

        # Streaming accuracy over argmax class predictions; tf.metrics creates
        # local variables that must be initialized before use.
        _, self.accuracy = tf.metrics.accuracy(
            labels=tf.argmax(self.y_one_hot, 2),
            predictions=tf.argmax(self.y_pred, 2))

        # Classification loss on the decoded label predictions.
        self.predict_loss = tf.losses.softmax_cross_entropy(
            onehot_labels=self.y_one_hot, logits=self.y_pred)

        # Negative correlation between z and z_tau (cycle consistency).
        self.corr_loss = -num / (den + 1e-6)

        self.mse_loss = tf.losses.mean_squared_error(labels=self.x_con,
                                                     predictions=self.x_hat)

        # self.cost = tf.reduce_mean(post_z - prior_z) + self.corr_loss + self.mse_loss + self.l2_loss * l2

        # Combined objective: KL regularizer + reconstruction MSE
        # + classification loss + correlation loss.
        self.cost = latent_loss + self.mse_loss + self.predict_loss + self.corr_loss

        ##################
        ''' Evaluation '''
        ##################

        self.z_sample_eval, _, _ = self._generate_zx(self.x, reuse=True)
        self.x_hat_eval = self._generate_xz(self.z_sample_eval, reuse=True)

        self.eval_log_lik = -tf.reduce_mean(
            tf.reduce_sum(utils.tf_binary_xentropy(self.x, self.x_hat_eval),
                          1))
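For context, a minimal, hypothetical TF1 training-loop sketch showing how an `_objective` graph like the ones above would typically be driven. `DDIVAE` and `load_batches` are stand-ins, not names taken from the snippets, and Example #4 would additionally feed its label and continuous-input placeholders:

import tensorflow as tf

model = DDIVAE()  # hypothetical constructor that builds the graph above
train_op = tf.train.AdamOptimizer(1e-3).minimize(model.cost)

with tf.Session() as sess:
    # tf.metrics.accuracy (Example #4) creates local variables, so both
    # initializers are required.
    sess.run([tf.global_variables_initializer(),
              tf.local_variables_initializer()])
    for x_batch in load_batches():
        _, cost = sess.run([train_op, model.cost],
                           feed_dict={model.x: x_batch})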