    def _mc_loss(self, y_target, y_pred):
        # Monte-Carlo loss: average the per-sample von Mises likelihoods for
        # each angle, mix each average with a uniform component, and sum the
        # negative log-likelihoods over the three angles.

        az_target, el_target, ti_target = self.unpack_target(y_target)

        sample_az_likelihoods = []
        sample_el_likelihoods = []
        sample_ti_likelihoods = []

        # each sample occupies n_feat consecutive columns of y_pred:
        # a (cos, sin) mean and a kappa for each of the three angles
        n_feat = 9

        for sid in range(self.n_samples):

            sample_preds = y_pred[:, sid * n_feat:(sid + 1) * n_feat]
            az_mean, az_kappa, el_mean, el_kappa, ti_mean, ti_kappa = \
                self.unpack_sample_preds(sample_preds)

            sample_az_likelihoods.append(K.exp(von_mises_log_likelihood_tf(az_target,
                                                                           az_mean,
                                                                           az_kappa)))

            sample_el_likelihoods.append(K.exp(von_mises_log_likelihood_tf(el_target,
                                                                           el_mean,
                                                                           el_kappa)))

            sample_ti_likelihoods.append(K.exp(von_mises_log_likelihood_tf(ti_target,
                                                                           ti_mean,
                                                                           ti_kappa)))
        az_loss = -K.log(P_UNIFORM * self.az_gamma +
                         (1 - self.az_gamma) * K.mean(concatenate(sample_az_likelihoods), axis=1))

        el_loss = -K.log(P_UNIFORM * self.el_gamma +
                         (1 - self.el_gamma) * K.mean(concatenate(sample_el_likelihoods), axis=1))

        ti_loss = -K.log(P_UNIFORM * self.ti_gamma +
                         (1 - self.ti_gamma) * K.mean(concatenate(sample_ti_likelihoods), axis=1))

        return az_loss + el_loss + ti_loss
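
Every example in this listing delegates to von_mises_log_likelihood_tf, whose definition is not shown. A minimal sketch of what such a helper can look like, assuming angles are encoded as (cos, sin) pairs, kappa has shape [n_points, 1], and tf.math.bessel_i0e is available (the project's actual implementation may differ):

import numpy as np
import tensorflow as tf

def von_mises_log_likelihood_tf(y_true, mu, kappa):
    # cos(theta - mu) recovered as the dot product of the (cos, sin) encodings
    cos_dist = tf.reduce_sum(y_true * mu, axis=1, keepdims=True)
    # log I0(kappa) via the exponentially scaled Bessel function, for stability
    log_bessel_i0 = kappa + tf.log(tf.math.bessel_i0e(kappa))
    # log VM(theta | mu, kappa) = kappa * cos(theta - mu) - log(2*pi*I0(kappa))
    return kappa * cos_dist - np.log(2.0 * np.pi) - log_bessel_i0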
    def _von_mises_neg_log_likelihood_keras_fixed(y_true, y_pred):
        # predicted (cos, sin) mean direction with a fixed, non-learned kappa
        mu_pred = y_pred[:, 0:2]
        kappa_pred = tf.ones([tf.shape(y_pred)[0], 1]) * fixed_kappa_value
        return -K.mean(
            von_mises_log_likelihood_tf(y_true, mu_pred, kappa_pred))
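
fixed_kappa_value is not defined in the snippet itself, so it is presumably captured from an enclosing scope. A hypothetical factory illustrating that pattern (the wrapper name is illustrative, not from the source):

def make_fixed_kappa_loss(fixed_kappa_value):
    # hypothetical wrapper: fixed_kappa_value is closed over by the loss
    def _von_mises_neg_log_likelihood_keras_fixed(y_true, y_pred):
        mu_pred = y_pred[:, 0:2]
        kappa_pred = tf.ones([tf.shape(y_pred)[0], 1]) * fixed_kappa_value
        return -K.mean(
            von_mises_log_likelihood_tf(y_true, mu_pred, kappa_pred))
    return _von_mises_neg_log_likelihood_keras_fixed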
Example #3
    def likelihood_loss(self, y_target, y_pred):
        # single-prediction variant of _mc_loss: one von Mises density per
        # angle, mixed with a uniform component via the per-angle gamma

        az_mean, az_kappa, el_mean, el_kappa, ti_mean, ti_kappa = self.unpack_preds(
            y_pred)
        az_target, el_target, ti_target = self.unpack_target(y_target)

        az_loss = -K.log(
            P_UNIFORM * self.az_gamma + (1 - self.az_gamma) *
            K.exp(von_mises_log_likelihood_tf(az_target, az_mean, az_kappa)))
        el_loss = -K.log(
            P_UNIFORM * self.el_gamma + (1 - self.el_gamma) *
            K.exp(von_mises_log_likelihood_tf(el_target, el_mean, el_kappa)))
        ti_loss = -K.log(
            P_UNIFORM * self.ti_gamma + (1 - self.ti_gamma) *
            K.exp(von_mises_log_likelihood_tf(ti_target, ti_mean, ti_kappa)))

        return az_loss + el_loss + ti_loss
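
Both _mc_loss above and likelihood_loss here implement the same per-angle term: the negative log of a two-component mixture of a uniform circular density and a von Mises density. Assuming P_UNIFORM is the uniform density on the circle, 1/(2*pi), the azimuth term is

    \mathcal{L}_{az} = -\log\left( \gamma_{az} \cdot \tfrac{1}{2\pi} + (1 - \gamma_{az}) \cdot \mathrm{VM}(y_{az} \mid \mu_{az}, \kappa_{az}) \right)

where gamma_az acts as the mixture weight of the uniform (outlier) component.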
Example #4
    def importance_log_likelihood_tf(self, y_true, y_preds):
        """ Compute importance log-likelihood for CVAE-based Von-Mises mixture model

        Parameters
        ----------

        y_true: Tensor of size [n_points, 2]
            true angle phi, encoded as (cos, sin), used to compute the
            von Mises log-likelihood
        y_preds: Tensor of size [n_points, n_outputs]
            full output of the CVAE model (prior, encoder, decoder)

        Returns
        -------

        loss: scalar Tensor
            mean over points of the negative importance-sampled
            log-likelihood plus the encoder/prior KL divergence
        """

        out_parsed = self.parse_output_tf(y_preds)

        u_samples = out_parsed['u_samples']
        mu_encoder = out_parsed['mu_encoder']
        # exp(log_sigma / 2): log_sigma evidently stores the log-variance
        std_encoder = tf.exp(out_parsed['log_sigma_encoder'] / 2)
        mu_prior = out_parsed['mu_prior']
        std_prior = tf.exp(out_parsed['log_sigma_prior'] / 2)
        mu_decoder = out_parsed['mu_preds']
        kappa_decoder = out_parsed['kappa_preds']

        n_points, n_samples, _ = u_samples.shape

        vm_likelihoods = []

        for sid in range(n_samples):
            vm_likelihood = tf.exp(
                von_mises_log_likelihood_tf(y_true, mu_decoder[:, sid, :],
                                            kappa_decoder[:, sid, :]))
            vm_likelihoods.append(vm_likelihood)

        vm_likelihoods = tf.squeeze(tf.stack(vm_likelihoods, axis=1), axis=2)

        prior_log_likelihood = gaussian_log_likelihood_tf(
            mu_prior, std_prior, u_samples)
        encoder_log_likelihood = gaussian_log_likelihood_tf(
            mu_encoder, std_encoder, u_samples)

        # importance weights w_s = p(u_s | x) / q(u_s | x, y), in log space
        sample_weight = tf.exp(prior_log_likelihood - encoder_log_likelihood)

        importance_log_likelihood = tf.log(
            tf.reduce_mean(vm_likelihoods * sample_weight, axis=1))

        # ELBO-style regularizer: KL between the encoder and the prior
        kl = gaussian_kl_divergence_tf(mu_encoder,
                                       out_parsed['log_sigma_encoder'],
                                       mu_prior, out_parsed['log_sigma_prior'])

        return K.mean(-importance_log_likelihood + kl)
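
The quantity accumulated in the loop is the standard importance-sampling estimate of the decoder's marginal likelihood, with the encoder as the proposal distribution (notation mine, not the source's):

    \log \hat{p}(y \mid x) = \log \frac{1}{S} \sum_{s=1}^{S} p(y \mid u_s, x) \, \frac{p(u_s \mid x)}{q(u_s \mid x, y)}, \qquad u_s \sim q(u \mid x, y)

The returned loss then adds the encoder/prior KL term before averaging over points.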
Example #5
    def _cvae_elbo_loss_tf(self, y_true, model_output):
        # model_output layout: prior (mu, log_sigma), encoder (mu, log_sigma),
        # presumably u samples in columns [n_u*4, n_u*5), then the decoder's
        # 2-d (cos, sin) mean and kappa
        mu_prior = model_output[:, 0:self.n_u]
        log_sigma_prior = model_output[:, self.n_u:self.n_u * 2]
        mu_encoder = model_output[:, self.n_u * 2:self.n_u * 3]
        log_sigma_encoder = model_output[:, self.n_u * 3:self.n_u * 4]
        mu_pred = model_output[:, self.n_u * 5:self.n_u * 5 + 2]
        kappa_pred = model_output[:, self.n_u * 5 + 2:]
        log_likelihood = von_mises_log_likelihood_tf(y_true, mu_pred,
                                                     kappa_pred)
        kl = gaussian_kl_divergence_tf(mu_encoder, log_sigma_encoder, mu_prior,
                                       log_sigma_prior)
        elbo = log_likelihood - self.kl_weight * kl
        return K.mean(-elbo)
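
This is the conditional ELBO with a weighted KL term, kl_weight playing the role of beta:

    \mathrm{ELBO}(x, y) = \mathbb{E}_{q(u \mid x, y)}\left[ \log p(y \mid u, x) \right] - \beta \, \mathrm{KL}\left( q(u \mid x, y) \,\|\, p(u \mid x) \right)

and the function returns the batch mean of -ELBO as the training loss.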
Example #6
    def _loss(y_target, y_pred):
        # loss_type is read from the enclosing scope: either a cosine
        # distance on the predicted mean directions or a von Mises
        # negative log-likelihood per angle
        theta_mean, theta_kappa, phi_mean, phi_kappa, psi_mean, psi_kappa = _unpack_preds(
            y_pred)
        theta_target, phi_target, psi_target = _unpack_target(y_target)

        if loss_type == 'cosine':
            theta_loss = cosine_loss_tf(theta_target, theta_mean)
            phi_loss = cosine_loss_tf(phi_target, phi_mean)
            psi_loss = cosine_loss_tf(psi_target, psi_mean)
            loss = theta_loss + phi_loss + psi_loss

        elif loss_type == 'vm_likelihood':
            theta_loss = von_mises_log_likelihood_tf(theta_target, theta_mean,
                                                     theta_kappa)
            phi_loss = von_mises_log_likelihood_tf(phi_target, phi_mean,
                                                   phi_kappa)
            psi_loss = von_mises_log_likelihood_tf(psi_target, psi_mean,
                                                   psi_kappa)
            loss = -theta_loss - phi_loss - psi_loss

        else:
            raise ValueError('unknown loss_type: %s' % loss_type)

        return loss
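
Since _loss reads loss_type from an enclosing scope, it is presumably built by a factory and handed to Keras. A hypothetical sketch of that pattern (make_angle_loss and the compile call are illustrative, not from the source):

# hypothetical factory: loss_type is closed over by the returned function,
# which matches the (y_target, y_pred) signature Keras expects
def make_angle_loss(loss_type='vm_likelihood'):
    def _loss(y_target, y_pred):
        ...  # body as in the snippet above
    return _loss

# usage (illustrative):
# model.compile(optimizer='adam', loss=make_angle_loss('cosine'))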
Example #7
    def _von_mises_mixture_log_likelihood_tf(self, y_true, y_pred):

        component_log_likelihoods = []

        mu, kappa, comp_probs = self.parse_output_tf(y_pred)

        # per-component von Mises log-likelihoods, concatenated to
        # shape [n_points, n_components]
        for cid in range(self.n_components):
            component_log_likelihoods.append(
                von_mises_log_likelihood_tf(y_true, mu[:, cid], kappa[:, cid]))

        component_log_likelihoods = tf.concat(component_log_likelihoods, axis=1,
                                              name='component_log_likelihoods')

        # mixture likelihood: component likelihoods weighted by the predicted
        # mixture probabilities, summed over components
        log_likelihoods = tf.log(
            tf.reduce_sum(comp_probs * tf.exp(component_log_likelihoods), axis=1))

        return log_likelihoods
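
The exp-then-log round trip in the last line can underflow for large kappa. An equivalent but numerically stabler formulation (my suggestion, not the source's) keeps everything in log space with logsumexp:

        # log sum_k pi_k * exp(ll_k), computed stably in log space
        log_likelihoods = tf.reduce_logsumexp(
            tf.log(comp_probs) + component_log_likelihoods, axis=1)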