Example #1
import tensorflow as tf
import tensorflow_probability as tfp

tfpd = tfp.distributions


def calculate_kumar_entropy(log_a, log_b):
    # Map the unconstrained log-parameters to positive concentrations.
    a, b = tf.math.exp(log_a), tf.math.exp(log_b)
    # a, b = tf.math.softplus(log_a), tf.math.softplus(log_b)
    n_required = log_a.shape[1]
    ans = 0.
    # Accumulate the entropy of each of the n_required Kumaraswamy components.
    for k in range(n_required):
        dist = tfpd.Kumaraswamy(concentration0=a[:, k, 0, 0],
                                concentration1=b[:, k, 0, 0])
        ans += dist.entropy()
    # Average the summed entropies over the batch.
    ans = tf.reduce_mean(ans)
    return ans
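
A minimal usage sketch (hypothetical values; the (batch, n_required, 1, 1) parameter shape is inferred from the indexing above):

log_a = tf.random.normal(shape=(2, 4, 1, 1))
log_b = tf.random.normal(shape=(2, 4, 1, 1))
entropy = calculate_kumar_entropy(log_a, log_b)  # scalar: batch mean of the summed component entropies
print(float(entropy))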
Example #2
    def test_kumaraswamy_reparam(self):
        log_a = tf.random.normal(shape=(2, 4, 1, 1))
        log_b = tf.random.normal(shape=(2, 4, 1, 1))
        with tf.GradientTape() as tape:
            # log_a and log_b are plain tensors, so the tape must watch them explicitly.
            tape.watch([log_a, log_b])
            kumar = tfpd.Kumaraswamy(concentration0=tf.math.exp(log_a),
                                     concentration1=tf.math.exp(log_b))
            z_kumar = kumar.sample()
        grad = tape.gradient(target=z_kumar, sources=[log_a, log_b])
        print('\nTEST: Kumaraswamy Reparameterization Gradient')
        # The gradient of the reparameterized sample should exist for both parameters.
        self.assertTrue(expr=all(g is not None for g in grad))
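
The gradient check above relies on the sampler being reparameterized. A quick way to confirm this property in TensorFlow Probability (a sketch, not part of the original test):

import tensorflow_probability as tfp

tfpd = tfp.distributions
dist = tfpd.Kumaraswamy(concentration0=2.0, concentration1=3.0)
# Kumaraswamy samples come from an explicit inverse-CDF transform of a uniform
# variable, so the distribution reports full reparameterization.
print(dist.reparameterization_type == tfp.distributions.FULLY_REPARAMETERIZED)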
Example #3
    def test_loss(self):
        # Compare OptDLGMM.compute_loss against a reference loss assembled from
        # its individual terms.
        test_tolerance = 1.e-2
        tf.random.set_seed(seed=21)
        batch_n, n_required, sample_size, dim = 2, 4, 10, 3
        shape = (batch_n, n_required, sample_size, dim)
        log_a = tf.random.normal(shape=(batch_n, n_required - 1, 1, 1))
        log_b = tf.random.normal(shape=log_a.shape)
        kumar = tfpd.Kumaraswamy(concentration0=tf.math.exp(log_a),
                                 concentration1=tf.math.exp(log_b))
        z_kumar = kumar.sample()
        z_norm = tf.random.normal(shape=shape)
        mean = tf.random.normal(shape=shape)
        log_var = tf.zeros_like(z_norm)
        # Stick-breaking construction of the mixture weights from the Kumaraswamy samples.
        pi = iterative_sb(z_kumar)
        x = tf.random.uniform(shape=(batch_n, 4, 4, 1))
        x_logit = tf.random.normal(shape=(batch_n, 4, 4, 1, sample_size, n_required))
        self.hyper['n_required'] = n_required
        self.hyper['sample_size'] = sample_size
        optvae = OptDLGMM(nets=[], optimizer=[], hyper=self.hyper)
        z = [z_kumar, z_norm]
        params_broad = [log_a, log_b, mean, log_var]
        optvae.batch_size, optvae.n_required = batch_n, n_required
        optvae.sample_size, optvae.num_of_vars = sample_size, dim
        optvae.mu_prior = optvae.create_separated_prior_means()
        optvae.log_var_prior = tf.zeros_like(z_norm)

        approx = optvae.compute_loss(x, x_logit, z, params_broad,
                                     True, True, True, True)
        # Reference value: Kumaraswamy KL term plus the Gaussian posterior,
        # likelihood, and prior terms, combined with the signs the loss uses.
        # ans = -calculate_kumar_entropy(log_a, log_b)
        ans = compute_kld(log_a, log_b)
        ans += calculate_log_qz_x(z_norm, pi, mean, log_var)
        ans -= calculate_log_px_z(x, x_logit, pi)
        ans -= calculate_log_pz(z_norm, pi, optvae.mu_prior, optvae.log_var_prior)
        diff = tf.linalg.norm(approx - ans) / tf.linalg.norm(ans)
        print('\nTEST: Loss')
        print(f'Diff {diff:1.3e}')
        self.assertTrue(expr=diff < test_tolerance)
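
The test relies on iterative_sb to turn the K - 1 Kumaraswamy draws into K mixture weights. Below is a minimal sketch of the standard stick-breaking construction such a helper typically implements; the project's own iterative_sb may differ in details, and the function name here is only illustrative.

import tensorflow as tf

def stick_breaking(v):
    # v: stick fractions in (0, 1), shape (batch, K - 1, ...), broken along axis 1.
    remaining = tf.math.cumprod(1.0 - v, axis=1)       # prod_{j<=k} (1 - v_j)
    first = v[:, :1, ...]                              # pi_1 = v_1
    middle = v[:, 1:, ...] * remaining[:, :-1, ...]    # pi_k = v_k * prod_{j<k} (1 - v_j)
    last = remaining[:, -1:, ...]                      # pi_K = prod_{j<K} (1 - v_j)
    return tf.concat([first, middle, last], axis=1)    # K weights summing to 1 along axis 1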
Example #4
    def _init_distribution(conditions, **kwargs):
        concentration0 = conditions["concentration0"]
        concentration1 = conditions["concentration1"]
        return tfd.Kumaraswamy(concentration0=concentration0,
                               concentration1=concentration1,
                               **kwargs)
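
A minimal usage sketch with hypothetical parameter values (assumes the usual alias, e.g. import tensorflow_probability as tfp; tfd = tfp.distributions):

dist = _init_distribution({"concentration0": 2.0, "concentration1": 3.0})
samples = dist.sample(5)  # five draws in (0, 1)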
Example #5
    def _base_dist(self, a: TensorLike, b: TensorLike, *args, **kwargs):
        return tfd.Kumaraswamy(concentration0=a,
                               concentration1=b,
                               *args,
                               **kwargs)