Example #1
import numpy as np
import tensorflow as tf

# NOTE: `ops` is the library's backend ops module and `is_close` is a
# small test helper; their exact import paths depend on the package
# under test.

def test_sum():
    """Tests ops.sum"""

    # Should sum along the last dimension by default
    ones = tf.ones([5, 4, 3])
    val = ops.sum(ones)
    assert isinstance(val, tf.Tensor)
    assert val.ndim == 2
    assert val.shape[0] == 5
    assert val.shape[1] == 4
    assert np.all(val.numpy() == 3.0)

    # But can change that w/ the axis kwarg
    ones = tf.ones([5, 4, 3])
    val = ops.sum(ones, axis=1)
    assert isinstance(val, tf.Tensor)
    assert val.ndim == 2
    assert val.shape[0] == 5
    assert val.shape[1] == 3
    assert np.all(val.numpy() == 4.0)

    # Should sum along all dimensions w/ axis=None
    ones = tf.ones([5, 4, 3])
    val = ops.sum(ones, axis=None)
    assert isinstance(val, tf.Tensor)
    assert val.ndim == 0
    assert val.numpy() == 60

    # Actually test values
    val = ops.sum(tf.constant([1.1, 2.0, 3.3]))
    assert is_close(val.numpy(), 6.4)
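
Note that reducing over only the last axis by default differs from NumPy, where np.sum collapses every axis unless told otherwise. For reference, a minimal sketch of the same expectations written against plain TensorFlow's tf.reduce_sum (assuming, without confirmation from the test, that the backend wraps it):

import numpy as np
import tensorflow as tf

ones = tf.ones([5, 4, 3])

# Reducing over the last axis leaves shape (5, 4), every entry 3.0
last = tf.reduce_sum(ones, axis=-1)
assert last.shape == (5, 4)
assert np.all(last.numpy() == 3.0)

# axis=None reduces over every axis: 5 * 4 * 3 ones sum to 60
total = tf.reduce_sum(ones, axis=None)
assert total.numpy() == 60.0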
Example #2
def log_likelihood(self, x_data, y_data):
    """Compute the summed log likelihood of the model given a batch
    of data."""
    if x_data is None:
        log_likelihoods = self().log_prob(y_data)
    else:
        log_likelihoods = self(x_data).log_prob(y_data)
    return O.sum(log_likelihoods, axis=None)
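
Summing the per-observation log probabilities gives the joint log likelihood of the batch under an independence assumption. A minimal sketch of that reduction using TensorFlow Probability (the Normal distribution stands in for whatever self(x_data) returns; it is illustrative only):

import tensorflow as tf
import tensorflow_probability as tfp

# Stand-in for the model's predictive distribution
dist = tfp.distributions.Normal(loc=0.0, scale=1.0)
y_data = tf.constant([0.5, -1.2, 0.3])

# One log likelihood per observation, shape (3,)
log_likelihoods = dist.log_prob(y_data)

# Collapse to a scalar joint log likelihood, as O.sum(..., axis=None) does
joint = tf.reduce_sum(log_likelihoods)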
Example #3
def kl_loss(self):
    """Compute the sum of the Kullback–Leibler divergences between
    this parameter's variational posterior and its prior."""
    if self.prior is None:
        return O.zeros([])
    else:
        return O.sum(O.kl_divergence(self.posterior, self.prior),
                     axis=None)
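
The divergence is computed elementwise between the posterior and prior, then collapsed to a scalar so it can enter the loss as a single penalty term. A hedged sketch with TensorFlow Probability (the two Normal distributions are placeholders for a parameter's actual variational posterior and prior):

import tensorflow as tf
import tensorflow_probability as tfp

tfd = tfp.distributions
posterior = tfd.Normal(loc=tf.zeros(3), scale=tf.ones(3))
prior = tfd.Normal(loc=0.0, scale=1.0)

# One KL value per element of the parameter, shape (3,)
kls = tfd.kl_divergence(posterior, prior)

# Collapse to a scalar penalty, mirroring O.sum(..., axis=None)
kl_penalty = tf.reduce_sum(kls)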
Example #4
def __call__(self, x):
    # Collapse p2() to a scalar and add a term linear in x
    return O.sum(self.p2(), axis=None) + x * self.p1()
Example #5
def __call__(self, x):
    # Apply the submodule, then add p3() collapsed to a scalar
    return self.mod(x) + O.sum(self.p3(), axis=None)
Example #6
def add_kl_loss(self, loss, d2=None):
    """Add additional loss due to KL divergences.

    If d2 is None, `loss` is treated as a precomputed divergence
    tensor; otherwise the KL divergence between `loss` and `d2`
    is computed.
    """
    if d2 is None:
        self._kl_losses += [O.sum(loss, axis=None)]
    else:
        self._kl_losses += [O.sum(O.kl_divergence(loss, d2), axis=None)]
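
The d2 argument therefore lets callers pass either a precomputed KL tensor or a pair of distributions. A hypothetical call site (module, posterior, and prior are illustrative names, not part of the original snippet):

import tensorflow as tf
import tensorflow_probability as tfp

tfd = tfp.distributions
posterior = tfd.Normal(loc=tf.zeros(2), scale=tf.ones(2))
prior = tfd.Normal(loc=0.0, scale=1.0)

# Option 1: pass a KL tensor computed elsewhere
module.add_kl_loss(tfd.kl_divergence(posterior, prior))

# Option 2: pass the two distributions and let the method
# compute the divergence itself before summing
module.add_kl_loss(posterior, prior)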