Example #1
import numpy as np
import tensorflow as tf

import probflow.utils.ops as ops  # module under test (assumed import path)


def is_close(a, b, tol=1e-5):
    """Approximate-equality test helper (assumed; not shown in the listing)."""
    return np.abs(a - b) < tol


def test_sum():
    """Tests ops.sum"""

    # Should sum along the last dimension by default
    ones = tf.ones([5, 4, 3])
    val = ops.sum(ones)
    assert isinstance(val, tf.Tensor)
    assert val.ndim == 2
    assert val.shape[0] == 5
    assert val.shape[1] == 4
    assert np.all(val.numpy() == 3.0)

    # But can change that w/ the axis kwarg
    ones = tf.ones([5, 4, 3])
    val = ops.sum(ones, axis=1)
    assert isinstance(val, tf.Tensor)
    assert val.ndim == 2
    assert val.shape[0] == 5
    assert val.shape[1] == 3
    assert np.all(val.numpy() == 4.0)

    # Should sum along all dimensions w/ axis=None
    ones = tf.ones([5, 4, 3])
    val = ops.sum(ones, axis=None)
    assert isinstance(val, tf.Tensor)
    assert val.ndim == 0
    assert val.numpy() == 60

    # Actually test values
    val = ops.sum(tf.constant([1.1, 2.0, 3.3]))
    assert is_close(val.numpy(), 6.4)
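
For reference, a minimal NumPy analogue of the reductions asserted above (an assumed equivalence: per the test, ops.sum reduces over the last axis by default and over everything with axis=None):

import numpy as np

x = np.ones([5, 4, 3])
print(np.sum(x, axis=-1).shape)  # (5, 4), every entry 3.0
print(np.sum(x, axis=1).shape)   # (5, 3), every entry 4.0
print(np.sum(x))                 # 60.0, full reduction like axis=None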
Example #2
def kl_loss(self):
    """Compute the sum of the Kullback-Leibler divergences between this
    parameter's prior and its variational posterior."""
    if self.prior is None:
        return O.sum([], axis=None)  # no prior: zero KL contribution
    else:
        return O.sum(O.kl_divergence(self.posterior(), self.prior()),
                     axis=None)
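
To make the KL computation above concrete, here is a rough sketch (not from the source; the distributions and values are invented) of what O.kl_divergence followed by O.sum(..., axis=None) boils down to, using TensorFlow Probability:

import tensorflow as tf
import tensorflow_probability as tfp

tfd = tfp.distributions
posterior = tfd.Normal(loc=[0.1, -0.2], scale=[1.0, 0.9])  # invented values
prior = tfd.Normal(loc=0.0, scale=1.0)

kl = tfd.kl_divergence(posterior, prior)  # elementwise KL, shape (2,)
total = tf.reduce_sum(kl)                 # scalar, like O.sum(..., axis=None)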
Example #3
def __call__(self, x):
    # Forward pass: the wrapped module's output plus the full sum of p3
    return self.mod(x) + O.sum(self.p3(), axis=None)
Example #4
def __call__(self, x):
    # Forward pass: full sum of p2 as an offset, plus x scaled by p1
    return O.sum(self.p2(), axis=None) + x * self.p1()
Example #5
def add_kl_loss(self, loss, d2=None):
    """Add an additional KL-divergence contribution to the loss.  If d2 is
    given, loss and d2 are treated as distributions, and the KL divergence
    between them is added instead."""
    if d2 is None:
        self._kl_losses += [O.sum(loss, axis=None)]
    else:
        self._kl_losses += [O.sum(O.kl_divergence(loss, d2), axis=None)]
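
A hypothetical usage of the two call forms above (the names model, kl_tensor, posterior, and prior_dist are invented for illustration):

model.add_kl_loss(kl_tensor)              # pre-computed KL values, summed in
model.add_kl_loss(posterior, prior_dist)  # KL(posterior || prior_dist) is computed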
Example #6
def kl_loss(self):
    """Compute the sum of the Kullback-Leibler divergences between
    priors and their variational posteriors for all |Parameters| in this
    |Module| and its sub-Modules."""
    return O.sum([p.kl_loss() for p in self.parameters])