Example #1
0
 def test_logit_0d(self):
     """Check logit on scalar probabilities against precomputed values."""
     cases = [
         (0.01, -4.5951198501345898),
         (0.25, -1.0986122886681096),
         (0.5, 0.0),
         (0.75, 1.0986122886681096),
         (0.99, 4.5951198501345898),
     ]
     with self.test_session():
         for prob, expected in cases:
             value = logit(tf.constant(prob)).eval()
             if expected == 0.0:
                 # The midpoint maps to exactly zero.
                 self.assertAllEqual(value, expected)
             else:
                 self.assertAllClose(value, expected)
Example #2
0
 def test_logit_1d(self):
     """Check logit on length-2 vectors against precomputed values."""
     cases = [
         (0.01, -4.5951198501345898),
         (0.25, -1.0986122886681096),
         (0.5, 0.0),
         (0.75, 1.0986122886681096),
         (0.99, 4.5951198501345898),
     ]
     with self.test_session():
         for prob, expected in cases:
             value = logit(tf.constant([prob, prob])).eval()
             if expected == 0.0:
                 # The midpoint maps to exactly zero elementwise.
                 self.assertAllEqual(value, [expected, expected])
             else:
                 self.assertAllClose(value, [expected, expected])
Example #3
0
 def test_logit_0d(self):
     """Verify logit of scalar probabilities matches known reference values."""
     with self.test_session():
         self.assertAllClose(logit(tf.constant(0.01)).eval(),
                             -4.5951198501345898)
         self.assertAllClose(logit(tf.constant(0.25)).eval(),
                             -1.0986122886681096)
         # logit(0.5) is exactly zero.
         self.assertAllEqual(logit(tf.constant(0.5)).eval(), 0.0)
         self.assertAllClose(logit(tf.constant(0.75)).eval(),
                             1.0986122886681096)
         self.assertAllClose(logit(tf.constant(0.99)).eval(),
                             4.5951198501345898)
Example #4
0
 def test_logit_1d(self):
     """Verify logit on 2-vectors matches known reference values elementwise."""
     with self.test_session():
         self.assertAllClose(logit(tf.constant([0.01, 0.01])).eval(),
                             [-4.5951198501345898, -4.5951198501345898])
         self.assertAllClose(logit(tf.constant([0.25, 0.25])).eval(),
                             [-1.0986122886681096, -1.0986122886681096])
         # logit(0.5) is exactly zero.
         self.assertAllEqual(logit(tf.constant([0.5, 0.5])).eval(),
                             [0.0, 0.0])
         self.assertAllClose(logit(tf.constant([0.75, 0.75])).eval(),
                             [1.0986122886681096, 1.0986122886681096])
         self.assertAllClose(logit(tf.constant([0.99, 0.99])).eval(),
                             [4.5951198501345898, 4.5951198501345898])
Example #5
0
 def sample_n(self, n, seed=None):
   """Draw ``n`` samples uniformly from the stored parameter samples.

   Parameters
   ----------
   n : tf.Tensor
     Scalar integer tensor, number of samples to draw.
   seed : int, optional
     Seed forwarded to the categorical sampler.

   Returns
   -------
   tf.Tensor
     ``n`` rows gathered (or, when only one sample is stored, tiled)
     from ``self._params``.
   """
   if self.n != 1:
     # Uniform categorical over the self.n stored parameter samples.
     logits = logit(tf.ones(self.n, dtype=tf.float32) /
                    tf.cast(self.n, dtype=tf.float32))
     cat = tf.contrib.distributions.Categorical(logits=logits)
     indices = cat.sample_n(n, seed)
     return tf.gather(self._params, indices)
   else:
     # Single stored sample: replicate it n times along axis 0.
     # NOTE: tf.concat takes (values, axis) since TF 1.0; the old
     # (axis, values) order used here previously raises on current TF,
     # and the sibling _sample_n in this file already uses the new order.
     multiples = tf.concat([tf.expand_dims(n, 0),
                            [1] * len(self.get_event_shape())], 0)
     return tile(self._params, multiples)
Example #6
0
 def _sample_n(self, n, seed=None):
     """Draw ``n`` samples uniformly from the stored parameter samples."""
     if self.n == 1:
         # Only one stored sample: repeat it n times along the batch axis.
         reps = tf.concat(
             [tf.expand_dims(n, 0), [1] * len(self.get_event_shape())], 0)
         return tile(self._params, reps)
     # Uniform categorical over the self.n stored parameter samples.
     uniform_probs = (tf.ones(self.n, dtype=tf.float32) /
                      tf.cast(self.n, dtype=tf.float32))
     cat = tf.contrib.distributions.Categorical(logits=logit(uniform_probs))
     picks = cat._sample_n(n, seed)
     return tf.gather(self._params, picks)
Example #7
0
def binary_crossentropy(y_true, y_pred):
    """Mean sigmoid cross-entropy between labels and predicted probabilities.

    Parameters
    ----------
    y_true : tf.Tensor
        Tensor of 0s and 1s.
    y_pred : tf.Tensor
        Tensor of probabilities.

    Returns
    -------
    tf.Tensor
        Scalar mean binary cross-entropy.
    """
    y_true = tf.cast(y_true, tf.float32)
    # Map probabilities back to logits, as the TF op expects logits.
    y_pred = logit(tf.cast(y_pred, tf.float32))
    # Keyword arguments are required by TF >= 1.0; the positional
    # (y_pred, y_true) call fails there. Matches the keyword style used
    # by sparse_categorical_crossentropy elsewhere in this codebase.
    return tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
        labels=y_true, logits=y_pred))
Example #8
0
def binary_crossentropy(y_true, y_pred):
    """Mean sigmoid cross-entropy between labels and predicted probabilities.

    Parameters
    ----------
    y_true : tf.Tensor
        Tensor of 0s and 1s.
    y_pred : tf.Tensor
        Tensor of probabilities.

    Returns
    -------
    tf.Tensor
        Scalar mean binary cross-entropy.
    """
    y_true = tf.cast(y_true, tf.float32)
    # Map probabilities back to logits, as the TF op expects logits.
    y_pred = logit(tf.cast(y_pred, tf.float32))
    # Keyword arguments are required by TF >= 1.0; the positional
    # (y_pred, y_true) call fails there.
    return tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
        labels=y_true, logits=y_pred))
Example #9
0
 def test_contraint_raises(self):
     """logit must reject inputs outside the open unit interval."""
     bad_values = [-20, 20, np.inf, np.nan]
     with self.test_session():
         for bad in bad_values:
             x = tf.constant([0.01, bad])
             with self.assertRaisesOpError("Condition"):
                 logit(x).eval()
Example #10
0
 def test_contraint_raises(self):
   """logit must reject inputs outside the open unit interval."""
   bad_values = [-20, 20, np.inf, np.nan]
   with self.test_session():
     for bad in bad_values:
       x = tf.constant([0.01, bad])
       with self.assertRaisesOpError('Condition'):
         logit(x).eval()
Example #11
0
def sparse_categorical_crossentropy(y_true, y_pred):
    """Multi-class cross entropy. Label {0, 1, .., K-1} representation
    for ``y_true.``

    Parameters
    ----------
    y_true : tf.Tensor
        Tensor of integers {0, 1, ..., K-1}.
    y_pred : tf.Tensor
        Tensor of probabilities, with shape ``(y_true.get_shape(), K)``.
        The outermost dimension are the categorical probabilities for
        that data point.

    Returns
    -------
    tf.Tensor
        Scalar mean sparse softmax cross-entropy.
    """
    y_true = tf.cast(y_true, tf.int64)
    # Map probabilities back to logits, as the TF op expects logits.
    y_pred = logit(tf.cast(y_pred, tf.float32))
    # Keyword arguments are required by TF >= 1.0; the positional
    # (y_pred, y_true) call fails there.
    return tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=y_pred, labels=y_true))
Example #12
0
def categorical_crossentropy(y_true, y_pred):
    """Multi-class cross entropy. One-hot representation for ``y_true``.

    Parameters
    ----------
    y_true : tf.Tensor
        Tensor of 0s and 1s, where the outermost dimension of size K
        has only one 1 per row.
    y_pred : tf.Tensor
        Tensor of probabilities, with same shape as y_true.
        The outermost dimension denote the categorical probabilities for
        that data point per row.

    Returns
    -------
    tf.Tensor
        Scalar mean softmax cross-entropy.
    """
    y_true = tf.cast(y_true, tf.float32)
    # Map probabilities back to logits, as the TF op expects logits.
    y_pred = logit(tf.cast(y_pred, tf.float32))
    # Keyword arguments are required by TF >= 1.0; the positional
    # (y_pred, y_true) call fails there.
    return tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
        logits=y_pred, labels=y_true))
Example #13
0
def categorical_crossentropy(y_true, y_pred):
  """Multi-class cross entropy. One-hot representation for ``y_true``.

  Parameters
  ----------
  y_true : tf.Tensor
    Tensor of 0s and 1s, where the outermost dimension of size K
    has only one 1 per row.
  y_pred : tf.Tensor
    Tensor of probabilities, with same shape as y_true.
    The outermost dimension denote the categorical probabilities for
    that data point per row.

  Returns
  -------
  tf.Tensor
    Scalar mean softmax cross-entropy.
  """
  y_true = tf.cast(y_true, tf.float32)
  # Map probabilities back to logits, as the TF op expects logits.
  y_pred = logit(tf.cast(y_pred, tf.float32))
  # Keyword arguments are required by TF >= 1.0 (the positional call fails
  # there), matching sparse_categorical_crossentropy in this file.
  return tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
      logits=y_pred, labels=y_true))
Example #14
0
def sparse_categorical_crossentropy(y_true, y_pred):
  """Multi-class cross entropy. Label {0, 1, .., K-1} representation
  for ``y_true.``

  Parameters
  ----------
  y_true : tf.Tensor
    Tensor of integers {0, 1, ..., K-1}.
  y_pred : tf.Tensor
    Tensor of probabilities, with shape ``(y_true.get_shape(), K)``.
    The outermost dimension are the categorical probabilities for
    that data point.
  """
  # Convert probabilities back to logits; the TF op expects logits.
  labels = tf.cast(y_true, tf.int64)
  logits = logit(tf.cast(y_pred, tf.float32))
  losses = tf.nn.sparse_softmax_cross_entropy_with_logits(
      logits=logits, labels=labels)
  return tf.reduce_mean(losses)