def testSecondGradient(self):
    images_placeholder = array_ops.placeholder(dtypes.float32, shape=(3, 2))
    labels_placeholder = array_ops.placeholder(dtypes.int32, shape=(3,))
    weights = variables.Variable(random_ops.truncated_normal([2], stddev=1.0))
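    # Column 0 of the weight matrix is all zeros, so class 0 always gets
    # logit 0; class 1's logit depends on the trainable weights.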
    weights_with_zeros = array_ops.stack([array_ops.zeros([2]), weights],
                                         axis=1)
    logits = math_ops.matmul(images_placeholder, weights_with_zeros)
    cross_entropy = nn_ops.sparse_softmax_cross_entropy_with_logits(
        labels=labels_placeholder, logits=logits)
    loss = math_ops.reduce_mean(cross_entropy)

    # Taking the second gradient should fail, since it is not
    # yet supported.
    with self.assertRaisesRegexp(LookupError,
                                 "explicitly disabled"):
      _ = gradients_impl.hessians(loss, [weights])
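
For contrast, first-order gradients of the same loss are fully supported; only the second-derivative path is blocked. The sketch below is not part of the test suite: it assumes the same TF1 graph-mode module layout these tests import, rebuilds the loss, and takes a plain gradient, which succeeds because only the gradient of the fused op's backward pass is guarded by a PreventGradient op.

# A minimal sketch, not from the test file: gradients() works on the same
# graph, while hessians() (gradients of gradients) raises LookupError.
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables

images = array_ops.placeholder(dtypes.float32, shape=(3, 2))
labels = array_ops.placeholder(dtypes.int32, shape=(3,))
weights = variables.Variable(random_ops.truncated_normal([2], stddev=1.0))
weights_with_zeros = array_ops.stack([array_ops.zeros([2]), weights], axis=1)
logits = math_ops.matmul(images, weights_with_zeros)
loss = math_ops.reduce_mean(
    nn_ops.sparse_softmax_cross_entropy_with_logits(labels=labels,
                                                    logits=logits))

# First derivative: supported, returns one gradient tensor per variable.
(first_grad,) = gradients_impl.gradients(loss, [weights])

# Second derivative: the fused op's backward pass emits a PreventGradient op,
# whose gradient function raises LookupError ("Gradient explicitly disabled").
# gradients_impl.hessians(loss, [weights])  # raises LookupError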
Example 2
  def testSecondGradient(self):
    with self.test_session():
      l = constant_op.constant([0.0, 0.0, 1.0, 0.0,
                                1.0, 0.0, 0.0, 0.0,
                                0.0, 0.5, 0.0, 0.5], shape=[12],
                               dtype=dtypes.float64, name="l")
      f = constant_op.constant([0.1, 0.2, 0.3, 0.4,
                                0.1, 0.4, 0.9, 1.6,
                                0.1, 0.8, 2.7, 6.4], shape=[12],
                               dtype=dtypes.float64, name="f")
      x = nn_ops.softmax_cross_entropy_with_logits(labels=l, logits=f,
                                                   name="xent")
      loss = math_ops.reduce_mean(x)

    # Taking the second gradient should fail, since it is not
    # yet supported.
    with self.assertRaisesRegexp(LookupError,
                                 "explicitly disabled"):
      _ = gradients_impl.hessians(loss, [f])
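
When second derivatives are genuinely needed, the usual workaround is to skip the fused op and compose the cross-entropy from primitives that all have registered gradients. Below is a minimal sketch under that assumption, not part of the test suite; log_softmax keeps the unfused form numerically stable.

# A minimal sketch, not from the test file: an unfused cross-entropy built
# from twice-differentiable primitives, so hessians() succeeds.
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops

l = constant_op.constant([0.0, 0.0, 1.0, 0.0], dtype=dtypes.float64, name="l")
f = constant_op.constant([0.1, 0.2, 0.3, 0.4], dtype=dtypes.float64, name="f")

# Unfused cross-entropy: -sum(labels * log_softmax(logits)).
xent = -math_ops.reduce_sum(l * nn_ops.log_softmax(f))

# Every op in this graph has a registered gradient, so the second
# derivative is defined and hessians() returns a [4, 4] tensor.
hessian = gradients_impl.hessians(xent, [f])[0]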