def _tf_sparsemax_loss(self, z, q, dtype, use_gpu):
    """Build sparsemax_loss for (z, q) cast to `dtype` and evaluate it.

    Returns a (loss_op, loss_value) pair.
    """
    z, q = z.astype(dtype), q.astype(dtype)

    with self.test_session(use_gpu=use_gpu):
      probs_op = sparsemax(z)
      loss_op = sparsemax_loss(z, probs_op, q)
      loss_value = loss_op.eval()

    return loss_op, loss_value
    def _tf_sparsemax_loss(self, z, q, dtype, use_gpu):
        """Evaluate sparsemax_loss on (z, q) cast to `dtype`.

        Returns a (loss_op, loss_value) pair.
        """
        z, q = z.astype(dtype), q.astype(dtype)

        with self.test_session(use_gpu=use_gpu):
            sm_op = sparsemax(z)
            loss = sparsemax_loss(z, sm_op, q)
            value = loss.eval()

        return loss, value
  def _test_gradient_against_estimate(self, dtype, random, use_gpu):
    """check sparsemax-loss Rop, against estimated-loss Rop"""
    z = random.uniform(low=-3, high=3, size=(test_obs, 10)).astype(dtype)
    # One-hot targets q. Draw the hot index from the injected `random`
    # state (not the global np.random) so the test is reproducible.
    q = np.zeros((test_obs, 10)).astype(dtype)
    q[np.arange(0, test_obs), random.randint(0, 10, size=test_obs)] = 1

    logits = array_ops.placeholder(dtype, name='z')
    sparsemax_op = sparsemax(logits)
    loss_op = sparsemax_loss(logits, sparsemax_op, q)

    with self.test_session(use_gpu=use_gpu):
      # Compare the symbolic gradient against a finite-difference estimate.
      err = gradient_checker.compute_gradient_error(
          logits, z.shape, loss_op, (test_obs,), x_init_value=z, delta=1e-9)

    self.assertLess(err, 1e-4)
  def _test_gradient_against_numpy(self, dtype, random, use_gpu):
    """check sparsemax-loss Rop, against numpy Rop"""
    z = random.uniform(low=-3, high=3, size=(test_obs, 10))
    # One-hot targets q. Draw the hot index from the injected `random`
    # state (not the global np.random) so the test is reproducible.
    q = np.zeros((test_obs, 10))
    q[np.arange(0, test_obs), random.randint(0, 10, size=test_obs)] = 1

    logits = constant_op.constant(z.astype(dtype), name='z')
    sparsemax_op = sparsemax(logits)
    loss_op = sparsemax_loss(logits, sparsemax_op, q.astype(dtype))
    loss_grad_op = gradients_impl.gradients(loss_op, [logits])[0]

    with self.test_session(use_gpu=use_gpu):
      tf_grad = loss_grad_op.eval()
      np_grad = self._np_sparsemax_loss_grad(z, q).astype(dtype)

      # Loose tolerances for half precision; defaults otherwise.
      self.assertAllCloseAccordingToType(
          np_grad, tf_grad, half_atol=1e-2, half_rtol=5e-3)
      self.assertShapeEqual(np_grad, loss_grad_op)
    def _test_gradient_against_estimate(self, dtype, random, use_gpu):
        """check sparsemax-loss Rop, against estimated-loss Rop"""
        z = random.uniform(low=-3, high=3, size=(test_obs, 10)).astype(dtype)
        # One-hot targets q. Draw the hot index from the injected `random`
        # state (not the global np.random) so the test is reproducible.
        q = np.zeros((test_obs, 10)).astype(dtype)
        q[np.arange(0, test_obs), random.randint(0, 10, size=test_obs)] = 1

        logits = array_ops.placeholder(dtype, name='z')
        sparsemax_op = sparsemax(logits)
        loss_op = sparsemax_loss(logits, sparsemax_op, q)

        with self.test_session(use_gpu=use_gpu):
            # Compare the symbolic gradient against a finite-difference
            # estimate.
            err = gradient_checker.compute_gradient_error(logits,
                                                          z.shape,
                                                          loss_op,
                                                          (test_obs, ),
                                                          x_init_value=z,
                                                          delta=1e-9)

        self.assertLess(err, 1e-4)
    def _test_gradient_against_numpy(self, dtype, random, use_gpu):
        """check sparsemax-loss Rop, against numpy Rop"""
        z = random.uniform(low=-3, high=3, size=(test_obs, 10))
        # One-hot targets q. Draw the hot index from the injected `random`
        # state (not the global np.random) so the test is reproducible.
        q = np.zeros((test_obs, 10))
        q[np.arange(0, test_obs), random.randint(0, 10, size=test_obs)] = 1

        logits = constant_op.constant(z.astype(dtype), name='z')
        sparsemax_op = sparsemax(logits)
        loss_op = sparsemax_loss(logits, sparsemax_op, q.astype(dtype))
        loss_grad_op = gradients_impl.gradients(loss_op, [logits])[0]

        with self.test_session(use_gpu=use_gpu):
            tf_grad = loss_grad_op.eval()
            np_grad = self._np_sparsemax_loss_grad(z, q).astype(dtype)

            # Loose tolerances for half precision; defaults otherwise.
            self.assertAllCloseAccordingToType(np_grad,
                                               tf_grad,
                                               half_atol=1e-2,
                                               half_rtol=5e-3)
            self.assertShapeEqual(np_grad, loss_grad_op)