Example #1
    def _tf_sparsemax_loss(self, z, q, dtype):
        """Compute the sparsemax loss for logits `z` and one-hot labels `q`."""
        z = z.astype(dtype)
        q = q.astype(dtype)

        # sparsemax and sparsemax_loss come from tensorflow_addons
        # (tfa.activations.sparsemax and tfa.losses.sparsemax_loss).
        tf_sparsemax_op = sparsemax(z)
        tf_loss_op = sparsemax_loss(z, tf_sparsemax_op, q)
        # self.evaluate is provided by tf.test.TestCase; it returns the
        # loss tensor's value as a NumPy array.
        tf_loss_out = self.evaluate(tf_loss_op)

        return tf_loss_op, tf_loss_out
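
This helper is a method on a test class, so it cannot run on its own. A minimal scaffold for hosting it might look as follows; the class name and the value of test_obs are illustrative assumptions, not taken from the original source.

import tensorflow as tf
from tensorflow_addons.activations import sparsemax
from tensorflow_addons.losses import sparsemax_loss

test_obs = 17  # assumed module-level constant used by the tests below


class SparsemaxLossTest(tf.test.TestCase):  # assumed class name
    # _tf_sparsemax_loss (Example #1) and test_gradient_against_estimate
    # (Example #3) would be defined here.
    pass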
Example #2
from tensorflow_addons.activations import sparsemax
from tensorflow_addons.losses import sparsemax_loss


def _tf_sparsemax_loss(z, q, dtype):
    """Compute the sparsemax loss for logits `z` and one-hot labels `q`."""
    z = z.astype(dtype)
    q = q.astype(dtype)

    tf_sparsemax_op = sparsemax(z)
    tf_loss_op = sparsemax_loss(z, tf_sparsemax_op, q)
    # Under eager execution the loss tensor already holds its values;
    # .numpy() extracts them, mirroring self.evaluate in Example #1.
    tf_loss_out = tf_loss_op.numpy()

    return tf_loss_op, tf_loss_out
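
A minimal way to call this standalone helper is sketched below; the shapes (5 observations, 10 classes) and dtype are illustrative assumptions.

import numpy as np

z = np.random.uniform(low=-3, high=3, size=(5, 10))
q = np.zeros((5, 10))
q[np.arange(5), np.random.randint(0, 10, size=5)] = 1  # one-hot labels

loss_op, loss_out = _tf_sparsemax_loss(z, q, "float32")
print(loss_out)  # one sparsemax-loss value per observation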
Example #3
    def test_gradient_against_estimate(self, dtype=None):
        """Check the symbolic sparsemax-loss Jacobian against a numerical estimate."""
        random = np.random.RandomState(7)

        # sparsemax is not a smooth function, so numerical gradient
        # estimation is only reliable for float64.
        if dtype != "float64":
            return

        z = random.uniform(low=-3, high=3, size=(test_obs, 10)).astype(dtype)
        q = np.zeros((test_obs, 10)).astype(dtype)
        # Use the seeded RandomState (not the global np.random) so the
        # one-hot labels are deterministic across runs.
        q[np.arange(0, test_obs), random.randint(0, 10, size=test_obs)] = 1

        # tf.test.compute_gradient returns the theoretical and the
        # numerically estimated Jacobians, one entry per input.
        (jacob_sym, ), (jacob_num, ) = tf.test.compute_gradient(
            lambda logits: sparsemax_loss(logits, sparsemax(logits), q), [z])
        self.assertAllCloseAccordingToType(jacob_sym, jacob_num)
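
To run the same gradient check outside the test harness, a self-contained sketch follows; the observation count is an assumption standing in for test_obs, and tensorflow_addons must be installed.

import numpy as np
import tensorflow as tf
from tensorflow_addons.activations import sparsemax
from tensorflow_addons.losses import sparsemax_loss

n_obs = 17  # assumed; plays the role of test_obs above
random = np.random.RandomState(7)

z = random.uniform(low=-3, high=3, size=(n_obs, 10)).astype("float64")
q = np.zeros((n_obs, 10), dtype="float64")
q[np.arange(n_obs), random.randint(0, 10, size=n_obs)] = 1

(jacob_sym,), (jacob_num,) = tf.test.compute_gradient(
    lambda logits: sparsemax_loss(logits, sparsemax(logits), q), [z])
print(np.max(np.abs(jacob_sym - jacob_num)))  # should be close to zero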