def test_loss_gradient(self):
        # Get MNIST
        (_, _), (x_test, y_test), _, _ = load_mnist()
        x_test, y_test = x_test[:NB_TEST], np.argmax(y_test[:NB_TEST], axis=1)
        x_test = np.swapaxes(x_test, 1, 3)

        # Test gradient
        ptc = PyTorchClassifier(None, self._model, self._loss_fn,
                                self._optimizer, (1, 28, 28), (10, ))
        grads = ptc.loss_gradient(x_test, y_test)

        self.assertEqual(grads.shape, (NB_TEST, 1, 28, 28))
        self.assertNotEqual(np.sum(grads), 0)
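
This test, and the variant in Example #2 below, rely on fixtures the snippets do not show: a model, loss function and optimizer (via self._model and friends, or self._model_setup_module()). The following is a minimal sketch of what such a setup might provide, assuming a small CNN for 1x28x28 MNIST inputs with 10 output classes; the _model_setup name and the architecture are illustrative, not the project's actual fixture.

import torch.nn as nn
import torch.optim as optim


class _Flatten(nn.Module):
    # Flattens (N, C, H, W) feature maps to (N, C*H*W) for the linear head.
    def forward(self, x):
        return x.view(x.size(0), -1)


def _model_setup():
    # Illustrative stand-in for the self._model / self._loss_fn / self._optimizer
    # fixtures (and the _model_setup_module helper) used by the tests.
    model = nn.Sequential(
        nn.Conv2d(1, 16, kernel_size=5),  # (N, 1, 28, 28) -> (N, 16, 24, 24)
        nn.ReLU(),
        nn.MaxPool2d(2),                  # -> (N, 16, 12, 12)
        _Flatten(),
        nn.Linear(16 * 12 * 12, 10),      # 10 MNIST classes, raw logits
    )
    loss_fn = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=0.01)
    return model, loss_fn, optimizer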
Example #2
    def test_loss_gradient(self):
        # Get MNIST
        (_, _), (x_test, y_test), _, _ = load_mnist()
        x_test, y_test = x_test[:NB_TEST], y_test[:NB_TEST]
        x_test = np.swapaxes(x_test, 1, 3)

        # Create model
        model, loss_fn, optimizer = self._model_setup_module()

        # Test gradient
        ptc = PyTorchClassifier((0, 1), model, loss_fn, optimizer, (1, 28, 28),
                                10)
        grads = ptc.loss_gradient(x_test, y_test)

        self.assertEqual(grads.shape, (NB_TEST, 1, 28, 28))
        self.assertNotEqual(np.sum(grads), 0)
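
For completeness, here is how the same call chain might look outside the unittest harness, as a sketch rather than a verified recipe: it reuses the hypothetical _model_setup helper from the sketch above, mirrors the constructor arguments of Example #2, and assumes an older ART release where PyTorchClassifier is importable from art.classifiers and loss_gradient accepts one-hot labels as in that example.

import numpy as np

from art.classifiers import PyTorchClassifier  # import path assumed for older ART releases
from art.utils import load_mnist

# Wire up the classifier as in Example #2: clip values (0, 1),
# NCHW input shape (1, 28, 28), 10 classes. _model_setup is the
# illustrative helper sketched above, not part of ART.
model, loss_fn, optimizer = _model_setup()
classifier = PyTorchClassifier((0, 1), model, loss_fn, optimizer, (1, 28, 28), 10)

# Take a small MNIST batch and move channels to the front (NHWC -> NCHW).
(_, _), (x_test, y_test), _, _ = load_mnist()
x_test, y_test = x_test[:100], y_test[:100]
x_test = np.swapaxes(x_test, 1, 3)

# Gradient of the loss with respect to the inputs, one gradient per sample.
grads = classifier.loss_gradient(x_test, y_test)
print(grads.shape)  # (100, 1, 28, 28), matching the input shape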