Example #1
    def test_backward_on_X(self):
        # Run the forward pass, then backpropagate a downstream gradient
        # of ones to obtain the analytic gradient with respect to the input.
        downstream_gradient = np.ones_like(self.x)
        self.batch_norm(self.x)
        d_X = self.batch_norm.backward(downstream_gradient)

        # The analytic gradient must agree with the numerical estimate.
        np.testing.assert_almost_equal(
            d_X, numeric_gradient(self.batch_norm, self.x))
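Every example on this page compares an analytic gradient against a numeric_gradient helper that the listing does not show. A minimal central-difference sketch of such a helper, assuming it takes a callable plus the array to differentiate at, and assuming that summing the forward output is what makes the estimate comparable to a backward pass fed with a gradient of ones, could look like this:

import numpy as np

def numeric_gradient(f, x, eps=1e-6):
    # Central-difference estimate of d(sum(f(x)))/dx, element by element.
    # Summing the output is an assumption that mirrors the all-ones
    # downstream gradient used in the tests on this page.
    x = np.asarray(x, dtype=np.float64)
    grad = np.zeros_like(x)
    for idx in np.ndindex(x.shape):
        original = x[idx]
        x[idx] = original + eps
        f_plus = np.sum(f(x))
        x[idx] = original - eps
        f_minus = np.sum(f(x))
        x[idx] = original  # restore before moving to the next element
        grad[idx] = (f_plus - f_minus) / (2 * eps)
    return grad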
Example #2
    def test_backward_on_X(self):
        self.linear(self.matrix)
        d_X = self.linear.backward(self.downstream_gradient)

        np.testing.assert_almost_equal(
            numeric_gradient(self.linear, self.matrix),
            d_X
        )
Example #3
    def test_gradient(self):
        y_hat = np.array([.8, .75, .25, .01, .99])
        y = np.array([1, 0, 0, 1, 1])

        bce = BinaryCrossEntropy()
        bce(y_hat, y)

        np.testing.assert_almost_equal(
            bce.gradient(), numeric_gradient(partial(bce, y=y), y_hat))
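For reference, the per-element derivative that bce.gradient() is being checked against is the gradient of the binary cross-entropy loss with respect to the prediction; whether the implementation additionally averages over the batch is not visible from this test:

    \frac{\partial}{\partial \hat{y}} \Big[ -y \log\hat{y} - (1 - y)\log(1 - \hat{y}) \Big]
        = -\frac{y}{\hat{y}} + \frac{1 - y}{1 - \hat{y}}
        = \frac{\hat{y} - y}{\hat{y}\,(1 - \hat{y})}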
Example #4
    def test_backward_on_beta(self):
        downstream_gradient = np.ones_like(self.x)
        self.batch_norm(self.x)
        self.batch_norm.backward(downstream_gradient)

        # Write the perturbed values into beta in place, so the layer's own
        # parameter array is what numeric_gradient varies.
        def _forward_wrt_beta(new_beta):
            self.batch_norm.beta[:] = new_beta
            return self.batch_norm(self.x)

        np.testing.assert_almost_equal(
            self.batch_norm.beta.grad,
            numeric_gradient(_forward_wrt_beta, self.batch_norm.beta).ravel())
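Assuming the usual batch-norm parameterization y = \gamma \hat{x} + \beta (an assumption, since the layer itself is not shown in this listing), the quantity checked above reduces to the downstream gradient summed over the batch dimension:

    \frac{\partial L}{\partial \beta} = \sum_{n} \frac{\partial L}{\partial y_{n}}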
Example #5
    def test_backward_on_b(self):
        self.linear(self.matrix)
        self.linear.backward(self.downstream_gradient)

        def forward_wrt_b(new_b):
            self.linear.b[:] = new_b
            return self.linear(self.matrix)

        np.testing.assert_almost_equal(
            numeric_gradient(forward_wrt_b, self.linear.b),
            self.linear.b.grad
        )
Example #6
    def test_backward_on_W(self):
        self.linear(self.matrix)
        self.linear.backward(self.downstream_gradient)

        def forward_wrt_W(new_W):
            self.linear.W[:] = new_W
            return self.linear(self.matrix)

        np.testing.assert_almost_equal(
            numeric_gradient(forward_wrt_W, self.linear.W),
            self.linear.W.grad
        )
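Assuming the layer computes Y = XW + b (again an assumption about the Linear class rather than something the listing shows), the analytic gradients verified by the three Linear tests on this page (backward on X, b, and W) are:

    \frac{\partial L}{\partial X} = \frac{\partial L}{\partial Y}\, W^{\top}, \qquad
    \frac{\partial L}{\partial W} = X^{\top}\, \frac{\partial L}{\partial Y}, \qquad
    \frac{\partial L}{\partial b} = \sum_{n} \Big( \frac{\partial L}{\partial Y} \Big)_{n}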
Example #7
    def test_gradient(self):
        y_hat = np.array([
            [.2, .2, .2, .2, .2],
            [.1, .1, .1, .6, .1],
            [.1, .1, .1, .1, .6],
        ], dtype=np.float64)
        y = np.array([0, 3, 4], dtype=np.float64)

        cce = CategoricalCrossEntropy()
        cce(y_hat, y)

        np.testing.assert_almost_equal(
            cce.gradient(), numeric_gradient(partial(cce, y=y), y_hat))
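Here y holds integer class indices and each row of y_hat sums to one, so the inputs look like probabilities rather than logits. The per-row derivative that cce.gradient() is compared against is then nonzero only at the true class c = y_i; whether the implementation averages over the rows is, again, not visible from the test:

    \frac{\partial}{\partial \hat{y}_{i,k}} \big[ -\log \hat{y}_{i,c} \big]
        = \begin{cases} -\dfrac{1}{\hat{y}_{i,c}} & k = c \\[4pt] 0 & k \ne c \end{cases}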