    def test_output_linear_backward(self):
        print_test("Testing linear backward function:")
        np.random.seed(395)
        x = np.random.randn(10, 2, 3)
        w = np.random.randn(6, 5)
        b = np.random.randn(5)
        dout = np.random.randn(10, 5)

        dx_num = eval_numerical_gradient_array(
            lambda x: layers.linear_forward(x, w, b), x, dout)
        dw_num = eval_numerical_gradient_array(
            lambda w: layers.linear_forward(x, w, b), w, dout)
        db_num = eval_numerical_gradient_array(
            lambda b: layers.linear_forward(x, w, b), b, dout)

        dx, dw, db = layers.linear_backward(dout, x, w, b)

        dX_e = rel_error(dx_num, dx)
        dW_e = rel_error(dw_num, dw)
        db_e = rel_error(db_num, db)
        print('dX relative error: ', dX_e)
        print('dW relative error: ', dW_e)
        print('db relative error: ', db_e)
        self.assertTrue(dX_e <= 5e-10)
        self.assertTrue(dW_e <= 5e-10)
        self.assertTrue(db_e <= 5e-10)
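# The test above checks layers.linear_backward against numerical gradients.
# As a reference, a minimal sketch of such a backward pass is given below; it
# is an assumption, not the repository's actual code, and it presumes that
# linear_forward computes out = x.reshape(N, -1).dot(w) + b (numpy is assumed
# to be imported as np, as in the tests above).
def linear_backward_sketch(dout, x, w, b):
    """Return (dx, dw, db) for out = x.reshape(N, -1).dot(w) + b."""
    N = x.shape[0]
    x_flat = x.reshape(N, -1)             # (N, D)
    dx = dout.dot(w.T).reshape(x.shape)   # (N, d1, ..., dk)
    dw = x_flat.T.dot(dout)               # (D, n_out)
    db = dout.sum(axis=0)                 # (n_out,); b itself is not needed
    return dx, dw, db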
 def test_output_dropout_forward(self):
     print_test("Testing dropout forward function:")
     seed = 395
     np.random.seed(seed)
     p = 0.7
     x = np.linspace(-0.7, 0.5, num=10).reshape(2, 5)
     out_train, _ = layers.dropout_forward(x, p=p, seed=seed, train=True)
     out_test, _ = layers.dropout_forward(x, p=p, seed=seed, train=False)
     correct_out = np.asarray([[-0., -0., -0., -1., -0.],
                               [-0.,  0.,  0.,  0.,  0.]])
     e_train = rel_error(out_train, correct_out)
     e_test = rel_error(out_test, x)
     print("Relative difference train:", e_train)
     print("Relative difference test:", e_test)
     self.assertTrue(e_train < 1e-12)
     self.assertTrue(e_test < 1e-12)
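# In correct_out above only one of the ten inputs survives and it is scaled by
# 1 / (1 - p): x[0, 3] = -0.3 becomes -1.0 with p = 0.7. That is consistent
# with p being the drop probability and "inverted" scaling being applied at
# train time. The sketch below illustrates that convention; it is not the
# repository's dropout_forward, and the exact mask it draws for a given seed
# will generally differ.
def dropout_forward_sketch(x, p, seed=None, train=True):
    if seed is not None:
        np.random.seed(seed)
    if train:
        mask = np.random.rand(*x.shape) >= p   # keep each unit with prob 1 - p
        out = x * mask / (1.0 - p)             # inverted dropout scaling
    else:
        mask = None
        out = x                                # test time: identity
    return out, mask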
    def test_output_linear_forward(self):
        print_test("Testing linear forward function:")
        n_X = 10
        X_shape = [4, 2, 5]
        n_out = 3
        n_input = n_X * np.prod(X_shape)
        n_weights = np.prod(X_shape) * n_out
        X = np.linspace(-0.2, 0.5, num=n_input).reshape([n_X] + X_shape)
        W = np.linspace(-0.4, 0.2, num=n_weights).reshape(
            np.prod(X_shape), n_out)
        b = np.linspace(0.5, 1, num=n_out)

        # Print the input sizes, shapes and values for inspection
        print("n_input: ", n_input)
        print("n_weights: ", n_weights)
        print("X: ", X.shape, "\n", X)
        print("W: ", W.shape, "\n", W)
        print("b: ", b.shape, "\n", b)

        out = layers.linear_forward(X, W, b)
        correct_out = np.asarray([[1.33803627, 1.55459973, 1.7711632],
                                  [1.04318148, 1.27389798, 1.50461448],
                                  [0.7483267 , 0.99319623, 1.23806575],
                                  [0.45347192, 0.71249447, 0.97151703],
                                  [0.15861713, 0.43179272, 0.7049683],
                                  [-0.13623765, 0.15109096, 0.43841958],
                                  [-0.43109244, -0.12961079, 0.17187085],
                                  [-0.72594722, -0.41031255, -0.09467787],
                                  [-1.02080201, -0.6910143 , -0.3612266 ],
                                  [-1.31565679, -0.97171605, -0.62777532]])
        e = rel_error(out, correct_out)
        print("Relative difference", e)
        self.assertTrue(e <= 5e-08)
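# The forward pass exercised above flattens every example into a row vector
# and applies an affine map, which is what turns an (n_X, 4, 2, 5) input into
# an (n_X, n_out) output. A compact sketch (an assumption, not necessarily the
# project's exact implementation):
def linear_forward_sketch(X, W, b):
    N = X.shape[0]
    return X.reshape(N, -1).dot(W) + b   # (N, D) @ (D, n_out) + (n_out,)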
 def test_derivative_softmax(self):
     print_test("Test gradients softmax")
     np.random.seed(395)
     X = np.random.randn(5, 10)
     y = np.random.uniform(0, 10, 5).astype(np.int16)
     expected_grads = np.asarray(
         [[
             0.00322816, 0.01188412, 0.00185757, 0.04150573, -0.19303544,
             0.00260126, 0.08516417, 0.02764608, 0.00912691, 0.01002143
         ],
          [
              -0.19497483, 0.00531375, 0.04302849, 0.03511828, 0.00303517,
              0.04190564, 0.0038523, 0.01478627, 0.02427056, 0.02366437
          ],
          [
              -0.18935209, 0.00978672, 0.00897249, 0.02791175, 0.04489496,
              0.00676774, 0.0171222, 0.00539931, 0.0294198, 0.03907712
          ],
          [
              0.02925289, -0.19129923, 0.00521769, 0.00132343, 0.01317178,
              0.03008875, 0.00245856, 0.00113738, 0.01814663, 0.09050212
          ],
          [
              0.01018762, 0.0232368, 0.0049424, 0.01980616, 0.01467162,
              0.00463897, 0.03473582, 0.007073, 0.03897701, -0.15826939
          ]])
     _, grads = classifiers.softmax(X, y)
     e = rel_error(expected_grads, grads)
     print("Relative error", e)
     self.assertTrue(e <= 1e-05)
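# Each row of expected_grads sums to zero and has a single negative entry,
# which matches the usual gradient of the averaged softmax cross-entropy loss
# with respect to the scores: (softmax(X) - one_hot(y)) / N. A hedged sketch
# of such a classifier (classifiers.softmax itself may differ, e.g. in how it
# handles regularization):
def softmax_sketch(X, y):
    N = X.shape[0]
    shifted = X - X.max(axis=1, keepdims=True)   # numerical stability
    probs = np.exp(shifted)
    probs /= probs.sum(axis=1, keepdims=True)
    loss = -np.log(probs[np.arange(N), y]).mean()
    grads = probs.copy()
    grads[np.arange(N), y] -= 1.0                # subtract the one-hot target
    grads /= N
    return loss, grads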
 def test_output_softmax(self):
     print_test("Test output softmax")
     np.random.seed(395)
     X = np.random.randn(3073, 10)
     y = np.random.uniform(0, 10, 3073).astype(np.int16)
     expected_loss = 2.7536201727859835
     loss, _ = classifiers.softmax(X, y)
     e = rel_error(loss, expected_loss)
     print("Relative error", e)
     self.assertTrue(e <= 5e-12)
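# Both softmax tests score their output with rel_error from test.utils. That
# helper is commonly defined as the maximum elementwise relative difference;
# the definition below is an assumption used for illustration, not the
# project's code:
def rel_error_sketch(x, y):
    return np.max(np.abs(x - y) / np.maximum(1e-8, np.abs(x) + np.abs(y)))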
 def test_output_relu_backward(self):
     print_test("Testing relu backward function:")
     np.random.seed(395)
     x = np.random.randn(10, 10)
     dout = np.random.randn(*x.shape)
     dx_num = eval_numerical_gradient_array(
         lambda x: layers.relu_forward(x), x, dout)
     dx = layers.relu_backward(dout, x)
     dx_e = rel_error(dx, dx_num)
     print('dX relative difference:', dx_e)
     self.assertTrue(dx_e <= 1e-11)
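# ReLU routes the upstream gradient through only where the input was positive,
# so the backward pass reduces to an elementwise mask. A one-line sketch that
# matches the call signature used above (dout first, then the input x); an
# assumption, not the repository's code:
def relu_backward_sketch(dout, x):
    return dout * (x > 0)   # zero gradient wherever the input was clipped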
 def test_output_relu_forward(self):
     print_test("Testing relu forward function:")
     x = np.linspace(-0.7, 0.5, num=20).reshape(5, 4)
     out = layers.relu_forward(x)
     correct_out = np.array([[0., 0., 0., 0.],
                             [0., 0., 0., 0.],
                             [0., 0., 0., 0.],
                             [0.05789474, 0.12105263, 0.18421053, 0.24736842],
                             [0.31052632, 0.37368421, 0.43684211, 0.5]])
     e = rel_error(out, correct_out)
     print('Relative difference:', e)
     self.assertTrue(e <= 5e-08)
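# correct_out above is just max(x, 0): the first twelve entries of x are
# negative and map to zero, the rest pass through unchanged. A one-line sketch
# of the forward pass (an assumption about layers.relu_forward):
def relu_forward_sketch(x):
    return np.maximum(0, x)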
 def test_output_dropout_backward(self):
     print_test("Testing dropout backward function:")
     seed = 395
     np.random.seed(seed)
     x = np.random.randn(16, 16) + 8
     dout = np.random.randn(*x.shape)
     p = 0.7
     # Train mode: compare the analytic gradient against a numerical one
     _, mask = layers.dropout_forward(x, train=True, p=p, seed=seed)
     dx = layers.dropout_backward(dout, mask, p=p, train=True)
     dx_num = eval_numerical_gradient_array(
         lambda xx: layers.dropout_forward(
             xx, p=p, train=True, seed=seed)[0], x, dout)
     e_train = rel_error(dx, dx_num)
     print('dx train relative error: ', e_train)
     self.assertTrue(e_train <= 5e-11)
     # Test mode: the backward pass should return dout unchanged
     dx_test = layers.dropout_backward(dout, mask, train=False, p=p)
     e_test = rel_error(dout, dx_test)
     print('dx test relative error: ', e_test)
     self.assertTrue(e_test <= 1e-12)
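# At train time the backward pass reuses the mask cached by the forward pass
# and the same 1 / (1 - p) scaling; at test time it returns dout unchanged,
# which is why the test compares dx_test against dout directly. A sketch under
# the same drop-probability convention as dropout_forward_sketch above (an
# assumption, not the project's implementation):
def dropout_backward_sketch(dout, mask, p, train=True):
    if train:
        return dout * mask / (1.0 - p)   # assumes a boolean keep-mask
    return dout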
import numpy as np

from src import fcnet
from test.utils import rel_error
from test.gradient_check import eval_numerical_gradient

if __name__ == "__main__":
    N, D, H1, H2, C = 2, 15, 20, 30, 10
    X = np.random.randn(N, D)
    y = np.random.randint(C, size=(N, ))

    for reg in [0, 3.14]:
        print('Running check with reg = ', reg)
        model = fcnet.FullyConnectedNet([H1, H2],
                                        input_dim=D,
                                        num_classes=C,
                                        reg=reg,
                                        dtype=np.float64)
        loss, grads = model.loss(X, y)
        print('Initial loss: ', loss)
        print(model.params.keys())
        print(grads.keys())
        for name in sorted(grads):
            f = lambda _: model.loss(X, y)[0]
            grad_num = eval_numerical_gradient(f,
                                               model.params[name],
                                               verbose=False,
                                               h=1e-5)
            rel = rel_error(grad_num, grads[name])
            print('{name} relative error: {rel:.2e}'.format(name=name, rel=rel))
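# The loop above compares analytic gradients against centered finite
# differences. A simplified sketch of what eval_numerical_gradient computes;
# the version in test.gradient_check may differ in details such as how the
# verbose flag is handled:
def eval_numerical_gradient_sketch(f, x, h=1e-5):
    grad = np.zeros_like(x)
    it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
    while not it.finished:
        ix = it.multi_index
        old = x[ix]
        x[ix] = old + h
        fxph = f(x)              # f(x + h)
        x[ix] = old - h
        fxmh = f(x)              # f(x - h)
        x[ix] = old              # restore the original value
        grad[ix] = (fxph - fxmh) / (2 * h)
        it.iternext()
    return grad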