Example no. 1
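Note: the snippets below appear to share a common test prelude that the excerpts do not show; the sketches added after each example assume it as well. A minimal guess (assert_eq presumably wraps numpy's assert_allclose, and the NeuralNetwork import path is hypothetical):

import numpy as np
import nose.tools as nt  # nt.eq_ is nose's plain equality assertion
from numpy.testing import assert_allclose
# from neural_network import NeuralNetwork  # hypothetical module path

def assert_eq(actual, expected, rtol=1e-7):
    # Guessed helper: elementwise closeness with a configurable tolerance.
    assert_allclose(actual, expected, rtol=rtol)
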
def test_compute_cost():
    y = np.array([[1, 1, 1]])
    al = np.array([[0.8, 0.9, 0.4]])

    nn = NeuralNetwork(layer_dims=(3, 1), params_ok=True)
    cost = nn.compute_cost(al, y)
    assert_eq(cost, 0.41493159961539694)
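
The expected value is the mean binary cross-entropy of the predictions al against the labels y. A minimal sketch of the formula the assertion implies (an assumption, not the class's actual implementation):

def cross_entropy_cost(al, y):
    # J = -(1/m) * sum(y * log(al) + (1 - y) * log(1 - al))
    m = y.shape[1]
    return float(np.squeeze(
        -(y @ np.log(al).T + (1 - y) @ np.log(1 - al).T) / m))

With al = [[0.8, 0.9, 0.4]] and y = [[1, 1, 1]] this gives -(ln 0.8 + ln 0.9 + ln 0.4) / 3 ≈ 0.4149315996, matching the assertion.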
Example no. 2
def test_update_parameters():
    np.random.seed(2)
    w1 = np.random.randn(3, 4)
    b1 = np.random.randn(3, 1)
    w2 = np.random.randn(1, 3)
    b2 = np.random.randn(1, 1)

    np.random.seed(3)
    dw1 = np.random.randn(3, 4)
    db1 = np.random.randn(3, 1)
    dw2 = np.random.randn(1, 3)
    db2 = np.random.randn(1, 1)

    nn = NeuralNetwork(w=[w1, w2], b=[b1, b2], learning_rate=0.1, params_ok=True)

    nn.update_parameters(([dw1, dw2], [db1, db2]))

    assert_eq(nn.w[0], [
        [-0.5956206947, -0.09991781, -2.14584584, 1.82662008],
        [-1.7656967649, -0.80627147, 0.5111555653, -1.18258802],
        [-1.0535704, -0.86128581, 0.68284052, 2.2037457748]
    ])
    assert_eq(nn.w[1], [
        [-0.55569196, 0.0354055, 1.32964895]
    ])
    assert_eq(nn.b[0], [
        [-0.04659241], [-1.28888275], [0.53405496]
    ])
    assert_eq(nn.b[1], [[-0.84610769]])
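
The expected arrays are consistent with one step of plain gradient descent. A sketch of the update rule the test exercises (assumed, since the method body is not shown):

def sgd_step(w, b, grads, lr):
    # parameter <- parameter - lr * gradient, applied per layer
    dw, db = grads
    w = [wi - lr * dwi for wi, dwi in zip(w, dw)]
    b = [bi - lr * dbi for bi, dbi in zip(b, db)]
    return w, b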
Example no. 3
def test_initialize():
    np.random.seed(1)
    nn = NeuralNetwork(layer_dims=[3, 2, 1], params_ok=True)
    assert_eq(nn.w[0], [
        [0.01624345, -0.00611756, -0.00528172],
        [-0.01072969, 0.00865408, -0.02301539]
    ], rtol=1e-6)
    assert_eq(nn.b[0], [[0], [0]])
    assert_eq(nn.w[1], [[0.01744812, -0.00761207]], rtol=1e-6)
    nt.eq_(nn.b[1].shape, (1, 1))
    assert_eq(nn.b[1], [[0.]])

    np.random.seed(3)
    nn = NeuralNetwork(layer_dims=[5, 4, 3], params_ok=True)
    assert_eq(nn.w[0], [
        [0.01788628, 0.0043651, 0.00096497, -0.01863493, -0.00277388],
        [-0.00354759, -0.00082741, -0.00627001, -0.00043818, -0.00477218],
        [-0.01313865, 0.00884622, 0.00881318, 0.01709573, 0.00050034],
        [-0.00404677, -0.0054536, -0.01546477, 0.00982367, -0.01101068]
    ], rtol=1e-5)
    assert_eq(nn.b[0], [[0.], [0.], [0.], [0.]])
    assert_eq(nn.w[1], [
        [-0.01185047, -0.0020565, 0.01486148, 0.00236716],
        [-0.01023785, -0.00712993, 0.00625245, -0.00160513],
        [-0.00768836, -0.00230031, 0.00745056, 0.01976111]
    ], rtol=1e-5)
    assert_eq(nn.b[1], [[0.], [0.], [0.]])
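
The expected weights are np.random.randn values scaled by the default weight_factor of 0.01, with zero biases. A sketch of the initialization scheme the assertions imply:

def initialize(layer_dims, factor=0.01):
    # w[i] has shape (layer_dims[i+1], layer_dims[i]); biases start at zero.
    # Biases consume no randomness, so drawing all weights first leaves the
    # random stream identical to an interleaved per-layer loop.
    w = [np.random.randn(layer_dims[i], layer_dims[i - 1]) * factor
         for i in range(1, len(layer_dims))]
    b = [np.zeros((layer_dims[i], 1)) for i in range(1, len(layer_dims))]
    return w, b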
Example no. 4
def test_backward_propagation_dropout():
    np.random.seed(1)

    x = np.random.randn(3, 5)
    y = np.array([[1, 1, 0, 1, 0]])

    w1 = np.array([[-1.09989127, -0.17242821, -0.87785842],
                   [0.04221375, 0.58281521, -1.10061918]])
    b1 = np.array([[1.14472371], [0.90159072]])

    w2 = np.array([[0.50249434, 0.90085595], [-0.68372786, -0.12289023],
                   [-0.93576943, -0.26788808]])
    b2 = np.array([[0.53035547], [-0.69166075], [-0.39675353]])

    w3 = np.array([[-0.6871727, -0.84520564, -0.67124613]])
    b3 = np.array([[-0.0126646]])

    nn = NeuralNetwork(w=[w1, w2, w3], b=[b1, b2, b3], params_ok=True)

    z1 = np.array(
        [[-1.52855314, 3.32524635, 2.13994541, 2.60700654, -0.75942115],
         [-1.98043538, 4.1600994, 0.79051021, 1.46493512, -0.45506242]])
    d1 = np.array(
        [[True, False, True, True, True], [True, True, True, True, False]],
        dtype=bool)
    a1 = np.array([[0., 0., 4.27989081, 5.21401307, 0.],
                   [0., 8.32019881, 1.58102041, 2.92987024, 0.]])

    z2 = np.array(
        [[0.53035547, 8.02565606, 4.10524802, 5.78975856, 0.53035547],
         [-0.69166075, -1.71413186, -3.81223329, -4.61667916, -0.69166075],
         [-0.39675353, -2.62563561, -4.82528105, -6.0607449, -0.39675353]])
    d2 = np.array(
        [[True, False, True, False, True], [False, True, False, True, True],
         [False, False, True, False, False]],
        dtype=bool)
    a2 = np.array([
        [1.06071093, 0., 8.21049603, 0., 1.06071093],
        [0., 0., 0., 0., 0.],
        [0., 0., 0., 0., 0.],
    ])

    z3 = np.array(
        [[-0.7415562, -0.0126646, -5.65469333, -0.0126646, -0.7415562]])
    a3 = np.array(
        [[0.32266394, 0.49683389, 0.00348883, 0.49683389, 0.32266394]])

    nn.cache = [(x, z1, d1), (a1, z2, d2), (a2, z3)]
    gradw, _ = nn.model_backward(a3, y, keep_prop=0.8)
    assert_eq(gradw[0], [[0.0001988393, 0.0002865694, 0.0001213795],
                         [0.0003564729, 0.0005137526, 0.0002176053]],
              rtol=1e-5)
    assert_eq(gradw[1], [[-0.0025651848, -0.0009475965], [0., 0.], [0., 0.]],
              rtol=1e-5)
    assert_eq(gradw[2], [[-0.0695119105, 0., 0.]])
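
Each cache entry carries the boolean dropout mask d generated during the forward pass; the backward pass presumably reapplies it with inverted-dropout scaling. The key step, sketched under that assumption:

def dropout_backward(da, mask, keep_prop):
    # Zero the gradients of units dropped in the forward pass and rescale
    # the survivors by 1 / keep_prop, mirroring the forward scaling.
    return da * mask / keep_prop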
Example no. 5
def train(train_x, train_y, layers):
    nn = NeuralNetwork(layer_dims=layers,
                       keep_prop=0.8,
                       weight_factor='deep' if len(layers) > 2 else 0.01)

    def print_cost(iteration_num, cost):
        if iteration_num % 100 == 0:
            print("Cost after iteration %d: %f" % (iteration_num, cost))

    nn.train(train_x, train_y, num_iterations=2500, callback=print_cost)
    return nn
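
('deep' here presumably selects a depth-aware initialization such as the He scaling checked in test_initialize_he below, while 0.01 is the plain factor from test_initialize.)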
Example no. 6
def test_model_forward():
    np.random.seed(6)
    x = np.random.randn(5, 4)
    w1 = np.random.randn(4, 5)
    b1 = np.random.randn(4, 1)
    w2 = np.random.randn(3, 4)
    b2 = np.random.randn(3, 1)
    w3 = np.random.randn(1, 3)
    b3 = np.random.randn(1, 1)

    nn = NeuralNetwork(w=[w1, w2, w3], b=[b1, b2, b3], params_ok=True)
    al = nn.model_forward(x)
    nt.eq_(len(nn.cache), 3)
    assert_eq(al, [[0.03921668, 0.70498921, 0.19734387, 0.04728177]])
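
The shapes imply the usual feed-forward stack: relu hidden layers followed by a sigmoid output. A self-contained sketch of what model_forward presumably computes (the architecture is an assumption):

def model_forward_sketch(x, ws, bs):
    a = x
    for w, b in zip(ws[:-1], bs[:-1]):
        a = np.maximum(0, w @ a + b)   # relu hidden layers
    zl = ws[-1] @ a + bs[-1]
    return 1 / (1 + np.exp(-zl))       # sigmoid output layer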
Example no. 7
def test_backward_propagation_regulation():
    np.random.seed(1)
    x = np.random.randn(3, 5)
    y = np.array([[1, 1, 0, 1, 0]])

    w1 = np.array([[-1.09989127, -0.17242821, -0.87785842],
                   [0.04221375, 0.58281521, -1.10061918]])
    b1 = np.array([[1.14472371], [0.90159072]])

    w2 = np.array([[0.50249434, 0.90085595], [-0.68372786, -0.12289023],
                   [-0.93576943, -0.26788808]])
    b2 = np.array([[0.53035547], [-0.69166075], [-0.39675353]])

    w3 = np.array([[-0.6871727, -0.84520564, -0.67124613]])
    b3 = np.array([[-0.0126646]])

    z1 = np.array(
        [[-1.52855314, 3.32524635, 2.13994541, 2.60700654, -0.75942115],
         [-1.98043538, 4.1600994, 0.79051021, 1.46493512, -0.45506242]])
    a1 = np.array([[0., 3.32524635, 2.13994541, 2.60700654, 0.],
                   [0., 4.1600994, 0.79051021, 1.46493512, 0.]])

    z2 = np.array(
        [[0.53035547, 5.94892323, 2.31780174, 3.16005701, 0.53035547],
         [-0.69166075, -3.47645987, -2.25194702, -2.65416996, -0.69166075],
         [-0.39675353, -4.62285846, -2.61101729, -3.22874921, -0.39675353]])
    a2 = np.array(
        [[0.53035547, 5.94892323, 2.31780174, 3.16005701, 0.53035547],
         [0., 0., 0., 0., 0.], [0., 0., 0., 0., 0.]])

    z3 = np.array(
        [[-0.3771104, -4.10060224, -1.60539468, -2.18416951, -0.3771104]])
    a3 = np.array(
        [[0.40682402, 0.01629284, 0.16722898, 0.10118111, 0.40682402]])

    nn = NeuralNetwork(w=[w1, w2, w3],
                       b=[b1, b2, b3],
                       lambd=0.7,
                       params_ok=True)
    nn.cache = [(x, z1), (a1, z2), (a2, z3)]
    gradw, gradb = nn.model_backward(a3, y)

    assert_eq(gradw[0], [[-0.256046467, 0.122988299, -0.28297132],
                         [-0.17706304, 0.34536100, -0.4410572]],
              rtol=1e-5)
    assert_eq(gradw[1], [[0.792764876, 0.85133918], [-0.0957219, -0.01720463],
                         [-0.13100772, -0.03750433]],
              rtol=1e-5)
    assert_eq(gradw[2], [[-1.77691347, -0.11832879, -0.09397446]], rtol=1e-5)
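
With L2 regularization each weight gradient gains a (lambd / m) * W term. The step the expected values imply, sketched:

def l2_weight_grad(dz, a_prev, w, lambd, m):
    # dW = (1/m) * dZ @ A_prev.T + (lambd/m) * W
    return dz @ a_prev.T / m + lambd / m * w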
Example no. 8
def test_forward_propagation_dropout():
    np.random.seed(1)
    x = np.random.randn(3, 5)
    w1 = np.random.randn(2, 3)
    b1 = np.random.randn(2, 1)
    w2 = np.random.randn(3, 2)
    b2 = np.random.randn(3, 1)
    w3 = np.random.randn(1, 3)
    b3 = np.random.randn(1, 1)

    np.random.seed(1)
    nn = NeuralNetwork(w=[w1, w2, w3], b=[b1, b2, b3], params_ok=True)
    a3 = nn.model_forward(x, keep_prop=0.7)
    assert_eq(a3,
              [[0.36974721, 0.00305176, 0.04565099, 0.49683389, 0.36974721]],
              rtol=1e-5)
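
The second np.random.seed(1) pins the dropout masks, which is what makes the expected activations reproducible. A sketch of the inverted-dropout forward step this implies (drawing the mask with np.random.rand is an assumption):

def dropout_forward(a, keep_prop):
    # Keep each unit with probability keep_prop, then rescale so the
    # expected activation magnitude is unchanged.
    mask = np.random.rand(*a.shape) < keep_prop
    return a * mask / keep_prop, mask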
Example no. 9
def test_cost_regulation():
    np.random.seed(1)
    y = np.array([[1, 1, 0, 1, 0]])
    w1 = np.random.randn(2, 3)
    b1 = np.random.randn(2, 1)
    w2 = np.random.randn(3, 2)
    b2 = np.random.randn(3, 1)
    w3 = np.random.randn(1, 3)
    b3 = np.random.randn(1, 1)
    a3 = np.array(
        [[0.40682402, 0.01629284, 0.16722898, 0.10118111, 0.40682402]])

    nn = NeuralNetwork(w=[w1, w2, w3],
                       b=[b1, b2, b3],
                       lambd=0.1,
                       params_ok=True)
    cost = nn.compute_cost(a3, y)
    assert_eq(cost, 1.78648594516)
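
With lambd set, the cost presumably adds the L2 penalty (lambd / 2m) * sum of squared weights to the cross-entropy. A sketch, reusing the formula from Example no. 1:

def l2_cost(al, y, ws, lambd):
    m = y.shape[1]
    ce = -(y @ np.log(al).T + (1 - y) @ np.log(1 - al).T) / m
    l2 = lambd / (2 * m) * sum(np.sum(np.square(w)) for w in ws)
    return float(np.squeeze(ce) + l2)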
Example no. 10
def test_initialize_he():
    np.random.seed(3)
    nn = NeuralNetwork(layer_dims=[2, 4, 1], weight_factor='he', params_ok=True)
    assert_eq(nn.w[0], [
        [1.78862847, 0.43650985],
        [0.09649747, -1.8634927],
        [-0.2773882, -0.35475898],
        [-0.08274148, -0.62700068]
    ], rtol=1e-5)
    assert_eq(nn.w[1], [
        [-0.03098412, -0.33744411, -0.92904268, 0.62552248]
    ], rtol=1e-5)
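
weight_factor='he' presumably scales each layer's Gaussian weights by sqrt(2 / fan_in). A one-layer sketch:

def he_layer(n_out, n_in):
    # He initialization: randn scaled by sqrt(2 / fan_in)
    return np.random.randn(n_out, n_in) * np.sqrt(2.0 / n_in)

For layer_dims=[2, 4, 1] the first factor is sqrt(2/2) = 1, which is why nn.w[0] equals the raw seed-3 normals, while nn.w[1] is scaled by sqrt(2/4) ≈ 0.7071.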
Example no. 11
def test_linear_activate_backward():
    np.random.seed(2)
    da = np.random.randn(1, 2)
    a = np.random.randn(3, 2)
    w = np.random.randn(1, 3)
    b = np.random.randn(1, 1)
    z = np.random.randn(1, 2)

    nn = NeuralNetwork(w=[w], b=[b], params_ok=True)
    da_prev, dw, db = nn.backward(da, w, b, (a, z), nn.activation_funcs['sigmoid'][1])
    assert_eq(da_prev, [
        [0.110179936, 0.011053395],
        [0.094668170, 0.009497234],
        [-0.057430922, -0.005761545]
    ])
    assert_eq(dw, [[0.102667864, 0.09778551, -0.019680842]])
    nt.eq_(db.shape, (1, 1))
    assert_eq(db, -0.057296222)

    nn = NeuralNetwork(w=[w], b=[b], params_ok=True)
    da_prev, dw, db = nn.backward(da, w, b, (a, z), nn.activation_funcs['relu'][1])
    assert_eq(da_prev, [
        [0.44090989, 0.],
        [0.37883606, 0.],
        [-0.2298228, -0.]
    ])
    assert_eq(dw, [[0.44513824, 0.37371418, -0.10478989]])
    nt.eq_(db.shape, (1, 1))
    assert_eq(db, -0.20837892)
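
Both branches follow the standard linear-activation backward pass. A sketch of what nn.backward presumably computes (the stored backward activation is assumed to have signature (da, z) -> dz):

def linear_activation_backward(da, w, b, cache, activation_backward):
    a_prev, z = cache
    m = a_prev.shape[1]
    dz = activation_backward(da, z)             # dZ = dA * g'(Z)
    dw = dz @ a_prev.T / m                      # averaged over the batch
    db = np.sum(dz, axis=1, keepdims=True) / m
    da_prev = w.T @ dz                          # gradient for the layer below
    return da_prev, dw, db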
Example no. 12
def test_model_backward():
    np.random.seed(3)
    al = np.random.randn(1, 2)
    y = np.array([[1, 0]])

    a1 = np.random.randn(4, 2)
    w1 = np.random.randn(3, 4)
    b1 = np.random.randn(3, 1)
    z1 = np.random.randn(3, 2)

    a2 = np.random.randn(3, 2)
    w2 = np.random.randn(1, 3)
    b2 = np.random.randn(1, 1)
    z2 = np.random.randn(1, 2)

    nn = NeuralNetwork(w=[w1, w2], b=[b1, b2], params_ok=True)
    nn.cache = [(a1, z1), (a2, z2)]
    grads = nn.model_backward(al, y)

    assert_eq(grads[0][0], [
        [0.41010002, 0.07807203, 0.1379844364, 0.1050216745],
        [0., 0., 0., 0.],
        [0.05283652, 0.0100586544, 0.017777656, 0.0135307956]
    ])
    assert_eq(grads[1][0], [
        [-0.2200706339], [0.], [-0.02835349]
    ])
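
model_backward is presumably seeded with the derivative of the cross-entropy cost with respect to the output activations:

def initial_gradient(al, y):
    # dJ/dAL for binary cross-entropy
    return -(y / al - (1 - y) / (1 - al))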
Example no. 13
def test_linear_activate_forward():
    np.random.seed(2)
    a_prev = np.random.randn(3, 2)
    w = np.random.randn(1, 3)
    b = np.random.randn(1, 1)

    nn = NeuralNetwork(w=[w], b=[b], params_ok=True)
    a = nn.forward(a_prev, w, b, nn.activation_funcs['sigmoid'][0])
    assert_eq(a, [[0.96890023, 0.11013289]])

    nn = NeuralNetwork(w=[w], b=[b], params_ok=True)
    a = nn.forward(a_prev, w, b, nn.activation_funcs['relu'][0])
    assert_eq(a, [[3.43896131, 0.]])
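
A sketch of the forward step being tested, with the two activation functions written out (assumed definitions):

def sigmoid(z):
    return 1 / (1 + np.exp(-z))

def relu(z):
    return np.maximum(0, z)

def linear_activation_forward(a_prev, w, b, activation):
    # A = g(W @ A_prev + b)
    return activation(w @ a_prev + b)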
Example no. 14
def train(train_x, train_y, test_x, test_y):
    def print_cost(iteration_num, cost):
        if iteration_num % 100 == 0:
            print("Cost after iteration %d: %f" % (iteration_num, cost))

    nn = NeuralNetwork(layer_dims=[20, 3, 1],
                       num_iterations=3000,
                       weight_factor='deep',
                       callback=print_cost)
    nn.train(train_x, train_y)
    p = nn.predict(train_x)
    print('Accuracy: %f' % np.mean(p == train_y))
    p = nn.predict(test_x)
    print('Accuracy: %f' % np.mean(p == test_y))
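
(predict presumably thresholds the sigmoid output of model_forward at 0.5, e.g. p = nn.model_forward(x) > 0.5, so np.mean(p == y) is the fraction of examples classified correctly.)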