# Esempio n. 1 (Example 1)
# 0
def test_forward_pass():
    """Forward pass through one mixed layer: check output width, pass-through
    columns, and that the normalized block sums to one per row."""
    npr.seed(1)

    n_points, n_dims = 15, 10
    inputs = 0.5 * npr.rand(n_points, n_dims)

    # One layer built from three transformations over disjoint column sets.
    layer = [
        (Normalization(3), [1, 3, 5]),
        (BetaWarp(2), [0, 2]),
        (Linear(3), [6, 8, 9]),
    ]

    xform = Transformer(n_dims)
    xform.add_layer(*layer)

    out = xform.forward_pass(inputs)

    # Output has 9 columns after the layer is applied.
    assert out.shape[1] == 9
    # Columns 4 and 7 were not assigned to any transformation; they appear
    # unchanged as the trailing output columns.
    assert np.all(out[:, 7:] == inputs[:, [4, 7]])
    # The normalized columns sum to one in every row.
    assert np.linalg.norm(out[:, 0:3].sum(1) - 1) < 1e-10

    # A second layer can be stacked on the 9-column output.
    xform.add_layer(BetaWarp(9))
def test_forward_pass():
    """Exercise Transformer.forward_pass on a single composite layer and
    verify shape, pass-through columns, and normalization."""
    npr.seed(1)

    rows, cols = 15, 10
    samples = 0.5 * npr.rand(rows, cols)

    pipeline = Transformer(cols)
    pipeline.add_layer(
        (Normalization(3), [1, 3, 5]),
        (BetaWarp(2), [0, 2]),
        (Linear(3), [6, 8, 9]),
    )

    result = pipeline.forward_pass(samples)

    # The transformed data comes back with 9 columns.
    assert result.shape[1] == 9
    # Columns 4 and 7 were left out of every transformation and show up
    # untouched at the end of the output.
    assert np.all(result[:, 7:] == samples[:, [4, 7]])
    # Each row of the normalized block sums to one.
    assert np.linalg.norm(result[:, 0:3].sum(1) - 1) < 1e-10

    # Adding a further layer over all 9 output columns is accepted.
    pipeline.add_layer(BetaWarp(9))
# Esempio n. 3 (Example 3)
# 0
def test_backward_pass_2():
    """Backward pass through two stacked layers matches hand-computed gradients."""
    xform = Transformer(10)

    # Layer 1: two transformations on separate column subsets.
    # Layer 2: one transformation over all 10 columns.
    xform.add_layer((SimpleTransformation(3), [0, 2, 4]),
                    (SimpleTransformation(4), [1, 3, 5, 7]))
    xform.add_layer(SimpleTransformation(10))

    x = np.ones((2, 10))
    x[1, :] *= 2
    xform.forward_pass(x)

    upstream = np.ones((2, 10))
    upstream[1, :] *= 2

    expected = np.array([[8, 8, 8, 8, 8, 8, 2, 8, 2, 2],
                         [64, 64, 64, 64, 64, 64, 8, 64, 8, 8]])
    assert np.all(xform.backward_pass(upstream) == expected)
def test_backward_pass_2():
    """Verify backward_pass output against precomputed expected gradients."""
    pipeline = Transformer(10)

    first_layer = [
        (SimpleTransformation(3), [0, 2, 4]),
        (SimpleTransformation(4), [1, 3, 5, 7]),
    ]
    pipeline.add_layer(*first_layer)
    pipeline.add_layer(SimpleTransformation(10))

    batch = np.ones((2, 10))
    batch[1, :] *= 2
    pipeline.forward_pass(batch)

    incoming = np.ones((2, 10))
    incoming[1, :] *= 2

    grad = pipeline.backward_pass(incoming)

    target = np.array([[8, 8, 8, 8, 8, 8, 2, 8, 2, 2],
                       [64, 64, 64, 64, 64, 64, 8, 64, 8, 8]])
    assert np.all(grad == target)
# Esempio n. 5 (Example 5)
# 0
def test_backward_pass():
    """Check Transformer.backward_pass against finite-difference gradients.

    Builds a transformer over 10-dim data, adds a composite layer, checks
    the analytic gradient of sum(output**2), then stacks a second layer
    and checks again.
    """
    npr.seed(1)

    eps = 1e-5
    N = 15
    D = 10

    data = 0.5 * npr.rand(N, D)

    t = Transformer(D)

    # First layer: three transformations over disjoint column subsets.
    t.add_layer((Normalization(3), [1, 3, 5]),
                (BetaWarp(2), [0, 2]),
                (Linear(3), [6, 8, 9]))
    _assert_gradient_matches(t, data, eps)

    # Second layer over the 9-column output of the first.
    t.add_layer(Linear(9))
    _assert_gradient_matches(t, data, eps)


def _assert_gradient_matches(t, data, eps):
    """Compare analytic and central-difference gradients of sum(forward(data)**2)."""
    new_data = t.forward_pass(data)
    V = 2 * new_data  # d/d(output) of sum(output**2)

    dloss = t.backward_pass(V)

    dloss_est = np.zeros(dloss.shape)
    # range, not xrange: xrange is Python 2-only and raises NameError on 3.
    for i in range(data.shape[0]):
        for j in range(data.shape[1]):
            # Central difference: perturb one entry up and down, then restore.
            data[i, j] += eps
            loss_1 = np.sum(t.forward_pass(data)**2)
            data[i, j] -= 2 * eps
            loss_2 = np.sum(t.forward_pass(data)**2)
            data[i, j] += eps
            dloss_est[i, j] = (loss_1 - loss_2) / (2 * eps)

    assert np.linalg.norm(dloss - dloss_est) < 1e-6
def test_backward_pass():
    """Check Transformer.backward_pass against central-difference estimates.

    After each layer is added, the analytic gradient of sum(output**2)
    is compared elementwise with a finite-difference approximation.
    """
    npr.seed(1)

    eps = 1e-5
    N = 15
    D = 10

    data = 0.5 * npr.rand(N, D)

    norm = Normalization(3)
    norm_inds = [1, 3, 5]

    bw = BetaWarp(2)
    bw_inds = [0, 2]

    lin = Linear(3)
    lin_inds = [6, 8, 9]

    t = Transformer(D)

    # Add a layer and test the gradient.
    t.add_layer((norm, norm_inds), (bw, bw_inds), (lin, lin_inds))
    new_data = t.forward_pass(data)
    V = 2 * new_data  # gradient of sum(output**2) w.r.t. the output

    dloss = t.backward_pass(V)

    dloss_est = np.zeros(dloss.shape)
    # range, not xrange: xrange was removed in Python 3.
    for i in range(N):
        for j in range(D):
            data[i, j] += eps
            loss_1 = np.sum(t.forward_pass(data)**2)
            data[i, j] -= 2 * eps
            loss_2 = np.sum(t.forward_pass(data)**2)
            data[i, j] += eps  # restore the perturbed entry
            dloss_est[i, j] = (loss_1 - loss_2) / (2 * eps)

    assert np.linalg.norm(dloss - dloss_est) < 1e-6

    # Add a second layer and test the gradient again.
    t.add_layer(Linear(9))

    new_data = t.forward_pass(data)
    V = 2 * new_data

    dloss = t.backward_pass(V)

    dloss_est = np.zeros(dloss.shape)
    for i in range(N):
        for j in range(D):
            data[i, j] += eps
            loss_1 = np.sum(t.forward_pass(data)**2)
            data[i, j] -= 2 * eps
            loss_2 = np.sum(t.forward_pass(data)**2)
            data[i, j] += eps
            dloss_est[i, j] = (loss_1 - loss_2) / (2 * eps)

    assert np.linalg.norm(dloss - dloss_est) < 1e-6
# Esempio n. 7 (Example 7)
# 0
def test_forward_pass_2():
    """Forward pass through two stacked layers matches hand-computed output."""
    xform = Transformer(10)

    xform.add_layer((SimpleTransformation(3), [0, 2, 4]),
                    (SimpleTransformation(4), [1, 3, 5, 7]))
    xform.add_layer(SimpleTransformation(10))

    x = np.ones((2, 10))
    x[1, :] *= 2

    expected = np.array([[4, 4, 4, 4, 4, 4, 4, 2, 2, 2],
                         [8, 8, 8, 8, 8, 8, 8, 4, 4, 4]])
    assert np.all(xform.forward_pass(x) == expected)
def test_forward_pass_2():
    """Check forward_pass output for two layers of SimpleTransformation."""
    pipeline = Transformer(10)

    layer_one = [
        (SimpleTransformation(3), [0, 2, 4]),
        (SimpleTransformation(4), [1, 3, 5, 7]),
    ]
    pipeline.add_layer(*layer_one)
    pipeline.add_layer(SimpleTransformation(10))

    batch = np.ones((2, 10))
    batch[1, :] *= 2

    result = pipeline.forward_pass(batch)

    target = np.array([[4, 4, 4, 4, 4, 4, 4, 2, 2, 2],
                       [8, 8, 8, 8, 8, 8, 8, 4, 4, 4]])
    assert np.all(result == target)