Пример #1
0
def test_nested_value_2():
    """MatSum over axis 1 followed by a full MatSum matches numpy's sums."""
    npr.seed(11)

    for trial in xrange(NUM_TRIALS):
        data = npr.randn(10, 20)
        param = kayak.Parameter(data)
        row_sums = kayak.MatSum(param, axis=1)
        total = kayak.MatSum(row_sums)

        # Per-row sums agree with numpy (ravel flattens any kept dims).
        assert np.all(close_float(row_sums.value.ravel(), np.sum(data, axis=1)))
        # The nested sum collapses to the grand total.
        assert close_float(total.value, np.sum(data))
Пример #2
0
def test_keepdims_grad_1():
    """Gradient of a full sum through MatSum(axis=0, keepdims=False).

    The gradient of sum(sum(X, axis=0)) w.r.t. X should be all ones with
    X's shape, and must pass the numerical gradient check.
    """
    npr.seed(10)

    for ii in xrange(NUM_TRIALS):
        npX = npr.randn(10, 20)
        X = kayak.Parameter(npX)
        Y = kayak.MatSum(X, axis=0, keepdims=False)
        Z = kayak.MatSum(Y)

        # Gradient shape matches the parameter even though Y dropped a dim.
        assert Z.grad(X).shape == npX.shape
        # d(sum X)/dX is exactly ones.
        assert np.all(close_float(Z.grad(X), np.ones(npX.shape)))
        assert kayak.util.checkgrad(X, Z) < MAX_GRAD_DIFF
Пример #3
0
def test_keepdims_value_1():
    """Values through MatSum with keepdims=False match numpy's behavior."""
    npr.seed(9)

    for ii in xrange(NUM_TRIALS):
        npX = npr.randn(10, 20)
        X = kayak.Parameter(npX)
        Y = kayak.MatSum(X, axis=0, keepdims=False)
        Z = kayak.MatSum(Y)

        # Shape and values of the axis-0 sum agree with numpy's keepdims=False.
        assert Y.shape == np.sum(npX, axis=0, keepdims=False).shape
        assert np.all(close_float(Y.value, np.sum(npX, axis=0,
                                                  keepdims=False)))
        # The nested sum still yields the grand total.
        assert close_float(Z.value, np.sum(npX))
Пример #4
0
def test_keepdims_grad_2():
    """Gradients through a 3-tensor product summed with keepdims=False."""
    npr.seed(10)

    for ii in xrange(NUM_TRIALS):
        npW = npr.randn(5, 10, 20)
        npX = npr.randn(5, 10, 20)
        W = kayak.Parameter(npW)
        X = kayak.Parameter(npX)
        Y = W * X
        Z = kayak.MatSum(Y, axis=2, keepdims=False)
        S = kayak.MatSum(Z)

        # Gradient shape survives the dropped axis from keepdims=False.
        assert S.grad(W).shape == npW.shape
        # assert np.all(close_float(Z.grad(X), np.ones(npX.shape)))
        assert kayak.util.checkgrad(X, S) < MAX_GRAD_DIFF
Пример #5
0
def test_batcher_clears_shape_cache():
    """Advancing the batcher must invalidate any cached node shapes."""
    batcher = kayak.Batcher(2, 3)
    data = np.array([[1, 2, 3], [2, 3, 4], [3, 4, 5]])
    X = kayak.Inputs(data, batcher)
    Y = kayak.MatSum(X, axis=1)
    # Batch size 2 over 3 rows: a full batch of 2, then a remainder of 1.
    expected_shapes = ((2, 1), (1, 1))
    for idx, _ in enumerate(batcher):
        assert Y.shape == expected_shapes[idx]
Пример #6
0
def test_batcher_clears_value_cache():
    """Advancing the batcher must invalidate any cached node values."""
    batcher = kayak.Batcher(1, 2)
    data = np.array([[1, 2, 3], [2, 3, 4]])
    X = kayak.Inputs(data, batcher)
    Y = kayak.MatSum(X)
    # One row per batch: sums are 1+2+3 and 2+3+4.
    expected_sums = (6, 9)
    for idx, _ in enumerate(batcher):
        assert Y.value == expected_sums[idx]
Пример #7
0
def test_graph_diamond():
    npr.seed(2)

    N  = 10
    D  = 5
    H1 = 6
    H2 = 7

    X   = kayak.Inputs(npr.randn(N,D))
    W1  = kayak.Parameter(npr.randn(D,H1))
    W2a = kayak.Parameter(npr.randn(H1,H2))
    W2b = kayak.Parameter(npr.randn(H1,H2))
    W3  = kayak.Parameter(npr.randn(H2,1))

    U1 = kayak.SoftReLU(kayak.MatMult(X, W1))
    U2a = kayak.SoftReLU(kayak.MatMult(U1, W2a))
    U2b = kayak.SoftReLU(kayak.MatMult(U1, W2b))
    U3a = kayak.SoftReLU(kayak.MatMult(U2a, W3))
    U3b = kayak.SoftReLU(kayak.MatMult(U2b, W3))
    
    out = kayak.MatSum(kayak.MatAdd(U3a, U3b))

    out.value
    print kayak.util.checkgrad(W1, out)
    print kayak.util.checkgrad(W2a, out)
    print kayak.util.checkgrad(W2b, out)
    print kayak.util.checkgrad(W3, out)
    assert kayak.util.checkgrad(W1, out) < MAX_GRAD_DIFF
    assert kayak.util.checkgrad(W2a, out) < MAX_GRAD_DIFF
    assert kayak.util.checkgrad(W2b, out) < MAX_GRAD_DIFF
    assert kayak.util.checkgrad(W3, out) < MAX_GRAD_DIFF
Пример #8
0
def test_dropout_clears_value_cache():
    """Drawing a new dropout mask must invalidate downstream cached values."""
    X = kayak.Inputs(np.random.randn(10,10))
    Y = kayak.Dropout(X)
    Z = kayak.MatSum(Y, axis=1)
    val1 = Z.value
    # A fresh mask should change the downstream value (stale cache would not).
    Y.draw_new_mask()
    val2 = Z.value
    assert not np.all(val1 == val2)
    # Repeated reads without a new mask are stable (value is cached).
    assert np.all(Z.value == Z.value)
Пример #9
0
def test_vector_value_1():
    """MatSum of a column vector equals numpy's total sum."""
    npr.seed(3)

    for trial in xrange(NUM_TRIALS):
        vec = npr.randn(10, 1)
        param = kayak.Parameter(vec)
        total = kayak.MatSum(param)
        # The full sum of a (10, 1) vector is a scalar total.
        assert close_float(total.value, np.sum(vec))
Пример #10
0
def test_tensor_value_1():
    """Full MatSum of a rank-3 tensor equals numpy's total sum."""
    npr.seed(13)

    for ii in xrange(NUM_TRIALS):
        npX = npr.randn(10, 20, 30)
        X = kayak.Parameter(npX)
        Y = kayak.MatSum(X)

        # The parameter node reports the underlying array's shape.
        assert X.shape == npX.shape
        assert close_float(Y.value, np.sum(npX))
Пример #11
0
def test_tensor_value_4():
    """Axis-0 MatSum of a rank-3 tensor keeps the summed dimension.

    Note the expected value uses expand_dims, i.e. MatSum defaults to
    keepdims=True here.
    """
    npr.seed(16)

    for ii in xrange(NUM_TRIALS):
        npX = npr.randn(10, 20, 30)
        X = kayak.Parameter(npX)
        Y = kayak.MatSum(X, axis=0)

        assert np.all(
            close_float(Y.value, np.expand_dims(np.sum(npX, axis=0), axis=0)))
Пример #12
0
def test_matrix_value():
    """Full MatSum of a matrix equals numpy's total sum."""
    npr.seed(7)

    for trial in xrange(NUM_TRIALS):
        mat = npr.randn(10, 20)
        param = kayak.Parameter(mat)
        total = kayak.MatSum(param)

        # Compare the scalar total against numpy.
        assert close_float(total.value, np.sum(mat))
Пример #13
0
def test_scalar_value():
    """MatSum of a scalar parameter reproduces the scalar unchanged."""
    npr.seed(1)

    for ii in xrange(NUM_TRIALS):
        npX = npr.randn()
        X = kayak.Parameter(npX)
        Y = kayak.MatSum(X)

        # Verify that a scalar is reproduced.
        assert close_float(Y.value, npX)
Пример #14
0
def test_logsoftmax_grad_2():
    """Numerical gradient check of sum(LogSoftMax(X, axis=0))."""
    npr.seed(4)

    for ii in xrange(NUM_TRIALS):

        np_X = npr.randn(5, 6)
        X = kayak.Parameter(np_X)
        Y = kayak.LogSoftMax(X, axis=0)
        Z = kayak.MatSum(Y)

        assert kayak.util.checkgrad(X, Z) < MAX_GRAD_DIFF
Пример #15
0
def test_alldropout_grad():
    """Gradients stay well-defined when dropout removes everything.

    With drop_prob=1.0 the output is constant, so the gradient should be
    zero everywhere — but it must still have the parameter's shape and
    pass the numerical check rather than blow up.
    """
    npr.seed(5)

    np_X = npr.randn(10,20)
    X    = kayak.Parameter(np_X)
    Y    = kayak.Dropout(X, drop_prob=1.0)
    Z    = kayak.MatSum(Y)

    Z.value  # force a forward pass before asking for gradients
    assert Z.grad(X).shape == np_X.shape
    assert kayak.util.checkgrad(X, Z) < MAX_GRAD_DIFF
Пример #16
0
def test_softmax_grad_1():
    """Numerical gradient check of sum(SoftMax(X)^2).

    Squaring via Y * Y makes the loss depend nonlinearly on the softmax
    output, exercising the full Jacobian rather than just its row sums.
    """
    npr.seed(3)

    for ii in xrange(NUM_TRIALS):

        np_X = npr.randn(5, 6)
        X = kayak.Parameter(np_X)
        Y = kayak.SoftMax(X)
        Z = kayak.MatSum(Y * Y)

        assert kayak.util.checkgrad(X, Z) < MAX_GRAD_DIFF
Пример #17
0
def test_scalar_grad():
    """The gradient of MatSum of a scalar is exactly 1."""
    npr.seed(2)

    for ii in xrange(NUM_TRIALS):
        npX = npr.randn()
        X = kayak.Parameter(npX)
        Y = kayak.MatSum(X)

        # Verify that the gradient is one.
        Y.value  # force a forward pass before asking for gradients
        assert Y.grad(X) == 1.0
        assert kayak.util.checkgrad(X, Y) < MAX_GRAD_DIFF
Пример #18
0
def test_matrix_grad():
    """Numerical gradient check through LogMultinomialLoss on matrices.

    NOTE(review): another function named test_matrix_grad appears later in
    this file; if both live in one module the later def shadows this one
    and this test would never run — confirm they belong to separate files.
    """
    npr.seed(6)

    for ii in xrange(NUM_TRIALS):
        np_pred = npr.randn(10,20)
        np_targ = npr.randn(10,20)

        pred = kayak.Parameter(np_pred)
        targ = kayak.Targets(np_targ)
        out  = kayak.MatSum(kayak.LogMultinomialLoss(pred, targ))

        assert kayak.util.checkgrad(pred, out) < MAX_GRAD_DIFF
Пример #19
0
def test_elemmult_grad_8():
    """Gradient of sum(ElemMult(A, A)) — the same node used on both sides."""
    npr.seed(15)

    for trial in xrange(NUM_TRIALS):

        raw = npr.randn(5,6)
        A = kayak.Parameter(raw)
        squared = kayak.ElemMult(A, A)
        total = kayak.MatSum(squared)

        # Both product operands are A, so gradient paths must accumulate.
        assert total.grad(A).shape == raw.shape
        assert kayak.util.checkgrad(A, total) < MAX_GRAD_DIFF
Пример #20
0
def test_neg_grad_1():
    """Numerical gradient check through unary negation (-A)."""
    npr.seed(8)

    for ii in xrange(NUM_TRIALS):

        np_A = npr.randn(5, 6)
        A = kayak.Parameter(np_A)
        C = -A
        D = kayak.MatSum(C)

        D.value  # force a forward pass before the gradient check
        assert kayak.util.checkgrad(A, D) < MAX_GRAD_DIFF
Пример #21
0
def test_indexing_grad_2():
    """Numerical gradient check through Take on an interior axis.

    Selects a random subset of 5 of the 7 indices along axis 2 of a
    rank-4 tensor; gradients for unselected slices should be zero.
    """
    npr.seed(3)

    for ii in xrange(NUM_TRIALS):
        np_X = npr.randn(6, 2, 7, 3)
        inds = npr.permutation(7)[:5]
        X    = kayak.Parameter(np_X)
        Y    = kayak.Take(X, inds,axis=2)
        Z    = kayak.MatSum(Y)

        Z.value  # force a forward pass before the gradient check
        assert_less(kayak.util.checkgrad(X, Z), MAX_GRAD_DIFF)
Пример #22
0
def test_softrelu_grad():
    """SoftReLU gradients are nonnegative and pass the numerical check."""
    npr.seed(2)

    for ii in xrange(NUM_TRIALS):
        np_X = npr.randn(6, 5)
        X = kayak.Parameter(np_X)
        Y = kayak.SoftReLU(X)
        Z = kayak.MatSum(Y)

        Z.value  # force a forward pass before asking for gradients
        # SoftReLU is monotone increasing, so every gradient entry is >= 0.
        assert np.all(Z.grad(X) >= 0.0)
        assert kayak.util.checkgrad(X, Z) < MAX_GRAD_DIFF
Пример #23
0
def test_logistic_grad():
    """Logistic gradients are nonnegative and pass the numerical check."""
    npr.seed(2)

    for ii in xrange(NUM_TRIALS):
        np_X = npr.randn(6, 5)
        X = kayak.Parameter(np_X)
        Y = kayak.Logistic(X)
        Z = kayak.MatSum(Y)

        Z.value  # force a forward pass before asking for gradients
        # The logistic function is monotone increasing, so gradients are >= 0.
        assert np.all(Z.grad(X) >= 0.0)
        assert_less(kayak.util.checkgrad(X, Z), MAX_GRAD_DIFF)
Пример #24
0
def test_elemabs_grad_2():
    """Numerical gradient check of ElemAbs on a single-element parameter."""
    npr.seed(9)

    for ii in xrange(NUM_TRIALS):

        np_A = npr.randn(1)
        A    = kayak.Parameter(np_A)
        D    = kayak.ElemAbs(A)
        E    = kayak.MatSum(D)

        E.value  # force a forward pass before the gradient check
        assert kayak.util.checkgrad(A, E) < MAX_GRAD_DIFF
Пример #25
0
def test_elemabs_grad_1():
    """Numerical gradient check of ElemAbs on a matrix parameter."""
    npr.seed(3)

    for ii in xrange(NUM_TRIALS):

        np_A = npr.randn(5,6)

        A    = kayak.Parameter(np_A)
        C    = kayak.ElemAbs(A)
        D    = kayak.MatSum(C)

        D.value  # force a forward pass before the gradient check
        assert kayak.util.checkgrad(A, D) < MAX_GRAD_DIFF
Пример #26
0
def test_logsoftmax_grad_3():
    """Gradient check of LogSoftMax composed with LogMultinomialLoss."""
    npr.seed(5)

    for ii in xrange(NUM_TRIALS):

        np_X = npr.randn(5, 6)
        np_T = npr.randint(0, 10, np_X.shape)
        X = kayak.Parameter(np_X)
        T = kayak.Targets(np_T)
        Y = kayak.LogSoftMax(X)
        Z = kayak.MatSum(kayak.LogMultinomialLoss(Y, T))

        assert kayak.util.checkgrad(X, Z) < MAX_GRAD_DIFF
Пример #27
0
 def __init__(self, maxnum, reduced_dims):
     """Build a linear autoencoder graph on top of kayak.

     Encodes a (maxnum, 1) input through W1 to reduced_dims dimensions and
     reconstructs it through W2, with an L2 reconstruction loss.

     maxnum       -- dimensionality of the input vector (vocabulary size,
                     presumably -- TODO confirm against callers)
     reduced_dims -- size of the compressed representation
     """
     # NOTE(review): threshold is set but not read here; presumably used by
     # training/convergence code elsewhere in the class -- confirm.
     self.threshold = 1e-2
     # Placeholder input; callers presumably overwrite self.input's value.
     dummyword = np.zeros((maxnum, 1))
     # Small random init (scale 0.1) for encoder and decoder weights.
     W1 = np.random.randn(reduced_dims, maxnum) * 0.1
     W2 = np.random.randn(maxnum, reduced_dims) * 0.1
     self.input = ky.Parameter(dummyword)
     self.W1 = ky.Parameter(W1)
     self.W2 = ky.Parameter(W2)
     # Encoder: (reduced_dims, maxnum) x (maxnum, 1) -> (reduced_dims, 1).
     self.output = ky.MatMult(self.W1, self.input)
     # Decoder reconstructs the input from the code.
     self.recons = ky.MatMult(self.W2, self.output)
     # Scalar L2 reconstruction loss.
     self.loss = ky.MatSum(ky.L2Loss(self.recons, self.input))
     #self.totloss = ky.MatAdd(self.loss,ky.L2Norm(self.W2,weight=1e-2),ky.L2Norm(self.W1,weight = 1e-2))
     self.totloss = self.loss
Пример #28
0
def test_matrix_grad():
    """Gradient of a full matrix sum is all ones with the matrix's shape.

    NOTE(review): another function named test_matrix_grad appears earlier in
    this file; if both live in one module this later def shadows that one —
    confirm they belong to separate files.
    """
    npr.seed(8)

    for ii in xrange(NUM_TRIALS):
        npX = npr.randn(10, 20)
        X = kayak.Parameter(npX)
        Y = kayak.MatSum(X)

        # Verify the value.
        Y.value
        assert Y.grad(X).shape == npX.shape
        # d(sum X)/dX is exactly ones.
        assert np.all(close_float(Y.grad(X), np.ones(npX.shape)))
        assert kayak.util.checkgrad(X, Y) < MAX_GRAD_DIFF
Пример #29
0
def check_tensormult(A_shape, B_shape, axes):
    """Check TensorMult value and gradients against np.tensordot.

    A_shape, B_shape -- shapes of the two random operands
    axes             -- contraction axes, passed through to both TensorMult
                        and np.tensordot

    Multiplying elementwise by a random D before summing ensures the loss
    depends on every entry of C, so the gradient check is non-degenerate.
    """

    np_A = npr.randn(*A_shape)
    np_B = npr.randn(*B_shape)
    A = kayak.Parameter(np_A)
    B = kayak.Parameter(np_B)
    C = kayak.TensorMult(A, B, axes)
    D = kayak.Parameter(npr.randn(*C.shape))
    L = kayak.MatSum(kayak.ElemMult(C, D))

    assert np.all(close_float(C.value, np.tensordot(np_A, np_B, axes)))
    assert kayak.util.checkgrad(A, L) < MAX_GRAD_DIFF
    assert kayak.util.checkgrad(B, L) < MAX_GRAD_DIFF
Пример #30
0
def test_matmult_grad_2():
    """Gradient through MatMult(A, A) — the same node on both sides."""
    npr.seed(4)

    for trial in xrange(NUM_TRIALS):

        raw = npr.randn(5, 5)
        A = kayak.Parameter(raw)
        product = kayak.MatMult(A, A)
        total = kayak.MatSum(product)

        total.value  # force a forward pass before asking for gradients
        # Both operands are A, so the two gradient paths must accumulate.
        assert total.grad(A).shape == (5, 5)
        assert kayak.util.checkgrad(A, total) < MAX_GRAD_DIFF