Ejemplo n.º 1
0
def test_elemmult_grad_8():
    """Gradient check for ElemMult with a repeated operand.

    Builds E = MatSum(A * A) and verifies that E.grad(A) has A's shape
    and passes a finite-difference gradient check — the gradient must
    accumulate contributions from both occurrences of A.
    """
    npr.seed(15)

    # `range` instead of Python-2-only `xrange`: identical iteration,
    # works under both Python 2 and 3.
    for ii in range(NUM_TRIALS):

        np_A = npr.randn(5, 6)
        A = kayak.Parameter(np_A)
        D = kayak.ElemMult(A, A)  # same node used twice on purpose
        E = kayak.MatSum(D)

        assert E.grad(A).shape == np_A.shape
        assert kayak.util.checkgrad(A, E) < MAX_GRAD_DIFF
Ejemplo n.º 2
0
def test_elemmult_values_6():
    """Value check for ElemMult with broadcasting and a repeated operand.

    A is (1,1) and B is (5,6); ElemMult(A, B, A) must broadcast to (5,6)
    and equal A**2 * B elementwise.
    """
    npr.seed(7)

    # `range` instead of Python-2-only `xrange` (same iteration on 2 and 3).
    for ii in range(NUM_TRIALS):

        np_A = npr.randn(1, 1)
        np_B = npr.randn(5, 6)
        A = kayak.Parameter(np_A)
        B = kayak.Parameter(np_B)
        D = kayak.ElemMult(A, B, A)  # A twice -> squared contribution

        assert D.shape == (5, 6)
        assert np.all(close_float(D.value, np_A**2 * np_B))
Ejemplo n.º 3
0
def test_elemmult_values_1():
    """Value check for the basic two-operand ElemMult.

    With same-shaped (5,6) operands, ElemMult(A, B) must preserve the
    shape and equal the elementwise product np_A * np_B.
    """
    npr.seed(1)

    # `range` instead of Python-2-only `xrange` (same iteration on 2 and 3).
    for ii in range(NUM_TRIALS):

        np_A = npr.randn(5, 6)
        np_B = npr.randn(5, 6)
        A = kayak.Parameter(np_A)
        B = kayak.Parameter(np_B)
        C = kayak.ElemMult(A, B)

        assert C.shape == np_A.shape
        assert np.all(close_float(C.value, np_A * np_B))
Ejemplo n.º 4
0
def check_tensormult(A_shape, B_shape, axes):
    """Verify kayak.TensorMult for one shape/axes configuration.

    Compares the forward value against np.tensordot and runs
    finite-difference gradient checks on both operands through a scalar
    loss built from a random weighting of the product.
    """
    a_val = npr.randn(*A_shape)
    b_val = npr.randn(*B_shape)
    A = kayak.Parameter(a_val)
    B = kayak.Parameter(b_val)
    prod = kayak.TensorMult(A, B, axes)
    # Random weights make the scalar loss sensitive to every output entry.
    weights = kayak.Parameter(npr.randn(*prod.shape))
    loss = kayak.MatSum(kayak.ElemMult(prod, weights))

    assert np.all(close_float(prod.value, np.tensordot(a_val, b_val, axes)))
    assert kayak.util.checkgrad(A, loss) < MAX_GRAD_DIFF
    assert kayak.util.checkgrad(B, loss) < MAX_GRAD_DIFF
Ejemplo n.º 5
0
def test_elemmult_grad_1():
    """Gradient check for the basic two-operand ElemMult.

    D = MatSum(A * B) with same-shaped (5,6) operands; both grad(A) and
    grad(B) must pass a finite-difference check.
    """
    npr.seed(8)

    # `range` instead of Python-2-only `xrange` (same iteration on 2 and 3).
    for ii in range(NUM_TRIALS):

        np_A = npr.randn(5, 6)
        np_B = npr.randn(5, 6)
        A = kayak.Parameter(np_A)
        B = kayak.Parameter(np_B)
        C = kayak.ElemMult(A, B)
        D = kayak.MatSum(C)

        # Force a forward pass before the gradient checks.
        D.value
        assert kayak.util.checkgrad(A, D) < MAX_GRAD_DIFF
        assert kayak.util.checkgrad(B, D) < MAX_GRAD_DIFF
Ejemplo n.º 6
0
def test_elemmult_values_5():
    """Value check for three-operand ElemMult with full broadcasting.

    Operands of shape (5,1), (1,6) and (1,1) must broadcast to a (5,6)
    result equal to np_A * np_B * np_C.
    """
    npr.seed(2)

    # `range` instead of Python-2-only `xrange` (same iteration on 2 and 3).
    for ii in range(NUM_TRIALS):

        np_A = npr.randn(5, 1)
        np_B = npr.randn(1, 6)
        np_C = npr.randn(1, 1)
        A = kayak.Parameter(np_A)
        B = kayak.Parameter(np_B)
        C = kayak.Parameter(np_C)
        D = kayak.ElemMult(A, B, C)

        assert D.shape == (5, 6)
        assert np.all(close_float(D.value, np_A * np_B * np_C))
Ejemplo n.º 7
0
def test_elemmult_grad_7():
    """Gradient check for ElemMult with broadcasting and a repeated operand.

    E = MatSum(A * B * A) where A is (5,6) and B is (1,1); both gradients
    must keep their parameter's shape (B's gradient is reduced back to
    (1,1)) and pass finite-difference checks.
    """
    npr.seed(14)

    # `range` instead of Python-2-only `xrange` (same iteration on 2 and 3).
    for ii in range(NUM_TRIALS):

        np_A = npr.randn(5, 6)
        np_B = npr.randn(1, 1)
        A = kayak.Parameter(np_A)
        B = kayak.Parameter(np_B)
        D = kayak.ElemMult(A, B, A)  # A appears twice
        E = kayak.MatSum(D)

        # Force a forward pass before the gradient checks.
        E.value
        assert E.grad(A).shape == np_A.shape
        assert E.grad(B).shape == np_B.shape
        assert kayak.util.checkgrad(A, E) < MAX_GRAD_DIFF
        assert kayak.util.checkgrad(B, E) < MAX_GRAD_DIFF
Ejemplo n.º 8
0
def test_matmult_grad_vect_mat():
    """Gradient check for MatMult with a vector left operand.

    D = A @ B with A of shape (6,) and B of shape (6,7) yields a (7,)
    vector, weighted elementwise by C into a scalar loss; gradients must
    keep the operands' shapes and pass finite-difference checks.
    """
    npr.seed(5)

    # `range` instead of Python-2-only `xrange` (same iteration on 2 and 3).
    for ii in range(NUM_TRIALS):

        np_A = npr.randn(6, )
        np_B = npr.randn(6, 7)
        np_C = npr.randn(7, )
        A = kayak.Parameter(np_A)
        B = kayak.Parameter(np_B)
        C = kayak.Parameter(np_C)
        D = kayak.MatMult(A, B)
        E = kayak.MatSum(kayak.ElemMult(C, D))

        assert E.grad(A).shape == (6, )
        assert E.grad(B).shape == (6, 7)
        assert kayak.util.checkgrad(A, E) < MAX_GRAD_DIFF
        assert kayak.util.checkgrad(B, E) < MAX_GRAD_DIFF
Ejemplo n.º 9
0
def initial_latent_trace(body, inpt, voltage, t):
    """Fit an initial latent channel trace by gradient descent on a kayak graph.

    Builds a generative model of the ionic current implied by ``voltage``
    (via I = C * dV/dt) from the channels in ``body.children``, with a
    GP-style smoothness penalty on the latent trace, and optimizes the
    latent parameters with momentum-like gradient steps.

    Returns the sigmoid-squashed latent trace (a (D, T) array of open
    fractions in (0, 1)).

    NOTE(review): the parameter ``t`` is never used in this body.
    """
    # Target current from the membrane equation I = C * dV/dt
    # (np.diff gives one fewer sample than voltage, hence T below).
    I_true = np.diff(voltage) * body.C
    T = I_true.shape[0]
    # Diagonal matrix of per-channel maximal conductances.
    gs = np.diag([c.g for c in body.children])
    # Total latent dimensionality across all channels.
    D = int(sum([c.D for c in body.children]))

    # One copy of the voltage trace per channel, truncated to T samples.
    driving_voltage = np.dot(np.ones((len(body.children), 1)),
                             np.array([voltage]))[:, :T]

    # NOTE(review): child_i is initialized but never incremented, so every
    # row is offset by children[0].E — looks like it was meant to advance
    # per-channel as i crosses each child's D. Confirm intent before relying
    # on multi-channel reversal potentials here.
    child_i = 0
    for i in range(D):
        driving_voltage[i, :] = voltage[:T] - body.children[child_i].E

    # Squared-exponential kernel over time indices: K.T + K yields |i - j|,
    # so K[i, j] = exp(-(i - j)**2 / 2).
    K = np.array([[max(i - j, 0) for i in range(T)] for j in range(T)])
    K = K.T + K
    K = -1 * (K**2)
    K = np.exp(K / 2)

    # Cholesky with jitter for numerical stability; Linv is inv(L.T), used
    # to form K^{-1} = Linv.T @ Linv below.
    L = np.linalg.cholesky(K + (1e-7) * np.eye(K.shape[0]))
    Linv = scipy.linalg.solve_triangular(L.transpose(),
                                         np.identity(K.shape[0]))

    # Optimization hyperparameters.
    N = 1
    batch_size = 5000
    learn = .0000001
    runs = 10000

    batcher = kayak.Batcher(batch_size, N)

    inputs = kayak.Parameter(driving_voltage)
    targets = kayak.Targets(np.array([I_true]), batcher)

    g_params = kayak.Parameter(gs)
    I_input = kayak.Parameter(inpt.T[:, :T])
    Kinv = kayak.Parameter(np.dot(Linv.transpose(), Linv))

    # Latent trace initialized from standard normals; the model sees it
    # through a logistic squashing (values in (0, 1)).
    initial_latent = np.random.randn(D, T)
    latent_trace = kayak.Parameter(initial_latent)
    sigmoid = kayak.Logistic(latent_trace)

    # The constant selector matrices below assume exactly 3 latent rows
    # (D == 3): row 0 * row 1 forms a quadratic term, row 2 is linear.
    quadratic = kayak.ElemMult(
        sigmoid,
        kayak.MatMult(
            kayak.Parameter(np.array([[0, 1, 0], [0, 0, 0], [0, 0, 0]])),
            sigmoid))
    # Route the quadratic product into the second channel's row.
    three_quadratic = kayak.MatMult(
        kayak.Parameter(np.array([[0, 0, 0], [1, 0, 0], [0, 0, 0]])),
        quadratic)
    linear = kayak.MatMult(
        kayak.Parameter(np.array([[0, 0, 0], [0, 0, 0], [0, 0, 1]])), sigmoid)

    # Baseline open fractions (all ones) plus the modeled terms.
    leak_open = kayak.Parameter(np.vstack((np.ones((1, T)), np.ones((2, T)))))
    open_fractions = kayak.ElemAdd(leak_open,
                                   kayak.ElemAdd(three_quadratic, linear))

    # Per-channel current: g * driving voltage * open fraction.
    I_channels = kayak.ElemMult(kayak.MatMult(g_params, inputs),
                                open_fractions)

    # Sum channel currents into the total ionic current.
    I_ionic = kayak.MatMult(kayak.Parameter(np.array([[1, 1, 1]])), I_channels)

    predicted = kayak.MatAdd(I_ionic, I_input)

    # Squared-error data term.
    nll = kayak.ElemPower(predicted - targets, 2)

    # hack_vec picks out the diagonal of the (3,3) matrix below after it is
    # reshaped to length 9 — i.e. trace(latent @ Kinv @ latent.T), the GP
    # smoothness penalty; plus an L2 penalty on the channel currents.
    hack_vec = kayak.Parameter(np.array([1, 0, 0, 0, 1, 0, 0, 0, 1]))
    kyk_loss = kayak.MatSum(nll) + kayak.MatMult(
        kayak.Reshape(
            kayak.MatMult(kayak.MatMult(latent_trace, Kinv),
                          kayak.Transpose(latent_trace)),
            (9, )), hack_vec) + kayak.MatSum(kayak.ElemPower(I_channels, 2))

    # Momentum-style descent: each step adds half of the previous gradient.
    grad = kyk_loss.grad(latent_trace)
    for ii in xrange(runs):
        for batch in batcher:
            loss = kyk_loss.value
            if ii % 100 == 0:
                # Python-2 print statement (file-wide convention).
                print ii, loss, np.sum(np.power(predicted.value - I_true,
                                                2)) / T
            grad = kyk_loss.grad(latent_trace) + .5 * grad
            latent_trace.value -= learn * grad

    return sigmoid.value