def test_transpose_1():
    """A 2-D Parameter's Transpose has swapped shape and mirrored entries."""
    npr.seed(1)
    data = npr.randn(5, 10)
    param = kayak.Parameter(data)
    flipped = kayak.Transpose(param)
    flipped.value  # force evaluation of the node
    assert flipped.shape == (10, 5)
    for row in xrange(data.shape[0]):
        for col in xrange(data.shape[1]):
            assert data[row, col] == flipped.value[col, row]
def test_transpose_3():
    """Gradient flows correctly through Transpose inside a MatMult/MatSum graph."""
    npr.seed(3)
    data = npr.randn(5, 10)
    param = kayak.Parameter(data)
    flipped = kayak.Transpose(param)
    weights = kayak.Parameter(npr.randn(5, 5))
    product = kayak.MatMult(flipped, weights)
    out = kayak.MatSum(product)
    out.value  # force evaluation before querying gradients
    assert out.grad(param).shape == data.shape
    assert kayak.util.checkgrad(param, out) < MAX_GRAD_DIFF
def test_transpose_2():
    """Transpose of a 3-D Parameter reverses all axes, element by element."""
    npr.seed(2)
    data = npr.randn(5, 10, 15)
    param = kayak.Parameter(data)
    flipped = kayak.Transpose(param)
    flipped.value  # force evaluation of the node
    assert flipped.shape == (15, 10, 5)
    dim0, dim1, dim2 = data.shape
    for aa in xrange(dim0):
        for bb in xrange(dim1):
            for cc in xrange(dim2):
                assert data[aa, bb, cc] == flipped.value[cc, bb, aa]
def initial_latent_trace(body, inpt, voltage, t):
    """Fit an initial latent trace for a compartment `body` by gradient descent.

    Builds a kayak computation graph that models ionic currents as
    conductances times sigmoid-squashed latent open-fractions, penalized by a
    GP-style smoothness prior (via the inverse kernel Kinv), and optimizes the
    latent trace with momentum-like gradient descent.

    NOTE(review): parameter `t` is never used in this body — confirm whether
    it can be dropped or was meant to bound T.
    """
    # Target current: capacitance times dV/dt (finite difference of voltage).
    I_true = np.diff(voltage) * body.C
    T = I_true.shape[0]
    # Diagonal matrix of per-channel conductances.
    gs = np.diag([c.g for c in body.children])
    D = int(sum([c.D for c in body.children]))
    # One copy of the voltage trace per child channel, truncated to T samples.
    driving_voltage = np.dot(np.ones((len(body.children), 1)), np.array([voltage]))[:, :T]
    child_i = 0
    # NOTE(review): child_i is never incremented, so every row subtracts
    # children[0].E; also the loop runs over D rows while driving_voltage has
    # len(body.children) rows. Confirm intent — likely a latent bug.
    for i in range(D):
        driving_voltage[i, :] = voltage[:T] - body.children[child_i].E
    # Build a squared-exponential-style kernel over time indices.
    # K[i, j] starts as |i - j| (max(i-j,0) transposed-plus-itself), then
    # is mapped through exp(-|i-j|^2 / 2).
    K = np.array([[max(i - j, 0) for i in range(T)] for j in range(T)])
    K = K.T + K
    K = -1 * (K**2)
    K = np.exp(K / 2)
    # Cholesky with jitter for numerical stability; Linv gives K^{-1} factor.
    L = np.linalg.cholesky(K + (1e-7) * np.eye(K.shape[0]))
    Linv = scipy.linalg.solve_triangular(L.transpose(), np.identity(K.shape[0]))
    # Optimization hyperparameters.
    N = 1
    batch_size = 5000
    learn = .0000001
    runs = 10000
    batcher = kayak.Batcher(batch_size, N)
    inputs = kayak.Parameter(driving_voltage)
    targets = kayak.Targets(np.array([I_true]), batcher)
    g_params = kayak.Parameter(gs)
    I_input = kayak.Parameter(inpt.T[:, :T])
    # Precomputed inverse kernel K^{-1} = Linv^T Linv, used in the prior term.
    Kinv = kayak.Parameter(np.dot(Linv.transpose(), Linv))
    initial_latent = np.random.randn(D, T)
    latent_trace = kayak.Parameter(initial_latent)
    # Squash latents into (0, 1) open-fractions.
    sigmoid = kayak.Logistic(latent_trace)
    # The fixed 3x3 selector matrices below route rows of `sigmoid` into
    # quadratic / linear gating terms; assumes D == 3 — TODO confirm.
    quadratic = kayak.ElemMult(
        sigmoid,
        kayak.MatMult(
            kayak.Parameter(np.array([[0, 1, 0], [0, 0, 0], [0, 0, 0]])),
            sigmoid))
    three_quadratic = kayak.MatMult(
        kayak.Parameter(np.array([[0, 0, 0], [1, 0, 0], [0, 0, 0]])),
        quadratic)
    linear = kayak.MatMult(
        kayak.Parameter(np.array([[0, 0, 0], [0, 0, 0], [0, 0, 1]])),
        sigmoid)
    # Constant offsets added to each channel's open fraction.
    leak_open = kayak.Parameter(np.vstack((np.ones((1, T)), np.ones((2, T)))))
    open_fractions = kayak.ElemAdd(leak_open, kayak.ElemAdd(three_quadratic, linear))
    # Per-channel currents: conductance * driving voltage * open fraction.
    I_channels = kayak.ElemMult(kayak.MatMult(g_params, inputs), open_fractions)
    # Sum channel currents into total ionic current.
    I_ionic = kayak.MatMult(kayak.Parameter(np.array([[1, 1, 1]])), I_channels)
    predicted = kayak.MatAdd(I_ionic, I_input)
    # Squared-error data term (elementwise; summed below).
    nll = kayak.ElemPower(predicted - targets, 2)
    # hack_vec picks out the diagonal of the flattened 3x3 latent*Kinv*latent^T
    # matrix, i.e. the trace — implements the GP prior penalty.
    hack_vec = kayak.Parameter(np.array([1, 0, 0, 0, 1, 0, 0, 0, 1]))
    kyk_loss = kayak.MatSum(nll) + kayak.MatMult(
        kayak.Reshape(
            kayak.MatMult(kayak.MatMult(latent_trace, Kinv),
                          kayak.Transpose(latent_trace)),
            (9, )),
        hack_vec) + kayak.MatSum(kayak.ElemPower(I_channels, 2))
    grad = kyk_loss.grad(latent_trace)
    # Gradient descent with a 0.5-decay momentum-like accumulation on grad.
    for ii in xrange(runs):
        for batch in batcher:
            loss = kyk_loss.value
            if ii % 100 == 0:
                print ii, loss, np.sum(np.power(predicted.value - I_true, 2)) / T
            grad = kyk_loss.grad(latent_trace) + .5 * grad
            latent_trace.value -= learn * grad
    # Return the squashed (0, 1) latent trace, not the raw latents.
    return sigmoid.value