Example #1
def test_graph_diamond():
    npr.seed(2)

    N  = 10
    D  = 5
    H1 = 6
    H2 = 7

    X   = kayak.Inputs(npr.randn(N,D))
    W1  = kayak.Parameter(npr.randn(D,H1))
    W2a = kayak.Parameter(npr.randn(H1,H2))
    W2b = kayak.Parameter(npr.randn(H1,H2))
    W3  = kayak.Parameter(npr.randn(H2,1))

    U1 = kayak.SoftReLU(kayak.MatMult(X, W1))
    U2a = kayak.SoftReLU(kayak.MatMult(U1, W2a))
    U2b = kayak.SoftReLU(kayak.MatMult(U1, W2b))
    U3a = kayak.SoftReLU(kayak.MatMult(U2a, W3))
    U3b = kayak.SoftReLU(kayak.MatMult(U2b, W3))
    
    out = kayak.MatSum(kayak.MatAdd(U3a, U3b))

    out.value
    print kayak.util.checkgrad(W1, out)
    print kayak.util.checkgrad(W2a, out)
    print kayak.util.checkgrad(W2b, out)
    print kayak.util.checkgrad(W3, out)
    assert kayak.util.checkgrad(W1, out) < MAX_GRAD_DIFF
    assert kayak.util.checkgrad(W2a, out) < MAX_GRAD_DIFF
    assert kayak.util.checkgrad(W2b, out) < MAX_GRAD_DIFF
    assert kayak.util.checkgrad(W3, out) < MAX_GRAD_DIFF
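
These test snippets rely on shared definitions from the surrounding test module: the imports, the NUM_TRIALS and MAX_GRAD_DIFF constants, and the close_float helper. A minimal sketch of that assumed preamble follows; the specific tolerance values are illustrative guesses, not Kayak's own settings.

import numpy as np
import numpy.random as npr
import kayak

# Assumed test-harness constants (values chosen for illustration).
NUM_TRIALS    = 10      # random restarts per test
MAX_GRAD_DIFF = 1e-7    # tolerance for kayak.util.checkgrad

def close_float(A, B, tol=1e-6):
    # Element-wise approximate equality used by the value tests.
    return np.abs(A - B) < tol
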
Example #2
def test_matadd_values_5():
    npr.seed(5)

    for ii in xrange(NUM_TRIALS):

        np_A = npr.randn(1, 6)
        np_B = npr.randn(5, 1)
        A = kayak.Parameter(np_A)
        B = kayak.Parameter(np_B)
        C = kayak.MatAdd(A, B)

        assert C.shape == (5, 6)
        assert np.all(close_float(C.value, np_A + np_B))
Example #3
def test_matadd_values_7():
    npr.seed(7)

    for ii in xrange(NUM_TRIALS):

        np_A = npr.randn(5, 6)
        np_B = npr.randn(5, 6)
        A = kayak.Parameter(np_A)
        B = kayak.Parameter(np_B)
        D = kayak.MatAdd(A, B, A)

        assert D.shape == (5, 6)
        assert np.all(close_float(D.value, 2 * np_A + np_B))
Example #4
def test_matadd_grad_8():
    npr.seed(15)

    for ii in xrange(NUM_TRIALS):

        np_A = npr.randn(5, 6)
        np_B = npr.randn(5, 6)
        A = kayak.Parameter(np_A)
        D = kayak.MatAdd(A, A)
        E = kayak.MatSum(D)

        E.value
        assert E.grad(A).shape == np_A.shape
        assert kayak.util.checkgrad(A, E) < MAX_GRAD_DIFF
Example #5
def test_graph_dag():
    npr.seed(3)

    num_layers = 7
    num_dims   = 5
    
    for ii in xrange(NUM_TRIALS):
        probs = npr.rand()

        X = kayak.Inputs(npr.randn(25,num_dims))

        wts    = []
        layers = []
        for jj in xrange(num_layers):

            U = kayak.Constant(np.zeros((25,num_dims)))

            if npr.rand() < probs:
                W = kayak.Parameter(0.1*npr.randn(num_dims, num_dims))
                wts.append(W)
                U = kayak.MatAdd( U, kayak.SoftReLU(kayak.MatMult(X, W)) )

            for kk in xrange(jj):
                if npr.rand() < probs:
                    W = kayak.Parameter(0.1*npr.randn(num_dims, num_dims))
                    wts.append(W)
                    U = kayak.MatAdd( U, kayak.SoftReLU(kayak.MatMult(layers[kk], W)) )
            
            layers.append(U)
            
        out = kayak.MatSum(layers[-1])

        out.value
        for jj, wt in enumerate(wts):
            diff = kayak.util.checkgrad(wt, out, 1e-4)
            print diff
            assert diff < 1e-4
Example #6
def test_matadd_values_2():
    npr.seed(2)

    for ii in xrange(NUM_TRIALS):

        np_A = npr.randn(5, 6)
        np_B = npr.randn(5, 6)
        np_C = npr.randn(5, 6)
        A = kayak.Parameter(np_A)
        B = kayak.Parameter(np_B)
        C = kayak.Parameter(np_C)
        D = kayak.MatAdd(A, B, C)

        assert D.shape == np_A.shape
        assert np.all(close_float(D.value, np_A + np_B + np_C))
Example #7
def test_matadd_grad_1():
    npr.seed(8)

    for ii in xrange(NUM_TRIALS):

        np_A = npr.randn(5, 6)
        np_B = npr.randn(5, 6)
        A = kayak.Parameter(np_A)
        B = kayak.Parameter(np_B)
        C = kayak.MatAdd(A, B)
        D = kayak.MatSum(C)

        D.value
        assert kayak.util.checkgrad(A, D) < MAX_GRAD_DIFF
        assert kayak.util.checkgrad(B, D) < MAX_GRAD_DIFF
Example #8
def test_matadd_grad_5():
    npr.seed(12)

    for ii in xrange(NUM_TRIALS):

        np_A = npr.randn(5, 1)
        np_B = npr.randn(1, 6)
        A = kayak.Parameter(np_A)
        B = kayak.Parameter(np_B)
        C = kayak.MatAdd(A, B)
        D = kayak.MatSum(C)

        D.value
        assert D.grad(A).shape == np_A.shape
        assert D.grad(B).shape == np_B.shape
        assert kayak.util.checkgrad(A, D) < MAX_GRAD_DIFF
        assert kayak.util.checkgrad(B, D) < MAX_GRAD_DIFF
Example #9
def test_cache_utility():
    npr.seed(3)

    num_layers = 17
    num_dims   = 3
    
    X = kayak.Inputs(npr.randn(10, num_dims))
    W1 = kayak.Parameter(npr.randn(num_dims, num_dims))
    W2 = kayak.Parameter(npr.randn(num_dims, num_dims))

    Z = kayak.MatMult(X, W1)

    for jj in xrange(num_layers):
        Z = kayak.SoftReLU(kayak.MatAdd(kayak.MatMult(Z, W2),
                                        kayak.MatMult(Z, W2)))

    out = kayak.MatSum(Z)
    assert kayak.util.checkgrad(W1, out) < 1e-4
Example #10
def test_matadd_grad_2():
    npr.seed(9)

    for ii in xrange(NUM_TRIALS):

        np_A = npr.randn(5, 6)
        np_B = npr.randn(5, 6)
        np_C = npr.randn(5, 6)
        A = kayak.Parameter(np_A)
        B = kayak.Parameter(np_B)
        C = kayak.Parameter(np_C)
        D = kayak.MatAdd(A, B, C)
        E = kayak.MatSum(D)

        E.value
        assert kayak.util.checkgrad(A, E) < MAX_GRAD_DIFF
        assert kayak.util.checkgrad(B, E) < MAX_GRAD_DIFF
        assert kayak.util.checkgrad(C, E) < MAX_GRAD_DIFF
Example #11
def train(inputs, targets, batch_size, learn_rate, momentum, l1_weight,
          l2_weight, dropout, improvement_thresh):

    # Create a batcher object.
    batcher = kayak.Batcher(batch_size, inputs.shape[0])

    # Inputs and targets need access to the batcher.
    X = kayak.Inputs(inputs, batcher)
    T = kayak.Targets(targets, batcher)

    # Put some dropout regularization on the inputs
    H = kayak.Dropout(X, dropout)

    # Weights and biases, with random initializations.
    W = kayak.Parameter(0.1 * npr.randn(inputs.shape[1], 10))
    B = kayak.Parameter(0.1 * npr.randn(1, 10))

    # Nothing fancy here: inputs times weights, plus bias, then softmax.
    Y = kayak.LogSoftMax(kayak.ElemAdd(kayak.MatMult(H, W), B))

    # The training loss is negative multinomial log likelihood.
    loss = kayak.MatAdd(kayak.MatSum(kayak.LogMultinomialLoss(Y, T)),
                        kayak.L2Norm(W, l2_weight), kayak.L1Norm(W, l1_weight))

    # Use momentum for the gradient-based optimization.
    mom_grad_W = np.zeros(W.shape)

    best_loss = np.inf
    best_epoch = -1

    # Loop over epochs.
    for epoch in range(100):

        # Track the total loss.
        total_loss = 0.0

        # Loop over batches -- using batcher as iterator.
        for batch in batcher:

            # Draw new random dropouts
            H.draw_new_mask()

            # Compute the loss of this minibatch by asking the Kayak
            # object for its value.
            total_loss += loss.value

            # Now ask the loss for its gradient in terms of the
            # weights and the biases -- the two things we're trying to
            # learn here.
            grad_W = loss.grad(W)
            grad_B = loss.grad(B)

            # Use momentum on the weight gradient.
            mom_grad_W *= momentum
            mom_grad_W += (1.0 - momentum) * grad_W

            # Now make the actual parameter updates.
            W.value -= learn_rate * mom_grad_W
            B.value -= learn_rate * grad_B

        print("Epoch: %d, total loss: %f" % (epoch, total_loss))

        if not np.isfinite(total_loss):
            print("Training diverged. Returning constraint violation.")
            break

        if total_loss < best_loss:
            best_loss = total_loss
            best_epoch = epoch
        else:
            if (epoch - best_epoch) > improvement_thresh:
                print("Has been %d epochs without improvement. Aborting." %
                      (epoch - best_epoch))
                break

    # After we've trained, we return a sugary little function handle
    # that makes things easy.  Basically, what we're doing here is
    # simply replacing the inputs in the above defined graph and then
    # running through it to produce the outputs.
    # The point here is that we wind up with a function
    # handle that can be called with a numpy object and it produces the
    # target values for novel data, using the parameters we just learned.
    def predict(x):
        X.value = x
        H.reinstate_units()
        return Y.value

    return predict
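
A usage sketch for the train function above; the array shapes, hyperparameter values, and the one-hot encoding of the targets are illustrative assumptions, not settings taken from the original script.

# Hypothetical data: 1000 examples, 50 features, 10 one-hot target classes.
inputs  = npr.randn(1000, 50)
targets = np.eye(10)[npr.randint(0, 10, 1000)]

predict = train(inputs, targets,
                batch_size=128, learn_rate=0.01, momentum=0.9,
                l1_weight=1e-4, l2_weight=1e-4, dropout=0.2,
                improvement_thresh=5)

# The returned handle maps new feature rows to class log-probabilities.
test_log_probs = predict(npr.randn(5, 50))
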
Example #12
def initial_latent_trace(body, inpt, voltage, t):
    I_true = np.diff(voltage) * body.C
    T = I_true.shape[0]
    gs = np.diag([c.g for c in body.children])
    D = int(sum([c.D for c in body.children]))

    driving_voltage = np.dot(np.ones((len(body.children), 1)),
                             np.array([voltage]))[:, :T]

    child_i = 0
    for i in range(D):
        driving_voltage[i, :] = voltage[:T] - body.children[child_i].E

    K = np.array([[max(i - j, 0) for i in range(T)] for j in range(T)])
    K = K.T + K
    K = -1 * (K**2)
    K = np.exp(K / 2)

    L = np.linalg.cholesky(K + (1e-7) * np.eye(K.shape[0]))
    Linv = scipy.linalg.solve_triangular(L.transpose(),
                                         np.identity(K.shape[0]))

    N = 1
    batch_size = 5000
    learn = .0000001
    runs = 10000

    batcher = kayak.Batcher(batch_size, N)

    inputs = kayak.Parameter(driving_voltage)
    targets = kayak.Targets(np.array([I_true]), batcher)

    g_params = kayak.Parameter(gs)
    I_input = kayak.Parameter(inpt.T[:, :T])
    Kinv = kayak.Parameter(np.dot(Linv.transpose(), Linv))

    initial_latent = np.random.randn(D, T)
    latent_trace = kayak.Parameter(initial_latent)
    sigmoid = kayak.Logistic(latent_trace)

    quadratic = kayak.ElemMult(
        sigmoid,
        kayak.MatMult(
            kayak.Parameter(np.array([[0, 1, 0], [0, 0, 0], [0, 0, 0]])),
            sigmoid))
    three_quadratic = kayak.MatMult(
        kayak.Parameter(np.array([[0, 0, 0], [1, 0, 0], [0, 0, 0]])),
        quadratic)
    linear = kayak.MatMult(
        kayak.Parameter(np.array([[0, 0, 0], [0, 0, 0], [0, 0, 1]])), sigmoid)

    leak_open = kayak.Parameter(np.vstack((np.ones((1, T)), np.ones((2, T)))))
    open_fractions = kayak.ElemAdd(leak_open,
                                   kayak.ElemAdd(three_quadratic, linear))

    I_channels = kayak.ElemMult(kayak.MatMult(g_params, inputs),
                                open_fractions)

    I_ionic = kayak.MatMult(kayak.Parameter(np.array([[1, 1, 1]])), I_channels)

    predicted = kayak.MatAdd(I_ionic, I_input)

    nll = kayak.ElemPower(predicted - targets, 2)

    hack_vec = kayak.Parameter(np.array([1, 0, 0, 0, 1, 0, 0, 0, 1]))
    kyk_loss = kayak.MatSum(nll) + kayak.MatMult(
        kayak.Reshape(
            kayak.MatMult(kayak.MatMult(latent_trace, Kinv),
                          kayak.Transpose(latent_trace)),
            (9, )), hack_vec) + kayak.MatSum(kayak.ElemPower(I_channels, 2))

    grad = kyk_loss.grad(latent_trace)
    for ii in xrange(runs):
        for batch in batcher:
            loss = kyk_loss.value
            if ii % 100 == 0:
                print ii, loss, np.sum(np.power(predicted.value - I_true,
                                                2)) / T
            grad = kyk_loss.grad(latent_trace) + .5 * grad
            latent_trace.value -= learn * grad

    return sigmoid.value
Example #13
def kayak_mlp(X, y):
    """
    Kayak implementation of an MLP with a ReLU hidden layer and dropout
    """
    # Create a batcher object.
    batcher = kayak.Batcher(batch_size, X.shape[0])

    # count number of rows and columns
    num_examples, num_features = np.shape(X)

    X = kayak.Inputs(X, batcher)
    T = kayak.Targets(y, batcher)

    # ----------------------------- first hidden layer -------------------------------

    # set up weights for our input layer
    # use the same scheme as our numpy mlp
    input_range = 1.0 / num_features**0.5
    weights_1 = kayak.Parameter(0.1 * np.random.randn(X.shape[1], layer1_size))
    bias_1 = kayak.Parameter(0.1 * np.random.randn(1, layer1_size))

    # linear combination of weights and inputs
    hidden_1_input = kayak.ElemAdd(kayak.MatMult(X, weights_1), bias_1)

    # apply activation function to hidden layer
    hidden_1_activation = kayak.HardReLU(hidden_1_input)

    # apply a dropout for regularization
    hidden_1_out = kayak.Dropout(hidden_1_activation,
                                 layer1_dropout,
                                 batcher=batcher)

    # ----------------------------- output layer -----------------------------------

    weights_out = kayak.Parameter(0.1 * np.random.randn(layer1_size, 9))
    bias_out = kayak.Parameter(0.1 * np.random.randn(1, 9))

    # linear combination of layer2 output and output weights
    out = kayak.ElemAdd(kayak.MatMult(hidden_1_out, weights_out), bias_out)

    # apply activation function to output
    yhat = kayak.SoftMax(out)

    # ----------------------------- loss function -----------------------------------

    loss = kayak.MatAdd(kayak.MatSum(kayak.L2Loss(yhat, T)),
                        kayak.L2Norm(weights_1, layer1_l2))

    # Use momentum for the gradient-based optimization.
    mom_grad_W1 = np.zeros(weights_1.shape)
    mom_grad_W2 = np.zeros(weights_out.shape)

    # Loop over epochs.
    plot_loss = np.ones((iterations, 2))
    for epoch in xrange(iterations):

        # Track the total loss.
        total_loss = 0.0

        for batch in batcher:
            # Compute the loss of this minibatch by asking the Kayak
            # object for its value.
            total_loss += loss.value

            # Now ask the loss for its gradient in terms of the
            # weights and the biases -- the two things we're trying to
            # learn here.
            grad_W1 = loss.grad(weights_1)
            grad_B1 = loss.grad(bias_1)
            grad_W2 = loss.grad(weights_out)
            grad_B2 = loss.grad(bias_out)

            # Use momentum on the weight gradients.
            mom_grad_W1 = momentum * mom_grad_W1 + (1.0 - momentum) * grad_W1
            mom_grad_W2 = momentum * mom_grad_W2 + (1.0 - momentum) * grad_W2

            # Now make the actual parameter updates.
            weights_1.value -= learn_rate * mom_grad_W1
            bias_1.value -= learn_rate * grad_B1
            weights_out.value -= learn_rate * mom_grad_W2
            bias_out.value -= learn_rate * grad_B2

        # save values into a table to plot the learning curve at the end of training
        plot_loss[epoch, 0] = epoch
        plot_loss[epoch, 1] = total_loss
        print epoch, total_loss

    #pyplot.plot(plot_loss[:,0], plot_loss[:,1], linewidth=2.0)
    #pyplot.show()

    def compute_predictions(x):
        X.data = x
        batcher.test_mode()
        return yhat.value

    return compute_predictions
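
kayak_mlp reads several hyperparameters from module scope (batch_size, layer1_size, layer1_dropout, layer1_l2, momentum, learn_rate, iterations). A sketch of that assumed configuration is below; the values are placeholders for illustration, not the ones used in the original experiment.

# Assumed module-level hyperparameters for kayak_mlp.
batch_size     = 256
layer1_size    = 500
layer1_dropout = 0.25
layer1_l2      = 1e-4
momentum       = 0.9
learn_rate     = 0.01
iterations     = 20
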
Example #14
def train(inputs, targets, batch_size, learn_rate, momentum, l1_weight, l2_weight, dropout):

    # Create a batcher object.
    batcher = kayak.Batcher(batch_size, inputs.shape[0])

    # Inputs and targets need access to the batcher.
    X    = kayak.Inputs(inputs, batcher)
    T    = kayak.Targets(targets, batcher)

    # Weights and biases, with random initializations.
    W    = kayak.Parameter( 0.1*npr.randn( inputs.shape[1], 10 ))
    B    = kayak.Parameter( 0.1*npr.randn(1,10) )

    # Nothing fancy here: inputs times weights, plus bias, then softmax.
    dropout_layer = kayak.Dropout(X, dropout, batcher=batcher)
    Y    = kayak.LogSoftMax( kayak.ElemAdd( kayak.MatMult(dropout_layer, W), B ) )

    # The training loss is negative multinomial log likelihood.
    loss = kayak.MatAdd(kayak.MatSum(kayak.LogMultinomialLoss(Y, T)),
                        kayak.L2Norm(W, l2_weight),
                        kayak.L1Norm(W, l1_weight))

    # Use momentum for the gradient-based optimization.
    mom_grad_W = np.zeros(W.shape)

    # Loop over epochs.
    for epoch in xrange(10):

        # Track the total loss and the overall gradient.
        total_loss   = 0.0
        total_grad_W = np.zeros(W.shape)

        # Loop over batches -- using batcher as iterator.
        for batch in batcher:
            # Compute the loss of this minibatch by asking the Kayak
            # object for its value.
            total_loss += loss.value

            # Now ask the loss for its gradient in terms of the
            # weights and the biases -- the two things we're trying to
            # learn here.
            grad_W = loss.grad(W)
            grad_B = loss.grad(B)
            
            # Use momentum on the weight gradient.
            mom_grad_W = momentum*mom_grad_W + (1.0-momentum)*grad_W

            # Now make the actual parameter updates.
            W.value -= learn_rate * mom_grad_W
            B.value -= learn_rate * grad_B

            # Keep track of the gradient to see if we're converging.
            total_grad_W += grad_W

        #print epoch, total_loss, np.sum(total_grad_W**2)

    # After we've trained, we return a sugary little function handle
    # that makes things easy.  Basically, what we're doing here is
    # pointing the Kayak input object 'X' (the features used here for
    # logistic regression) at new data, switching the batcher into test
    # mode, and then asking the output object (not the loss!) for its
    # value.  The point here is that we wind up with a function handle
    # that can be called with a numpy object and it produces the target
    # values for novel data, using the parameters we just learned.
    
    def compute_predictions(x):
        X.data = x
        batcher.test_mode()
        return Y.value

    return compute_predictions