Example #1
# Shared imports assumed by every snippet on this page; NUM_TRIALS,
# MAX_GRAD_DIFF, and close_float come from kayak's test suite.
import numpy as np
import numpy.random as npr
import scipy.linalg
import kayak
from matplotlib import pyplot
from nose.tools import assert_less

def test_logistic_values():
    npr.seed(1)

    for ii in range(NUM_TRIALS):
        np_X = npr.randn(6, 5)
        X = kayak.Parameter(np_X)
        Y = kayak.Logistic(X)

        assert np.all(close_float(1.0 / (1.0 + np.exp(-np_X)), Y.value))
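close_float is defined elsewhere in the test suite; a minimal stand-in, assuming an elementwise absolute-tolerance comparison (the tolerance value is a guess, not kayak's actual choice):

def close_float(a, b, tol=1e-6):
    # Hypothetical helper: elementwise comparison within a small tolerance.
    return np.abs(a - b) < tol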
Example #2
def test_logistic_grad():
    npr.seed(2)

    for ii in range(NUM_TRIALS):
        np_X = npr.randn(6, 5)
        X = kayak.Parameter(np_X)
        Y = kayak.Logistic(X)
        Z = kayak.MatSum(Y)

        Z.value  # force a forward pass before asking for gradients
        # The sigmoid is monotonically increasing, so every partial
        # derivative of its sum must be non-negative.
        assert np.all(Z.grad(X) >= 0.0)
        assert_less(kayak.util.checkgrad(X, Z), MAX_GRAD_DIFF)
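kayak.util.checkgrad compares the analytic gradient with a finite-difference estimate and returns their discrepancy. A minimal sketch of the idea only (fd_check, the step size, and the number of probes are assumptions, not kayak's actual implementation):

def fd_check(X, Z, eps=1e-4, trials=10):
    # Compare Z.grad(X) against central finite differences at a few
    # randomly chosen entries of X.
    analytic = Z.grad(X)
    base = X.value.copy()
    worst = 0.0
    for _ in range(trials):
        idx = tuple(npr.randint(d) for d in base.shape)
        bumped = base.copy()
        bumped[idx] += eps
        X.value = bumped        # reassigning the value invalidates the cache
        f_plus = Z.value
        bumped = base.copy()
        bumped[idx] -= eps
        X.value = bumped
        f_minus = Z.value
        numeric = (f_plus - f_minus) / (2.0 * eps)
        worst = max(worst, abs(numeric - analytic[idx]))
    X.value = base              # restore the original parameter values
    return worst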
Example #3
def initial_latent_trace(body, inpt, voltage, t):
    # Capacitive current implied by the voltage differences: I = C * dV.
    I_true = np.diff(voltage) * body.C
    T = I_true.shape[0]
    gs = np.diag([c.g for c in body.children])
    D = int(sum([c.D for c in body.children]))

    driving_voltage = np.dot(np.ones((len(body.children), 1)),
                             np.array([voltage]))[:, :T]

    # One driving-voltage row per channel: V - E_i. The original loop kept
    # child_i fixed at 0; indexing by i assumes one latent dim per channel.
    for i in range(D):
        driving_voltage[i, :] = voltage[:T] - body.children[i].E

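    # Squared-exponential (RBF) kernel over time indices: after the three
    # transforms below, K[i, j] = exp(-|i - j|**2 / 2).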
    K = np.array([[max(i - j, 0) for i in range(T)] for j in range(T)])
    K = K.T + K
    K = -1 * (K**2)
    K = np.exp(K / 2)

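    # Invert K via a jittered Cholesky factor; Kinv = Linv.T . Linv feeds
    # the Gaussian-process smoothness penalty in the loss below.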
    L = np.linalg.cholesky(K + (1e-7) * np.eye(K.shape[0]))
    Linv = scipy.linalg.solve_triangular(L.transpose(),
                                         np.identity(K.shape[0]))

    N = 1              # the whole trace is one "example", so each run is one batch
    batch_size = 5000
    learn = 1e-7       # gradient-descent step size
    runs = 10000

    batcher = kayak.Batcher(batch_size, N)

    inputs = kayak.Parameter(driving_voltage)
    targets = kayak.Targets(np.array([I_true]), batcher)

    g_params = kayak.Parameter(gs)
    I_input = kayak.Parameter(inpt.T[:, :T])
    Kinv = kayak.Parameter(np.dot(Linv.transpose(), Linv))

    initial_latent = np.random.randn(D, T)
    latent_trace = kayak.Parameter(initial_latent)
    sigmoid = kayak.Logistic(latent_trace)

    quadratic = kayak.ElemMult(
        sigmoid,
        kayak.MatMult(
            kayak.Parameter(np.array([[0, 1, 0], [0, 0, 0], [0, 0, 0]])),
            sigmoid))
    three_quadratic = kayak.MatMult(
        kayak.Parameter(np.array([[0, 0, 0], [1, 0, 0], [0, 0, 0]])),
        quadratic)
    linear = kayak.MatMult(
        kayak.Parameter(np.array([[0, 0, 0], [0, 0, 0], [0, 0, 1]])), sigmoid)
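    # The constant selector matrices route rows of `sigmoid`: row 1 of
    # three_quadratic carries sigmoid[0] * sigmoid[1], row 2 of linear
    # carries sigmoid[2], and row 0 is left for the ungated leak channel.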

    leak_open = kayak.Parameter(np.ones((3, T)))
    open_fractions = kayak.ElemAdd(leak_open,
                                   kayak.ElemAdd(three_quadratic, linear))

    I_channels = kayak.ElemMult(kayak.MatMult(g_params, inputs),
                                open_fractions)

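    # Sum the three per-channel currents into a single ionic current.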
    I_ionic = kayak.MatMult(kayak.Parameter(np.array([[1, 1, 1]])), I_channels)

    predicted = kayak.MatAdd(I_ionic, I_input)

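    # Elementwise squared error between the predicted and observed currents.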
    nll = kayak.ElemPower(predicted - targets, 2)

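    # hack_vec selects the diagonal of the flattened 3x3 matrix below, so the
    # middle loss term equals trace(latent_trace . Kinv . latent_trace^T), a
    # Gaussian-process smoothness penalty on each latent row.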
    hack_vec = kayak.Parameter(np.array([1, 0, 0, 0, 1, 0, 0, 0, 1]))
    kyk_loss = kayak.MatSum(nll) + kayak.MatMult(
        kayak.Reshape(
            kayak.MatMult(kayak.MatMult(latent_trace, Kinv),
                          kayak.Transpose(latent_trace)),
            (9, )), hack_vec) + kayak.MatSum(kayak.ElemPower(I_channels, 2))

    # Gradient descent; each step mixes in half of the running gradient,
    # a crude form of momentum.
    grad = kyk_loss.grad(latent_trace)
    for ii in range(runs):
        for batch in batcher:
            loss = kyk_loss.value
            if ii % 100 == 0:
                print(ii, loss, np.sum(np.power(predicted.value - I_true,
                                                2)) / T)
            grad = kyk_loss.grad(latent_trace) + .5 * grad
            latent_trace.value -= learn * grad

    return sigmoid.value
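As a quick sanity check on the hack_vec construction: flattening a 3x3 matrix row-major and dotting it with that vector picks out the diagonal entries, i.e. the trace. A standalone NumPy illustration:

A = npr.randn(3, 3)
hack = np.array([1, 0, 0, 0, 1, 0, 0, 0, 1])
assert np.isclose(np.dot(A.reshape(9), hack), np.trace(A))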
Example #4
def kayak_mlp(X, y):
    """
    Kayak implementation of an MLP with ReLU hidden layers and dropout.

    Assumes module-level hyperparameters: batch_size, layer1_size,
    layer2_size, layer1_dropout, layer2_dropout, layer1_l2, layer2_l2,
    momentum, learn_rate, and iterations.
    """
    # Create a batcher object.
    batcher = kayak.Batcher(batch_size, X.shape[0])

    # Count the number of rows and columns.
    num_examples, num_features = np.shape(X)

    X = kayak.Inputs(X, batcher)
    T = kayak.Targets(y, batcher)

    # ----------------------------- first hidden layer -------------------------------

    # Set up weights for our input layer, using the same scheme as our
    # numpy mlp. Note: input_range is never used below; the weight
    # initialization uses a fixed 0.1 scale.
    input_range = 1.0 / num_features**0.5
    weights_1 = kayak.Parameter(0.1 * np.random.randn(X.shape[1], layer1_size))
    bias_1 = kayak.Parameter(0.1 * np.random.randn(1, layer1_size))

    # linear combination of weights and inputs
    hidden_1_input = kayak.ElemAdd(kayak.MatMult(X, weights_1), bias_1)

    # apply activation function to hidden layer
    hidden_1_activation = kayak.HardReLU(hidden_1_input)

    # apply a dropout for regularization
    hidden_1_out = kayak.Dropout(hidden_1_activation,
                                 layer1_dropout,
                                 batcher=batcher)

    # ----------------------------- second hidden layer -----------------------------

    # set up weights
    weights_2 = kayak.Parameter(0.1 *
                                np.random.randn(layer1_size, layer2_size))
    bias_2 = kayak.Parameter(0.1 * np.random.randn(1, layer2_size))

    # linear combination of weights and layer1 output
    hidden_2_input = kayak.ElemAdd(kayak.MatMult(hidden_1_out, weights_2),
                                   bias_2)

    # apply activation function to hidden layer
    hidden_2_activation = kayak.HardReLU(hidden_2_input)

    # apply a dropout for regularization
    hidden_2_out = kayak.Dropout(hidden_2_activation,
                                 layer2_dropout,
                                 batcher=batcher)

    # ----------------------------- output layer -----------------------------------

    weights_out = kayak.Parameter(0.1 * np.random.randn(layer2_size, 9))
    bias_out = kayak.Parameter(0.1 * np.random.randn(1, 9))

    # linear combination of layer2 output and output weights
    out = kayak.ElemAdd(kayak.MatMult(hidden_2_out, weights_out), bias_out)

    # apply activation function to output
    yhat = kayak.Logistic(out)

    # ----------------------------- loss function -----------------------------------

    loss = kayak.MatAdd(kayak.MatSum(kayak.L2Loss(yhat, T)),
                        kayak.L2Norm(weights_1, layer1_l2),
                        kayak.L2Norm(weights_2, layer2_l2))

    # Use momentum for the gradient-based optimization.
    mom_grad_W1 = np.zeros(weights_1.shape)
    mom_grad_W2 = np.zeros(weights_2.shape)
    mom_grad_W3 = np.zeros(weights_out.shape)

    # Loop over epochs.
    plot_loss = np.ones((iterations, 2))
    for epoch in range(iterations):

        # Track the total loss.
        total_loss = 0.0

        for batch in batcher:
            # Compute the loss of this minibatch by asking the Kayak
            # object for its value.
            total_loss += loss.value

            # Now ask the loss for its gradient in terms of the
            # weights and the biases -- the two things we're trying to
            # learn here.
            grad_W1 = loss.grad(weights_1)
            grad_B1 = loss.grad(bias_1)
            grad_W2 = loss.grad(weights_2)
            grad_B2 = loss.grad(bias_2)
            grad_W3 = loss.grad(weights_out)
            grad_B3 = loss.grad(bias_out)

            # Use momentum on the weight gradients.
            mom_grad_W1 = momentum * mom_grad_W1 + (1.0 - momentum) * grad_W1
            mom_grad_W2 = momentum * mom_grad_W2 + (1.0 - momentum) * grad_W2
            mom_grad_W3 = momentum * mom_grad_W3 + (1.0 - momentum) * grad_W3

            # Now make the actual parameter updates.
            weights_1.value -= learn_rate * mom_grad_W1
            bias_1.value -= learn_rate * grad_B1
            weights_2.value -= learn_rate * mom_grad_W2
            bias_2.value -= learn_rate * grad_B2
            weights_out.value -= learn_rate * mom_grad_W3
            bias_out.value -= learn_rate * grad_B3

        # save values into table to print learning curve at the end of training
        plot_loss[epoch, 0] = epoch
        plot_loss[epoch, 1] = total_loss
        print(epoch, total_loss)

    pyplot.plot(plot_loss[:, 0], plot_loss[:, 1], linewidth=2.0)
    pyplot.show()

    def compute_predictions(x):
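        # Swap in new inputs and switch the batcher to test mode, which
        # disables dropout and evaluates on the full data set.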
        X.data = x
        batcher.test_mode()
        return yhat.value

    return compute_predictions
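A hypothetical usage sketch (the hyperparameter values and the fake data are assumptions, not part of the original):

if __name__ == '__main__':
    # Hypothetical hyperparameters; kayak_mlp reads these from module scope.
    batch_size, iterations = 256, 20
    layer1_size, layer2_size = 500, 500
    layer1_dropout, layer2_dropout = 0.25, 0.25
    layer1_l2, layer2_l2 = 1.0, 1.0
    momentum, learn_rate = 0.9, 0.01

    # Fake data: 1000 examples, 64 features, 9 one-hot target classes.
    X = np.random.randn(1000, 64)
    y = np.eye(9)[np.random.randint(0, 9, size=1000)]

    predict = kayak_mlp(X, y)
    print(predict(X[:5]))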