Example #1
def test_logsoftmax_grad_2():
    npr.seed(4)

    for ii in xrange(NUM_TRIALS):

        np_X = npr.randn(5, 6)
        X = kayak.Parameter(np_X)
        Y = kayak.LogSoftMax(X, axis=0)
        Z = kayak.MatSum(Y)

        assert kayak.util.checkgrad(X, Z) < MAX_GRAD_DIFF
Example #2
def test_logsoftmax_grad_3():
    npr.seed(5)

    for ii in xrange(NUM_TRIALS):

        np_X = npr.randn(5, 6)
        np_T = npr.randint(0, 10, np_X.shape)
        X = kayak.Parameter(np_X)
        T = kayak.Targets(np_T)
        Y = kayak.LogSoftMax(X)
        Z = kayak.MatSum(kayak.LogMultinomialLoss(Y, T))

        assert kayak.util.checkgrad(X, Z) < MAX_GRAD_DIFF
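
Both gradient tests lean on kayak.util.checkgrad, which compares the backpropagated gradient against a finite-difference estimate. As a rough, pure-NumPy illustration of what that check verifies (the helper names, step size, and analytic formula below are choices made here, not part of Kayak):

import numpy as np
import numpy.random as npr

def log_softmax(x, axis=1):
    # Numerically stable log-softmax: shift by the row max before exponentiating.
    shifted = x - np.max(x, axis=axis, keepdims=True)
    return shifted - np.log(np.sum(np.exp(shifted), axis=axis, keepdims=True))

def finite_diff_grad(f, x, eps=1e-5):
    # Central-difference estimate of df/dx, one coordinate at a time.
    grad = np.zeros_like(x)
    for idx in np.ndindex(*x.shape):
        x_plus, x_minus = x.copy(), x.copy()
        x_plus[idx] += eps
        x_minus[idx] -= eps
        grad[idx] = (f(x_plus) - f(x_minus)) / (2.0 * eps)
    return grad

np_X = npr.randn(5, 6)
fd_grad = finite_diff_grad(lambda x: np.sum(log_softmax(x)), np_X)

# For Z = sum(log_softmax(X)) with row-wise normalization, the analytic
# gradient is 1 - n_cols * softmax(X); the two estimates should agree closely.
analytic = 1.0 - np_X.shape[1] * np.exp(log_softmax(np_X))
print(np.max(np.abs(fd_grad - analytic)))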
Example #3
def test_logsoftmax_values_1():
    npr.seed(1)

    for ii in xrange(NUM_TRIALS):

        np_X = npr.randn(5, 6)
        X = kayak.Parameter(np_X)
        Y = kayak.LogSoftMax(X)

        np_Y = np.exp(np_X)
        np_Y = np_Y / np.sum(np_Y, axis=1)[:, np.newaxis]
        np_Y = np.log(np_Y)

        assert Y.shape == np_X.shape
        assert np.all(close_float(Y.value, np_Y))
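
The reference computation above normalizes along axis=1, i.e. per row, which matches the default kayak.LogSoftMax behaves with when no axis is given; Example #1 passes axis=0 to normalize down columns instead. A small follow-up check one could add (mine, not part of the original test) is that the exponentiated rows of a row-wise log-softmax sum to one:

import numpy as np
import numpy.random as npr

np_X = npr.randn(5, 6)
np_Y = np.log(np.exp(np_X) / np.sum(np.exp(np_X), axis=1)[:, np.newaxis])

# Rows of a row-wise log-softmax exponentiate back to probabilities that sum to one.
assert np.allclose(np.sum(np.exp(np_Y), axis=1), 1.0)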
Example #4
def train(inputs, targets, batch_size, learn_rate, momentum, l1_weight,
          l2_weight, dropout, improvement_thresh):

    # Create a batcher object.
    batcher = kayak.Batcher(batch_size, inputs.shape[0])

    # Inputs and targets need access to the batcher.
    X = kayak.Inputs(inputs, batcher)
    T = kayak.Targets(targets, batcher)

    # Put some dropout regularization on the inputs
    H = kayak.Dropout(X, dropout)

    # Weights and biases, with random initializations.
    W = kayak.Parameter(0.1 * npr.randn(inputs.shape[1], 10))
    B = kayak.Parameter(0.1 * npr.randn(1, 10))

    # Nothing fancy here: inputs times weights, plus bias, then softmax.
    Y = kayak.LogSoftMax(kayak.ElemAdd(kayak.MatMult(H, W), B))

    # The training loss is negative multinomial log likelihood.
    loss = kayak.MatAdd(kayak.MatSum(kayak.LogMultinomialLoss(Y, T)),
                        kayak.L2Norm(W, l2_weight), kayak.L1Norm(W, l1_weight))

    # Use momentum for the gradient-based optimization.
    mom_grad_W = np.zeros(W.shape)

    best_loss = np.inf
    best_epoch = -1

    # Loop over epochs.
    for epoch in range(100):

        # Track the total loss.
        total_loss = 0.0

        # Loop over batches -- using batcher as iterator.
        for batch in batcher:

            # Draw new random dropouts
            H.draw_new_mask()

            # Compute the loss of this minibatch by asking the Kayak
            # loss object for its value.
            total_loss += loss.value

            # Now ask the loss for its gradient in terms of the
            # weights and the biases -- the two things we're trying to
            # learn here.
            grad_W = loss.grad(W)
            grad_B = loss.grad(B)

            # Use momentum on the weight gradient.
            mom_grad_W *= momentum
            mom_grad_W += (1.0 - momentum) * grad_W

            # Now make the actual parameter updates.
            W.value -= learn_rate * mom_grad_W
            B.value -= learn_rate * grad_B

        print("Epoch: %d, total loss: %f" % (epoch, total_loss))

        if not np.isfinite(total_loss):
            print("Training diverged. Returning constraint violation.")
            break

        if total_loss < best_loss:
            best_loss = total_loss
            best_epoch = epoch
        else:
            if (epoch - best_epoch) > improvement_thresh:
                print("Has been %d epochs without improvement. Aborting." %
                      (epoch - best_epoch))
                break

    # After we've trained, we return a sugary little function handle
    # that makes things easy.  Basically, what we're doing here is
    # simply replacing the inputs in the graph defined above and then
    # running forward through it to produce the outputs.  The point is
    # that we wind up with a function handle that can be called with a
    # numpy array and produces predictions for novel data, using the
    # parameters we just learned.
    def predict(x):
        X.value = x
        H.reinstate_units()
        return Y.value

    return predict
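
A hedged sketch of driving this train function end to end; the synthetic data and hyperparameter values below are placeholders chosen here, not taken from the original script, and the one-hot target encoding is an assumption on my part:

import numpy as np
import numpy.random as npr

# Hypothetical data: 1000 examples, 50 features, 10 classes (one-hot targets).
inputs = npr.randn(1000, 50)
labels = npr.randint(0, 10, 1000)
targets = np.zeros((1000, 10))
targets[np.arange(1000), labels] = 1.0

predict = train(inputs, targets,
                batch_size=256, learn_rate=0.01, momentum=0.9,
                l1_weight=1e-5, l2_weight=1e-5, dropout=0.2,
                improvement_thresh=5)

# predict() returns log-probabilities; argmax recovers the predicted class.
test_classes = np.argmax(predict(npr.randn(20, 50)), axis=1)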
Example #5
def kayak_mlp(X, y):
    """
    Kayak implementation of an MLP with ReLU hidden layers and dropout.
    """
    # Create a batcher object.
    batcher = kayak.Batcher(batch_size, X.shape[0])

    # count number of rows and columns
    num_examples, num_features = np.shape(X)

    X = kayak.Inputs(X, batcher)
    T = kayak.Targets(y, batcher)

    # ----------------------------- first hidden layer -------------------------------

    # set up weights for our input layer
    # use the same scheme as our numpy mlp
    # (note: input_range is computed here, but the initializations below
    #  use a fixed 0.1 scale)
    input_range = 1.0 / num_features ** 0.5
    weights_1 = kayak.Parameter(0.1 * np.random.randn(X.shape[1], layer1_size))
    bias_1 = kayak.Parameter(0.1 * np.random.randn(1, layer1_size))

    # linear combination of weights and inputs
    hidden_1_input = kayak.ElemAdd(kayak.MatMult(X, weights_1), bias_1)

    # apply activation function to hidden layer
    hidden_1_activation = kayak.HardReLU(hidden_1_input)

    # apply a dropout for regularization
    hidden_1_out = kayak.Dropout(hidden_1_activation,
                                 layer1_dropout,
                                 batcher=batcher)

    # ----------------------------- second hidden layer -----------------------------

    # set up weights
    weights_2 = kayak.Parameter(0.1 *
                                np.random.randn(layer1_size, layer2_size))
    bias_2 = kayak.Parameter(0.1 * np.random.randn(1, layer2_size))

    # linear combination of weights and layer1 output
    hidden_2_input = kayak.ElemAdd(kayak.MatMult(hidden_1_out, weights_2),
                                   bias_2)

    # apply activation function to hidden layer
    hidden_2_activation = kayak.HardReLU(hidden_2_input)

    # apply a dropout for regularization
    hidden_2_out = kayak.Dropout(hidden_2_activation,
                                 layer2_dropout,
                                 batcher=batcher)

    # ----------------------------- output layer -----------------------------------

    weights_out = kayak.Parameter(0.1 * np.random.randn(layer2_size, 10))
    bias_out = kayak.Parameter(0.1 * np.random.randn(1, 10))

    # linear combination of layer2 output and output weights
    out = kayak.ElemAdd(kayak.MatMult(hidden_2_out, weights_out), bias_out)

    # apply activation function to output
    yhat = kayak.LogSoftMax(out)

    # ----------------------------- loss function -----------------------------------

    loss = kayak.MatSum(kayak.LogMultinomialLoss(yhat, T))

    # Use momentum for the gradient-based optimization.
    mom_grad_W1 = np.zeros(weights_1.shape)
    mom_grad_W2 = np.zeros(weights_2.shape)
    mom_grad_W3 = np.zeros(weights_out.shape)

    # Loop over epochs.
    for epoch in xrange(iterations):

        # Track the total loss.
        total_loss = 0.0

        for batch in batcher:
            # Compute the loss of this minibatch by asking the Kayak
            # loss object for its value.
            total_loss += loss.value

            # Now ask the loss for its gradient in terms of the
            # weights and the biases -- the two things we're trying to
            # learn here.
            grad_W1 = loss.grad(weights_1)
            grad_B1 = loss.grad(bias_1)
            grad_W2 = loss.grad(weights_2)
            grad_B2 = loss.grad(bias_2)
            grad_W3 = loss.grad(weights_out)
            grad_B3 = loss.grad(bias_out)

            # Use momentum on the weight gradients.
            mom_grad_W1 = momentum * mom_grad_W1 + (1.0 - momentum) * grad_W1
            mom_grad_W2 = momentum * mom_grad_W2 + (1.0 - momentum) * grad_W2
            mom_grad_W3 = momentum * mom_grad_W3 + (1.0 - momentum) * grad_W3

            # Now make the actual parameter updates.
            weights_1.value -= learn_rate * mom_grad_W1
            bias_1.value -= learn_rate * grad_B1
            weights_2.value -= learn_rate * mom_grad_W2
            bias_2.value -= learn_rate * grad_B2
            weights_out.value -= learn_rate * mom_grad_W3
            bias_out.value -= learn_rate * grad_B3

        print epoch, total_loss

    def compute_predictions(x):
        X.data = x
        batcher.test_mode()
        return yhat.value

    return compute_predictions
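
This function reads several module-level settings (batch_size, learn_rate, momentum, iterations, the layer sizes, and the dropout rates) that are defined elsewhere in the original script; a purely illustrative configuration might look like:

# Illustrative values only; the original module defines its own.
batch_size = 256
learn_rate = 0.01
momentum = 0.9
iterations = 20
layer1_size = 500
layer2_size = 500
layer1_dropout = 0.25
layer2_dropout = 0.25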
Example #6
def train(inputs, targets):
    # Create a batcher object.
    batcher = kayak.Batcher(batch_size, inputs.shape[0])

    # Inputs and targets need access to the batcher.
    X = kayak.Inputs(inputs, batcher)
    T = kayak.Targets(targets, batcher)

    # First-layer weights and biases, with random initializations.
    W1 = kayak.Parameter(0.1 * npr.randn(inputs.shape[1], layer1_sz))
    B1 = kayak.Parameter(0.1 * npr.randn(1, layer1_sz))

    # First hidden layer: ReLU + Dropout
    H1 = kayak.Dropout(kayak.HardReLU(kayak.ElemAdd(kayak.MatMult(X, W1), B1)),
                       layer1_dropout,
                       batcher=batcher)

    # Second-layer weights and biases, with random initializations.
    W2 = kayak.Parameter(0.1 * npr.randn(layer1_sz, layer2_sz))
    B2 = kayak.Parameter(0.1 * npr.randn(1, layer2_sz))

    # Second hidden layer: ReLU + Dropout
    H2 = kayak.Dropout(kayak.HardReLU(kayak.ElemAdd(kayak.MatMult(H1, W2),
                                                    B2)),
                       layer2_dropout,
                       batcher=batcher)

    # Output layer weights and biases, with random initializations.
    W3 = kayak.Parameter(0.1 * npr.randn(layer2_sz, 10))
    B3 = kayak.Parameter(0.1 * npr.randn(1, 10))

    # Output layer.
    Y = kayak.LogSoftMax(kayak.ElemAdd(kayak.MatMult(H2, W3), B3))

    # The training loss is negative multinomial log likelihood.
    loss = kayak.MatSum(kayak.LogMultinomialLoss(Y, T))

    # Use momentum for the gradient-based optimization.
    mom_grad_W1 = np.zeros(W1.shape)
    mom_grad_W2 = np.zeros(W2.shape)
    mom_grad_W3 = np.zeros(W3.shape)

    # Loop over epochs.
    for epoch in xrange(10):

        # Track the total loss.
        total_loss = 0.0

        # Loop over batches -- using batcher as iterator.
        for batch in batcher:
            # Compute the loss of this minibatch by asking the Kayak
            # loss object for its value.
            total_loss += loss.value

            # Now ask the loss for its gradient in terms of the
            # weights and the biases -- the two things we're trying to
            # learn here.
            grad_W1 = loss.grad(W1)
            grad_B1 = loss.grad(B1)
            grad_W2 = loss.grad(W2)
            grad_B2 = loss.grad(B2)
            grad_W3 = loss.grad(W3)
            grad_B3 = loss.grad(B3)

            # Use momentum on the weight gradients.
            mom_grad_W1 = momentum * mom_grad_W1 + (1.0 - momentum) * grad_W1
            mom_grad_W2 = momentum * mom_grad_W2 + (1.0 - momentum) * grad_W2
            mom_grad_W3 = momentum * mom_grad_W3 + (1.0 - momentum) * grad_W3

            # Now make the actual parameter updates.
            W1.value -= learn_rate * mom_grad_W1
            B1.value -= learn_rate * grad_B1
            W2.value -= learn_rate * mom_grad_W2
            B2.value -= learn_rate * grad_B2
            W3.value -= learn_rate * mom_grad_W3
            B3.value -= learn_rate * grad_B3

        print epoch, total_loss

    # After we've trained, we return a sugary little function handle
    # that makes things easy.  Basically, what we're doing here is
    # replacing the data held by the Kayak input object 'X' (the
    # features), switching the batcher into test mode, and then reading
    # off the output object (not the loss!).  The point is that we wind
    # up with a function handle that can be called with a numpy array
    # and produces predictions for novel data, using the parameters we
    # just learned.

    def compute_predictions(x):
        X.data = x
        batcher.test_mode()
        return Y.value

    return compute_predictions
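
Because Y is a LogSoftMax node, compute_predictions returns per-class log-probabilities. A hedged sketch of scoring them against held-out data follows; the synthetic arrays, shapes, and one-hot encoding are placeholders, and the module-level settings this train function reads (batch_size, layer1_sz, layer2_sz, the dropout rates, momentum, learn_rate) are assumed to be defined elsewhere:

import numpy as np
import numpy.random as npr

# Synthetic stand-ins for an MNIST-like split; shapes are illustrative.
train_images = npr.randn(1000, 784)
train_labels = np.eye(10)[npr.randint(0, 10, 1000)]
test_images = npr.randn(200, 784)
test_labels = np.eye(10)[npr.randint(0, 10, 200)]

pred_fun = train(train_images, train_labels)
log_probs = pred_fun(test_images)              # per-class log-probabilities

predicted = np.argmax(log_probs, axis=1)       # most probable class per row
accuracy = np.mean(predicted == np.argmax(test_labels, axis=1))
print(accuracy)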
Example #7
def train(inputs, targets, batch_size, learn_rate, momentum, l1_weight, l2_weight, dropout):

    # Create a batcher object.
    batcher = kayak.Batcher(batch_size, inputs.shape[0])

    # Inputs and targets need access to the batcher.
    X    = kayak.Inputs(inputs, batcher)
    T    = kayak.Targets(targets, batcher)

    # Weights and biases, with random initializations.
    W    = kayak.Parameter( 0.1*npr.randn( inputs.shape[1], 10 ))
    B    = kayak.Parameter( 0.1*npr.randn(1,10) )

    # Nothing fancy here: dropout on the inputs, then inputs times weights,
    # plus bias, then softmax.
    dropout_layer = kayak.Dropout(X, dropout, batcher=batcher)
    Y    = kayak.LogSoftMax( kayak.ElemAdd( kayak.MatMult(dropout_layer, W), B ) )

    # The training loss is negative multinomial log likelihood.
    loss = kayak.MatAdd(kayak.MatSum(kayak.LogMultinomialLoss(Y, T)),
                        kayak.L2Norm(W, l2_weight),
                        kayak.L1Norm(W, l1_weight))

    # Use momentum for the gradient-based optimization.
    mom_grad_W = np.zeros(W.shape)

    # Loop over epochs.
    for epoch in xrange(10):

        # Track the total loss and the overall gradient.
        total_loss   = 0.0
        total_grad_W = np.zeros(W.shape)

        # Loop over batches -- using batcher as iterator.
        for batch in batcher:
            # Compute the loss of this minibatch by asking the Kayak
            # loss object for its value.
            total_loss += loss.value

            # Now ask the loss for its gradient in terms of the
            # weights and the biases -- the two things we're trying to
            # learn here.
            grad_W = loss.grad(W)
            grad_B = loss.grad(B)
            
            # Use momentum on the weight gradient.
            mom_grad_W = momentum*mom_grad_W + (1.0-momentum)*grad_W

            # Now make the actual parameter updates.
            W.value -= learn_rate * mom_grad_W
            B.value -= learn_rate * grad_B

            # Keep track of the gradient to see if we're converging.
            total_grad_W += grad_W

        #print epoch, total_loss, np.sum(total_grad_W**2)

    # After we've trained, we return a sugary little function handle
    # that makes things easy.  Basically, what we're doing here is
    # replacing the data held by the Kayak input object 'X' (the
    # features), switching the batcher into test mode, and then reading
    # off the output object (not the loss!).  The point is that we wind
    # up with a function handle that can be called with a numpy array
    # and produces predictions for novel data, using the parameters we
    # just learned.
    
    def compute_predictions(x):
        X.data = x
        batcher.test_mode()
        return Y.value

    return compute_predictions
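
The kayak.util.checkgrad helper used in the test examples at the top can also sanity-check this regularized loss before training; a minimal sketch on a tiny random problem (shapes and penalty weights chosen arbitrarily here):

import numpy as np
import numpy.random as npr
import kayak

np_X = npr.randn(8, 4)
np_T = np.eye(10)[npr.randint(0, 10, 8)]   # one-hot targets

X = kayak.Parameter(np_X)
T = kayak.Targets(np_T)
W = kayak.Parameter(0.1 * npr.randn(4, 10))
B = kayak.Parameter(0.1 * npr.randn(1, 10))

Y = kayak.LogSoftMax(kayak.ElemAdd(kayak.MatMult(X, W), B))
loss = kayak.MatAdd(kayak.MatSum(kayak.LogMultinomialLoss(Y, T)),
                    kayak.L2Norm(W, 1e-4),
                    kayak.L1Norm(W, 1e-4))

# Should be small if the backpropagated gradient matches a
# finite-difference estimate (cf. MAX_GRAD_DIFF in the tests above).
print(kayak.util.checkgrad(W, loss))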