import numpy as np
import numpy.random as npr

import kayak

# The helpers close_float, MAX_GRAD_DIFF, and NUM_TRIALS used by the tests
# below are assumed to be defined by the surrounding test module.


def test_batcher_can_reinstate_dropout_mask():
    batcher = kayak.Batcher(5, 10)
    X = kayak.Inputs(np.ones((10, 10)))
    Y = kayak.Dropout(X, batcher=batcher)
    assert not np.all(Y.value == np.ones((10, 10)))

    batcher.test_mode()
    print "Y value", Y.value
    assert np.all(Y.value == np.ones((10, 10)))

def test_batcher_updates_dropout():
    batcher = kayak.Batcher(5, 10)
    X = kayak.Inputs(np.random.randn(10, 10))
    Y = kayak.Dropout(X, batcher=batcher)

    val1 = Y.value
    batcher.next()
    val2 = Y.value
    assert not np.all(val1 == val2)

def test_dropout_clears_value_cache():
    X = kayak.Inputs(np.random.randn(10, 10))
    Y = kayak.Dropout(X)
    Z = kayak.MatSum(Y, axis=1)

    val1 = Z.value
    Y.draw_new_mask()
    val2 = Z.value
    assert not np.all(val1 == val2)
    assert np.all(Z.value == Z.value)

def test_alldropout_values():
    npr.seed(2)

    # Drop everything out.
    np_X = npr.randn(10, 20)
    X = kayak.Parameter(np_X)
    Y = kayak.Dropout(X, drop_prob=1.0)

    assert np.all(Y.value == 0.0)

def test_nondropout_values():
    npr.seed(1)

    # First sanity check: don't actually drop anything out.
    # Make sure we get everything back.
    np_X = npr.randn(10, 20)
    X = kayak.Parameter(np_X)
    Y = kayak.Dropout(X, drop_prob=0.0)

    assert np.all(close_float(Y.value, np_X))

def test_alldropout_grad():
    npr.seed(5)

    np_X = npr.randn(10, 20)
    X = kayak.Parameter(np_X)
    Y = kayak.Dropout(X, drop_prob=1.0)
    Z = kayak.MatSum(Y)

    Z.value  # Force evaluation of the graph before asking for gradients.
    assert Z.grad(X).shape == np_X.shape
    assert kayak.util.checkgrad(X, Z) < MAX_GRAD_DIFF

def test_dropout_values():
    # Drop some things out.
    npr.seed(3)

    for ii in xrange(NUM_TRIALS):
        prob = npr.rand()
        scale = 1.0 / (1.0 - prob)

        np_X = npr.randn(5, 6)
        X = kayak.Parameter(np_X)
        Y = kayak.Dropout(X, drop_prob=prob)

        Y.value  # Force evaluation so the dropout mask is drawn.

        # Each entry is either dropped to zero or rescaled by 1/(1-prob).
        assert np.all(np.logical_xor(Y.value == 0.0,
                                     close_float(Y.value, scale * np_X)))

def test_dropout_grad():
    # Drop some things out.
    npr.seed(6)

    for ii in xrange(NUM_TRIALS):
        prob = npr.rand()
        scale = 1.0 / (1.0 - prob)

        np_X = npr.randn(5, 6)
        X = kayak.Parameter(np_X)
        Y = kayak.Dropout(X, drop_prob=prob)
        Z = kayak.MatSum(Y)

        Z.value  # Force evaluation of the graph before asking for gradients.
        assert Z.grad(X).shape == np_X.shape
        assert kayak.util.checkgrad(X, Z) < MAX_GRAD_DIFF

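# A minimal NumPy-only sketch (not part of the Kayak test suite) of the
# inverted-dropout arithmetic that test_dropout_values checks: surviving
# entries are rescaled by 1/(1 - drop_prob), so each unit keeps its expected
# value. Assumes drop_prob < 1 and inputs without exact zeros.
def _inverted_dropout_sketch(np_X, drop_prob, rng=np.random):
    mask = rng.rand(*np_X.shape) > drop_prob       # keep with prob (1 - drop_prob)
    out = mask * np_X / (1.0 - drop_prob)          # rescale the survivors
    # Every entry is either exactly zero or a rescaled copy of the input,
    # which is the XOR condition asserted in the test above.
    assert np.all(np.logical_xor(out == 0.0,
                                 np.isclose(out, np_X / (1.0 - drop_prob))))
    return out
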
def train(inputs, targets, batch_size, learn_rate, momentum,
          l1_weight, l2_weight, dropout, improvement_thresh):

    # Create a batcher object.
    batcher = kayak.Batcher(batch_size, inputs.shape[0])

    # Inputs and targets need access to the batcher.
    X = kayak.Inputs(inputs, batcher)
    T = kayak.Targets(targets, batcher)

    # Put some dropout regularization on the inputs.
    H = kayak.Dropout(X, dropout)

    # Weights and biases, with random initializations.
    W = kayak.Parameter(0.1 * npr.randn(inputs.shape[1], 10))
    B = kayak.Parameter(0.1 * npr.randn(1, 10))

    # Nothing fancy here: inputs times weights, plus bias, then softmax.
    Y = kayak.LogSoftMax(kayak.ElemAdd(kayak.MatMult(H, W), B))

    # The training loss is negative multinomial log likelihood, plus L1/L2
    # penalties on the weights.
    loss = kayak.MatAdd(kayak.MatSum(kayak.LogMultinomialLoss(Y, T)),
                        kayak.L2Norm(W, l2_weight),
                        kayak.L1Norm(W, l1_weight))

    # Use momentum for the gradient-based optimization.
    mom_grad_W = np.zeros(W.shape)

    best_loss = np.inf
    best_epoch = -1

    # Loop over epochs.
    for epoch in range(100):

        # Track the total loss.
        total_loss = 0.0

        # Loop over batches -- using batcher as iterator.
        for batch in batcher:

            # Draw new random dropouts.
            H.draw_new_mask()

            # Compute the loss of this minibatch by asking the Kayak
            # object for its value.
            total_loss += loss.value

            # Now ask the loss for its gradient in terms of the
            # weights and the biases -- the two things we're trying to
            # learn here.
            grad_W = loss.grad(W)
            grad_B = loss.grad(B)

            # Use momentum on the weight gradient.
            mom_grad_W *= momentum
            mom_grad_W += (1.0 - momentum) * grad_W

            # Now make the actual parameter updates.
            W.value -= learn_rate * mom_grad_W
            B.value -= learn_rate * grad_B

        print("Epoch: %d, total loss: %f" % (epoch, total_loss))

        if not np.isfinite(total_loss):
            print("Training diverged. Returning constraint violation.")
            break

        if total_loss < best_loss:
            best_loss = total_loss
            best_epoch = epoch
        elif (epoch - best_epoch) > improvement_thresh:
            print("Has been %d epochs without improvement. Aborting."
                  % (epoch - best_epoch))
            break

    # After we've trained, we return a sugary little function handle that
    # makes things easy. Basically, we replace the inputs in the graph
    # defined above and then run through it to produce the outputs. The
    # point is that we wind up with a function handle that can be called
    # with a numpy object, and it produces the target values for novel
    # data, using the parameters we just learned.
    def predict(x):
        X.value = x
        H.reinstate_units()
        return Y.value

    return predict

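# A minimal usage sketch of the train() function above. The hyperparameter
# values and the synthetic data are illustrative assumptions, not from the
# original example.
if __name__ == '__main__':
    npr.seed(0)
    fake_inputs = npr.randn(500, 20)                  # 500 examples, 20 features
    fake_labels = npr.randint(10, size=500)
    fake_targets = np.zeros((500, 10))
    fake_targets[np.arange(500), fake_labels] = 1.0   # one-hot targets, 10 classes

    predict = train(fake_inputs, fake_targets,
                    batch_size=100, learn_rate=0.01, momentum=0.9,
                    l1_weight=1e-4, l2_weight=1e-4, dropout=0.2,
                    improvement_thresh=5)

    # predict() returns log-softmax outputs; take the argmax for class labels.
    train_acc = np.mean(np.argmax(predict(fake_inputs), axis=1) == fake_labels)
    print("Training accuracy: %f" % train_acc)
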
def kayak_mlp(X, y):
    """
    Kayak implementation of an MLP with ReLU hidden layers and dropout.

    The hyperparameters batch_size, layer1_size, layer1_dropout, layer1_l2,
    iterations, learn_rate, and momentum are assumed to be defined at
    module level.
    """
    # Create a batcher object.
    batcher = kayak.Batcher(batch_size, X.shape[0])

    # Count the number of rows and columns.
    num_examples, num_features = np.shape(X)

    X = kayak.Inputs(X, batcher)
    T = kayak.Targets(y, batcher)

    # ----------------------------- first hidden layer -------------------------------

    # Set up weights for the input layer, using the same scheme as the numpy MLP.
    # (input_range is computed for reference; the initializations below use a
    # fixed 0.1 scale.)
    input_range = 1.0 / num_features ** 0.5

    weights_1 = kayak.Parameter(0.1 * np.random.randn(X.shape[1], layer1_size))
    bias_1 = kayak.Parameter(0.1 * np.random.randn(1, layer1_size))

    # Linear combination of weights and inputs.
    hidden_1_input = kayak.ElemAdd(kayak.MatMult(X, weights_1), bias_1)

    # Apply the activation function to the hidden layer.
    hidden_1_activation = kayak.HardReLU(hidden_1_input)

    # Apply dropout for regularization.
    hidden_1_out = kayak.Dropout(hidden_1_activation, layer1_dropout,
                                 batcher=batcher)

    # ----------------------------- output layer -----------------------------------

    weights_out = kayak.Parameter(0.1 * np.random.randn(layer1_size, 9))
    bias_out = kayak.Parameter(0.1 * np.random.randn(1, 9))

    # Linear combination of the hidden-layer output and the output weights.
    out = kayak.ElemAdd(kayak.MatMult(hidden_1_out, weights_out), bias_out)

    # Apply the activation function to the output.
    yhat = kayak.SoftMax(out)

    # ----------------------------- loss function -----------------------------------

    loss = kayak.MatAdd(kayak.MatSum(kayak.L2Loss(yhat, T)),
                        kayak.L2Norm(weights_1, layer1_l2))

    # Use momentum for the gradient-based optimization.
    mom_grad_W1 = np.zeros(weights_1.shape)
    mom_grad_W2 = np.zeros(weights_out.shape)

    # Loop over epochs.
    plot_loss = np.ones((iterations, 2))
    for epoch in xrange(iterations):

        # Track the total loss.
        total_loss = 0.0

        for batch in batcher:
            # Compute the loss of this minibatch by asking the Kayak
            # object for its value.
            total_loss += loss.value

            # Now ask the loss for its gradient in terms of the
            # weights and the biases -- the things we're trying to
            # learn here.
            grad_W1 = loss.grad(weights_1)
            grad_B1 = loss.grad(bias_1)
            grad_W2 = loss.grad(weights_out)
            grad_B2 = loss.grad(bias_out)

            # Use momentum on the weight gradients.
            mom_grad_W1 = momentum * mom_grad_W1 + (1.0 - momentum) * grad_W1
            mom_grad_W2 = momentum * mom_grad_W2 + (1.0 - momentum) * grad_W2

            # Now make the actual parameter updates.
            weights_1.value -= learn_rate * mom_grad_W1
            bias_1.value -= learn_rate * grad_B1
            weights_out.value -= learn_rate * mom_grad_W2
            bias_out.value -= learn_rate * grad_B2

        # Save values into a table so the learning curve can be plotted
        # at the end of training.
        plot_loss[epoch, 0] = epoch
        plot_loss[epoch, 1] = total_loss

        print epoch, total_loss

    # pyplot.plot(plot_loss[:, 0], plot_loss[:, 1], linewidth=2.0)
    # pyplot.show()

    def compute_predictions(x):
        X.data = x
        batcher.test_mode()
        return yhat.value

    return compute_predictions

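# A minimal usage sketch for kayak_mlp(). The module-level hyperparameters it
# reads are set here with illustrative values; the data shapes (9 classes,
# one-hot targets) match the network's output layer but are otherwise assumed.
batch_size = 256
layer1_size = 128
layer1_dropout = 0.25
layer1_l2 = 1e-4
iterations = 20
learn_rate = 0.01
momentum = 0.9

np.random.seed(0)
demo_X = np.random.randn(1000, 50)
demo_labels = np.random.randint(9, size=1000)
demo_y = np.zeros((1000, 9))
demo_y[np.arange(1000), demo_labels] = 1.0

predict_fn = kayak_mlp(demo_X, demo_y)
print np.argmax(predict_fn(demo_X), axis=1)
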
# The data arrays (X, Y), the dimensions (N, D, H1, P), and batch_size are
# assumed to be defined above this snippet.
batcher = kayak.Batcher(batch_size, N)

# Build network.
kyk_inputs = kayak.Inputs(X, batcher)

# Labels.
kyk_targets = kayak.Targets(Y, batcher)

# First layer weights and biases.
kyk_W1 = kayak.Parameter(npr.randn(D, H1))
kyk_B1 = kayak.Parameter(npr.randn(1, H1))

# First layer weight mult plus biases, then nonlinearity, then dropout.
kyk_H1 = kayak.Dropout(kayak.HardReLU(kayak.ElemAdd(kayak.MatMult(kyk_inputs, kyk_W1),
                                                    kyk_B1)),
                       drop_prob=0.5, batcher=batcher)

# Second layer weights and bias.
kyk_W2 = kayak.Parameter(npr.randn(H1, P))
kyk_B2 = kayak.Parameter(npr.randn(1, P))

# Second layer multiplication, then nonlinearity, then dropout.
kyk_out = kayak.Dropout(kayak.HardReLU(kayak.ElemAdd(kayak.MatMult(kyk_H1, kyk_W2),
                                                     kyk_B2)),
                        drop_prob=0.5, batcher=batcher)

# Elementwise loss.
kyk_el_loss = kayak.L2Loss(kyk_out, kyk_targets)

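# A hedged sketch of how this graph might be trained: sum the elementwise
# loss into a scalar with kayak.MatSum and pull gradients with .grad(), as
# the other examples in this section do. The learning rate and the single
# pass over the batcher are illustrative assumptions.
kyk_loss = kayak.MatSum(kyk_el_loss)

learn_rate = 0.001
total_loss = 0.0
for batch in batcher:
    total_loss += kyk_loss.value
    kyk_W1.value -= learn_rate * kyk_loss.grad(kyk_W1)
    kyk_B1.value -= learn_rate * kyk_loss.grad(kyk_B1)
    kyk_W2.value -= learn_rate * kyk_loss.grad(kyk_W2)
    kyk_B2.value -= learn_rate * kyk_loss.grad(kyk_B2)
print total_loss
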
def train(inputs, targets):
    # Create a batcher object.
    batcher = kayak.Batcher(batch_size, inputs.shape[0])

    # Inputs and targets need access to the batcher.
    X = kayak.Inputs(inputs, batcher)
    T = kayak.Targets(targets, batcher)

    # First-layer weights and biases, with random initializations.
    W1 = kayak.Parameter(0.1 * npr.randn(inputs.shape[1], layer1_sz))
    B1 = kayak.Parameter(0.1 * npr.randn(1, layer1_sz))

    # First hidden layer: ReLU + Dropout.
    H1 = kayak.Dropout(kayak.HardReLU(kayak.ElemAdd(kayak.MatMult(X, W1), B1)),
                       layer1_dropout, batcher=batcher)

    # Second-layer weights and biases, with random initializations.
    W2 = kayak.Parameter(0.1 * npr.randn(layer1_sz, layer2_sz))
    B2 = kayak.Parameter(0.1 * npr.randn(1, layer2_sz))

    # Second hidden layer: ReLU + Dropout.
    H2 = kayak.Dropout(kayak.HardReLU(kayak.ElemAdd(kayak.MatMult(H1, W2), B2)),
                       layer2_dropout, batcher=batcher)

    # Output layer weights and biases, with random initializations.
    W3 = kayak.Parameter(0.1 * npr.randn(layer2_sz, 10))
    B3 = kayak.Parameter(0.1 * npr.randn(1, 10))

    # Output layer.
    Y = kayak.LogSoftMax(kayak.ElemAdd(kayak.MatMult(H2, W3), B3))

    # The training loss is negative multinomial log likelihood.
    loss = kayak.MatSum(kayak.LogMultinomialLoss(Y, T))

    # Use momentum for the gradient-based optimization.
    mom_grad_W1 = np.zeros(W1.shape)
    mom_grad_W2 = np.zeros(W2.shape)
    mom_grad_W3 = np.zeros(W3.shape)

    # Loop over epochs.
    for epoch in xrange(10):

        # Track the total loss.
        total_loss = 0.0

        # Loop over batches -- using batcher as iterator.
        for batch in batcher:

            # Compute the loss of this minibatch by asking the Kayak
            # object for its value.
            total_loss += loss.value

            # Now ask the loss for its gradient in terms of the
            # weights and the biases -- the things we're trying to
            # learn here.
            grad_W1 = loss.grad(W1)
            grad_B1 = loss.grad(B1)
            grad_W2 = loss.grad(W2)
            grad_B2 = loss.grad(B2)
            grad_W3 = loss.grad(W3)
            grad_B3 = loss.grad(B3)

            # Use momentum on the weight gradients.
            mom_grad_W1 = momentum * mom_grad_W1 + (1.0 - momentum) * grad_W1
            mom_grad_W2 = momentum * mom_grad_W2 + (1.0 - momentum) * grad_W2
            mom_grad_W3 = momentum * mom_grad_W3 + (1.0 - momentum) * grad_W3

            # Now make the actual parameter updates.
            W1.value -= learn_rate * mom_grad_W1
            B1.value -= learn_rate * grad_B1
            W2.value -= learn_rate * mom_grad_W2
            B2.value -= learn_rate * grad_B2
            W3.value -= learn_rate * mom_grad_W3
            B3.value -= learn_rate * grad_B3

        print epoch, total_loss

    # After we've trained, we return a sugary little function handle that
    # makes things easy. It replaces the data behind the Kayak input object
    # 'X' with whatever it is called with and switches the batcher into
    # test mode, so dropout is turned off. The point is that we wind up
    # with a function handle that can be called with a numpy object, and it
    # produces the target values for novel data, using the parameters we
    # just learned.
    def compute_predictions(x):
        X.data = x
        batcher.test_mode()
        return Y.value

    return compute_predictions

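# A hedged sketch of evaluating the returned handle on held-out data. The
# train/validation split, hyperparameter values, and synthetic MNIST-shaped
# data are illustrative assumptions; compute_predictions() switches the
# batcher to test mode, so dropout is disabled for these predictions.
batch_size = 128
layer1_sz, layer2_sz = 500, 500
layer1_dropout, layer2_dropout = 0.25, 0.25
learn_rate, momentum = 0.01, 0.9

npr.seed(1)
all_X = npr.randn(2000, 784)
all_labels = npr.randint(10, size=2000)
all_T = np.zeros((2000, 10))
all_T[np.arange(2000), all_labels] = 1.0

train_X, valid_X = all_X[:1500], all_X[1500:]
train_T, valid_labels = all_T[:1500], all_labels[1500:]

compute_predictions = train(train_X, train_T)

# Y is a LogSoftMax node, so argmax over columns gives predicted classes.
valid_acc = np.mean(np.argmax(compute_predictions(valid_X), axis=1) == valid_labels)
print "validation accuracy", valid_acc
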
def train(inputs, targets, batch_size, learn_rate, momentum,
          l1_weight, l2_weight, dropout):

    # Create a batcher object.
    batcher = kayak.Batcher(batch_size, inputs.shape[0])

    # Inputs and targets need access to the batcher.
    X = kayak.Inputs(inputs, batcher)
    T = kayak.Targets(targets, batcher)

    # Weights and biases, with random initializations.
    W = kayak.Parameter(0.1 * npr.randn(inputs.shape[1], 10))
    B = kayak.Parameter(0.1 * npr.randn(1, 10))

    # Nothing fancy here: dropout on the inputs, then inputs times weights,
    # plus bias, then softmax.
    dropout_layer = kayak.Dropout(X, dropout, batcher=batcher)
    Y = kayak.LogSoftMax(kayak.ElemAdd(kayak.MatMult(dropout_layer, W), B))

    # The training loss is negative multinomial log likelihood, plus L1/L2
    # penalties on the weights.
    loss = kayak.MatAdd(kayak.MatSum(kayak.LogMultinomialLoss(Y, T)),
                        kayak.L2Norm(W, l2_weight),
                        kayak.L1Norm(W, l1_weight))

    # Use momentum for the gradient-based optimization.
    mom_grad_W = np.zeros(W.shape)

    # Loop over epochs.
    for epoch in xrange(10):

        # Track the total loss and the overall gradient.
        total_loss = 0.0
        total_grad_W = np.zeros(W.shape)

        # Loop over batches -- using batcher as iterator.
        for batch in batcher:

            # Compute the loss of this minibatch by asking the Kayak
            # object for its value.
            total_loss += loss.value

            # Now ask the loss for its gradient in terms of the
            # weights and the biases -- the two things we're trying to
            # learn here.
            grad_W = loss.grad(W)
            grad_B = loss.grad(B)

            # Use momentum on the weight gradient.
            mom_grad_W = momentum * mom_grad_W + (1.0 - momentum) * grad_W

            # Now make the actual parameter updates.
            W.value -= learn_rate * mom_grad_W
            B.value -= learn_rate * grad_B

            # Keep track of the gradient to see if we're converging.
            total_grad_W += grad_W

        # print epoch, total_loss, np.sum(total_grad_W**2)

    # After we've trained, we return a sugary little function handle that
    # makes things easy. It replaces the data behind the Kayak input object
    # 'X' (the features used here for logistic regression) with whatever it
    # is called with and switches the batcher into test mode, so dropout is
    # disabled. The point is that we wind up with a function handle that
    # can be called with a numpy object, and it produces the target values
    # for novel data, using the parameters we just learned.
    def compute_predictions(x):
        X.data = x
        batcher.test_mode()
        return Y.value

    return compute_predictions

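# A hedged sketch of sweeping the regularization settings of the train()
# function above. The grid values and the synthetic data are illustrative
# assumptions.
npr.seed(2)
sweep_X = npr.randn(1000, 30)
sweep_labels = npr.randint(10, size=1000)
sweep_T = np.zeros((1000, 10))
sweep_T[np.arange(1000), sweep_labels] = 1.0

for drop in (0.0, 0.25, 0.5):
    for l2 in (0.0, 1e-3, 1e-1):
        pred = train(sweep_X, sweep_T,
                     batch_size=100, learn_rate=0.01, momentum=0.9,
                     l1_weight=0.0, l2_weight=l2, dropout=drop)
        acc = np.mean(np.argmax(pred(sweep_X), axis=1) == sweep_labels)
        print drop, l2, acc
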