def test_batcher_clears_value_cache():
    batcher = kayak.Batcher(1, 2)
    X = kayak.Inputs(np.array([[1, 2, 3], [2, 3, 4]]), batcher)
    Y = kayak.MatSum(X)
    correct_vals = [6, 9]
    for ii, batch in enumerate(batcher):
        assert Y.value == correct_vals[ii]
def test_batcher_clears_shape_cache():
    batcher = kayak.Batcher(2, 3)
    X = kayak.Inputs(np.array([[1, 2, 3], [2, 3, 4], [3, 4, 5]]), batcher)
    Y = kayak.MatSum(X, axis=1)
    correct_shapes = [(2, 1), (1, 1)]
    for ii, batch in enumerate(batcher):
        assert Y.shape == correct_shapes[ii]
def test_graph_diamond():
    npr.seed(2)

    N  = 10
    D  = 5
    H1 = 6
    H2 = 7

    X   = kayak.Inputs(npr.randn(N, D))
    W1  = kayak.Parameter(npr.randn(D, H1))
    W2a = kayak.Parameter(npr.randn(H1, H2))
    W2b = kayak.Parameter(npr.randn(H1, H2))
    W3  = kayak.Parameter(npr.randn(H2, 1))

    U1  = kayak.SoftReLU(kayak.MatMult(X, W1))
    U2a = kayak.SoftReLU(kayak.MatMult(U1, W2a))
    U2b = kayak.SoftReLU(kayak.MatMult(U1, W2b))
    U3a = kayak.SoftReLU(kayak.MatMult(U2a, W3))
    U3b = kayak.SoftReLU(kayak.MatMult(U2b, W3))

    out = kayak.MatSum(kayak.MatAdd(U3a, U3b))

    out.value
    print kayak.util.checkgrad(W1, out)
    print kayak.util.checkgrad(W2a, out)
    print kayak.util.checkgrad(W2b, out)
    print kayak.util.checkgrad(W3, out)
    assert kayak.util.checkgrad(W1, out) < MAX_GRAD_DIFF
    assert kayak.util.checkgrad(W2a, out) < MAX_GRAD_DIFF
    assert kayak.util.checkgrad(W2b, out) < MAX_GRAD_DIFF
    assert kayak.util.checkgrad(W3, out) < MAX_GRAD_DIFF
def test_batcher_updates_dropout():
    batcher = kayak.Batcher(5, 10)
    X = kayak.Inputs(np.random.randn(10, 10))
    Y = kayak.Dropout(X, batcher=batcher)
    val1 = Y.value
    batcher.next()
    val2 = Y.value
    assert not np.all(val1 == val2)
def test_batcher_can_reinstate_dropout_mask():
    batcher = kayak.Batcher(5, 10)
    X = kayak.Inputs(np.ones((10, 10)))
    Y = kayak.Dropout(X, batcher=batcher)
    assert not np.all(Y.value == np.ones((10, 10)))
    batcher.test_mode()
    print "Y value", Y.value
    assert np.all(Y.value == np.ones((10, 10)))
def test_irrelevant_outputs():
    # Having an irrelevant output shouldn't cause problems. Indeed, its
    # value and gradient should never be computed.
    class NoValue(kayak.Differentiable):
        def __init__(self, A, *args):
            # Recurse to handle lists of arguments.
            super(NoValue, self).__init__([A])

        def _compute_value(self):
            raise AttributeError("Value should not be called")

        def _local_grad(self, parent, d_out_d_self):
            raise AttributeError("Grad should not be called")

    X = kayak.Inputs(npr.randn(10, 20))
    Y = kayak.Inputs(npr.randn(10, 20))
    Z = X + Y
    bad_output = NoValue(X)
    Z.grad(X)  # Will raise an AttributeError if bad_output's value or grad is computed.
def test_dropout_clears_value_cache():
    X = kayak.Inputs(np.random.randn(10, 10))
    Y = kayak.Dropout(X)
    Z = kayak.MatSum(Y, axis=1)
    val1 = Z.value
    Y.draw_new_mask()
    val2 = Z.value
    assert not np.all(val1 == val2)
    assert np.all(Z.value == Z.value)
def test_irrelevant_outputs_2():
    # As above, but with a chain of irrelevant outputs.
    class NoValue(kayak.Differentiable):
        def __init__(self, A, *args):
            # Recurse to handle lists of arguments.
            super(NoValue, self).__init__([A])

        def _compute_value(self):
            raise AttributeError("Value should not be called")

        def _local_grad(self, parent, d_out_d_self):
            raise AttributeError("Grad should not be called")

    X = kayak.Inputs(npr.randn(10, 20))
    Y = kayak.Inputs(npr.randn(10, 20))
    Z = X + Y
    bad_pre_output = NoValue(X)
    bad_output = NoValue(bad_pre_output)
    Z.grad(X)  # Will raise an AttributeError if either NoValue node's value or grad is computed.
def test_batcher_updates_value():
    batcher = kayak.Batcher(12, 20)
    data = npr.randn(20, 7)
    X = kayak.Inputs(data, batcher)
    for i, batch in enumerate(batcher):
        if i == 0:
            assert np.all(X.value == data[:12, :])
        elif i == 1:
            assert np.all(X.value == data[12:, :])
        else:
            assert False
    batcher.test_mode()
    assert np.all(X.value == data)
def test_graph_simple():
    npr.seed(1)

    N  = 1
    D  = 1
    H1 = 1

    X  = kayak.Inputs(npr.randn(N, D))
    W1 = kayak.Parameter(npr.randn(D, H1))
    U3 = kayak.MatMult(W1, X)
    out = U3

    print "Value: ", out.value
    print "Gradient: ", out.grad(W1)
    print "Grad error: ", kayak.util.checkgrad(W1, out)
    assert kayak.util.checkgrad(W1, out) < MAX_GRAD_DIFF
def test_cache_utility():
    npr.seed(3)

    num_layers = 17
    num_dims   = 3

    X  = kayak.Inputs(npr.randn(10, num_dims))
    W1 = kayak.Parameter(npr.randn(num_dims, num_dims))
    W2 = kayak.Parameter(npr.randn(num_dims, num_dims))

    Z = kayak.MatMult(X, W1)
    for jj in xrange(num_layers):
        Z = kayak.SoftReLU(kayak.MatAdd(kayak.MatMult(Z, W2),
                                        kayak.MatMult(Z, W2)))

    out = kayak.MatSum(Z)
    assert kayak.util.checkgrad(W1, out) < 1e-4
def test_graph_chain():
    npr.seed(1)

    N  = 10
    D  = 5
    H1 = 6
    H2 = 7

    X  = kayak.Inputs(npr.randn(N, D))
    W1 = kayak.Parameter(npr.randn(D, H1))
    W2 = kayak.Parameter(npr.randn(H1, H2))
    W3 = kayak.Parameter(npr.randn(H2, 1))

    U1 = kayak.SoftReLU(kayak.MatMult(X, W1))
    U2 = kayak.SoftReLU(kayak.MatMult(U1, W2))
    U3 = kayak.SoftReLU(kayak.MatMult(U2, W3))

    out = kayak.MatSum(U3)

    out.value
    assert kayak.util.checkgrad(W1, out) < MAX_GRAD_DIFF
    assert kayak.util.checkgrad(W2, out) < MAX_GRAD_DIFF
    assert kayak.util.checkgrad(W3, out) < MAX_GRAD_DIFF
def test_graph_dag():
    npr.seed(3)

    num_layers = 7
    num_dims   = 5

    for ii in xrange(NUM_TRIALS):
        probs = npr.rand()

        X = kayak.Inputs(npr.randn(25, num_dims))

        wts    = []
        layers = []
        for jj in xrange(num_layers):
            U = kayak.Constant(np.zeros((25, num_dims)))

            if npr.rand() < probs:
                W = kayak.Parameter(0.1 * npr.randn(num_dims, num_dims))
                wts.append(W)
                U = kayak.MatAdd(U, kayak.SoftReLU(kayak.MatMult(X, W)))

            for kk in xrange(jj):
                if npr.rand() < probs:
                    W = kayak.Parameter(0.1 * npr.randn(num_dims, num_dims))
                    wts.append(W)
                    U = kayak.MatAdd(U, kayak.SoftReLU(kayak.MatMult(layers[kk], W)))

            layers.append(U)

        out = kayak.MatSum(layers[-1])

        out.value
        for jj, wt in enumerate(wts):
            diff = kayak.util.checkgrad(wt, out, 1e-4)
            print diff
            assert diff < 1e-4
def test_data_update_clears_value_cache():
    X = kayak.Inputs(np.array([[1, 2, 3], [2, 3, 4], [3, 4, 5]]))
    assert np.all(X.value == [[1, 2, 3], [2, 3, 4], [3, 4, 5]])
    X.data = [1, 2]
    assert X._value is None
    assert np.all(X.value == [1, 2])
def kayak_mlp(X, y):
    """
    Kayak implementation of an MLP with a ReLU hidden layer and dropout.
    """
    # Create a batcher object.
    batcher = kayak.Batcher(batch_size, X.shape[0])

    # Count the number of rows and columns.
    num_examples, num_features = np.shape(X)

    X = kayak.Inputs(X, batcher)
    T = kayak.Targets(y, batcher)

    # ----------------------------- first hidden layer -------------------------------

    # Set up weights for the input layer, using the same scheme as the numpy MLP.
    input_range = 1.0 / num_features ** 0.5
    weights_1 = kayak.Parameter(0.1 * np.random.randn(X.shape[1], layer1_size))
    bias_1 = kayak.Parameter(0.1 * np.random.randn(1, layer1_size))

    # Linear combination of weights and inputs.
    hidden_1_input = kayak.ElemAdd(kayak.MatMult(X, weights_1), bias_1)

    # Apply the activation function to the hidden layer.
    hidden_1_activation = kayak.HardReLU(hidden_1_input)

    # Apply dropout for regularization.
    hidden_1_out = kayak.Dropout(hidden_1_activation, layer1_dropout, batcher=batcher)

    # ----------------------------- output layer -----------------------------------

    weights_out = kayak.Parameter(0.1 * np.random.randn(layer1_size, 9))
    bias_out = kayak.Parameter(0.1 * np.random.randn(1, 9))

    # Linear combination of the hidden-layer output and the output weights.
    out = kayak.ElemAdd(kayak.MatMult(hidden_1_out, weights_out), bias_out)

    # Apply the activation function to the output.
    yhat = kayak.SoftMax(out)

    # ----------------------------- loss function ----------------------------------

    loss = kayak.MatAdd(kayak.MatSum(kayak.L2Loss(yhat, T)),
                        kayak.L2Norm(weights_1, layer1_l2))

    # Use momentum for the gradient-based optimization.
    mom_grad_W1 = np.zeros(weights_1.shape)
    mom_grad_W2 = np.zeros(weights_out.shape)

    # Loop over epochs.
    plot_loss = np.ones((iterations, 2))
    for epoch in xrange(iterations):

        # Track the total loss.
        total_loss = 0.0

        for batch in batcher:
            # Compute the loss of this minibatch by asking the Kayak
            # object for its value.
            total_loss += loss.value

            # Now ask the loss for its gradient with respect to the
            # weights and the biases -- the things we're trying to learn.
            grad_W1 = loss.grad(weights_1)
            grad_B1 = loss.grad(bias_1)
            grad_W2 = loss.grad(weights_out)
            grad_B2 = loss.grad(bias_out)

            # Use momentum on the weight gradients.
            mom_grad_W1 = momentum * mom_grad_W1 + (1.0 - momentum) * grad_W1
            mom_grad_W2 = momentum * mom_grad_W2 + (1.0 - momentum) * grad_W2

            # Now make the actual parameter updates.
            weights_1.value   -= learn_rate * mom_grad_W1
            bias_1.value      -= learn_rate * grad_B1
            weights_out.value -= learn_rate * mom_grad_W2
            bias_out.value    -= learn_rate * grad_B2

        # Save values into a table to plot the learning curve at the end of training.
        plot_loss[epoch, 0] = epoch
        plot_loss[epoch, 1] = total_loss

        print epoch, total_loss

    #pyplot.plot(plot_loss[:,0], plot_loss[:,1], linewidth=2.0)
    #pyplot.show()

    def compute_predictions(x):
        X.data = x
        batcher.test_mode()
        return yhat.value

    return compute_predictions
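# --- Hypothetical driver code (not part of the original script) --------------
# kayak_mlp() reads several module-level hyperparameters.  The values below
# are illustrative placeholders, as are the train_X / train_y / valid_X
# arrays; only the variable names referenced inside kayak_mlp() come from the
# code above.
batch_size     = 256
layer1_size    = 500
layer1_dropout = 0.25
layer1_l2      = 1.0
iterations     = 20
learn_rate     = 0.01
momentum       = 0.9

pred_func   = kayak_mlp(train_X, train_y)   # returns compute_predictions
valid_probs = pred_func(valid_X)            # class probabilities for held-out data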
N = 10000
D = 5
P = 1
learn = 0.00001
batch_size = 500

# Random inputs.
X = npr.randn(N, D)
true_W = npr.randn(D, P)
lam = np.exp(np.dot(X, true_W))
Y = npr.poisson(lam)

kyk_batcher = kayak.Batcher(batch_size, N)

# Build network.
kyk_inputs = kayak.Inputs(X, kyk_batcher)

# Labels.
kyk_targets = kayak.Targets(Y, kyk_batcher)

# Weights.
W = 0.01 * npr.randn(D, P)
kyk_W = kayak.Parameter(W)

# Linear layer.
kyk_activation = kayak.MatMult(kyk_inputs, kyk_W)

# Exponential inverse-link function.
kyk_lam = kayak.ElemExp(kyk_activation)

# Poisson negative log likelihood.
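# --- Hypothetical completion (not part of the original script) ---------------
# The script above stops right after the "Poisson negative log likelihood"
# comment, so the loss node and training loop below are only a sketch.  The
# loss constructor name (kayak.PoissonLoss) is an assumption about the library
# API; the batcher / value / grad usage mirrors the other training loops in
# this repo.
kyk_loss = kayak.MatSum(kayak.PoissonLoss(kyk_lam, kyk_targets))

for epoch in xrange(10):
    total_loss = 0.0
    for batch in kyk_batcher:
        # Accumulate the minibatch loss and take a plain SGD step on the weights.
        total_loss += kyk_loss.value
        kyk_W.value -= learn * kyk_loss.grad(kyk_W)
    print epoch, total_loss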
def train(inputs, targets):
    # Create a batcher object.
    batcher = kayak.Batcher(batch_size, inputs.shape[0])

    # Inputs and targets need access to the batcher.
    X = kayak.Inputs(inputs, batcher)
    T = kayak.Targets(targets, batcher)

    # First-layer weights and biases, with random initializations.
    W1 = kayak.Parameter(0.1 * npr.randn(inputs.shape[1], layer1_sz))
    B1 = kayak.Parameter(0.1 * npr.randn(1, layer1_sz))

    # First hidden layer: ReLU + Dropout.
    H1 = kayak.Dropout(kayak.HardReLU(kayak.ElemAdd(kayak.MatMult(X, W1), B1)),
                       layer1_dropout, batcher=batcher)

    # Second-layer weights and biases, with random initializations.
    W2 = kayak.Parameter(0.1 * npr.randn(layer1_sz, layer2_sz))
    B2 = kayak.Parameter(0.1 * npr.randn(1, layer2_sz))

    # Second hidden layer: ReLU + Dropout.
    H2 = kayak.Dropout(kayak.HardReLU(kayak.ElemAdd(kayak.MatMult(H1, W2), B2)),
                       layer2_dropout, batcher=batcher)

    # Output layer weights and biases, with random initializations.
    W3 = kayak.Parameter(0.1 * npr.randn(layer2_sz, 10))
    B3 = kayak.Parameter(0.1 * npr.randn(1, 10))

    # Output layer.
    Y = kayak.LogSoftMax(kayak.ElemAdd(kayak.MatMult(H2, W3), B3))

    # The training loss is the negative multinomial log likelihood.
    loss = kayak.MatSum(kayak.LogMultinomialLoss(Y, T))

    # Use momentum for the gradient-based optimization.
    mom_grad_W1 = np.zeros(W1.shape)
    mom_grad_W2 = np.zeros(W2.shape)
    mom_grad_W3 = np.zeros(W3.shape)

    # Loop over epochs.
    for epoch in xrange(10):

        # Track the total loss.
        total_loss = 0.0

        # Loop over batches -- using the batcher as an iterator.
        for batch in batcher:
            # Compute the loss of this minibatch by asking the Kayak
            # object for its value.
            total_loss += loss.value

            # Now ask the loss for its gradient with respect to the
            # weights and the biases -- the things we're trying to learn.
            grad_W1 = loss.grad(W1)
            grad_B1 = loss.grad(B1)
            grad_W2 = loss.grad(W2)
            grad_B2 = loss.grad(B2)
            grad_W3 = loss.grad(W3)
            grad_B3 = loss.grad(B3)

            # Use momentum on the weight gradients.
            mom_grad_W1 = momentum * mom_grad_W1 + (1.0 - momentum) * grad_W1
            mom_grad_W2 = momentum * mom_grad_W2 + (1.0 - momentum) * grad_W2
            mom_grad_W3 = momentum * mom_grad_W3 + (1.0 - momentum) * grad_W3

            # Now make the actual parameter updates.
            W1.value -= learn_rate * mom_grad_W1
            B1.value -= learn_rate * grad_B1
            W2.value -= learn_rate * mom_grad_W2
            B2.value -= learn_rate * grad_B2
            W3.value -= learn_rate * mom_grad_W3
            B3.value -= learn_rate * grad_B3

        print epoch, total_loss

    # After we've trained, we return a small convenience function: swap new
    # data into the input object, switch the batcher into test mode (so the
    # full data set is used and dropout is turned off), and read the network
    # output.  The result is a function handle that can be called with a
    # numpy array and produces predictions for novel data, using the
    # parameters we just learned.
    def compute_predictions(x):
        X.data = x
        batcher.test_mode()
        return Y.value

    return compute_predictions
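# --- Hypothetical usage (not part of the original script) --------------------
# Illustrative only: the data arrays below are placeholders.  train() returns
# compute_predictions; applying it to held-out inputs gives log-probabilities,
# and the argmax is compared against one-hot targets to estimate accuracy.
pred_func = train(train_images, train_labels)
log_probs = pred_func(valid_images)
accuracy  = np.mean(np.argmax(log_probs, axis=1) == np.argmax(valid_labels, axis=1))
print "Validation accuracy:", accuracy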
def train(inputs, targets, batch_size, learn_rate, momentum,
          l1_weight, l2_weight, dropout, improvement_thresh):

    # Create a batcher object.
    batcher = kayak.Batcher(batch_size, inputs.shape[0])

    # Inputs and targets need access to the batcher.
    X = kayak.Inputs(inputs, batcher)
    T = kayak.Targets(targets, batcher)

    # Put some dropout regularization on the inputs.
    H = kayak.Dropout(X, dropout)

    # Weights and biases, with random initializations.
    W = kayak.Parameter(0.1 * npr.randn(inputs.shape[1], 10))
    B = kayak.Parameter(0.1 * npr.randn(1, 10))

    # Nothing fancy here: inputs times weights, plus bias, then softmax.
    Y = kayak.LogSoftMax(kayak.ElemAdd(kayak.MatMult(H, W), B))

    # The training loss is the negative multinomial log likelihood,
    # plus L2 and L1 penalties on the weights.
    loss = kayak.MatAdd(kayak.MatSum(kayak.LogMultinomialLoss(Y, T)),
                        kayak.L2Norm(W, l2_weight),
                        kayak.L1Norm(W, l1_weight))

    # Use momentum for the gradient-based optimization.
    mom_grad_W = np.zeros(W.shape)

    best_loss  = np.inf
    best_epoch = -1

    # Loop over epochs.
    for epoch in range(100):

        # Track the total loss.
        total_loss = 0.0

        # Loop over batches -- using the batcher as an iterator.
        for batch in batcher:
            # Draw new random dropout masks.
            H.draw_new_mask()

            # Compute the loss of this minibatch by asking the Kayak
            # object for its value.
            total_loss += loss.value

            # Now ask the loss for its gradient with respect to the
            # weights and the bias -- the things we're trying to learn.
            grad_W = loss.grad(W)
            grad_B = loss.grad(B)

            # Use momentum on the weight gradient.
            mom_grad_W *= momentum
            mom_grad_W += (1.0 - momentum) * grad_W

            # Now make the actual parameter updates.
            W.value -= learn_rate * mom_grad_W
            B.value -= learn_rate * grad_B

        print("Epoch: %d, total loss: %f" % (epoch, total_loss))

        if not np.isfinite(total_loss):
            print("Training diverged. Returning constraint violation.")
            break

        if total_loss < best_loss:
            best_loss  = total_loss
            best_epoch = epoch
        elif (epoch - best_epoch) > improvement_thresh:
            print("Has been %d epochs without improvement. Aborting."
                  % (epoch - best_epoch))
            break

    # After we've trained, we return a small convenience function: swap new
    # data into the input node, turn dropout off, and run the graph forward
    # to produce predictions for novel data using the parameters we just
    # learned.
    def predict(x):
        X.data = x
        H.reinstate_units()
        return Y.value

    return predict
def train(inputs, targets, batch_size, learn_rate, momentum,
          l1_weight, l2_weight, dropout):

    # Create a batcher object.
    batcher = kayak.Batcher(batch_size, inputs.shape[0])

    # Inputs and targets need access to the batcher.
    X = kayak.Inputs(inputs, batcher)
    T = kayak.Targets(targets, batcher)

    # Weights and biases, with random initializations.
    W = kayak.Parameter(0.1 * npr.randn(inputs.shape[1], 10))
    B = kayak.Parameter(0.1 * npr.randn(1, 10))

    # Nothing fancy here: dropout on the inputs, then inputs times weights,
    # plus bias, then softmax.
    dropout_layer = kayak.Dropout(X, dropout, batcher=batcher)
    Y = kayak.LogSoftMax(kayak.ElemAdd(kayak.MatMult(dropout_layer, W), B))

    # The training loss is the negative multinomial log likelihood,
    # plus L2 and L1 penalties on the weights.
    loss = kayak.MatAdd(kayak.MatSum(kayak.LogMultinomialLoss(Y, T)),
                        kayak.L2Norm(W, l2_weight),
                        kayak.L1Norm(W, l1_weight))

    # Use momentum for the gradient-based optimization.
    mom_grad_W = np.zeros(W.shape)

    # Loop over epochs.
    for epoch in xrange(10):

        # Track the total loss and the overall gradient.
        total_loss   = 0.0
        total_grad_W = np.zeros(W.shape)

        # Loop over batches -- using the batcher as an iterator.
        for batch in batcher:
            # Compute the loss of this minibatch by asking the Kayak
            # object for its value.
            total_loss += loss.value

            # Now ask the loss for its gradient with respect to the
            # weights and the bias -- the things we're trying to learn.
            grad_W = loss.grad(W)
            grad_B = loss.grad(B)

            # Use momentum on the weight gradient.
            mom_grad_W = momentum * mom_grad_W + (1.0 - momentum) * grad_W

            # Now make the actual parameter updates.
            W.value -= learn_rate * mom_grad_W
            B.value -= learn_rate * grad_B

            # Keep track of the gradient to see if we're converging.
            total_grad_W += grad_W

        #print epoch, total_loss, np.sum(total_grad_W**2)

    # After we've trained, we return a small convenience function: swap new
    # data into the input object, switch the batcher into test mode, and read
    # the network output.  The result is a function handle that can be called
    # with a numpy array and produces predictions for novel data, using the
    # parameters we just learned.
    def compute_predictions(x):
        X.data = x
        batcher.test_mode()
        return Y.value

    return compute_predictions