def test_feedforward_theano_mix():
    minibatch_size = 100
    random_state = np.random.RandomState(1999)
    graph = OrderedDict()
    X_sym, y_sym = add_datasets_to_graph([X, y], ["X", "y"], graph)
    l1_o = linear_layer([X_sym], graph, 'l1', proj_dim=20,
                        random_state=random_state)
    # mix a raw Theano expression in between library layers
    l1_o = .999 * l1_o
    y_pred = softmax_layer([l1_o], graph, 'pred', n_classes,
                           random_state=random_state)
    cost = categorical_crossentropy(y_pred, y_sym).mean()
    params, grads = get_params_and_grads(graph, cost)
    learning_rate = 0.001
    opt = sgd(params)
    updates = opt.updates(params, grads, learning_rate)
    fit_function = theano.function([X_sym, y_sym], [cost], updates=updates,
                                   mode="FAST_COMPILE")
    cost_function = theano.function([X_sym, y_sym], [cost],
                                    mode="FAST_COMPILE")
    checkpoint_dict = {}
    train_indices = np.arange(len(X))
    valid_indices = np.arange(len(X))
    early_stopping_trainer(fit_function, cost_function, checkpoint_dict,
                           [X, y], minibatch_size,
                           train_indices, valid_indices,
                           fit_function_output_names=["cost"],
                           cost_function_output_name="valid_cost",
                           n_epochs=1)
def test_batch_normalization():
    random_state = np.random.RandomState(1999)
    graph = OrderedDict()
    X_sym, y_sym = add_datasets_to_graph([X, y], ["X", "y"], graph,
                                         list_of_test_values=[X, y])
    # scalar switch: 0 uses minibatch statistics (training),
    # 1 uses accumulated running statistics (inference)
    on_off = tensor.iscalar()
    on_off.tag.test_value = 1
    l1 = relu_layer([X_sym], graph, "proj", proj_dim=5,
                    batch_normalize=True, mode_switch=on_off,
                    random_state=random_state)
    l2 = relu_layer([l1], graph, "proj2", proj_dim=5,
                    batch_normalize=True, mode_switch=on_off,
                    random_state=random_state)
    f = theano.function([X_sym, on_off], [l2], mode="FAST_COMPILE")
    params, grads = get_params_and_grads(graph, l2.mean())
    opt = sgd(params, .1)
    updates = opt.updates(params, grads)
    train_f = theano.function([X_sym, on_off], [l2], mode="FAST_COMPILE",
                              updates=updates)
    valid_f = theano.function([X_sym, on_off], [l2], mode="FAST_COMPILE")
    X1 = random_state.rand(*X.shape)
    X2 = np.vstack([X1, .5 * X1])
    t1 = train_f(X1, 0)[0]
    t2 = valid_f(X1, 1)[0]
    t3 = train_f(X2, 0)[0]
    t4 = valid_f(X1, 1)[0]
    t5 = valid_f(X1, 1)[0]
    # inference output is deterministic for fixed inputs...
    assert_almost_equal(t4, t5)
    # ...but shifts once further training updates the parameters and
    # running statistics
    assert_raises(AssertionError, assert_almost_equal, t2, t4)
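# A minimal numpy sketch of the train/inference switch exercised above,
# assuming the batch_normalize option follows the standard scheme
# (Ioffe & Szegedy, 2015): minibatch statistics while training, running
# averages at inference. The names here (batch_norm_sketch, momentum)
# are illustrative, not the library's API.
import numpy as np

def batch_norm_sketch(x, running_mean, running_var, train_mode,
                      momentum=0.99, eps=1e-8):
    if train_mode:
        # normalize with this minibatch, then fold its statistics
        # into the running averages in place
        mean, var = x.mean(axis=0), x.var(axis=0)
        running_mean *= momentum
        running_mean += (1. - momentum) * mean
        running_var *= momentum
        running_var += (1. - momentum) * var
    else:
        # inference: deterministic given fixed running statistics,
        # which is why t4 and t5 above match exactly
        mean, var = running_mean, running_var
    return (x - mean) / np.sqrt(var + eps)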
def test_vae():
    minibatch_size = 100
    random_state = np.random.RandomState(1999)
    graph = OrderedDict()
    X_sym, y_sym = add_datasets_to_graph([X, y], ["X", "y"], graph)
    l1_enc = relu_layer([X_sym, y_sym], graph, 'l1_enc', proj_dim=20,
                        random_state=random_state)
    mu = linear_layer([l1_enc], graph, 'mu', proj_dim=10,
                      random_state=random_state)
    log_sigma = linear_layer([l1_enc], graph, 'log_sigma', proj_dim=10,
                             random_state=random_state)
    samp = gaussian_log_sample_layer([mu], [log_sigma], graph,
                                     'gaussian_log_sample',
                                     random_state=random_state)
    l1_dec = relu_layer([samp], graph, 'l1_dec', proj_dim=20,
                        random_state=random_state)
    out = sigmoid_layer([l1_dec], graph, 'out', proj_dim=X.shape[1],
                        random_state=random_state)
    kl = gaussian_log_kl([mu], [log_sigma], graph, 'gaussian_kl').mean()
    cost = binary_crossentropy(out, X_sym).mean() + kl
    params, grads = get_params_and_grads(graph, cost)
    learning_rate = 0.001
    opt = sgd(params)
    updates = opt.updates(params, grads, learning_rate)
    train_function = theano.function([X_sym, y_sym], [cost],
                                     updates=updates, mode="FAST_COMPILE")
    iterate_function(train_function, [X, y], minibatch_size,
                     list_of_output_names=["cost"], n_epochs=1)
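# gaussian_log_kl above should correspond to the closed-form KL between
# the encoder's diagonal Gaussian q(z|x) = N(mu, sigma^2) and the unit
# Gaussian prior (Kingma & Welling, 2014), with sigma carried as
# log_sigma for numerical stability. A numpy sketch of that formula,
# offered as an assumption about the layer rather than its exact code:
import numpy as np

def gaussian_log_kl_sketch(mu, log_sigma):
    # KL(N(mu, sigma^2) || N(0, I))
    #   = -0.5 * sum(1 + log sigma^2 - mu^2 - sigma^2)
    # with log sigma^2 = 2 * log_sigma and sigma^2 = exp(2 * log_sigma)
    return -0.5 * np.sum(1. + 2. * log_sigma - mu ** 2
                         - np.exp(2. * log_sigma), axis=-1)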
def test_tanh_rnn():
    # random state so script is deterministic
    random_state = np.random.RandomState(1999)
    # home of the computational graph
    graph = OrderedDict()
    # number of hidden features
    n_hid = 10
    # number of output_features = input_features
    n_out = X.shape[-1]
    minibatch_size = 10  # assumed; set at module scope in the original file
    # input (where first dimension is time)
    datasets_list = [X, X_mask, y, y_mask]
    names_list = ["X", "X_mask", "y", "y_mask"]
    test_values_list = [X, X_mask, y, y_mask]
    X_sym, X_mask_sym, y_sym, y_mask_sym = add_datasets_to_graph(
        datasets_list, names_list, graph,
        list_of_test_values=test_values_list)
    # Setup weights
    l1 = linear_layer([X_sym], graph, 'l1_proj', n_hid, random_state)
    h = tanh_recurrent_layer([l1], X_mask_sym, n_hid, graph, 'l1_rec',
                             random_state)
    # linear output activation
    y_hat = linear_layer([h], graph, 'l2_proj', n_out, random_state)
    # error between output and target
    cost = squared_error(y_hat, y_sym)
    cost = masked_cost(cost, y_mask_sym).mean()
    # Parameters of the model
    params, grads = get_params_and_grads(graph, cost)
    # Use stochastic gradient descent to optimize
    opt = sgd(params)
    learning_rate = 0.001
    updates = opt.updates(params, grads, learning_rate)
    fit_function = theano.function([X_sym, X_mask_sym, y_sym, y_mask_sym],
                                   [cost], updates=updates,
                                   mode="FAST_COMPILE")
    cost_function = theano.function([X_sym, X_mask_sym, y_sym, y_mask_sym],
                                    [cost], mode="FAST_COMPILE")
    checkpoint_dict = {}
    train_indices = np.arange(X.shape[1])
    valid_indices = np.arange(X.shape[1])
    early_stopping_trainer(fit_function, cost_function, checkpoint_dict,
                           [X, y], minibatch_size,
                           train_indices, valid_indices,
                           fit_function_output_names=["cost"],
                           cost_function_output_name="valid_cost",
                           n_epochs=1)
def test_tanh_rnn():
    # random state so script is deterministic
    random_state = np.random.RandomState(1999)
    # home of the computational graph
    graph = OrderedDict()
    # number of hidden features
    n_hid = 10
    # number of output_features = input_features
    n_out = X.shape[-1]
    minibatch_size = 10  # assumed; set at module scope in the original file
    # input (where first dimension is time)
    datasets_list = [X, X_mask, y, y_mask]
    names_list = ["X", "X_mask", "y", "y_mask"]
    test_values_list = [X, X_mask, y, y_mask]
    X_sym, X_mask_sym, y_sym, y_mask_sym = add_datasets_to_graph(
        datasets_list, names_list, graph,
        list_of_test_values=test_values_list)
    # Setup weights
    l1 = linear_layer([X_sym], graph, 'l1_proj', proj_dim=n_hid,
                      random_state=random_state)
    h = tanh_recurrent_layer([l1], X_mask_sym, n_hid, graph, 'l1_rec',
                             random_state)
    # linear output activation
    y_hat = linear_layer([h], graph, 'l2_proj', proj_dim=n_out,
                         random_state=random_state)
    # error between output and target
    cost = squared_error(y_hat, y_sym)
    cost = masked_cost(cost, y_mask_sym).mean()
    # Parameters of the model
    params, grads = get_params_and_grads(graph, cost)
    # Use stochastic gradient descent to optimize
    learning_rate = 0.001
    opt = sgd(params, learning_rate)
    updates = opt.updates(params, grads)
    fit_function = theano.function([X_sym, X_mask_sym, y_sym, y_mask_sym],
                                   [cost], updates=updates,
                                   mode="FAST_COMPILE")
    cost_function = theano.function([X_sym, X_mask_sym, y_sym, y_mask_sym],
                                    [cost], mode="FAST_COMPILE")
    checkpoint_dict = {}
    train_indices = np.arange(X.shape[1])
    valid_indices = np.arange(X.shape[1])
    early_stopping_trainer(fit_function, cost_function,
                           train_indices, valid_indices,
                           checkpoint_dict, [X, y], minibatch_size,
                           list_of_train_output_names=["cost"],
                           valid_output_name="valid_cost",
                           n_epochs=1)
def test_vae():
    minibatch_size = 10
    random_state = np.random.RandomState(1999)
    graph = OrderedDict()
    X_sym = add_datasets_to_graph([X], ["X"], graph)
    l1_enc = softplus_layer([X_sym], graph, 'l1_enc', proj_dim=100,
                            random_state=random_state)
    mu = linear_layer([l1_enc], graph, 'mu', proj_dim=50,
                      random_state=random_state)
    log_sigma = linear_layer([l1_enc], graph, 'log_sigma', proj_dim=50,
                             random_state=random_state)
    samp = gaussian_log_sample_layer([mu], [log_sigma], graph,
                                     'gaussian_log_sample',
                                     random_state=random_state)
    l1_dec = softplus_layer([samp], graph, 'l1_dec', proj_dim=100,
                            random_state=random_state)
    out = sigmoid_layer([l1_dec], graph, 'out', proj_dim=X.shape[1],
                        random_state=random_state)
    kl = gaussian_log_kl([mu], [log_sigma], graph, 'gaussian_kl').mean()
    cost = binary_crossentropy(out, X_sym).mean() + kl
    params, grads = get_params_and_grads(graph, cost)
    # zero learning rate: a pure smoke test, parameters never change
    learning_rate = 0.00000
    opt = sgd(params, learning_rate)
    updates = opt.updates(params, grads)
    fit_function = theano.function([X_sym], [cost], updates=updates,
                                   mode="FAST_COMPILE")
    cost_function = theano.function([X_sym], [cost], mode="FAST_COMPILE")
    checkpoint_dict = {}
    train_indices = np.arange(len(X))
    valid_indices = np.arange(len(X))
    early_stopping_trainer(fit_function, cost_function,
                           train_indices, valid_indices,
                           checkpoint_dict, [X], minibatch_size,
                           list_of_train_output_names=["cost"],
                           valid_output_name="valid_cost",
                           n_epochs=1)
def test_feedforward_classifier():
    minibatch_size = 100
    random_state = np.random.RandomState(1999)
    graph = OrderedDict()
    X_sym, y_sym = add_datasets_to_graph([X, y], ["X", "y"], graph)
    l1_o = linear_layer([X_sym], graph, "l1", proj_dim=20,
                        random_state=random_state)
    y_pred = softmax_layer([l1_o], graph, "pred", n_classes,
                           random_state=random_state)
    cost = categorical_crossentropy(y_pred, y_sym).mean()
    params, grads = get_params_and_grads(graph, cost)
    learning_rate = 0.001
    opt = sgd(params)
    updates = opt.updates(params, grads, learning_rate)
    train_function = theano.function([X_sym, y_sym], [cost],
                                     updates=updates, mode="FAST_COMPILE")
    iterate_function(train_function, [X, y], minibatch_size,
                     list_of_output_names=["cost"], n_epochs=1)
def test_fixed_projection_layer():
    random_state = np.random.RandomState(1999)
    rand_projection = random_state.randn(64, 12)
    graph = OrderedDict()
    X_sym = add_datasets_to_graph([X], ["X"], graph)
    out = fixed_projection_layer([X_sym], rand_projection, graph, 'proj')
    out2 = fixed_projection_layer([X_sym], rand_projection, graph, 'proj',
                                  pre=rand_projection[:, 0])
    out3 = fixed_projection_layer([X_sym], rand_projection, graph, 'proj',
                                  post=rand_projection[0])
    final = linear_layer([out2], graph, 'linear', 17,
                         random_state=random_state)
    # Test that it compiles with and without bias
    f = theano.function([X_sym], [out, out2, out3, final],
                        mode="FAST_COMPILE")
    # Test updates
    params, grads = get_params_and_grads(graph, final.mean())
    opt = sgd(params)
    updates = opt.updates(params, grads, .1)
    f2 = theano.function([X_sym], [out2, final], updates=updates)
    ret = f(np.ones_like(X))[0]
    assert ret.shape[1] != X.shape[1]
    ret2 = f(np.ones_like(X))[1]
    assert ret2.shape[1] != X.shape[1]
    out1, final1 = f2(X)
    out2, final2 = f2(X)
    # Make sure fixed basis is unchanged
    assert_almost_equal(out1, out2)
    # Make sure linear layer is updated
    assert_raises(AssertionError, assert_almost_equal, final1, final2)
def test_fixed_projection_layer():
    random_state = np.random.RandomState(1999)
    rand_projection = random_state.randn(64, 12)
    graph = OrderedDict()
    X_sym = add_datasets_to_graph([X], ["X"], graph)
    out = fixed_projection_layer([X_sym], rand_projection, graph, 'proj')
    out2 = fixed_projection_layer([X_sym], rand_projection, graph, 'proj',
                                  pre=rand_projection[:, 0])
    out3 = fixed_projection_layer([X_sym], rand_projection, graph, 'proj',
                                  post=rand_projection[0])
    final = linear_layer([out2], graph, 'linear', 17,
                         random_state=random_state)
    # Test that it compiles with and without bias
    f = theano.function([X_sym], [out, out2, out3, final],
                        mode="FAST_COMPILE")
    # Test updates
    params, grads = get_params_and_grads(graph, final.mean())
    opt = sgd(params, .1)
    updates = opt.updates(params, grads)
    f2 = theano.function([X_sym], [out2, final], updates=updates)
    ret = f(np.ones_like(X))[0]
    assert ret.shape[1] != X.shape[1]
    ret2 = f(np.ones_like(X))[1]
    assert ret2.shape[1] != X.shape[1]
    out1, final1 = f2(X)
    out2, final2 = f2(X)
    # Make sure fixed basis is unchanged
    assert_almost_equal(out1, out2)
    # Make sure linear layer is updated
    assert_raises(AssertionError, assert_almost_equal, final1, final2)
def test_correlated_mixture_density():
    # graph holds information necessary to build layers from parents
    random_state = np.random.RandomState(1999)
    graph = OrderedDict()
    X_sym, y_sym = add_datasets_to_graph([bernoulli_X, bernoulli_y],
                                         ["X", "y"], graph)
    n_hid = 20
    minibatch_size = len(bernoulli_X)
    train_indices = np.arange(len(bernoulli_X))
    valid_indices = np.arange(len(bernoulli_X))
    l1 = tanh_layer([X_sym], graph, 'l1', proj_dim=n_hid,
                    random_state=random_state)
    rval = bernoulli_and_correlated_log_gaussian_mixture_layer(
        [l1], graph, 'hw', proj_dim=2, n_components=3,
        random_state=random_state)
    binary, coeffs, mus, log_sigmas, corr = rval
    cost = bernoulli_and_correlated_log_gaussian_mixture_cost(
        binary, coeffs, mus, log_sigmas, corr, y_sym).mean()
    params, grads = get_params_and_grads(graph, cost)
    learning_rate = 1E-6
    opt = sgd(params, learning_rate)
    updates = opt.updates(params, grads)
    fit_function = theano.function([X_sym, y_sym], [cost], updates=updates,
                                   mode="FAST_COMPILE")
    cost_function = theano.function([X_sym, y_sym], [cost],
                                    mode="FAST_COMPILE")
    checkpoint_dict = create_checkpoint_dict(locals())
    epoch_results = fixed_n_epochs_trainer(
        fit_function, cost_function, train_indices, valid_indices,
        checkpoint_dict, [bernoulli_X, bernoulli_y], minibatch_size,
        list_of_train_output_names=["train_cost"],
        valid_output_name="valid_cost", n_epochs=1)
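# The layer and cost above follow the handwriting model of Graves (2013,
# "Generating Sequences With Recurrent Neural Networks"): a Bernoulli
# over the binary dimension plus a mixture of correlated bivariate
# Gaussians over the 2-d targets. As a sketch under that assumption,
# the log density of a single bivariate component with the layer's
# (mu, log_sigma, corr) parameterization is:
import numpy as np

def correlated_gaussian_log_density(x, mu, log_sigma, corr):
    # x, mu, log_sigma: arrays of shape (2,); corr: scalar in (-1, 1)
    sigma = np.exp(log_sigma)
    z = (((x - mu) / sigma) ** 2).sum() \
        - 2. * corr * np.prod((x - mu) / sigma)
    return (-z / (2. * (1. - corr ** 2))
            - np.log(2. * np.pi * sigma.prod()
                     * np.sqrt(1. - corr ** 2)))

# The full mixture cost log-sums these densities weighted by coeffs and
# adds the Bernoulli cross-entropy for the binary part.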
minibatch_size = 20
n_hid = 1000

l1 = relu_layer([X_sym], graph, 'l1', proj_dim=n_hid,
                random_state=random_state)
y_pred = softmax_zeros_layer([l1], graph, 'y_pred', proj_dim=n_targets)
nll = categorical_crossentropy(y_pred, y_sym).mean()
weights = get_weights_from_graph(graph)
L2 = sum([(w ** 2).sum() for w in weights])
cost = nll + .0001 * L2

params, grads = get_params_and_grads(graph, cost)

learning_rate = 1E-4
momentum = 0.95
opt = rmsprop(params, learning_rate, momentum)
updates = opt.updates(params, grads)

fit_function = theano.function([X_sym, y_sym], [cost], updates=updates)
cost_function = theano.function([X_sym, y_sym], [cost])
predict_function = theano.function([X_sym], [y_pred])

checkpoint_dict = create_checkpoint_dict(locals())

def error(*args):
    xargs = args[:-1]
    # the source snippet breaks off here; the rest of the body is filled
    # in to match the identical error helper in test_loop below
    y = args[-1]
    final_args = xargs
    y_pred = predict_function(*final_args)[0]
    return 1 - np.mean((np.argmax(y_pred, axis=1).ravel()) ==
                       (np.argmax(y, axis=1).ravel()))
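# The .0001 * L2 term above is plain L2 regularization: each weight w
# contributes 2 * .0001 * w to the gradient, so every update also decays
# the weights toward zero. A quick finite-difference check on a made-up
# scalar weight confirms that gradient:
import numpy as np
lam, w, eps = 1e-4, 3.0, 1e-6
fd_grad = (lam * (w + eps) ** 2 - lam * (w - eps) ** 2) / (2 * eps)
assert np.isclose(fd_grad, 2 * lam * w)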
code_log_sigma = linear_layer([l2_enc], graph, 'code_log_sigma', n_code,
                              random_state)
kl = gaussian_log_kl([code_mu], [code_log_sigma], graph, 'kl').mean()
samp = gaussian_log_sample_layer([code_mu], [code_log_sigma], graph,
                                 'samp', random_state)

# decode path aka p
l1_dec = softplus_layer([samp], graph, 'l1_dec', n_dec_layer[0],
                        random_state)
l2_dec = softplus_layer([l1_dec], graph, 'l2_dec', n_dec_layer[1],
                        random_state)
out = linear_layer([l2_dec], graph, 'out', n_input, random_state)

nll = squared_error(out, X_sym).mean()
# log p(x) = -nll so swap sign
# want to minimize cost in optimization so multiply by -1
cost = -1 * (-nll - kl)
params, grads = get_params_and_grads(graph, cost)

learning_rate = 0.0003
opt = adam(params)
updates = opt.updates(params, grads, learning_rate)

# Checkpointing: resume compiled functions and results from the last
# checkpoint if one exists, otherwise compile from scratch
try:
    checkpoint_dict = load_last_checkpoint()
    fit_function = checkpoint_dict["fit_function"]
    cost_function = checkpoint_dict["cost_function"]
    encode_function = checkpoint_dict["encode_function"]
    decode_function = checkpoint_dict["decode_function"]
    previous_epoch_results = checkpoint_dict["previous_epoch_results"]
except KeyError:
    # the source snippet breaks off mid-call here; closing it with the
    # updates defined above is an assumption
    fit_function = theano.function([X_sym], [nll, kl, nll + kl],
                                   updates=updates)
def test_loop():
    # graph holds information necessary to build layers from parents
    graph = OrderedDict()
    X_sym, y_sym = add_datasets_to_graph([X, y], ["X", "y"], graph)
    # random state so script is deterministic
    random_state = np.random.RandomState(1999)
    minibatch_size = 10

    y_pred = softmax_zeros_layer([X_sym], graph, "y_pred",
                                 proj_dim=n_targets)
    nll = categorical_crossentropy(y_pred, y_sym).mean()
    weights = get_weights_from_graph(graph)
    cost = nll
    params, grads = get_params_and_grads(graph, cost)
    learning_rate = 0.13
    opt = sgd(params, learning_rate)
    updates = opt.updates(params, grads)

    fit_function = theano.function([X_sym, y_sym], [cost], updates=updates)
    cost_function = theano.function([X_sym, y_sym], [cost])
    predict_function = theano.function([X_sym], [y_pred])

    checkpoint_dict = {
        "fit_function": fit_function,
        "cost_function": cost_function,
        "predict_function": predict_function,
    }

    def error(*args):
        xargs = args[:-1]
        y = args[-1]
        final_args = xargs
        y_pred = predict_function(*final_args)[0]
        return 1 - np.mean((np.argmax(y_pred, axis=1).ravel()) ==
                           (np.argmax(y, axis=1).ravel()))

    TL1 = TrainingLoop(fit_function, error,
                       train_indices[:10], valid_indices[:10],
                       minibatch_size,
                       checkpoint_dict=checkpoint_dict,
                       list_of_train_output_names=["train_cost"],
                       valid_output_name="valid_error",
                       n_epochs=1,
                       optimizer_object=opt)
    epoch_results1 = TL1.run([X, y])
    # continue the same loop on the next slice of indices
    TL1.train_indices = train_indices[10:20]
    TL1.valid_indices = valid_indices[10:20]
    epoch_results1 = TL1.run([X, y])

    TL2 = TrainingLoop(fit_function, error,
                       train_indices[:20], valid_indices[:20],
                       minibatch_size,
                       checkpoint_dict=checkpoint_dict,
                       list_of_train_output_names=["train_cost"],
                       valid_output_name="valid_error",
                       n_epochs=1,
                       optimizer_object=opt)
    epoch_results2 = TL2.run([X, y])
    # two runs over 10 + 10 indices should match one run over all 20
    r1 = TL1.checkpoint_dict["previous_results"]["train_cost"][-1]
    r2 = TL2.checkpoint_dict["previous_results"]["train_cost"][-1]
    assert r1 == r2
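# The error helper above is one minus classification accuracy for
# one-hot targets: predictions and labels are both argmax-ed over the
# class axis before comparison. A standalone check with made-up arrays:
import numpy as np
y_prob = np.array([[.9, .1], [.2, .8], [.6, .4]])
y_true = np.array([[1, 0], [0, 1], [0, 1]])  # last example misclassified
err = 1 - np.mean(np.argmax(y_prob, axis=1) == np.argmax(y_true, axis=1))
assert np.isclose(err, 1. / 3)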
def test_vae():
    minibatch_size = 10
    random_state = np.random.RandomState(1999)
    graph = OrderedDict()
    X_sym = add_datasets_to_graph([X], ["X"], graph)
    l1_enc = softplus_layer([X_sym], graph, 'l1_enc', proj_dim=100,
                            random_state=random_state)
    mu = linear_layer([l1_enc], graph, 'mu', proj_dim=50,
                      random_state=random_state)
    log_sigma = linear_layer([l1_enc], graph, 'log_sigma', proj_dim=50,
                             random_state=random_state)
    samp = gaussian_log_sample_layer([mu], [log_sigma], graph,
                                     'gaussian_log_sample',
                                     random_state=random_state)
    l1_dec = softplus_layer([samp], graph, 'l1_dec', proj_dim=100,
                            random_state=random_state)
    out = sigmoid_layer([l1_dec], graph, 'out', proj_dim=X.shape[1],
                        random_state=random_state)
    kl = gaussian_log_kl([mu], [log_sigma], graph, 'gaussian_kl').mean()
    cost = binary_crossentropy(out, X_sym).mean() + kl
    params, grads = get_params_and_grads(graph, cost)
    # zero learning rate: a pure smoke test, parameters never change
    learning_rate = 0.00000
    opt = sgd(params)
    updates = opt.updates(params, grads, learning_rate)
    fit_function = theano.function([X_sym], [cost], updates=updates,
                                   mode="FAST_COMPILE")
    cost_function = theano.function([X_sym], [cost], mode="FAST_COMPILE")
    checkpoint_dict = {}
    train_indices = np.arange(len(X))
    valid_indices = np.arange(len(X))
    early_stopping_trainer(fit_function, cost_function, checkpoint_dict,
                           [X], minibatch_size,
                           train_indices, valid_indices,
                           fit_function_output_names=["cost"],
                           cost_function_output_name="valid_cost",
                           n_epochs=1)