def test_gaussian_log_sample():
    del_shared()
    random_state = np.random.RandomState(1999)
    mu = linear([X_sym], [X.shape[1]], proj_dim=100, name='mu',
                random_state=random_state)
    sigma = linear([X_sym], [X.shape[1]], proj_dim=100, name='sigma',
                   random_state=random_state)
    random_state = np.random.RandomState(1999)
    r1 = gaussian_log_sample([mu], [sigma], name="samp1",
                             random_state=random_state)
    random_state = np.random.RandomState(1999)
    r2 = gaussian_log_sample([mu], [sigma], name="samp2",
                             random_state=random_state)
    random_state = np.random.RandomState(42)
    r3 = gaussian_log_sample([mu], [sigma], name="samp3",
                             random_state=random_state)
    sample_function = theano.function([X_sym], [r1, r2, r3],
                                      mode="FAST_COMPILE")
    s_r1, s_r2, s_r3 = sample_function(X[:100])
    # Same seed -> identical samples; different seed -> different samples
    assert_almost_equal(s_r1, s_r2)
    assert_raises(AssertionError, assert_almost_equal, s_r1, s_r3)
    # Calling again advances the random stream, so samples change
    ss_r1, ss_r2, ss_r3 = sample_function(X[:100])
    assert_raises(AssertionError, assert_almost_equal, s_r1, ss_r1)
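# For reference, gaussian_log_sample presumably draws from
# N(mu, exp(log_sigma) ** 2) via the reparameterization trick; a minimal
# numpy sketch of that assumed sampling rule (not the library source):
def gaussian_log_sample_sketch(mu, log_sigma, random_state):
    # z = mu + exp(log_sigma) * eps, with eps ~ N(0, I)
    eps = random_state.randn(*mu.shape).astype("float32")
    return mu + np.exp(log_sigma) * eps
# Seeding the noise through random_state is what makes r1 and r2 match
# above while r3 (seeded differently) does not.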
def run_lstm():
    del_shared()
    n_in = X.shape[-1]
    n_hid = 20
    n_out = y.shape[-1]
    random_state = np.random.RandomState(42)
    # the recurrent state holds hidden and cell values concatenated,
    # hence 2 * n_hid wide
    h_init = np.zeros((minibatch_size, 2 * n_hid)).astype("float32")
    h0 = tensor.fmatrix()
    l1 = lstm_fork([X_sym], [n_in], n_hid, name="l1",
                   random_state=random_state)

    def step(in_t, h_tm1):
        h_t = lstm(in_t, h_tm1, n_hid, name="rec",
                   random_state=random_state)
        return h_t

    h, _ = theano.scan(step, sequences=[l1], outputs_info=[h0])
    h_o = slice_state(h, n_hid)
    pred = linear([h_o], [n_hid], n_out, name="l2",
                  random_state=random_state)
    cost = ((y_sym - pred) ** 2).sum()
    params = list(get_params().values())
    grads = tensor.grad(cost, params)
    # tiny rate: this run mainly exercises graph compilation and updates
    learning_rate = 1E-12
    opt = sgd(params, learning_rate)
    updates = opt.updates(params, grads)
    f = theano.function([X_sym, y_sym, h0], [cost, h], updates=updates,
                        mode="FAST_COMPILE")
    f(X, y, h_init)
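# slice_state separates the hidden output from the cell state that scan
# carries along; assuming the state is laid out as [hidden, cell]
# (consistent with the 2 * n_hid wide h_init above), a numpy-style
# equivalent would be:
def slice_state_sketch(h, n_hid):
    # keep only the first n_hid units (the hidden output) of the
    # concatenated [hidden, cell] state
    return h[..., :n_hid]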
def test_feedforward_theano_mix():
    del_shared()
    minibatch_size = 100
    random_state = np.random.RandomState(1999)
    X_sym = tensor.fmatrix()
    y_sym = tensor.fmatrix()
    l1_o = linear([X_sym], [X.shape[1]], proj_dim=20, name='l1',
                  random_state=random_state)
    # mix a raw theano op into the graph between library layers
    l1_o = .999 * l1_o
    y_pred = softmax([l1_o], [20], proj_dim=n_classes, name='out',
                     random_state=random_state)
    cost = categorical_crossentropy(y_pred, y_sym).mean()
    params = list(get_params().values())
    grads = theano.grad(cost, params)
    learning_rate = 0.001
    opt = sgd(params, learning_rate)
    updates = opt.updates(params, grads)
    fit_function = theano.function([X_sym, y_sym], [cost], updates=updates,
                                   mode="FAST_COMPILE")
    cost_function = theano.function([X_sym, y_sym], [cost],
                                    mode="FAST_COMPILE")
    train_itr = minibatch_iterator([X, y], minibatch_size, axis=0)
    valid_itr = minibatch_iterator([X, y], minibatch_size, axis=0)
    X_train, y_train = next(train_itr)
    X_valid, y_valid = next(valid_itr)
    fit_function(X_train, y_train)
    cost_function(X_valid, y_valid)
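# opt.updates(params, grads) for plain sgd presumably builds the usual
# Theano update pairs; a sketch of that assumed pairing (not the actual
# library source):
def sgd_updates_sketch(params, grads, learning_rate):
    # vanilla SGD: p <- p - learning_rate * dcost/dp
    return [(p, p - learning_rate * g) for p, g in zip(params, grads)]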
def test_fixed_projection():
    random_state = np.random.RandomState(1999)
    rand_projection = random_state.randn(64, 12)
    rand_dim = rand_projection.shape[1]
    out = fixed_projection([X_sym], [X.shape[1]], rand_projection, 'proj1')
    out2 = fixed_projection([X_sym], [X.shape[1]], rand_projection, 'proj2',
                            pre=rand_projection[:, 0])
    out3 = fixed_projection([X_sym], [X.shape[1]], rand_projection, 'proj3',
                            post=rand_projection[0])
    final = linear([out2], [rand_dim], 5, 'linear',
                   random_state=random_state)
    # Test that it compiles with and without bias
    f = theano.function([X_sym], [out, out2, out3, final],
                        mode="FAST_COMPILE")
    # Test updates
    params = list(get_params().values())
    grads = tensor.grad(final.mean(), params)
    opt = sgd(params, .1)
    updates = opt.updates(params, grads)
    f2 = theano.function([X_sym], [out2, final], updates=updates)
    ret = f(np.ones_like(X))[0]
    assert ret.shape[1] != X.shape[1]
    ret2 = f(np.ones_like(X))[1]
    assert ret2.shape[1] != X.shape[1]
    out1, final1 = f2(X)
    out2, final2 = f2(X)
    # Make sure fixed basis is unchanged
    assert_almost_equal(out1, out2)
    # Make sure linear layer is updated
    assert_raises(AssertionError, assert_almost_equal, final1, final2)
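# The property under test: the projection matrix is frozen, so gradients
# flow through it but never update it, while the downstream linear layer
# keeps training. A numpy sketch of the assumed pre/post semantics,
# inferred from the shapes used above (not the library source):
def fixed_projection_sketch(X, proj, pre=None, post=None):
    # optional bias on the way in, multiply by the fixed basis,
    # optional bias on the way out
    if pre is not None:
        X = X + pre
    out = np.dot(X, proj)
    if post is not None:
        out = out + post
    return out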
# random state so script is deterministic
random_state = np.random.RandomState(1999)

minibatch_size = 100
n_code = 100
n_hid = 200
width = 28
height = 28
n_input = width * height

# encode path aka q
l1_enc = softplus([X_sym], [X.shape[1]], proj_dim=n_hid, name='l1_enc',
                  random_state=random_state)
l2_enc = softplus([l1_enc], [n_hid], proj_dim=n_hid, name='l2_enc',
                  random_state=random_state)
code_mu = linear([l2_enc], [n_hid], proj_dim=n_code, name='code_mu',
                 random_state=random_state)
code_log_sigma = linear([l2_enc], [n_hid], proj_dim=n_code,
                        name='code_log_sigma', random_state=random_state)
kl = gaussian_log_kl([code_mu], [code_log_sigma]).mean()
sample_state = np.random.RandomState(2177)
samp = gaussian_log_sample([code_mu], [code_log_sigma], name='samp',
                           random_state=sample_state)

# decode path aka p
l1_dec = softplus([samp], [n_code], proj_dim=n_hid, name='l1_dec',
                  random_state=random_state)
l2_dec = softplus([l1_dec], [n_hid], proj_dim=n_hid, name='l2_dec',
                  random_state=random_state)
out = sigmoid([l2_dec], [n_hid], proj_dim=X.shape[1], name='out',
              random_state=random_state)
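# gaussian_log_kl should be the closed-form KL divergence between the
# diagonal Gaussian q(z | x) = N(code_mu, exp(code_log_sigma) ** 2) and a
# standard normal prior; with a log standard deviation parameterization
# that is (a sketch of the assumed formula, not the library source):
def gaussian_log_kl_sketch(mu, log_sigma):
    # KL(N(mu, exp(log_sigma) ** 2) || N(0, I)), summed over code dims
    return -0.5 * np.sum(1. + 2. * log_sigma - mu ** 2
                         - np.exp(2. * log_sigma), axis=-1)
# The full VAE cost then adds a reconstruction term on out (typically a
# binary crossentropy against X_sym) to this kl term.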
random_state = np.random.RandomState(1999)
X_fork = lstm_fork([X_sym], [n_in], n_hid, name="h1",
                   random_state=random_state)


def step(in_t, h_tm1):
    h_t = lstm(in_t, h_tm1, [n_in], n_hid, name=None,
               random_state=random_state)
    return h_t

h, _ = theano.scan(step, sequences=[X_fork], outputs_info=[h0])
h_o = slice_state(h, n_hid)
y_pred = linear([h_o], [n_hid], n_out, name="h2",
                random_state=random_state)
cost = ((y_sym - y_pred) ** 2).sum()
params = list(get_params().values())
grads = tensor.grad(cost, params)
learning_rate = 0.001
opt = sgd(params, learning_rate)
updates = opt.updates(params, grads)

fit_function = theano.function([X_sym, y_sym, h0], [cost, h],
                               updates=updates)
cost_function = theano.function([X_sym, y_sym, h0], [cost, h])
predict_function = theano.function([X_sym, h0], [y_pred, h])


def train_loop(itr):
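# A minimal sketch of a training loop of this shape, assuming itr yields
# (X_mb, y_mb) minibatches and the hidden state is re-initialized to
# zeros for each call (a hypothetical body, not the original; assumes
# minibatch_size and n_hid from the surrounding script):
def train_loop_sketch(itr, n_iter=100):
    h_init = np.zeros((minibatch_size, 2 * n_hid)).astype("float32")
    costs = []
    for i in range(n_iter):
        X_mb, y_mb = next(itr)
        cost, _ = fit_function(X_mb, y_mb, h_init)
        costs.append(cost)
    return costs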