def train_bnn(data='expx', n_data=50, n_samples=20, arch=None,
              prior_params=None, prior_type=None, act='rbf', iters=300,
              lr=0.01, plot=True, save=False):
    """Train a BNN by maximizing the variational lower bound with Adam.

    Args:
        data: dataset name (str) passed to build_toy_dataset, or an
            (inputs, targets) pair supplied directly.
        n_data: number of toy data points when `data` is a name.
        n_samples: Monte-Carlo samples for the VLB objective.
        arch: layer sizes; defaults to [1, 20, 1] (was a mutable default).
        prior_params, prior_type: forwarded to vlb_objective.
        act: activation name used by the BNN.
        iters, lr: Adam iteration count and step size.
        plot: if True, draw sampled functions each iteration.
        save: unused here; kept for interface compatibility.
    """
    # Avoid the shared-mutable-default-argument pitfall.
    arch = [1, 20, 1] if arch is None else arch

    if isinstance(data, str):
        inputs, targets = build_toy_dataset(data=data, n_data=n_data)
    else:
        inputs, targets = data

    if plot:
        fig, ax = p.setup_plot()

    init_params = init_var_params(arch)

    def loss(params, t):
        return vlb_objective(params, inputs, targets, arch, n_samples, act=act,
                             prior_params=prior_params, prior_type=prior_type)

    def callback(params, t, g):
        # Guarded: the original always touched `ax`, which is undefined
        # when plot=False (NameError).
        if plot:
            plot_inputs = np.linspace(-10, 10, num=500)[:, None]
            f_bnn = sample_bnn(params, plot_inputs, 5, arch, act)
            p.plot_iter(ax, inputs, plot_inputs, targets, f_bnn)
        print("ITER {} | LOSS {}".format(t, -loss(params, t)))

    var_params = adam(grad(loss), init_params, step_size=lr,
                      num_iters=iters, callback=callback)
def main():
    """Normalise CAE embeddings, run t-SNE, and save a labelled 2-D plot."""
    npz_fn = ("../embeddings/models/buckeye.mfcc.utd/train_cae/e6f4584e05/"
        "cae.best_val.test.npz")
    print("Reading:", npz_fn)
    npz = np.load(npz_fn)

    # Length-normalise every embedding (the original wrapped this in a
    # dead `if True:` guard and later read the stale `norm_npz` name).
    print("Normalising embeddings")
    npz = {key: npz[key] / np.linalg.norm(npz[key]) for key in npz}

    embeddings, labels = get_embeds_and_labels(npz, KEYWORDS_FILTER)

    # Perform t-SNE
    tsne = manifold.TSNE(n_components=2, perplexity=TSNE_PERPLEXITY,
        init="random", random_state=0)
    X_tsne = tsne.fit_transform(embeddings)

    # Plot t-SNE
    plotting.setup_plot()
    plt.rcParams["figure.figsize"] = 4.0, 2.5
    plt.rcParams["figure.subplot.bottom"] = 0.01
    plt.rcParams["figure.subplot.left"] = 0.01
    plt.rcParams["figure.subplot.right"] = 0.99
    plt.rcParams["figure.subplot.top"] = 0.99
    plot_labelled_2d_data(X_tsne, labels)
    plt.legend(loc="best", ncol=2)
    plt.ylim([-21, 39])
    plt.yticks([])
    plt.xticks([])
    plt.savefig("cae_embeddings.pdf")
def __init__(self, system_str):
    """Build the system from its spec string, design an LQR gain, and
    initialise the plot artists.
    """
    # Copy every field from the parsed system description onto self.
    for attr_name, attr_value in make_system_data(system_str).items():
        setattr(self, attr_name, attr_value)

    # Operate around the first listed equilibrium point.
    self.x = self.x_eq_list[0]
    self.u = self.u_eq_list[0]

    # Linearize the dynamics and quadratize the cost at (x, u),
    # then compute the feedback gain.
    lin_A, lin_B = linearize(self.dynamics, self.x, self.u)
    cost_Q, cost_R, cost_S = quadratize(self.cost, self.x, self.u)
    self.K = get_gain(lin_A, lin_B, cost_Q, cost_R, cost_S)

    # Set up the figure and a drawing function bound to this system's
    # artist properties, then draw the initial state.
    self.fig, self.ax = setup_plot(system_str)
    self.draw_system = partial(draw_system_parametric,
                               make_artist_props=self.make_artist_props)
    self.artists = self.draw_system(self.x, self.u, self.x, self.u,
                                    self.ax, artists=None)
def train_gp(D=1, data='xsinx', n_data=5):
    """Draw GP hyperparameters at random, then plot the predictive mean,
    sampled functions, and the data.

    Args:
        D: input dimensionality (sets the number of mean parameters).
        data: toy-dataset name passed to build_toy_dataset.
        n_data: number of data points to generate.
    """
    num_params = D + 3  # mean, 2 kernel params, noise
    params = 0.1 * rs.randn(num_params)

    X, y = build_toy_dataset(data, n_data)
    y = y.ravel()
    # NOTE: the original rebound the parameter `D` to an unused
    # (X, y[:, None]) tuple, clobbering the dimensionality; removed.

    fig, ax = plotting.setup_plot()
    x_plot = np.reshape(np.linspace(-8, 8, 400), (400, 1))

    # Predictive mean/cov: shapes [N_plot], [N_plot, N_plot].
    pred_mean, pred_cov = predict(params, X, y, x_plot)
    ax.plot(x_plot, pred_mean, 'b')

    # Overlay a few functions sampled from the GP, plus the raw data.
    # (renamed from `p`, which shadows the plotting-module alias used
    # elsewhere in this file)
    sampled_fs = sample_functions(params, X, y, x_plot, 3)
    ax.plot(x_plot, sampled_fs)
    ax.plot(X.ravel(), y.ravel(), '.')
    plt.show()
def train_bnn(data='expx', n_data=20, n_samples=5, arch=None,
              prior_params=None, prior_type=None, act='rbf', iters=65,
              lr=0.07, plot=True, save=False):
    """Train a BNN via the variational lower bound and plot posterior deciles.

    Args:
        data: dataset name (str) for build_toy_dataset, or an
            (inputs, targets) pair supplied directly.
        n_data: number of toy data points when `data` is a name.
        n_samples: Monte-Carlo samples for the VLB objective.
        arch: layer sizes; defaults to [1, 20, 20, 1] (was a mutable default).
        prior_params, prior_type: forwarded to vlb_objective.
        act: activation name used by the BNN.
        iters, lr: Adam iteration count and step size.
        plot: if True, draw sampled functions each iteration.
        save: unused here; kept for interface compatibility.
    """
    # Avoid the shared-mutable-default-argument pitfall.
    arch = [1, 20, 20, 1] if arch is None else arch

    if isinstance(data, str):
        # Bug fix: the original called build_toy_dataset() with no
        # arguments, silently ignoring `data` and `n_data` (the sibling
        # train_bnn in this file forwards both).
        inputs, targets = build_toy_dataset(data=data, n_data=n_data)
    else:
        inputs, targets = data

    if plot:
        fig, ax = p.setup_plot()

    def loss(params, t):
        return vlb_objective(params, inputs, targets, arch, n_samples, act=act,
                             prior_params=prior_params, prior_type=prior_type)

    def callback(params, t, g):
        # Guarded: the original always touched `ax`, which is undefined
        # when plot=False (NameError).
        if plot:
            plot_inputs = np.linspace(-8, 8, num=400)[:, None]
            f_bnn = sample_bnn(params, plot_inputs, 5, arch, act)
            p.plot_iter(ax, inputs, plot_inputs, targets, f_bnn)
        print("ITER {} | LOSS {}".format(t, -loss(params, t)))
        if t > 50:
            D = inputs, targets
            x_plot = np.reshape(np.linspace(-8, 8, 400), (400, 1))
            pred = sample_bnn(params, x_plot, 5, arch, act)
            p.plot_deciles(x_plot.ravel(), pred.T, D,
                           str(t) + "bnnpostfullprior", plot="gpp")

    var_params = adam(grad(loss), init_var_params(arch), step_size=lr,
                      num_iters=iters, callback=callback)

    # Final posterior decile plot at the optimized variational parameters.
    D = inputs, targets
    x_plot = np.reshape(np.linspace(-8, 8, 400), (400, 1))
    pred = sample_bnn(var_params, x_plot, 5, arch, act)
    p.plot_deciles(x_plot.ravel(), pred.T, D, "bnnpostfullprior", plot="gpp")
# NOTE(review): this chunk begins mid-call (`...interval, columns=input_features)`)
# — the opening of that call and the enclosing function header are outside the
# visible source, so the code is left byte-identical.
# What the visible code does: preprocesses `data` into train/test windows,
# either trains a Keras-style model (`fit` with `nb_epoch` suggests Keras 1.x —
# TODO confirm) and saves it, or loads `model.hdf5` from disk; then predicts
# one point per test window and streams each prediction into a live plot
# (`plt` here appears to be a project plotting module exposing setup_plot /
# update_plot / freeze_plot, not matplotlib.pyplot — verify against imports).
interval, columns=input_features) windows, x_train, x_test, y_train, y_test = pp.preprocess( data, sequence_length, test_fraction) if not model_from_disk: model = predictor.build_model([ len(input_features), sequence_length - 1, 100, len(input_features) ]) start = time.time() model.fit(x_train, y_train, batch_size=256, nb_epoch=epochs, validation_split=0.1) model.save('model.hdf5') logger.info('training took {} seconds'.format(time.time() - start)) else: model = predictor.load_model('model.hdf5') logger.info('Loaded model from disk') start = time.time() xs, ax, index = plt.setup_plot(windows, test_fraction) for ix, x in enumerate(x_test): prediction = (predictor.predict_next_point(model, x[None, :], feature_indices)) plt.update_plot(prediction, index + ix) plt.freeze_plot() logger.info('prediction took {} seconds'.format(time.time() - start))
# Experiment configuration: fit a hyper-network that emits BNN weights
# for functions sampled from a GP prior.
n_data, n_data_test = 15, 200
n_functions = 100
nn_arch = [1, 20, 20, 1]
_, num_weights = shapes_and_num(nn_arch)
hyper_arch = [n_data, 30, 30, num_weights]
act = 'sin'
ker = 'per'
save_name = '-' + str(n_data) + 'nf-' + str(n_functions) + "-" + act + ker

xs, ys = sample_gps()  # [nf, nd]
print(xs.shape, ys.shape)

plot = True
if plot:
    fig, ax = setup_plot()

def objective(params, t):
    """Total loss over all sampled GP functions."""
    return total_loss(params, xs, ys, nn_arch, act)

# Bug fix: the original bound this to `int`, shadowing the builtin.
# The draw is kept (same RNG-state consumption) even though the function
# shown below is hard-coded to index 10 rather than the random index.
rand_fn_idx = np.random.randint(n_functions)
y = ys[10]
x = xs[None, 0]
print(y.shape, x.shape)

def callback(params, t, g):
    """Plot the current BNN prediction for one function and report the loss."""
    preds = bnn_predict(params, x, nn_arch, act)[:, :, 0]  # [1, nd]
    if plot:
        plot_iter(ax, x.ravel(), x.ravel(), y, preds[0])
    print("ITER {} | OBJ {}".format(t, objective(params, t)))
def main():
    """Run t-SNE on two sets of CNN embeddings and save labelled 2-D plots."""

    def _tsne_plot(pkl_fn, fig_fn, xlim, ylim):
        # One stanza of the original duplicated code: load embeddings from
        # a pickle, report summary stats, run t-SNE, and save the figure.
        embeds, labels = embeddings_from_pickle(pkl_fn)
        print("Embeddings shape:", embeds.shape)
        print("Embeddings min:", np.min(embeds))
        print("Embeddings max:", np.max(embeds))
        print("Embeddings mean:", np.mean(embeds))

        # Perform t-SNE
        tsne = manifold.TSNE(n_components=2, perplexity=TSNE_PERPLEXITY,
            init="random", random_state=0)
        X_tsne = tsne.fit_transform(embeds)

        # Plot t-SNE
        plotting.setup_plot()
        plt.rcParams["figure.figsize"] = 5, 4.0
        plt.rcParams["figure.subplot.bottom"] = 0.01
        plt.rcParams["figure.subplot.left"] = 0.01
        plt.rcParams["figure.subplot.right"] = 0.99
        plt.rcParams["figure.subplot.top"] = 0.99
        plt.figure()
        plot_labelled_2d_data(X_tsne, labels)
        plt.legend(loc="best", ncol=2)
        plt.xlim(xlim)
        plt.ylim(ylim)
        plt.yticks([])
        plt.xticks([])
        plt.savefig(fig_fn)

    # SupervisedBoWCNN embeddings
    _tsne_plot(
        path.join(
            "..", "speech_nn", "models/train_bow_cnn/12597afba4/"
            "sigmoid_final_feedforward_dict.dev_queries_all.pkl"),
        "train_bow_cnn_12597afba4_embeddings.pdf",
        [-31, 34], [-45, 33])

    print()

    # VisionSpeechCNN embeddings
    _tsne_plot(
        path.join(
            "..", "speech_nn", "models/train_visionspeech_cnn/18ba6618ad/"
            "sigmoid_final_feedforward_dict.dev_queries_all.pkl"),
        "train_visionspeech_cnn_18ba6618ad_embeddings.pdf",
        [-30, 28], [-28, 26])