def just_train_on_dataset_up_to_T(dat, exs, pybml_ho, sess, T):
    """Train each episode in `exs` for T inner steps, recording accuracy after
    every step.

    Builds the train/validation feed dicts for `dat`, re-initializes the outer
    gradient state, then for every episode runs T optimizer steps and measures
    train and validation accuracy after each one.

    Returns:
        (tr_acc, v_acc): two T-element lists; entry t collects the per-episode
        accuracies observed right after inner step t+1.
    """
    train_fd, valid_fd = utils.feed_dicts(dat, exs)
    # print('train_feed:', train_fd)  # DEBUG
    sess.run(pybml_ho.outergradient.initialization)
    tr_acc = [[] for _ in range(T)]
    v_acc = [[] for _ in range(T)]
    for episode in exs:
        # episode.model.initialize(session=sess)
        for step in range(T):
            train_feed = {
                episode.x: train_fd[episode.x],
                episode.y: train_fd[episode.y],
            }
            # One inner optimization step, then score on the same train batch.
            sess.run(episode.optimizers["apply_updates"], feed_dict=train_feed)
            tr_acc[step].append(
                sess.run(episode.scores["accuracy"], feed_dict=train_feed)
            )
            # Validation data is keyed by the episode's x_/y_ placeholders but
            # fed through the regular x/y inputs of the model.
            valid_feed = {
                episode.x: valid_fd[episode.x_],
                episode.y: valid_fd[episode.y_],
            }
            v_acc[step].append(
                sess.run(episode.scores["accuracy"], feed_dict=valid_feed)
            )
    return tr_acc, v_acc
def just_train_on_dataset(dat, exs, pybml_ho, sess, T):
    """Train each episode in `exs` for T inner steps and record only the final
    train/validation accuracy per episode.

    Builds the train/validation feed dicts for `dat`, re-initializes the outer
    gradient state, runs T optimizer steps per episode, then evaluates once.

    Returns:
        (tr_acc, v_acc): lists with one accuracy value per episode, measured
        on the train batch and on the validation batch respectively.
    """
    train_fd, valid_fd = feed_dicts(dat, exs)
    # print('train_feed:', train_fd) # DEBUG
    sess.run(pybml_ho.outergradient.initialization)
    tr_acc, v_acc = [], []
    for ex in exs:
        # Hoisted: the same train feed is used for every step and for scoring.
        train_feed = {ex.x: train_fd[ex.x], ex.y: train_fd[ex.y]}
        # Plain loop instead of the original throwaway list comprehension:
        # the sess.run calls are executed purely for their side effect
        # (parameter updates), so no list should be built.
        for _ in range(T):
            sess.run(ex.optimizers['apply_updates'], feed_dict=train_feed)
        tr_acc.append(sess.run(ex.scores['accuracy'], feed_dict=train_feed))
        v_acc.append(
            sess.run(
                ex.scores['accuracy'],
                feed_dict={ex.x: valid_fd[ex.x_], ex.y: valid_fd[ex.y_]},
            )
        )
    return tr_acc, v_acc
def meta_train(
    exp_dir,
    metasets,
    exs,
    pybml_ho,
    saver,
    sess,
    n_test_episodes,
    MBS,
    seed,
    resume,
    T,
    n_meta_iterations,
    print_interval,
    save_interval,
):
    """Run the outer (meta-)optimization loop.

    Each iteration draws one meta-batch of training episodes, performs a
    hyper-gradient step via ``pybml_ho.run``, and accumulates outer-loss
    statistics. Every ``print_interval`` iterations it evaluates accuracy on
    train/validation/test meta-splits (via ``accuracy_on`` with T inner
    steps) and prints progress; every ``save_interval`` iterations it
    checkpoints the session and pickles the results dict.

    Args:
        exp_dir: directory for checkpoints and the results pickle.
        metasets: object exposing ``train``/``validation``/``test`` meta-datasets.
        exs: list of per-episode objects (placeholders, optimizers, scores, errors).
        pybml_ho: hyper-optimizer driving the outer updates.
        saver: TF Saver used to save/restore checkpoints.
        sess: TensorFlow session all ops run in.
        n_test_episodes: episodes used per evaluation (split into MBS-sized batches).
        MBS: meta-batch size.
        seed: seed for the batch-sampling random state.
        resume: if True, restore the latest checkpoint + results from exp_dir.
        T: inner-training steps used during evaluation.
        n_meta_iterations: total number of outer iterations.
        print_interval: evaluate/print every this many iterations.
        save_interval: checkpoint every this many iterations.

    Returns:
        dict of accumulated statistics (mean/std accuracies, outer losses,
        timings, hyper-parameter traces).
    """
    # use workers to fill the batches queues (is it worth it?)
    result_path = os.path.join(exp_dir, "results.pickle")
    tf.global_variables_initializer().run(session=sess)
    n_test_batches = n_test_episodes // MBS
    rand = dl.get_rand_state(seed)
    # Accumulators; "mean"/"std" are per-logged-iteration moments over episodes.
    results = {
        "train_train": {"mean": [], "std": []},
        "train_test": {"mean": [], "std": []},
        "test_test": {"mean": [], "std": []},
        "valid_test": {"mean": [], "std": []},
        "outer_losses": {"mean": [], "std": []},
        "learning_rate": [],
        "iterations": [],
        "episodes": [],
        "time": [],
        "alpha": [],
    }
    resume_itr = 0
    if resume:
        model_file = tf.train.latest_checkpoint(exp_dir)
        if model_file:
            print("Restoring results from " + result_path)
            results = load_obj(result_path)
            # Checkpoint files are named "model<iter>"; the iteration index
            # starts right after the 5-char literal "model".
            ind1 = model_file.index("model")
            resume_itr = int(model_file[ind1 + 5:]) + 1
            print("Restoring model weights from " + model_file)
            saver.restore(sess, model_file)

    """ Meta-Train """
    train_batches = BatchQueueMock(metasets.train, 1, MBS, rand)
    valid_batches = BatchQueueMock(metasets.validation, n_test_batches, MBS, rand)
    test_batches = BatchQueueMock(metasets.test, n_test_batches, MBS, rand)
    start_time = time.time()
    print(
        "\nIteration quantities: train_train acc, train_test acc, valid_test, acc"
        " test_test acc mean(std) over %d episodes" % n_test_episodes
    )
    with sess.as_default():
        # NOTE(review): inner_losses is never appended to below, so
        # results["inner_losses"] always ends up empty — confirm intended.
        inner_losses = []
        for meta_it in range(resume_itr, n_meta_iterations):
            # One meta-batch -> one hyper-gradient (outer) step.
            tr_fd, v_fd = utils.feed_dicts(train_batches.get_all_batches()[0], exs)
            pybml_ho.run(tr_fd, v_fd)
            duration = time.time() - start_time
            results["time"].append(duration)
            # Outer (validation) loss of each episode after this step.
            outer_losses = []
            for _, ex in enumerate(exs):
                outer_losses.append(
                    sess.run(
                        ex.errors["validation"], boml.utils.merge_dicts(tr_fd, v_fd)
                    )
                )
            outer_losses_moments = (np.mean(outer_losses), np.std(outer_losses))
            results["outer_losses"]["mean"].append(outer_losses_moments[0])
            results["outer_losses"]["std"].append(outer_losses_moments[1])

            if meta_it % print_interval == 0 or meta_it == n_meta_iterations - 1:
                results["iterations"].append(meta_it)
                results["episodes"].append(meta_it * MBS)
                # Optional learnable hyper-parameters, logged when present.
                if "alpha" in pybml_ho.param_dict.keys():
                    alpha_moment = pybml_ho.param_dict["alpha"].eval()
                    print("alpha_itr" + str(meta_it) + ": ", alpha_moment)
                    results["alpha"].append(alpha_moment)
                if "s" in pybml_ho.param_dict.keys():
                    # Fetched by graph tensor name — assumes a tensor named
                    # "s:0" exists in the default graph (TODO confirm).
                    s = sess.run(["s:0"])[0]
                    print("s: {}".format(s))
                if "t" in pybml_ho.param_dict.keys():
                    t = sess.run(["t:0"])[0]
                    print("t: {}".format(t))
                # Evaluate on all three meta-splits with T inner steps.
                train_result = accuracy_on(train_batches, exs, pybml_ho, sess, T)
                test_result = accuracy_on(test_batches, exs, pybml_ho, sess, T)
                valid_result = accuracy_on(valid_batches, exs, pybml_ho, sess, T)
                # result[0] = accuracy on the episode's train split,
                # result[1] = accuracy on the episode's test/validation split.
                train_train = (np.mean(train_result[0]), np.std(train_result[0]))
                train_test = (np.mean(train_result[1]), np.std(train_result[1]))
                valid_test = (np.mean(valid_result[1]), np.std(valid_result[1]))
                test_test = (np.mean(test_result[1]), np.std(test_result[1]))
                results["train_train"]["mean"].append(train_train[0])
                results["train_test"]["mean"].append(train_test[0])
                results["valid_test"]["mean"].append(valid_test[0])
                results["test_test"]["mean"].append(test_test[0])
                results["train_train"]["std"].append(train_train[1])
                results["train_test"]["std"].append(train_test[1])
                results["valid_test"]["std"].append(valid_test[1])
                results["test_test"]["std"].append(test_test[1])
                results["inner_losses"] = inner_losses
                print("mean outer losses: {}".format(outer_losses_moments[0]))
                print(
                    "it %d, ep %d (%.5fs): %.5f, %.5f, %.5f, %.5f"
                    % (
                        meta_it,
                        meta_it * MBS,
                        duration,
                        train_train[0],
                        train_test[0],
                        valid_test[0],
                        test_test[0],
                    )
                )
                # Fetched by graph tensor name "lr:0" — TODO confirm it exists.
                lr = sess.run(["lr:0"])[0]
                print("lr: {}".format(lr))
                # do_plot(logdir, results)
            if meta_it % save_interval == 0 or meta_it == n_meta_iterations - 1:
                saver.save(sess, exp_dir + "/model" + str(meta_it))
                save_obj(result_path, results)
            # Reset the timer so `duration` measures a single iteration.
            start_time = time.time()
        return results
def meta_train(exp_dir, metasets, exs, pybml_ho, saver, sess, n_test_episodes,
               MBS, seed, resume, T, n_meta_iterations, print_interval,
               save_interval):
    """Run the outer (meta-)optimization loop.

    NOTE(review): this is a duplicate definition of ``meta_train`` that
    shadows the earlier one in this module; the only functional difference
    is that it calls bare ``feed_dicts`` instead of ``utils.feed_dicts`` —
    confirm which version is intended and remove the other.

    Each iteration draws one meta-batch of training episodes, performs a
    hyper-gradient step via ``pybml_ho.run``, and accumulates outer-loss
    statistics. Every ``print_interval`` iterations it evaluates accuracy on
    train/validation/test meta-splits and prints progress; every
    ``save_interval`` iterations it checkpoints the session and pickles the
    results dict. Returns the accumulated results dict.
    """
    # use workers to fill the batches queues (is it worth it?)
    result_path = os.path.join(exp_dir, 'results.pickle')
    tf.global_variables_initializer().run(session=sess)
    n_test_batches = n_test_episodes // MBS
    rand = dl.get_rand_state(seed)
    # Accumulators; 'mean'/'std' are per-logged-iteration moments over episodes.
    results = {
        'train_train': {'mean': [], 'std': []},
        'train_test': {'mean': [], 'std': []},
        'test_test': {'mean': [], 'std': []},
        'valid_test': {'mean': [], 'std': []},
        'outer_losses': {'mean': [], 'std': []},
        'learning_rate': [],
        'iterations': [],
        'episodes': [],
        'time': [],
        'alpha': []
    }
    resume_itr = 0
    if resume:
        model_file = tf.train.latest_checkpoint(exp_dir)
        if model_file:
            print("Restoring results from " + result_path)
            results = load_obj(result_path)
            # Checkpoint files are named 'model<iter>'; the iteration index
            # starts right after the 5-char literal 'model'.
            ind1 = model_file.index('model')
            resume_itr = int(model_file[ind1 + 5:]) + 1
            print("Restoring model weights from " + model_file)
            saver.restore(sess, model_file)

    ''' Meta-Train '''
    train_batches = BatchQueueMock(metasets.train, 1, MBS, rand)
    valid_batches = BatchQueueMock(metasets.validation, n_test_batches, MBS, rand)
    test_batches = BatchQueueMock(metasets.test, n_test_batches, MBS, rand)
    start_time = time.time()
    print(
        '\nIteration quantities: train_train acc, train_test acc, valid_test, acc'
        ' test_test acc mean(std) over %d episodes' % n_test_episodes)
    with sess.as_default():
        # NOTE(review): inner_losses is never appended to below, so
        # results['inner_losses'] always ends up empty — confirm intended.
        inner_losses = []
        for meta_it in range(resume_itr, n_meta_iterations):
            # One meta-batch -> one hyper-gradient (outer) step.
            tr_fd, v_fd = feed_dicts(train_batches.get_all_batches()[0], exs)
            pybml_ho.run(tr_fd, v_fd)
            duration = time.time() - start_time
            results['time'].append(duration)
            # Outer (validation) loss of each episode after this step.
            outer_losses = []
            for _, ex in enumerate(exs):
                outer_losses.append(
                    sess.run(ex.errors['validation'],
                             boml.utils.merge_dicts(tr_fd, v_fd)))
            outer_losses_moments = (np.mean(outer_losses), np.std(outer_losses))
            results['outer_losses']['mean'].append(outer_losses_moments[0])
            results['outer_losses']['std'].append(outer_losses_moments[1])

            if meta_it % print_interval == 0 or meta_it == n_meta_iterations - 1:
                results['iterations'].append(meta_it)
                results['episodes'].append(meta_it * MBS)
                # Optional learnable hyper-parameters, logged when present.
                if 'alpha' in pybml_ho.param_dict.keys():
                    alpha_moment = pybml_ho.param_dict['alpha'].eval()
                    print('alpha_itr' + str(meta_it) + ': ', alpha_moment)
                    results['alpha'].append(alpha_moment)
                if 's' in pybml_ho.param_dict.keys():
                    # Fetched by graph tensor name — assumes a tensor named
                    # "s:0" exists in the default graph (TODO confirm).
                    s = sess.run(["s:0"])[0]
                    print('s: {}'.format(s))
                if 't' in pybml_ho.param_dict.keys():
                    t = sess.run(["t:0"])[0]
                    print('t: {}'.format(t))
                # Evaluate on all three meta-splits with T inner steps.
                train_result = accuracy_on(train_batches, exs, pybml_ho, sess, T)
                test_result = accuracy_on(test_batches, exs, pybml_ho, sess, T)
                valid_result = accuracy_on(valid_batches, exs, pybml_ho, sess, T)
                # result[0] = accuracy on the episode's train split,
                # result[1] = accuracy on the episode's test/validation split.
                train_train = (np.mean(train_result[0]), np.std(train_result[0]))
                train_test = (np.mean(train_result[1]), np.std(train_result[1]))
                valid_test = (np.mean(valid_result[1]), np.std(valid_result[1]))
                test_test = (np.mean(test_result[1]), np.std(test_result[1]))
                results['train_train']['mean'].append(train_train[0])
                results['train_test']['mean'].append(train_test[0])
                results['valid_test']['mean'].append(valid_test[0])
                results['test_test']['mean'].append(test_test[0])
                results['train_train']['std'].append(train_train[1])
                results['train_test']['std'].append(train_test[1])
                results['valid_test']['std'].append(valid_test[1])
                results['test_test']['std'].append(test_test[1])
                results['inner_losses'] = inner_losses
                print('mean outer losses: {}'.format(outer_losses_moments[0]))
                print('it %d, ep %d (%.5fs): %.5f, %.5f, %.5f, %.5f' %
                      (meta_it, meta_it * MBS, duration, train_train[0],
                       train_test[0], valid_test[0], test_test[0]))
                # Fetched by graph tensor name "lr:0" — TODO confirm it exists.
                lr = sess.run(["lr:0"])[0]
                print('lr: {}'.format(lr))
                # do_plot(logdir, results)
            if meta_it % save_interval == 0 or meta_it == n_meta_iterations - 1:
                saver.save(sess, exp_dir + '/model' + str(meta_it))
                save_obj(result_path, results)
            # Reset the timer so `duration` measures a single iteration.
            start_time = time.time()
        return results