def test_baseline_nnlm_init(self):
    model = NNLM(ctx_size=2,
                 vocab_size=4,
                 embed_dim=10,
                 h_dim=4,
                 num_h=2,
                 use_dropout=True,
                 embed_dropout=True,
                 drop_probability=0.1)

    print("RUN GRAPH:")
    for layer in tx.layers_to_list(model.run_outputs):
        print(layer.full_str())
    print("=" * 60)

    print("TRAIN GRAPH:")
    for layer in tx.layers_to_list(model.train_outputs):
        print(layer.full_str())
    print("=" * 60)

    print("EVAL GRAPH:")
    for layer in tx.layers_to_list(model.eval_outputs):
        print(layer.full_str())
    print("=" * 60)

    runner = tx.ModelRunner(model)
    runner.log_graph("/tmp/")
def test_nce_nnlm(self):
    vocab_size = 1000
    embed_size = 100
    nce_samples = 10

    # NOTE: use_nce is False here, so the model trains with the full softmax
    # even though nce_samples is configured
    model = NNLM(ctx_size=2,
                 vocab_size=vocab_size,
                 h_activation=tx.relu,
                 embed_dim=embed_size,
                 embed_share=True,
                 num_h=1,
                 h_dim=128,
                 use_f_predict=True,
                 use_dropout=True,
                 drop_probability=0.75,
                 embed_dropout=True,
                 use_nce=False,
                 nce_samples=nce_samples)

    # model.eval_tensors.append(model.train_loss_tensors[0])
    runner = tx.ModelRunner(model)

    options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
    # options = None
    runner.set_session(runtime_stats=True, run_options=options)
    runner.set_log_dir("/tmp/")
    runner.log_graph()

    runner.config_optimizer(
        tf.train.GradientDescentOptimizer(learning_rate=0.01),
        gradient_op=lambda grad: tf.clip_by_norm(grad, 1.0))
    # runner.config_optimizer(tf.train.AdamOptimizer(learning_rate=0.005))

    data = np.array([[0, 2], [5, 7], [9, 8], [3, 4], [1, 9]])
    labels = np.array([[32], [56], [12], [2], [5]])

    ppl_curve = []
    for i in tqdm(range(3000)):
        res = runner.train(data, labels, output_loss=True)
        if i % 5 == 0:
            res = runner.eval(data, labels)
            print(res)
            ppl_curve.append(np.exp(res))

    ppl = sns.lineplot(x=np.arange(len(ppl_curve)), y=np.array(ppl_curve))
    print(ppl_curve)
    plt.show()
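# The eval loop above converts the mean cross-entropy loss returned by
# runner.eval into perplexity with np.exp. A minimal standalone sketch of
# that relationship; the loss values below are invented for illustration:
import numpy as np

# mean cross-entropy in nats at a few hypothetical eval steps
losses = np.array([6.9, 5.2, 4.1, 3.7])

# perplexity is the exponential of the mean cross-entropy, i.e. the
# effective branching factor of the model's next-word distribution
perplexity = np.exp(losses)
print(perplexity)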
def run(progress=False, **kwargs):
    arg_dict.from_dict(kwargs)
    args = arg_dict.to_namespace()

    # ==================================================================================
    # Load params, prepare result assets
    # ==================================================================================
    # experiment parameter summary
    res_param_filename = os.path.join(args.out_dir, "params_{id}.csv".format(id=args.run_id))
    with open(res_param_filename, "w") as param_file:
        writer = csv.DictWriter(f=param_file, fieldnames=arg_dict.keys())
        writer.writeheader()
        writer.writerow(arg_dict)
        param_file.flush()

    # make dir for model checkpoints
    if args.save_model:
        model_ckpt_dir = os.path.join(args.out_dir, "model_{id}".format(id=args.run_id))
        os.makedirs(model_ckpt_dir, exist_ok=True)
        model_path = os.path.join(model_ckpt_dir, "nnlm_{id}.ckpt".format(id=args.run_id))

    # start perplexity file
    ppl_header = ["id", "run", "epoch", "step", "lr", "dataset", "perplexity"]
    ppl_fname = os.path.join(args.out_dir, "perplexity_{id}.csv".format(id=args.run_id))
    ppl_file = open(ppl_fname, "w")
    ppl_writer = csv.DictWriter(f=ppl_file, fieldnames=ppl_header)
    ppl_writer.writeheader()

    # ==================================================================================
    # Load corpus & vocab
    # ==================================================================================
    corpus = h5py.File(os.path.join(args.corpus, "ptb_{}.hdf5".format(args.ngram_size)), mode='r')
    vocab = marisa_trie.Trie(corpus["vocabulary"])

    def data_pipeline(data, epochs=1, batch_size=args.batch_size, shuffle=False):
        def chunk_fn(x):
            return chunk_it(x, chunk_size=batch_size * 1000)

        if epochs > 1:
            data = repeat_apply(chunk_fn, data, epochs)
        else:
            data = chunk_fn(data)

        if shuffle:
            data = shuffle_it(data, args.shuffle_buffer_size)

        data = batch_it(data, size=batch_size, padding=False)
        return data

    # ==================================================================================
    # MODEL
    # ==================================================================================
    # activation functions with matching weight initializers
    if args.h_act == "relu":
        h_act = tx.relu
        h_init = tx.he_normal_init()
    elif args.h_act == "tanh":
        h_act = tx.tanh
        h_init = tx.glorot_uniform()
    elif args.h_act == "elu":
        h_act = tx.elu
        h_init = tx.he_normal_init()

    # parameter init
    if args.embed_init == "normal":
        embed_init = tx.random_normal(mean=0., stddev=args.embed_init_val)
    elif args.embed_init == "uniform":
        embed_init = tx.random_uniform(minval=-args.embed_init_val, maxval=args.embed_init_val)

    if args.logit_init == "normal":
        logit_init = tx.random_normal(mean=0., stddev=args.logit_init_val)
    elif args.logit_init == "uniform":
        logit_init = tx.random_uniform(minval=-args.logit_init_val, maxval=args.logit_init_val)

    f_init = None
    if args.use_f_predict:
        if args.f_init == "normal":
            f_init = tx.random_normal(mean=0., stddev=args.f_init_val)
        elif args.f_init == "uniform":
            f_init = tx.random_uniform(minval=-args.f_init_val, maxval=args.f_init_val)

    model = NNLM(ctx_size=args.ngram_size - 1,
                 vocab_size=len(vocab),
                 embed_dim=args.embed_dim,
                 embed_init=embed_init,
                 embed_share=args.embed_share,
                 logit_init=logit_init,
                 h_dim=args.h_dim,
                 num_h=args.num_h,
                 h_activation=h_act,
                 h_init=h_init,
                 use_dropout=args.dropout,
                 drop_probability=args.keep_prob,
                 embed_dropout=args.embed_dropout,
                 l2_loss=args.l2_loss,
                 l2_weight=args.l2_loss_coef,
                 use_f_predict=args.use_f_predict,
                 f_init=f_init,
                 logit_bias=args.logit_bias)

    model_runner = tx.ModelRunner(model)

    # we use an InputParam because we might want to change it during training
    lr_param = tx.InputParam(value=args.lr)
    if args.optimizer == "sgd":
        optimizer = tf.train.GradientDescentOptimizer(learning_rate=lr_param.tensor)
    elif args.optimizer == "adam":
        optimizer = tf.train.AdamOptimizer(learning_rate=lr_param.tensor,
                                           beta1=args.optimizer_beta1,
                                           beta2=args.optimizer_beta2,
                                           epsilon=args.optimizer_epsilon)
    elif args.optimizer == "ams":
        optimizer = tx.AMSGrad(learning_rate=lr_param.tensor,
                               beta1=args.optimizer_beta1,
                               beta2=args.optimizer_beta2,
                               epsilon=args.optimizer_epsilon)

    def clip_grad_global(grads):
        grads, _ = tf.clip_by_global_norm(grads, 12)
        return grads

    def clip_grad_local(grad):
        return tf.clip_by_norm(grad, args.clip_value)

    if args.clip_grads:
        if args.clip_local:
            clip_fn = clip_grad_local
        else:
            clip_fn = clip_grad_global

        model_runner.config_optimizer(optimizer, optimizer_params=lr_param,
                                      gradient_op=clip_fn,
                                      global_gradient_op=not args.clip_local)
    else:
        model_runner.config_optimizer(optimizer, optimizer_params=lr_param)

    # ==================================================================================
    # EVALUATION
    # ==================================================================================
    def eval_model(runner, dataset_it, len_dataset=None, display_progress=False):
        if display_progress:
            pb = tqdm(total=len_dataset, ncols=60)

        batches_processed = 0
        sum_loss = 0
        for batch in dataset_it:
            batch = np.array(batch, dtype=np.int64)
            ctx = batch[:, :-1]
            target = batch[:, -1:]

            mean_loss = runner.eval(ctx, target)
            sum_loss += mean_loss

            if display_progress:
                pb.update(args.batch_size)
            batches_processed += 1

        if display_progress:
            pb.close()

        # perplexity is exp of the average cross-entropy loss over the dataset
        return np.exp(sum_loss / batches_processed)

    def evaluation(runner: tx.ModelRunner, pb, cur_epoch, step, display_progress=False):
        # pb.write("[Eval Validation Set]")
        val_data = corpus["validation"]
        ppl_validation = eval_model(runner,
                                    data_pipeline(val_data, epochs=1, shuffle=False),
                                    len(val_data), display_progress)
        res_row = {"id": args.id, "run": args.run, "epoch": cur_epoch, "step": step,
                   "lr": lr_param.value, "dataset": "validation",
                   "perplexity": ppl_validation}
        ppl_writer.writerow(res_row)

        if args.eval_test:
            # pb.write("[Eval Test Set]")
            test_data = corpus["test"]
            ppl_test = eval_model(runner,
                                  data_pipeline(test_data, epochs=1, shuffle=False),
                                  len(test_data), display_progress)
            res_row = {"id": args.id, "run": args.run, "epoch": cur_epoch, "step": step,
                       "lr": lr_param.value, "dataset": "test",
                       "perplexity": ppl_test}
            ppl_writer.writerow(res_row)

        ppl_file.flush()

        if args.eval_test:
            pb.write("test. ppl = {}".format(ppl_test))
        # pb.write("valid. ppl = {}".format(ppl_validation))
        return ppl_validation

    # ==================================================================================
    # TRAINING LOOP
    # ==================================================================================
    print("starting TF")

    epoch_step = 0
    global_step = 0
    current_epoch = 0
    patience = 0

    cfg = tf.ConfigProto()
    cfg.gpu_options.allow_growth = True
    sess = tf.Session(config=cfg)

    model_runner.set_session(sess)
    model_runner.init_vars()

    training_dset = corpus["training"]
    progress = tqdm(total=len(training_dset) * args.epochs, position=1)
    training_data = data_pipeline(training_dset, epochs=args.epochs, shuffle=True)

    evals = []
    try:
        for ngram_batch in training_data:
            epoch = progress.n // len(training_dset) + 1

            # start new epoch
            if epoch != current_epoch:
                current_epoch = epoch
                epoch_step = 0
                if progress:
                    progress.write("epoch: {}".format(current_epoch))

            # eval time
            if epoch_step == 0:
                current_eval = evaluation(model_runner, progress, epoch, global_step)
                evals.append(current_eval)

                if global_step > 0:
                    if args.early_stop:
                        if evals[-2] - evals[-1] < args.eval_threshold:
                            if patience >= 3:
                                break
                            patience += 1
                        else:
                            # restart patience
                            patience = 0

                    # lr decay only at the start of each epoch
                    if args.lr_decay and len(evals) > 0:
                        if evals[-2] - evals[-1] < args.eval_threshold:
                            lr_param.value = max(lr_param.value * args.lr_decay_rate,
                                                 args.lr_decay_threshold)
                            if progress:
                                progress.write("lr decreased to {}".format(lr_param.value))

            # ================================================
            # TRAIN MODEL
            # ================================================
            ngram_batch = np.array(ngram_batch, dtype=np.int64)
            ctx_ids = ngram_batch[:, :-1]
            word_ids = ngram_batch[:, -1:]

            model_runner.train(ctx_ids, word_ids)
            progress.update(args.batch_size)

            epoch_step += 1
            global_step += 1

        # if not early stopped, evaluate the last state of the model
        if not args.early_stop or patience < 3:
            evaluation(model_runner, progress, epoch, epoch_step)
        ppl_file.close()

        if args.save_model:
            model_runner.save_model(model_name=model_path, step=global_step,
                                    write_state=False)

        model_runner.close_session()
        progress.close()
        tf.reset_default_graph()
    except Exception as e:
        traceback.print_exc()
        os.remove(ppl_file.name)
        os.remove(param_file.name)
        raise e
def run(**kwargs):
    arg_dict.from_dict(kwargs)
    args = arg_dict.to_namespace()

    # ==================================================================================
    # Load corpus & vocab
    # ==================================================================================
    corpus = PTBReader(path=args.corpus, mark_eos=args.mark_eos)
    corpus_stats = h5py.File(os.path.join(args.corpus, "ptb_stats.hdf5"), mode='r')
    vocab = marisa_trie.Trie(corpus_stats["vocabulary"])

    to_ngrams_batch = partial(to_ngrams,
                              vocab=vocab,
                              ngram_size=args.ngram_size,
                              batch_size=args.batch_size,
                              epochs=1,
                              shuffle=False,
                              shuffle_buffer_size=args.shuffle_buffer_size,
                              enum_epoch=False)

    training_len = sum(1 for _ in to_ngrams_batch(corpus.training_set, batch_size=1))
    validation_len = None
    test_len = None
    if args.eval_progress:
        validation_len = sum(1 for _ in to_ngrams_batch(corpus.validation_set, batch_size=1))
        test_len = sum(1 for _ in to_ngrams_batch(corpus.test_set, batch_size=1))

    # ==================================================================================
    # Load params, prepare result assets
    # ==================================================================================
    # experiment parameter summary
    res_param_filename = os.path.join(args.out_dir,
                                      "params_{id}_{run}.csv".format(id=args.id, run=args.run))
    with open(res_param_filename, "w") as param_file:
        writer = csv.DictWriter(f=param_file, fieldnames=arg_dict.keys())
        writer.writeheader()
        writer.writerow(arg_dict)
        param_file.flush()

    # make dir for model checkpoints
    if args.save_model:
        model_ckpt_dir = os.path.join(args.out_dir,
                                      "model_{id}_{run}".format(id=args.id, run=args.run))
        os.makedirs(model_ckpt_dir, exist_ok=True)
        model_path = os.path.join(model_ckpt_dir,
                                  "nnlm_{id}_{run}.ckpt".format(id=args.id, run=args.run))

    # start perplexity file
    ppl_header = ["id", "run", "epoch", "step", "lr", "dataset", "perplexity"]
    ppl_filename = os.path.join(args.out_dir,
                                "perplexity_{id}_{run}.csv".format(id=args.id, run=args.run))
    ppl_file = open(ppl_filename, "w")
    ppl_writer = csv.DictWriter(f=ppl_file, fieldnames=ppl_header)
    ppl_writer.writeheader()

    # ==================================================================================
    # MODEL
    # ==================================================================================
    # configure weight initializers based on activation functions
    if args.h_act == "relu":
        h_act = tx.relu
        h_init = tx.he_normal_init()
    elif args.h_act == "tanh":
        h_act = tx.tanh
        h_init = tx.glorot_uniform()
    elif args.h_act == "elu":
        h_act = tx.elu
        h_init = tx.he_normal_init()
    elif args.h_act == "selu":
        h_act = tf.nn.selu
        h_init = tx.glorot_uniform()

    # configure embedding and logit weight initializers
    if args.embed_init == "normal":
        embed_init = tx.random_normal(mean=0., stddev=args.embed_init_val)
    elif args.embed_init == "uniform":
        embed_init = tx.random_uniform(minval=-args.embed_init_val, maxval=args.embed_init_val)

    if args.logit_init == "normal":
        logit_init = tx.random_normal(mean=0., stddev=args.logit_init_val)
    elif args.logit_init == "uniform":
        logit_init = tx.random_uniform(minval=-args.logit_init_val, maxval=args.logit_init_val)

    f_init = None
    if args.use_f_predict:
        if args.f_init == "normal":
            f_init = tx.random_normal(mean=0., stddev=args.f_init_val)
        elif args.f_init == "uniform":
            f_init = tx.random_uniform(minval=-args.f_init_val, maxval=args.f_init_val)

    inputs = tx.Input(args.ngram_size - 1, dtype=tf.int64, name="ctx_inputs")
    labels = tx.Input(1, dtype=tf.int64, name="labels")

    model = NNLM(inputs=inputs,
                 label_inputs=labels,
                 vocab_size=len(vocab),
                 embed_dim=args.embed_dim,
                 embed_init=embed_init,
                 embed_share=args.embed_share,
                 logit_init=logit_init,
                 h_dim=args.h_dim,
                 num_h=args.num_h,
                 h_activation=h_act,
                 h_init=h_init,
                 use_dropout=args.dropout,
                 drop_probability=args.drop_probability,
                 embed_dropout=args.embed_dropout,
                 l2_loss=args.l2_loss,
                 l2_weight=args.l2_loss_coef,
                 use_f_predict=args.use_f_predict,
                 f_init=f_init,
                 logit_bias=args.logit_bias,
                 use_nce=False)

    # input params can be changed during training by setting their value
    # lr_param = tx.InputParam(init_value=args.lr)
    lr_param = tensorx.train.EvalStepDecayParam(value=args.lr,
                                                improvement_threshold=args.eval_threshold,
                                                less_is_better=True,
                                                decay_rate=args.lr_decay_rate,
                                                decay_threshold=args.lr_decay_threshold)

    if args.optimizer == "sgd":
        optimizer = tf.train.GradientDescentOptimizer(learning_rate=lr_param.tensor)
    elif args.optimizer == "adam":
        optimizer = tf.train.AdamOptimizer(learning_rate=lr_param.tensor,
                                           beta1=args.optimizer_beta1,
                                           beta2=args.optimizer_beta2,
                                           epsilon=args.optimizer_epsilon)
    elif args.optimizer == "ams":
        optimizer = tx.AMSGrad(learning_rate=lr_param.tensor,
                               beta1=args.optimizer_beta1,
                               beta2=args.optimizer_beta2,
                               epsilon=args.optimizer_epsilon)

    def clip_grad_global(grads):
        grads, _ = tf.clip_by_global_norm(grads, 12)
        return grads

    def clip_grad_local(grad):
        return tf.clip_by_norm(grad, args.clip_value)

    if args.clip_grads:
        if args.clip_local:
            clip_fn = clip_grad_local
        else:
            clip_fn = clip_grad_global

        model.config_optimizer(optimizer, optimizer_params=lr_param,
                               gradient_op=clip_fn,
                               global_gradient_op=not args.clip_local)
    else:
        model.config_optimizer(optimizer, optimizer_params=lr_param)

    # ==================================================================================
    # EVALUATION
    # ==================================================================================
    def eval_model(model, dataset_it, len_dataset=None, display_progress=False):
        if display_progress:
            pb = tqdm(total=len_dataset, ncols=60, position=1)

        batches_processed = 0
        sum_loss = 0
        for batch in dataset_it:
            batch = np.array(batch, dtype=np.int64)
            ctx = batch[:, :-1]
            target = batch[:, -1:]

            mean_loss = model.eval({inputs: ctx, labels: target})
            sum_loss += mean_loss

            if display_progress:
                pb.update(args.batch_size)
            batches_processed += 1

        if display_progress:
            pb.close()

        # perplexity is exp of the average cross-entropy loss over the dataset
        return np.exp(sum_loss / batches_processed)

    def evaluation(model: tx.Model, progress_bar, cur_epoch, step, display_progress=False):
        ppl_validation = eval_model(model, to_ngrams_batch(corpus.validation_set),
                                    validation_len, display_progress)
        res_row = {"id": args.id, "run": args.run, "epoch": cur_epoch, "step": step,
                   "lr": lr_param.value, "dataset": "validation",
                   "perplexity": ppl_validation}
        ppl_writer.writerow(res_row)

        if args.eval_test:
            ppl_test = eval_model(model, to_ngrams_batch(corpus.test_set),
                                  test_len, display_progress)
            res_row = {"id": args.id, "run": args.run, "epoch": cur_epoch, "step": step,
                       "lr": lr_param.value, "dataset": "test",
                       "perplexity": ppl_test}
            ppl_writer.writerow(res_row)

        ppl_file.flush()

        if args.eval_test:
            progress_bar.set_postfix({"test PPL ": ppl_test})
        return ppl_validation

    # ==================================================================================
    # TRAINING LOOP
    # ==================================================================================
    epoch_step = 0
    global_step = 0
    current_epoch = 0
    patience = 0

    cfg = tf.ConfigProto()
    cfg.gpu_options.allow_growth = True
    sess = tf.Session(config=cfg)

    model.set_session(sess)
    model.init_vars()

    progress = tqdm(total=training_len * args.epochs,
                    position=args.pid + 1,
                    disable=not args.display_progress)
    training_data = to_ngrams_batch(corpus.training_set,
                                    epochs=args.epochs,
                                    shuffle=args.shuffle,
                                    enum_epoch=True)

    evaluations = []
    try:
        for i, ngram_batch in training_data:
            epoch = i + 1
            # start new epoch
            if epoch != current_epoch:
                current_epoch = epoch
                epoch_step = 0
                if args.display_progress:
                    progress.set_postfix({"epoch": current_epoch})

            # ================================================
            # EVALUATION
            # ================================================
            if epoch_step == 0:
                current_eval = evaluation(model, progress, epoch, global_step,
                                          display_progress=args.eval_progress)
                evaluations.append(current_eval)
                lr_param.update(current_eval)

                if global_step > 0:
                    if args.early_stop and epoch > 1:
                        if lr_param.eval_improvement() < lr_param.improvement_threshold:
                            if patience >= 3:
                                break
                            patience += 1
                        else:
                            patience = 0

            # ================================================
            # TRAIN MODEL
            # ================================================
            ngram_batch = np.array(ngram_batch, dtype=np.int64)
            ctx_ids = ngram_batch[:, :-1]
            word_ids = ngram_batch[:, -1:]

            model.train({inputs: ctx_ids, labels: word_ids})
            progress.update(args.batch_size)

            epoch_step += 1
            global_step += 1

        # if not early stopped, evaluate the last state of the model
        if not args.early_stop or patience < 3:
            current_eval = evaluation(model, progress, epoch, epoch_step)
            evaluations.append(current_eval)
        ppl_file.close()

        if args.save_model:
            model.save_model(model_name=model_path, step=global_step, write_state=False)

        model.close_session()
        progress.close()
        tf.reset_default_graph()

        # return the best validation evaluation
        return min(evaluations)
    except Exception as e:
        traceback.print_exc()
        os.remove(ppl_file.name)
        os.remove(param_file.name)
        raise e
def test_nnlm(self):
    vocab_size = 4
    ctx_size = 22
    batch_size = 2
    embed_dim = 512
    h_dim = 128

    inputs = tx.Input(ctx_size, dtype=tf.int64, name="ctx_inputs")
    labels = tx.Input(1, dtype=tf.int64, name="labels")

    model = NNLM(inputs=inputs,
                 label_inputs=labels,
                 vocab_size=vocab_size,
                 embed_dim=embed_dim,
                 embed_share=True,
                 use_f_predict=True,
                 h_dim=h_dim,
                 use_dropout=False,
                 drop_probability=0.9,
                 embed_dropout=False,
                 use_nce=True,
                 nce_samples=2)

    options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
    # options = None
    model.set_session(runtime_stats=True, run_options=options)
    model.set_log_dir("/tmp/")
    model.log_graph()

    model.config_optimizer(
        tf.train.AdamOptimizer(learning_rate=0.005),
        gradient_op=lambda grad: tf.clip_by_norm(grad, 4.0))

    input_data = np.random.randint(0, vocab_size, [batch_size, ctx_size])
    label_data = np.random.randint(0, vocab_size, [batch_size, 1])

    with self.cached_session():
        for _ in tqdm(range(1)):
            # dropout is off, so two consecutive evals must be deterministic
            # and return the same value
            eval1 = model.eval({inputs: input_data, labels: label_data})
            eval2 = model.eval({inputs: input_data, labels: label_data})

            # a train step updates the parameters, so the returned loss
            # should differ from the preceding eval
            result = model.train({inputs: input_data, labels: label_data},
                                 write_summaries=True)

            self.assertArrayEqual(eval1, eval2)
            self.assertArrayNotEqual(result, eval2)
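# The use_nce=True path exercised above replaces the full softmax loss with
# noise-contrastive estimation during training. This sketch shows the same
# idea with plain TF1 ops via tf.nn.nce_loss; the shapes and names are
# invented here and this is not the NNLM's internal implementation:
import tensorflow as tf

vocab_size, embed_dim, num_sampled = 1000, 64, 10

# output embedding matrix and bias act as the NCE "classifier" weights
nce_w = tf.Variable(tf.random_normal([vocab_size, embed_dim], stddev=0.05))
nce_b = tf.Variable(tf.zeros([vocab_size]))

h = tf.placeholder(tf.float32, [None, embed_dim])  # predicted context vector
y = tf.placeholder(tf.int64, [None, 1])            # target word ids

# average NCE loss over the batch; only num_sampled negative classes are
# scored per example instead of the full vocabulary
loss = tf.reduce_mean(
    tf.nn.nce_loss(weights=nce_w, biases=nce_b, labels=y, inputs=h,
                   num_sampled=num_sampled, num_classes=vocab_size))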