def __init__(self, layer, n_units,
             previous_state=None,
             activation=tx.tanh,
             use_bias=True,
             init=tx.glorot_uniform(),
             recurrent_init=tx.glorot_uniform(),
             share_state_with=None,
             name="rnn_cell"):
    self.activation = activation
    self.use_bias = use_bias
    self.init = init
    self.recurrent_init = recurrent_init
    super().__init__(layer, n_units, [layer.n_units, n_units], tf.float32, name)

    if previous_state is not None:
        if previous_state.n_units != self.n_units:
            raise ValueError(
                "previous state n_units ({}) != current n_units ({})".format(
                    previous_state.n_units, self.n_units))
    self.previous_state = previous_state

    if share_state_with is not None and not isinstance(share_state_with, RNNCell):
        raise TypeError(
            "share_state_with must be of type {}, got {} instead".format(
                RNNCell, type(share_state_with)))
    self.share_state_with = share_state_with

    self.tensor = self._build_graph(layer, previous_state)
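# Usage sketch (added for illustration, not part of the original source; it
# assumes a `tx.Input` layer and that this __init__ belongs to RNNCell):
# `previous_state` carries the recurrent state from the prior time step, and
# `share_state_with` ties this cell's weights to an existing cell, which is
# how the cell is meant to be reused when unrolling over time with a single
# set of parameters.
#
#     x_t0 = tx.Input(n_units=10, name="x_t0")
#     x_t1 = tx.Input(n_units=10, name="x_t1")
#     cell_t0 = RNNCell(x_t0, n_units=20)
#     cell_t1 = RNNCell(x_t1, n_units=20,
#                       previous_state=cell_t0,
#                       share_state_with=cell_t0)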
def run(**kwargs):
    arg_dict.from_dict(kwargs)
    args = arg_dict.to_namespace()

    # ======================================================================================
    # Load Params, Prepare results assets
    # ======================================================================================
    # os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu)
    # print(args.corpus)

    # Experiment parameter summary
    res_param_filename = os.path.join(args.out_dir,
                                      "params_{id}.csv".format(id=args.run_id))
    with open(res_param_filename, "w") as param_file:
        writer = csv.DictWriter(f=param_file, fieldnames=arg_dict.keys())
        writer.writeheader()
        writer.writerow(arg_dict)
        param_file.flush()

    # make dir for model checkpoints
    if args.save_model:
        model_ckpt_dir = os.path.join(args.out_dir,
                                      "model_{id}".format(id=args.run_id))
        os.makedirs(model_ckpt_dir, exist_ok=True)
        model_path = os.path.join(model_ckpt_dir,
                                  "nnlm_{id}.ckpt".format(id=args.run_id))

    # start perplexity file
    ppl_header = ["id", "run", "epoch", "step", "lr", "dataset", "perplexity"]
    ppl_fname = os.path.join(args.out_dir,
                             "perplexity_{id}.csv".format(id=args.run_id))
    ppl_file = open(ppl_fname, "w")
    ppl_writer = csv.DictWriter(f=ppl_file, fieldnames=ppl_header)
    ppl_writer.writeheader()

    # ======================================================================================
    # CORPUS, Vocab and RIs
    # ======================================================================================
    corpus = h5py.File(os.path.join(args.corpus,
                                    "ptb_{}.hdf5".format(args.ngram_size)),
                       mode='r')
    vocab = marisa_trie.Trie(corpus["vocabulary"])

    # generates k-dimensional random indexes with s_active units
    all_positive = args.ri_all_positive
    ri_generator = Generator(dim=args.k_dim, num_active=args.s_active,
                             symmetric=not all_positive)

    # pre-generate indices for the vocabulary: since the indexes are random,
    # it doesn't matter which ri gets assigned to which word
    ris = [ri_generator.generate() for _ in range(len(vocab))]
    ri_tensor = ris_to_sp_tensor_value(ris, dim=args.k_dim)
    # ri_tensor = RandomIndexTensor.from_ri_list(ris, args.k_dim, args.s_active)

    # ======================================================================================
    def data_pipeline(data, epochs=1, batch_size=args.batch_size, shuffle=False):
        def chunk_fn(x):
            return chunk_it(x, chunk_size=batch_size * 1000)

        if epochs > 1:
            data = repeat_apply(chunk_fn, data, epochs)
        else:
            data = chunk_fn(data)

        if shuffle:
            data = shuffle_it(data, args.shuffle_buffer_size)

        data = batch_it(data, size=batch_size, padding=False)
        return data

    # ======================================================================================
    # MODEL
    # ======================================================================================
    # Activation functions
    if args.h_act == "relu":
        h_act = tx.relu
        h_init = tx.he_normal_init()
    elif args.h_act == "tanh":
        h_act = tx.tanh
        h_init = tx.glorot_uniform()
    elif args.h_act == "elu":
        h_act = tx.elu
        h_init = tx.he_normal_init()

    # Parameter init
    if args.embed_init == "normal":
        embed_init = tx.random_normal(mean=0., stddev=args.embed_init_val)
    elif args.embed_init == "uniform":
        embed_init = tx.random_uniform(minval=-args.embed_init_val,
                                       maxval=args.embed_init_val)

    if args.logit_init == "normal":
        logit_init = tx.random_normal(mean=0., stddev=args.logit_init_val)
    elif args.logit_init == "uniform":
        logit_init = tx.random_uniform(minval=-args.logit_init_val,
                                       maxval=args.logit_init_val)

    if args.f_init == "normal":
        f_init = tx.random_normal(mean=0., stddev=args.f_init_val)
    elif args.f_init == "uniform":
        f_init = tx.random_uniform(minval=-args.f_init_val,
                                   maxval=args.f_init_val)

    # sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
    #                                         log_device_placement=True))
    # with tf.device('/gpu:{}'.format(args.gpu)):
    model = NNLM_NRP(ctx_size=args.ngram_size - 1,
                     vocab_size=len(vocab),
                     k_dim=args.k_dim,
                     s_active=args.s_active,
                     ri_tensor=ri_tensor,
                     embed_dim=args.embed_dim,
                     embed_init=embed_init,
                     embed_share=args.embed_share,
                     logit_init=logit_init,
                     logit_bias=args.logit_bias,
                     h_dim=args.h_dim,
                     num_h=args.num_h,
                     h_activation=h_act,
                     h_init=h_init,
                     use_dropout=args.dropout,
                     keep_prob=args.keep_prob,
                     embed_dropout=args.embed_dropout,
                     l2_loss=args.l2_loss,
                     l2_loss_coef=args.l2_loss_coef,
                     f_init=f_init)

    model_runner = tx.ModelRunner(model)
    # sess = tf.Session(config=tf.ConfigProto(
    #     allow_soft_placement=True, log_device_placement=True))
    # model_runner.set_session(sess)

    # we use an InputParam because we might want to change it during training
    lr_param = tx.InputParam(value=args.lr)

    if args.optimizer == "sgd":
        optimizer = tf.train.GradientDescentOptimizer(learning_rate=lr_param.tensor)
    elif args.optimizer == "adam":
        optimizer = tf.train.AdamOptimizer(learning_rate=lr_param.tensor,
                                           beta1=args.optimizer_beta1,
                                           beta2=args.optimizer_beta2,
                                           epsilon=args.optimizer_epsilon)
    elif args.optimizer == "ams":
        optimizer = tx.AMSGrad(learning_rate=lr_param.tensor,
                               beta1=args.optimizer_beta1,
                               beta2=args.optimizer_beta2,
                               epsilon=args.optimizer_epsilon)

    def clip_grad_global(grads):
        grads, _ = tf.clip_by_global_norm(grads, 12)
        return grads

    def clip_grad_local(grad):
        return tf.clip_by_norm(grad, args.clip_value)

    if args.clip_grads:
        clip_fn = clip_grad_local if args.clip_local else clip_grad_global
        model_runner.config_optimizer(optimizer, optimizer_params=lr_param,
                                      gradient_op=clip_fn,
                                      global_gradient_op=not args.clip_local)
    else:
        model_runner.config_optimizer(optimizer, optimizer_params=lr_param)

    # assert(model_runner.session == sess)

    # ======================================================================================
    # EVALUATION
    # ======================================================================================
    def eval_model(runner, dataset_it, len_dataset=None, display_progress=False):
        if display_progress:
            pb = tqdm(total=len_dataset, ncols=60)

        batches_processed = 0
        sum_loss = 0
        for batch in dataset_it:
            batch = np.array(batch, dtype=np.int64)
            ctx = batch[:, :-1]
            target = batch[:, -1:]

            mean_loss = runner.eval(ctx, target)
            sum_loss += mean_loss

            if display_progress:
                pb.update(args.batch_size)
            batches_processed += 1

        if display_progress:
            pb.close()

        return np.exp(sum_loss / batches_processed)

    def evaluation(runner: tx.ModelRunner, pb, cur_epoch, step,
                   display_progress=False):
        pb.write("[Eval Validation]")
        val_data = corpus["validation"]
        ppl_validation = eval_model(runner,
                                    data_pipeline(val_data, epochs=1, shuffle=False),
                                    len(val_data), display_progress)
        res_row = {"id": args.id, "run": args.run, "epoch": cur_epoch,
                   "step": step, "lr": lr_param.value,
                   "dataset": "validation", "perplexity": ppl_validation}
        ppl_writer.writerow(res_row)

        pb.write("[Eval Test]")
        test_data = corpus["test"]
        ppl_test = eval_model(runner,
                              data_pipeline(test_data, epochs=1, shuffle=False),
                              len(test_data), display_progress)
        res_row = {"id": args.id, "run": args.run, "epoch": cur_epoch,
                   "step": step, "lr": lr_param.value,
                   "dataset": "test", "perplexity": ppl_test}
        ppl_writer.writerow(res_row)
        ppl_file.flush()

        pb.write("valid. ppl = {}\ntest ppl = {}".format(ppl_validation, ppl_test))
        return ppl_validation

    # ======================================================================================
    # TRAINING LOOP
    # ======================================================================================
    # preparing evaluation steps
    # I use ceil because I make sure we have padded batches at the end
    epoch_step = 0
    global_step = 0
    current_epoch = 0
    patience = 0

    cfg = tf.ConfigProto()
    cfg.gpu_options.allow_growth = True
    sess = tf.Session(config=cfg)
    model_runner.set_session(sess)
    model_runner.init_vars()

    training_dset = corpus["training"]
    progress = tqdm(total=len(training_dset) * args.epochs)
    training_data = data_pipeline(training_dset, epochs=args.epochs, shuffle=True)

    evals = []
    try:
        for ngram_batch in training_data:
            epoch = progress.n // len(training_dset) + 1

            # Start New Epoch
            if epoch != current_epoch:
                current_epoch = epoch
                epoch_step = 0
                progress.write("epoch: {}".format(current_epoch))

            # Eval Time
            if epoch_step == 0:
                current_eval = evaluation(model_runner, progress, epoch, global_step)
                evals.append(current_eval)

                if global_step > 0:
                    if args.early_stop:
                        if evals[-2] - evals[-1] < args.eval_threshold:
                            if patience >= 3:
                                progress.write("early stop")
                                break
                            patience += 1
                        else:
                            patience = 0

                    # lr decay only at the start of each epoch
                    if args.lr_decay and len(evals) > 0:
                        if evals[-2] - evals[-1] < args.eval_threshold:
                            lr_param.value = max(lr_param.value * args.lr_decay_rate,
                                                 args.lr_decay_threshold)
                            progress.write("lr changed to {}".format(lr_param.value))

            # ================================================
            # TRAIN MODEL
            # ================================================
            ngram_batch = np.array(ngram_batch, dtype=np.int64)
            ctx_ids = ngram_batch[:, :-1]
            word_ids = ngram_batch[:, -1:]

            model_runner.train(ctx_ids, word_ids)
            progress.update(args.batch_size)

            epoch_step += 1
            global_step += 1

        # if not early stop, evaluate the last state of the model
        if not args.early_stop or patience < 3:
            evaluation(model_runner, progress, epoch, epoch_step)

        ppl_file.close()

        if args.save_model:
            model_runner.save_model(model_name=model_path, step=global_step,
                                    write_state=False)

        model_runner.close_session()
        progress.close()
        tf.reset_default_graph()
    except Exception as e:
        traceback.print_exc()
        os.remove(ppl_file.name)
        os.remove(param_file.name)
        raise e
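# Added note (illustrative, not part of the original source): runner.eval
# returns the mean cross-entropy (mean negative log-likelihood, in nats) for a
# batch, so eval_model averages those batch means and exponentiates, which is
# the standard corpus perplexity. A minimal self-contained check:
def _demo_perplexity():
    import numpy as np
    batch_mean_losses = [4.7, 4.5, 4.6]  # hypothetical mean NLL per batch
    ppl = np.exp(sum(batch_mean_losses) / len(batch_mean_losses))
    print(ppl)  # exp(4.6) ~= 99.48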
def __init__(self,
             inputs,
             labels,
             vocab_size,
             embed_dim,
             h_dim,
             embed_init=tx.zeros_init(),
             logit_init=tx.glorot_uniform(),
             num_h=1,
             h_activation=tx.tanh,
             h_init=tx.glorot_uniform(),
             w_dropconnect=None,
             u_dropconnect=None,
             r_dropout=0.4,
             y_dropout=0.4,
             embed_dropout=0.3,
             other_dropout=0.3,
             l2_loss=False,
             l2_weight=1e-5,
             use_f_predict=False,
             f_init=tx.random_uniform(minval=-0.01, maxval=0.01),
             embed_share=False,
             logit_bias=False,
             use_nce=False,
             nce_samples=10,
             skip_connections=False):
    if not isinstance(inputs, tx.Input):
        raise TypeError("inputs must be an Input layer")
    self.inputs = inputs

    if not isinstance(labels, tx.Input):
        raise TypeError("labels must be an Input layer")
    self.labels = labels

    if inputs.dtype != tf.int32 and inputs.dtype != tf.int64:
        raise TypeError(
            "invalid dtype for input: expected int32 or int64, got {}".format(
                inputs.dtype))

    if num_h < 0:
        raise ValueError("num_h should be >= 0")

    # ===============================================
    # RUN GRAPH
    # ===============================================
    var_reg = []

    with tf.name_scope("run"):
        # feature lookup
        embeddings = tx.Lookup(inputs,
                               seq_size=None,
                               lookup_shape=[vocab_size, embed_dim],
                               weight_init=embed_init)
        var_reg.append(embeddings.weights)
        feature_lookup = embeddings.permute_batch_time()
        last_layer = feature_lookup

        cell_proto = tx.LSTMCell.proto(n_units=h_dim,
                                       activation=h_activation,
                                       gate_activation=tx.hard_sigmoid,
                                       w_init=h_init,
                                       u_init=h_init,
                                       w_dropconnect=w_dropconnect,
                                       u_dropconnect=u_dropconnect,
                                       r_dropout=r_dropout,
                                       x_dropout=None,
                                       y_dropout=y_dropout,
                                       regularized=False,
                                       name="cell")

        lstm_layers = []
        for i in range(num_h):
            lstm_layer = tx.RNN(last_layer,
                                cell_proto=cell_proto,
                                regularized=False,
                                stateful=True,
                                name="LSTM_{}".format(i + 1))
            lstm_layers.append(lstm_layer)

            var_reg += [wi.weights for wi in lstm_layer.cell.w]
            var_reg += [ui.weights for ui in lstm_layer.cell.u]

            last_layer = lstm_layer

        # last time step is the state used to make the prediction
        # last_layer = tx.Reshape(last_layer, [-1, h_dim])

        # TODO this is not consistent with locked dropout for the last layer,
        # where the same mask should be applied across time steps. To do this
        # I need either y_dropout to be available or some sort of map
        # operation I can use with layers outputting 3D tensors, something
        # equivalent to https://keras.io/layers/wrappers/ which applies a
        # layer to every temporal slice of an input. They implement this the
        # same way they implement an RNN.

        # feature prediction for Energy-Based Model
        if use_f_predict:
            last_layer = tx.Linear(last_layer, embed_dim, f_init,
                                   add_bias=True, name="f_predict")
            # proto = tx.GRUCell.proto(n_units=embed_dim,
            #                          activation=h_activation,
            #                          gate_activation=tx.hard_sigmoid,
            #                          w_init=h_init,
            #                          u_init=h_init,
            #                          w_dropconnect=w_dropconnect,
            #                          u_dropconnect=u_dropconnect,
            #                          r_dropout=r_dropout,
            #                          x_dropout=None,
            #                          y_dropout=y_dropout,
            #                          regularized=False)
            # last_layer1 = tx.RNN(last_layer, cell_proto=proto, regularized=False, stateful=False)
            # last_layer2 = last_layer1.reuse_with(last_layer, reverse=True)
            # last_layer = tx.Add(last_layer1, last_layer2)
            # last_layer = tx.Module(last_layer, last_layer)
            var_reg += last_layer.variables
            # var_reg.append(last_layer.weights)
            f_predict = last_layer

        shared_weights = feature_lookup.weights if embed_share else None
        transpose_weights = embed_share
        logit_init = logit_init if not embed_share else None
        run_logits = tx.Linear(last_layer,
                               n_units=vocab_size,
                               weight_init=logit_init,
                               shared_weights=shared_weights,
                               transpose_weights=transpose_weights,
                               add_bias=logit_bias,
                               name="logits")
        if not embed_share:
            var_reg.append(run_logits.weights)

        run_output = tx.Activation(run_logits, tx.softmax, name="run_output")

    # ===============================================
    # TRAIN GRAPH
    # ===============================================
    with tf.name_scope("train"):
        embeddings = embeddings.reuse_with(inputs)
        feature_lookup = embeddings.permute_batch_time()
        if embed_dropout:
            feature_lookup = tx.Dropout(feature_lookup,
                                        probability=embed_dropout,
                                        name="drop_features")
        last_layer = feature_lookup

        for i in range(num_h):
            lstm_layer = lstm_layers[i].reuse_with(last_layer, regularized=True)
            last_layer = lstm_layer

        # last_layer = tx.Reshape(last_layer, [-1, h_dim])

        # feature prediction for Energy-Based Model
        if use_f_predict:
            # last_layer = f_predict.reuse_with(last_layer)
            last_layer = f_predict.reuse_with(last_layer, regularized=True)

        last_layer = tx.Dropout(last_layer, probability=other_dropout, locked=False)

        train_logits = run_logits.reuse_with(last_layer, name="train_logits")
        train_output = tx.Activation(train_logits, tx.softmax, name="train_output")

        def categorical_loss(labels, logits):
            # labels come as a batch of classes [[1,2],[3,4]] -> [1,3,2,4]
            # time steps are ordered to match logits
            labels = tx.Transpose(labels)
            labels = tx.Reshape(labels, [-1])
            labels = tx.dense_one_hot(labels, num_cols=vocab_size)
            loss = tx.categorical_cross_entropy(labels=labels, logits=logits)
            return tf.reduce_mean(loss)

        def nce_loss(labels, weights, bias, predict):
            noise = uniform_sampler(labels, 1, nce_samples, True, vocab_size)
            loss = tf.nn.nce_loss(weights=weights,
                                  biases=bias,
                                  inputs=predict,
                                  labels=labels,
                                  num_sampled=nce_samples,
                                  num_classes=vocab_size,
                                  num_true=1,
                                  sampled_values=noise)
            return tf.reduce_mean(loss)

        if use_nce:
            bias = tx.VariableLayer(var_shape=[vocab_size], name="nce_bias")
            # wraps the embedding layer to expose its weights as a layer,
            # with the original layer as its input
            nce_weights = tx.WrapLayer(embeddings,
                                       n_units=embeddings.n_units,
                                       wrap_fn=lambda x: x.weights,
                                       layer_fn=True)
            train_loss = tx.LambdaLayer(labels, nce_weights, bias, last_layer,
                                        apply_fn=nce_loss, name="nce_loss")
        else:
            train_loss = tx.LambdaLayer(labels, train_logits,
                                        apply_fn=categorical_loss,
                                        name="train_loss")

        if l2_loss:
            l2_losses = [tf.nn.l2_loss(var) for var in var_reg]
            train_loss = tx.LambdaLayer(
                train_loss,
                apply_fn=lambda x: x + l2_weight * tf.add_n(l2_losses),
                name="train_loss_l2")

    # ===============================================
    # EVAL GRAPH
    # ===============================================
    with tf.name_scope("eval"):
        eval_loss = tx.LambdaLayer(labels, run_logits,
                                   apply_fn=categorical_loss,
                                   name="eval_loss")

    self.stateful_layers = lstm_layers

    # BUILD MODEL
    super().__init__(run_outputs=run_output,
                     run_inputs=inputs,
                     train_inputs=[inputs, labels],
                     train_outputs=train_output,
                     train_loss=train_loss,
                     eval_inputs=[inputs, labels],
                     eval_outputs=run_output,
                     eval_score=eval_loss)
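# Added note (illustrative, not part of the original source): when
# embed_share=True above, run_logits reuses the Lookup weights with
# transpose_weights=True, i.e. the output logits are h @ E^T with E the input
# embedding matrix (weight tying). A numpy sketch of the shapes involved:
def _demo_weight_tying():
    import numpy as np
    vocab_size, embed_dim, batch = 10000, 128, 32
    E = np.random.randn(vocab_size, embed_dim)  # shared embedding matrix
    h = np.random.randn(batch, embed_dim)       # network output features
    logits = h @ E.T                            # [batch, vocab_size]
    assert logits.shape == (batch, vocab_size)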
def run(**kwargs):
    arg_dict.from_dict(kwargs)
    args = arg_dict.to_namespace()

    # ======================================================================================
    # Load Corpus & Vocab
    # ======================================================================================
    corpus = PTBReader(path=args.corpus, mark_eos=args.mark_eos)
    corpus_stats = h5py.File(os.path.join(args.corpus, "ptb_stats.hdf5"),
                             mode='r')

    ri_generator = Generator(dim=args.k_dim, num_active=args.s_active,
                             symmetric=True)

    # vocab = marisa_trie.Trie(corpus_stats["vocabulary"])
    index = TrieSignIndex(generator=ri_generator,
                          vocabulary=corpus_stats["vocabulary"],
                          pregen_indexes=True)

    # for i in range(1000):
    #     w = index.get_sign(i)
    #     ri: RandomIndex = index.get_ri(w)
    #     print(w)
    #     print(ri)
    #     print(index.get_id(w))

    # pre-gen indices for vocab, we could do this iteratively ... same thing
    # ris = [ri_generator.generate() for _ in range(len(vocab))]
    # index = TrieSignIndex(generator=ri_generator, vocabulary=vocab)

    # TODO could create the NRP model with NCE only; inputs with random
    # indices could be passed to the model dynamically, also for inference and
    # evaluation. We could either work with a dynamic encoding process or give
    # the model the current ri tensor with all the known ris, if we know there
    # are no OOV words (words that might not have been seen during training).

    # table with random indices for all known symbols
    # ri_tensor = RandomIndexTensor.from_ri_list(ris, k=args.k_dim, s=args.s_active)

    def corpus_pipeline(corpus_stream,
                        n_gram_size=args.ngram_size,
                        epochs=1,
                        batch_size=args.batch_size,
                        shuffle=args.shuffle,
                        flatten=False):
        """Corpus processing pipeline.

        Transforms the corpus reader -a stream of sentences or words- into a
        stream of n-gram batches.

        Args:
            corpus_stream: the stream of sentences or words
            n_gram_size: the size of the n-gram window
            epochs: number of epochs we want to iterate over this corpus
            batch_size: batch size for the n-gram batch
            shuffle: if true, shuffles the n-grams according to a buffer size
            flatten: if true, sliding windows are applied over a stream of
                words rather than within each sentence (n-grams can cross
                sentence boundaries)
        """
        if flatten:
            word_it = flatten_it(corpus_stream)
            n_grams = window_it(word_it, n_gram_size)
        else:
            sentence_n_grams = (window_it(sentence, n_gram_size)
                                for sentence in corpus_stream)
            n_grams = flatten_it(sentence_n_grams)

        # at this point this is an n-gram iterator
        # n_grams = ([vocab[w] for w in ngram] for ngram in n_grams)
        n_grams = ([index.get_id(w) for w in ngram] for ngram in n_grams)

        if epochs > 1:
            n_grams = repeat_it(n_grams, epochs)

        if shuffle:
            n_grams = shuffle_it(n_grams, args.shuffle_buffer_size)

        n_grams = batch_it(n_grams, size=batch_size, padding=False)
        return n_grams

    # print("counting dataset samples...")
    training_len = sum(1 for _ in corpus_pipeline(corpus.training_set(),
                                                  batch_size=1,
                                                  epochs=1,
                                                  shuffle=False))
    validation_len = None
    test_len = None
    if args.eval_progress:
        validation_len = sum(1 for _ in corpus_pipeline(corpus.validation_set(),
                                                        batch_size=1,
                                                        epochs=1,
                                                        shuffle=False))
        test_len = sum(1 for _ in corpus_pipeline(corpus.test_set(),
                                                  batch_size=1,
                                                  epochs=1,
                                                  shuffle=False))
    # print("done")
    # print("dset len ", training_len)

    # ======================================================================================
    # Load Params, Prepare results assets
    # ======================================================================================
    # Experiment parameter summary
    res_param_filename = os.path.join(
        args.out_dir, "params_{id}_{run}.csv".format(id=args.id, run=args.run))
    with open(res_param_filename, "w") as param_file:
        writer = csv.DictWriter(f=param_file, fieldnames=arg_dict.keys())
        writer.writeheader()
        writer.writerow(arg_dict)
        param_file.flush()

    # make dir for model checkpoints
    if args.save_model:
        model_ckpt_dir = os.path.join(
            args.out_dir, "model_{id}_{run}".format(id=args.id, run=args.run))
        os.makedirs(model_ckpt_dir, exist_ok=True)
        model_path = os.path.join(
            model_ckpt_dir, "nnlm_{id}_{run}.ckpt".format(id=args.id, run=args.run))

    # start perplexity file
    ppl_header = ["id", "run", "epoch", "step", "lr", "dataset", "perplexity"]
    ppl_fname = os.path.join(
        args.out_dir, "perplexity_{id}_{run}.csv".format(id=args.id, run=args.run))
    ppl_file = open(ppl_fname, "w")
    ppl_writer = csv.DictWriter(f=ppl_file, fieldnames=ppl_header)
    ppl_writer.writeheader()

    # ======================================================================================
    # MODEL
    # ======================================================================================
    # Configure weight initializers based on activation functions
    if args.h_act == "relu":
        h_act = tx.relu
        h_init = tx.he_normal_init()
    elif args.h_act == "tanh":
        h_act = tx.tanh
        h_init = tx.glorot_uniform()
    elif args.h_act == "elu":
        h_act = tx.elu
        h_init = tx.he_normal_init()
    elif args.h_act == "selu":
        h_act = tf.nn.selu
        h_init = tx.glorot_uniform()

    # Configure embedding and logit weight initializers
    if args.embed_init == "normal":
        embed_init = tx.random_normal(mean=0., stddev=args.embed_init_val)
    elif args.embed_init == "uniform":
        embed_init = tx.random_uniform(minval=-args.embed_init_val,
                                       maxval=args.embed_init_val)

    if args.logit_init == "normal":
        logit_init = tx.random_normal(mean=0., stddev=args.logit_init_val)
    elif args.logit_init == "uniform":
        logit_init = tx.random_uniform(minval=-args.logit_init_val,
                                       maxval=args.logit_init_val)

    f_init = None
    if args.use_f_predict:
        if args.f_init == "normal":
            f_init = tx.random_normal(mean=0., stddev=args.f_init_val)
        elif args.f_init == "uniform":
            f_init = tx.random_uniform(minval=-args.f_init_val,
                                       maxval=args.f_init_val)

    model = NRP(ctx_size=args.ngram_size - 1,
                sign_index=index,
                k_dim=args.k_dim,
                s_active=args.s_active,
                embed_dim=args.embed_dim,
                h_dim=args.h_dim,
                embed_init=embed_init,
                logit_init=logit_init,
                num_h=args.num_h,
                h_activation=h_act,
                h_init=h_init,
                use_dropout=args.dropout,
                embed_dropout=args.embed_dropout,
                keep_prob=args.keep_prob,
                l2_loss=args.l2_loss,
                l2_loss_coef=args.l2_loss_coef,
                f_init=f_init,
                embed_share=args.embed_share,
                logit_bias=args.logit_bias,
                use_nce=args.nce,
                nce_samples=args.nce_samples,
                nce_noise_amount=0.04)

    model_runner = tx.ModelRunner(model)

    # Input params can be changed during training by setting their value
    # lr_param = tx.InputParam(init_value=args.lr)
    lr_param = tensorx.train.EvalStepDecayParam(
        value=args.lr,
        improvement_threshold=args.eval_threshold,
        less_is_better=True,
        decay_rate=args.lr_decay_rate,
        decay_threshold=args.lr_decay_threshold)

    if args.optimizer == "sgd":
        optimizer = tf.train.GradientDescentOptimizer(learning_rate=lr_param.tensor)
    elif args.optimizer == "adam":
        optimizer = tf.train.AdamOptimizer(learning_rate=lr_param.tensor,
                                           beta1=args.optimizer_beta1,
                                           beta2=args.optimizer_beta2,
                                           epsilon=args.optimizer_epsilon)
    elif args.optimizer == "ams":
        optimizer = tx.AMSGrad(learning_rate=lr_param.tensor,
                               beta1=args.optimizer_beta1,
                               beta2=args.optimizer_beta2,
                               epsilon=args.optimizer_epsilon)

    def clip_grad_global(grads):
        grads, _ = tf.clip_by_global_norm(grads, 12)
        return grads

    def clip_grad_local(grad):
        return tf.clip_by_norm(grad, args.clip_value)

    if args.clip_grads:
        clip_fn = clip_grad_local if args.clip_local else clip_grad_global
        model_runner.config_optimizer(optimizer, optimizer_params=lr_param,
                                      gradient_op=clip_fn,
                                      global_gradient_op=not args.clip_local)
    else:
        model_runner.config_optimizer(optimizer, optimizer_params=lr_param)

    # ======================================================================================
    # EVALUATION
    # ======================================================================================
    def eval_model(runner, dataset_it, len_dataset=None, display_progress=False):
        if display_progress:
            pb = tqdm(total=len_dataset, ncols=60, position=1)

        batches_processed = 0
        sum_loss = 0
        for batch in dataset_it:
            batch = np.array(batch, dtype=np.int64)
            ctx = batch[:, :-1]
            target = batch[:, -1:]

            mean_loss = runner.eval(ctx, target)
            sum_loss += mean_loss

            if display_progress:
                pb.update(args.batch_size)
            batches_processed += 1

        if display_progress:
            pb.close()

        return np.exp(sum_loss / batches_processed)

    def evaluation(runner: tx.ModelRunner, progress_bar, cur_epoch, step,
                   display_progress=False):
        ppl_validation = eval_model(runner,
                                    corpus_pipeline(corpus.validation_set(),
                                                    epochs=1, shuffle=False),
                                    validation_len, display_progress)
        res_row = {"id": args.id, "run": args.run, "epoch": cur_epoch,
                   "step": step, "lr": lr_param.value,
                   "dataset": "validation", "perplexity": ppl_validation}
        ppl_writer.writerow(res_row)

        if args.eval_test:
            # pb.write("[Eval Test Set]")
            ppl_test = eval_model(runner,
                                  corpus_pipeline(corpus.test_set(),
                                                  epochs=1, shuffle=False),
                                  test_len, display_progress)
            res_row = {"id": args.id, "run": args.run, "epoch": cur_epoch,
                       "step": step, "lr": lr_param.value,
                       "dataset": "test", "perplexity": ppl_test}
            ppl_writer.writerow(res_row)

        ppl_file.flush()

        if args.eval_test:
            progress_bar.set_postfix({"test PPL ": ppl_test})

        # pb.write("valid. ppl = {}".format(ppl_validation))
        return ppl_validation

    # ======================================================================================
    # TRAINING LOOP
    # ======================================================================================
    # print("Starting TensorFlow Session")
    # preparing evaluation steps
    # I use ceil because I make sure we have padded batches at the end
    epoch_step = 0
    global_step = 0
    current_epoch = 0
    patience = 0

    cfg = tf.ConfigProto()
    cfg.gpu_options.allow_growth = True
    sess = tf.Session(config=cfg)
    model_runner.set_session(sess)
    model_runner.init_vars()

    progress = tqdm(total=training_len * args.epochs,
                    position=args.pid + 1,
                    disable=not args.display_progress)
    training_data = corpus_pipeline(corpus.training_set(),
                                    batch_size=args.batch_size,
                                    epochs=args.epochs,
                                    shuffle=args.shuffle)

    evaluations = []
    try:
        for ngram_batch in training_data:
            epoch = progress.n // training_len + 1

            # Start New Epoch
            if epoch != current_epoch:
                current_epoch = epoch
                epoch_step = 0
                if args.display_progress:
                    progress.set_postfix({"epoch": current_epoch})

            # ================================================
            # EVALUATION
            # ================================================
            if epoch_step == 0:
                current_eval = evaluation(model_runner, progress, epoch,
                                          global_step,
                                          display_progress=args.eval_progress)
                evaluations.append(current_eval)
                lr_param.update(current_eval)
                # print(lr_param.eval_history)
                # print("improvement ", lr_param.eval_improvement())

                if global_step > 0:
                    if args.early_stop and epoch > 1:
                        if lr_param.eval_improvement() < lr_param.improvement_threshold:
                            if patience >= 3:
                                break
                            patience += 1
                        else:
                            patience = 0

            # ================================================
            # TRAIN MODEL
            # ================================================
            ngram_batch = np.array(ngram_batch, dtype=np.int64)
            ctx_ids = ngram_batch[:, :-1]
            word_ids = ngram_batch[:, -1:]

            model_runner.train(ctx_ids, word_ids)
            progress.update(args.batch_size)

            epoch_step += 1
            global_step += 1

        # if not early stop, evaluate the last state of the model
        if not args.early_stop or patience < 3:
            current_eval = evaluation(model_runner, progress, epoch, epoch_step)
            evaluations.append(current_eval)

        ppl_file.close()

        if args.save_model:
            model_runner.save_model(model_name=model_path, step=global_step,
                                    write_state=False)

        model_runner.close_session()
        progress.close()
        tf.reset_default_graph()

        # return the best validation evaluation
        return min(evaluations)
    except Exception as e:
        traceback.print_exc()
        os.remove(ppl_file.name)
        os.remove(param_file.name)
        raise e
def run(**kwargs):
    arg_dict.from_dict(kwargs)
    args = arg_dict.to_namespace()

    # ======================================================================================
    # Load Corpus & Vocab
    # ======================================================================================
    corpus = PTBReader(path=args.corpus, mark_eos=args.mark_eos)
    corpus_stats = h5py.File(os.path.join(args.corpus, "ptb_stats.hdf5"),
                             mode='r')
    vocab = marisa_trie.Trie(corpus_stats["vocabulary"])

    to_ngrams_batch = partial(to_ngrams,
                              vocab=vocab,
                              ngram_size=args.ngram_size,
                              batch_size=args.batch_size,
                              epochs=1,
                              shuffle=False,
                              shuffle_buffer_size=args.shuffle_buffer_size,
                              enum_epoch=False)

    training_len = sum(1 for _ in to_ngrams_batch(corpus.training_set,
                                                  batch_size=1))
    validation_len = None
    test_len = None
    if args.eval_progress:
        validation_len = sum(1 for _ in to_ngrams_batch(corpus.validation_set,
                                                        batch_size=1))
        test_len = sum(1 for _ in to_ngrams_batch(corpus.test_set, batch_size=1))

    # ======================================================================================
    # Load Params, Prepare results assets
    # ======================================================================================
    # Experiment parameter summary
    res_param_filename = os.path.join(
        args.out_dir, "params_{id}_{run}.csv".format(id=args.id, run=args.run))
    with open(res_param_filename, "w") as param_file:
        writer = csv.DictWriter(f=param_file, fieldnames=arg_dict.keys())
        writer.writeheader()
        writer.writerow(arg_dict)
        param_file.flush()

    # make dir for model checkpoints
    if args.save_model:
        model_ckpt_dir = os.path.join(
            args.out_dir, "model_{id}_{run}".format(id=args.id, run=args.run))
        os.makedirs(model_ckpt_dir, exist_ok=True)
        model_path = os.path.join(
            model_ckpt_dir, "nnlm_{id}_{run}.ckpt".format(id=args.id, run=args.run))

    # start perplexity file
    ppl_header = ["id", "run", "epoch", "step", "lr", "dataset", "perplexity"]
    ppl_filename = os.path.join(
        args.out_dir, "perplexity_{id}_{run}.csv".format(id=args.id, run=args.run))
    ppl_file = open(ppl_filename, "w")
    ppl_writer = csv.DictWriter(f=ppl_file, fieldnames=ppl_header)
    ppl_writer.writeheader()

    # ======================================================================================
    # MODEL
    # ======================================================================================
    # Configure weight initializers based on activation functions
    if args.h_act == "relu":
        h_act = tx.relu
        h_init = tx.he_normal_init()
    elif args.h_act == "tanh":
        h_act = tx.tanh
        h_init = tx.glorot_uniform()
    elif args.h_act == "elu":
        h_act = tx.elu
        h_init = tx.he_normal_init()
    elif args.h_act == "selu":
        h_act = tf.nn.selu
        h_init = tx.glorot_uniform()

    # Configure embedding and logit weight initializers
    if args.embed_init == "normal":
        embed_init = tx.random_normal(mean=0., stddev=args.embed_init_val)
    elif args.embed_init == "uniform":
        embed_init = tx.random_uniform(minval=-args.embed_init_val,
                                       maxval=args.embed_init_val)

    if args.logit_init == "normal":
        logit_init = tx.random_normal(mean=0., stddev=args.logit_init_val)
    elif args.logit_init == "uniform":
        logit_init = tx.random_uniform(minval=-args.logit_init_val,
                                       maxval=args.logit_init_val)

    f_init = None
    if args.use_f_predict:
        if args.f_init == "normal":
            f_init = tx.random_normal(mean=0., stddev=args.f_init_val)
        elif args.f_init == "uniform":
            f_init = tx.random_uniform(minval=-args.f_init_val,
                                       maxval=args.f_init_val)

    inputs = tx.Input(args.ngram_size - 1, dtype=tf.int64, name="ctx_inputs")
    labels = tx.Input(1, dtype=tf.int64, name="labels")
    model = NNLM(inputs=inputs,
                 label_inputs=labels,
                 vocab_size=len(vocab),
                 embed_dim=args.embed_dim,
                 embed_init=embed_init,
                 embed_share=args.embed_share,
                 logit_init=logit_init,
                 h_dim=args.h_dim,
                 num_h=args.num_h,
                 h_activation=h_act,
                 h_init=h_init,
                 use_dropout=args.dropout,
                 drop_probability=args.drop_probability,
                 embed_dropout=args.embed_dropout,
                 l2_loss=args.l2_loss,
                 l2_weight=args.l2_loss_coef,
                 use_f_predict=args.use_f_predict,
                 f_init=f_init,
                 logit_bias=args.logit_bias,
                 use_nce=False)

    # Input params can be changed during training by setting their value
    # lr_param = tx.InputParam(init_value=args.lr)
    lr_param = tensorx.train.EvalStepDecayParam(
        value=args.lr,
        improvement_threshold=args.eval_threshold,
        less_is_better=True,
        decay_rate=args.lr_decay_rate,
        decay_threshold=args.lr_decay_threshold)

    if args.optimizer == "sgd":
        optimizer = tf.train.GradientDescentOptimizer(learning_rate=lr_param.tensor)
    elif args.optimizer == "adam":
        optimizer = tf.train.AdamOptimizer(learning_rate=lr_param.tensor,
                                           beta1=args.optimizer_beta1,
                                           beta2=args.optimizer_beta2,
                                           epsilon=args.optimizer_epsilon)
    elif args.optimizer == "ams":
        optimizer = tx.AMSGrad(learning_rate=lr_param.tensor,
                               beta1=args.optimizer_beta1,
                               beta2=args.optimizer_beta2,
                               epsilon=args.optimizer_epsilon)

    def clip_grad_global(grads):
        grads, _ = tf.clip_by_global_norm(grads, 12)
        return grads

    def clip_grad_local(grad):
        return tf.clip_by_norm(grad, args.clip_value)

    if args.clip_grads:
        clip_fn = clip_grad_local if args.clip_local else clip_grad_global
        model.config_optimizer(optimizer, optimizer_params=lr_param,
                               gradient_op=clip_fn,
                               global_gradient_op=not args.clip_local)
    else:
        model.config_optimizer(optimizer, optimizer_params=lr_param)

    # ======================================================================================
    # EVALUATION
    # ======================================================================================
    def eval_model(model, dataset_it, len_dataset=None, display_progress=False):
        if display_progress:
            pb = tqdm(total=len_dataset, ncols=60, position=1)

        batches_processed = 0
        sum_loss = 0
        for batch in dataset_it:
            batch = np.array(batch, dtype=np.int64)
            ctx = batch[:, :-1]
            target = batch[:, -1:]

            mean_loss = model.eval({inputs: ctx, labels: target})
            sum_loss += mean_loss

            if display_progress:
                pb.update(args.batch_size)
            batches_processed += 1

        if display_progress:
            pb.close()

        return np.exp(sum_loss / batches_processed)

    def evaluation(model: tx.Model, progress_bar, cur_epoch, step,
                   display_progress=False):
        ppl_validation = eval_model(model,
                                    to_ngrams_batch(corpus.validation_set),
                                    validation_len, display_progress)
        res_row = {"id": args.id, "run": args.run, "epoch": cur_epoch,
                   "step": step, "lr": lr_param.value,
                   "dataset": "validation", "perplexity": ppl_validation}
        ppl_writer.writerow(res_row)

        if args.eval_test:
            # pb.write("[Eval Test Set]")
            ppl_test = eval_model(model,
                                  to_ngrams_batch(corpus.test_set),
                                  test_len, display_progress)
            res_row = {"id": args.id, "run": args.run, "epoch": cur_epoch,
                       "step": step, "lr": lr_param.value,
                       "dataset": "test", "perplexity": ppl_test}
            ppl_writer.writerow(res_row)

        ppl_file.flush()

        if args.eval_test:
            progress_bar.set_postfix({"test PPL ": ppl_test})

        # pb.write("valid. ppl = {}".format(ppl_validation))
        return ppl_validation

    # ======================================================================================
    # TRAINING LOOP
    # ======================================================================================
    # print("Starting TensorFlow Session")
    # preparing evaluation steps
    # I use ceil because I make sure we have padded batches at the end
    epoch_step = 0
    global_step = 0
    current_epoch = 0
    patience = 0

    cfg = tf.ConfigProto()
    cfg.gpu_options.allow_growth = True
    sess = tf.Session(config=cfg)
    model.set_session(sess)
    model.init_vars()

    progress = tqdm(total=training_len * args.epochs,
                    position=args.pid + 1,
                    disable=not args.display_progress)
    training_data = to_ngrams_batch(corpus.training_set,
                                    epochs=args.epochs,
                                    shuffle=args.shuffle,
                                    enum_epoch=True)

    evaluations = []
    try:
        for i, ngram_batch in training_data:
            epoch = i + 1

            # Start New Epoch
            if epoch != current_epoch:
                current_epoch = epoch
                epoch_step = 0
                if args.display_progress:
                    progress.set_postfix({"epoch": current_epoch})

            # ================================================
            # EVALUATION
            # ================================================
            if epoch_step == 0:
                current_eval = evaluation(model, progress, epoch, global_step,
                                          display_progress=args.eval_progress)
                evaluations.append(current_eval)
                lr_param.update(current_eval)
                # print(lr_param.eval_history)
                # print("improvement ", lr_param.eval_improvement())

                if global_step > 0:
                    if args.early_stop and epoch > 1:
                        if lr_param.eval_improvement() < lr_param.improvement_threshold:
                            if patience >= 3:
                                break
                            patience += 1
                        else:
                            patience = 0

            # ================================================
            # TRAIN MODEL
            # ================================================
            ngram_batch = np.array(ngram_batch, dtype=np.int64)
            ctx_ids = ngram_batch[:, :-1]
            word_ids = ngram_batch[:, -1:]

            model.train({inputs: ctx_ids, labels: word_ids})
            progress.update(args.batch_size)

            epoch_step += 1
            global_step += 1

        # if not early stop, evaluate the last state of the model
        if not args.early_stop or patience < 3:
            current_eval = evaluation(model, progress, epoch, epoch_step)
            evaluations.append(current_eval)

        ppl_file.close()

        if args.save_model:
            model.save_model(model_name=model_path, step=global_step,
                             write_state=False)

        model.close_session()
        progress.close()
        tf.reset_default_graph()

        # return the best validation evaluation
        return min(evaluations)
    except Exception as e:
        traceback.print_exc()
        os.remove(ppl_file.name)
        os.remove(param_file.name)
        raise e
def _sampled_logits_from_parametric_noise(ri_tensors,
                                          k_dim,
                                          weights,
                                          labels,
                                          inputs,
                                          input_dim,
                                          num_true=1,
                                          partition_strategy="mod",
                                          name=None):
    if isinstance(weights, variables.PartitionedVariable):
        weights = list(weights)
    if not isinstance(weights, list):
        weights = [weights]

    with ops.name_scope(name, "compute_sampled_logits",
                        weights + [inputs, labels]):
        if labels.dtype != dtypes.int64:
            labels = math_ops.cast(labels, dtypes.int64)
        labels_flat = array_ops.reshape(labels, [-1])

        # random indexes for the true labels
        true_ris = tx.gather_sparse(sp_tensor=ri_tensors, ids=labels_flat)

        true_w = embedding_lookup_sparse(params=weights,
                                         sp_ids=tx.sparse_indices(true_ris),
                                         sp_weights=true_ris,
                                         combiner="sum",
                                         partition_strategy=partition_strategy)

        label_layer = tx.TensorLayer(true_w, input_dim)
        noise_fn = tx.FC(label_layer, 512, activation=tx.relu)
        noise_fn_sp = tx.ToSparse(noise_fn)

        noise_ris = tx.Linear(noise_fn_sp, k_dim,
                              weight_init=tx.glorot_uniform(), bias=True)
        noise_ris_sp = tx.ToSparse(noise_ris)

        noise_w = embedding_lookup_sparse(params=weights,
                                          sp_ids=tx.sparse_indices(noise_ris_sp.tensor),
                                          sp_weights=noise_ris_sp.tensor,
                                          combiner="sum",
                                          partition_strategy=partition_strategy)

        noise_logits = math_ops.matmul(inputs, noise_w, transpose_b=True)

        dim = array_ops.shape(true_w)[1:2]
        new_true_w_shape = array_ops.concat([[-1, num_true], dim], 0)
        true_w_e = array_ops.reshape(true_w, new_true_w_shape)

        row_wise_dots = math_ops.multiply(array_ops.expand_dims(inputs, 1),
                                          true_w_e)
        # We want the row-wise dot plus biases which yields a
        # [batch_size, num_true] tensor of true_logits.
        dots_as_matrix = array_ops.reshape(row_wise_dots,
                                           array_ops.concat([[-1], dim], 0))
        true_logits = array_ops.reshape(_sum_rows(dots_as_matrix), [-1, num_true])

        # Construct output logits and labels. The true labels/logits start at col 0.
        out_logits = array_ops.concat([true_logits, noise_logits], 1)

        # true_logits is a float tensor, ones_like(true_logits) is a float
        # tensor of ones. We then divide by num_true to ensure the per-example
        # labels sum to 1.0, i.e. form a proper probability distribution.
        out_labels = array_ops.concat([
            array_ops.ones_like(true_logits) / num_true,
            array_ops.zeros_like(noise_logits)
        ], 1)

        # out_logits = out_logits * math_ops.exp(partition_const)
        # out_logits = out_logits / (partition_const + 1)

        return out_logits, out_labels
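# Added note (illustrative, not part of the original source): a shape check
# for the tensors returned above -- the true logits occupy the first num_true
# columns, the noise logits follow, and the soft labels sum to 1 per row.
def _demo_sampled_logits_layout():
    import numpy as np
    batch, num_true, num_sampled = 4, 1, 3  # hypothetical sizes
    true_logits = np.random.randn(batch, num_true)
    noise_logits = np.random.randn(batch, num_sampled)
    out_logits = np.concatenate([true_logits, noise_logits], axis=1)
    out_labels = np.concatenate([np.ones_like(true_logits) / num_true,
                                 np.zeros_like(noise_logits)], axis=1)
    assert out_logits.shape == (batch, num_true + num_sampled)
    assert np.allclose(out_labels.sum(axis=1), 1.0)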