logging.info("Source 7 length %d" % len(linModel.all_src[6])) with tf.Session() as session: session.run(init) for i in range(config.n_epochs): loss = linModel.train_on_batch(session, targets_batch) logging.info("loss: %d" % loss) linModel.config.eps = 0.5 * linModel.config.eps linModel.prepareSGNMT(args) # reset hypos linModel.cur_hypos = linModel.all_hypos[5:5 + linModel.config. batch_size] targets_batch = linModel.all_trg[5:5 + linModel.config.batch_size] def create_training_session(config): """Creates a MonitoredTrainingSession for training""" return training.MonitoredTrainingSession(checkpoint_dir=config.output_path, save_checkpoint_secs=1200) if __name__ == "__main__": # MAIN CODE STARTS HERE # Load configuration from command line arguments or configuration file args = get_args() validate_args(args) utils.switch_to_t2t_indexing() config = Config(args) do_multi_epoch_train(args, False)
import logging
import sys
import traceback
import time
import numpy as np
import pickle

from cam.sgnmt.decoding import core
from cam.sgnmt import decode_utils
from cam.sgnmt import utils
from cam.sgnmt.ui import get_args
from cam.sgnmt.predictors.core import UnboundedVocabularyPredictor

# Load configuration from command line arguments or configuration file
args = get_args()
decode_utils.base_init(args)


class ForcedDecoder(core.Decoder):
    """Forced decoder implementation. The decode() function returns
    the same hypos as the GreedyDecoder with a forced predictor.
    However, this implementation keeps track of all posteriors along
    the way, which are dumped to the file system afterwards.
    """

    def __init__(self, decoder_args):
        """Initialize the decoder and load target sentences."""
        super(ForcedDecoder, self).__init__(decoder_args)
        self.trg_sentences = load_sentences(decoder_args.trg_test, "target")

    def decode(self, src_sentence):