def main():
    parser = argparse.ArgumentParser("English - Lojban translation")
    parser.add_argument("--source", default='loj', help="source language data")
    parser.add_argument("--target", default='en', help="target language data")
    parser.add_argument("--iters", type=int, default=100000,
                        help="number of iterations to train")
    parser.add_argument("--no-train", action='store_true',
                        help="Do not perform training. Only validation")
    parser.add_argument("--pretrain-encoder", help="Path to pretrained encoder")
    parser.add_argument("--pretrain-decoder", help="Path to pretrained decoder")
    parser.add_argument("--pretrain-input-words", type=int,
                        help="Number of source language words in pretrained model")
    parser.add_argument("--pretrain-output-words", type=int,
                        help="Number of target language words in pretrained model")
    parser.add_argument("--encoder-ckpt", default="encoder.pth",
                        help="Name of encoder checkpoint filename")
    parser.add_argument("--decoder-ckpt", default="decoder.pth",
                        help="Name of decoder checkpoint filename")
    parser.add_argument("--prefix", default='', help='Prefix added to data files')
    args = parser.parse_args()

    input_lang, output_lang, pairs, pairs_val = prepare_data(
        args.source, args.target, prefix=args.prefix)
    langs = (input_lang, output_lang)
    print(random.choice(pairs))

    # Size the models to match a pretrained checkpoint if its vocab sizes are given,
    # otherwise fall back to the vocab sizes built from the data.
    input_words = args.pretrain_input_words or input_lang.n_words
    output_words = args.pretrain_output_words or output_lang.n_words
    encoder = EncoderRNN(input_words, hidden_size).to(device)
    decoder = AttnDecoderRNN(hidden_size, output_words, dropout_p=0.1).to(device)

    if args.pretrain_encoder and args.pretrain_decoder:
        load_pretrained_model(encoder, decoder,
                              args.pretrain_encoder, args.pretrain_decoder)

    if not args.no_train:
        train(encoder, decoder, args.iters, pairs, langs, print_every=5000)
        torch.save(encoder.state_dict(), args.encoder_ckpt)
        torch.save(decoder.state_dict(), args.decoder_ckpt)

    evaluate_all(encoder, decoder, pairs_val, langs)
def main(opts):
    # set manual_seed and build vocab
    print(opts, flush=True)
    setup(opts, opts.seed)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"Using {device} :)")

    # create a batch training environment that will also preprocess text
    vocab = read_vocab(opts.train_vocab)
    tok = Tokenizer(opts.remove_punctuation == 1, opts.reversed == 1,
                    vocab=vocab, encoding_length=opts.max_cap_length)

    # create language instruction encoder
    encoder_kwargs = {
        'opts': opts,
        'vocab_size': len(vocab),
        'embedding_size': opts.word_embedding_size,
        'hidden_size': opts.rnn_hidden_size,
        'padding_idx': padding_idx,
        'dropout_ratio': opts.rnn_dropout,
        'bidirectional': opts.bidirectional == 1,
        'num_layers': opts.rnn_num_layers
    }
    print('Using {} as encoder ...'.format(opts.lang_embed))
    if 'lstm' in opts.lang_embed:
        encoder = EncoderRNN(**encoder_kwargs)
    else:
        raise ValueError('Unknown {} language embedding'.format(opts.lang_embed))
    print(encoder)

    # create policy model
    policy_model_kwargs = {
        'opts': opts,
        'img_fc_dim': opts.img_fc_dim,
        'img_fc_use_batchnorm': opts.img_fc_use_batchnorm == 1,
        'img_dropout': opts.img_dropout,
        'img_feat_input_dim': opts.img_feat_input_dim,
        'rnn_hidden_size': opts.rnn_hidden_size,
        'rnn_dropout': opts.rnn_dropout,
        'max_len': opts.max_cap_length,
        'max_navigable': opts.max_navigable
    }

    if opts.arch == 'regretful':
        model = Regretful(**policy_model_kwargs)
    elif opts.arch == 'self-monitoring':
        model = SelfMonitoring(**policy_model_kwargs)
    elif opts.arch == 'speaker-baseline':
        model = SpeakerFollowerBaseline(**policy_model_kwargs)
    else:
        raise ValueError('Unknown {} model for seq2seq agent'.format(opts.arch))
    print(model)

    encoder = encoder.to(device)
    model = model.to(device)

    params = list(encoder.parameters()) + list(model.parameters())
    optimizer = torch.optim.Adam(params, lr=opts.learning_rate)

    # optionally resume from a checkpoint
    if opts.resume:
        model, encoder, optimizer, best_success_rate = resume_training(
            opts, model, encoder, optimizer)

    # a secondary exp name is useful when resuming from a previously saved experiment
    # and saving to another one, e.g., pre-trained on synthetic data and fine-tuned on real data
    if opts.exp_name_secondary:
        opts.exp_name += opts.exp_name_secondary

    feature, img_spec = load_features(opts.img_feat_dir, opts.blind)

    if opts.test_submission:
        assert opts.resume, 'The model was not resumed before running for submission.'
        test_env = ('test', (R2RPanoBatch(opts, feature, img_spec,
                                          batch_size=opts.batch_size,
                                          splits=['test'], tokenizer=tok),
                             Evaluation(['test'], opts)))
        agent_kwargs = {
            'opts': opts,
            'env': test_env[1][0],
            'results_path': "",
            'encoder': encoder,
            'model': model,
            'feedback': opts.feedback
        }
        agent = PanoSeq2SeqAgent(**agent_kwargs)

        # setup trainer
        trainer = PanoSeq2SeqTrainer(opts, agent, optimizer)
        epoch = opts.start_epoch - 1
        trainer.eval(epoch, test_env)
        return

    # set up R2R environments
    if not opts.train_data_augmentation:
        train_env = R2RPanoBatch(opts, feature, img_spec, batch_size=opts.batch_size,
                                 seed=opts.seed, splits=['train'], tokenizer=tok)
    else:
        train_env = R2RPanoBatch(opts, feature, img_spec, batch_size=opts.batch_size,
                                 seed=opts.seed, splits=['synthetic'], tokenizer=tok)

    val_craft_splits = ['craft_seen', 'craft_unseen']
    val_splits = ['val_seen', 'val_unseen']
    if opts.craft_eval:
        val_splits += val_craft_splits
    val_envs = {split: (R2RPanoBatch(opts, feature, img_spec,
                                     batch_size=opts.batch_size,
                                     splits=[split], tokenizer=tok),
                        Evaluation([split], opts))
                for split in val_splits}

    # create agent
    agent_kwargs = {
        'opts': opts,
        'env': train_env,
        'results_path': "",
        'encoder': encoder,
        'model': model,
        'feedback': opts.feedback
    }
    agent = PanoSeq2SeqAgent(**agent_kwargs)

    # setup trainer
    trainer = PanoSeq2SeqTrainer(opts, agent, optimizer, opts.train_iters_epoch)

    if opts.eval_only:
        success_rate = []
        for val_env in val_envs.items():
            success_rate.append(trainer.eval(opts.start_epoch - 1, val_env, tb_logger=None))
        return

    # set up tensorboard logger
    tb_logger = set_tb_logger(opts.log_dir, opts.exp_name, opts.resume)
    sys.stdout.flush()

    best_success_rate = best_success_rate if opts.resume else 0.0
    for epoch in range(opts.start_epoch, opts.max_num_epochs + 1):
        trainer.train(epoch, train_env, tb_logger)

        if epoch % opts.eval_every_epochs == 0:
            success_rate = []
            for val_env in val_envs.items():
                success_rate.append(trainer.eval(epoch, val_env, tb_logger))
            success_rate_compare = success_rate[1]

            if is_experiment():
                # remember best val_unseen success rate and save checkpoint
                is_best = success_rate_compare >= best_success_rate
                best_success_rate = max(success_rate_compare, best_success_rate)
                print("--> Highest val_unseen success rate: {}".format(best_success_rate))
                sys.stdout.flush()

                # save the model if it is the best so far
                save_checkpoint({
                    'opts': opts,
                    'epoch': epoch + 1,
                    'state_dict': model.state_dict(),
                    'encoder_state_dict': encoder.state_dict(),
                    'best_success_rate': best_success_rate,
                    'optimizer': optimizer.state_dict(),
                    'max_episode_len': opts.max_episode_len,
                }, is_best, checkpoint_dir=opts.checkpoint_dir, name=opts.exp_name)

        if opts.train_data_augmentation and epoch == opts.epochs_data_augmentation:
            train_env = R2RPanoBatch(opts, feature, img_spec, batch_size=opts.batch_size,
                                     seed=opts.seed, splits=['train'], tokenizer=tok)

    print("--> Finished training")
def main(opts):
    # set manual_seed and build vocab
    setup(opts, opts.seed)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # create a batch training environment that will also preprocess text
    vocab = read_vocab(opts.train_vocab)
    tok = Tokenizer(
        opts.remove_punctuation == 1,
        opts.reversed == 1,
        vocab=vocab,
        encoding_length=opts.max_cap_length,
    )

    # create language instruction encoder
    encoder_kwargs = {
        "opts": opts,
        "vocab_size": len(vocab),
        "embedding_size": opts.word_embedding_size,
        "hidden_size": opts.rnn_hidden_size,
        "padding_idx": padding_idx,
        "dropout_ratio": opts.rnn_dropout,
        "bidirectional": opts.bidirectional == 1,
        "num_layers": opts.rnn_num_layers,
    }
    print("Using {} as encoder ...".format(opts.lang_embed))
    if "lstm" in opts.lang_embed:
        encoder = EncoderRNN(**encoder_kwargs)
    else:
        raise ValueError("Unknown {} language embedding".format(opts.lang_embed))
    print(encoder)

    # create policy model
    policy_model_kwargs = {
        "opts": opts,
        "img_fc_dim": opts.img_fc_dim,
        "img_fc_use_batchnorm": opts.img_fc_use_batchnorm == 1,
        "img_dropout": opts.img_dropout,
        "img_feat_input_dim": opts.img_feat_input_dim,
        "rnn_hidden_size": opts.rnn_hidden_size,
        "rnn_dropout": opts.rnn_dropout,
        "max_len": opts.max_cap_length,
        "max_navigable": opts.max_navigable,
    }

    if opts.arch == "self-monitoring":
        model = SelfMonitoring(**policy_model_kwargs)
    elif opts.arch == "speaker-baseline":
        model = SpeakerFollowerBaseline(**policy_model_kwargs)
    else:
        raise ValueError("Unknown {} model for seq2seq agent".format(opts.arch))
    print(model)

    encoder = encoder.to(device)
    model = model.to(device)

    params = list(encoder.parameters()) + list(model.parameters())
    optimizer = torch.optim.Adam(params, lr=opts.learning_rate)

    # optionally resume from a checkpoint
    if opts.resume:
        model, encoder, optimizer, best_success_rate = resume_training(
            opts, model, encoder, optimizer)

    # a secondary exp name is useful when resuming from a previously saved experiment
    # and saving to another one, e.g., pre-trained on synthetic data and fine-tuned on real data
    if opts.exp_name_secondary:
        opts.exp_name += opts.exp_name_secondary

    feature, img_spec = load_features(opts.img_feat_dir)

    if opts.test_submission:
        assert opts.resume, "The model was not resumed before running for submission."
        test_env = (
            "test",
            (
                R2RPanoBatch(
                    opts,
                    feature,
                    img_spec,
                    batch_size=opts.batch_size,
                    splits=["test"],
                    tokenizer=tok,
                ),
                Evaluation(["test"]),
            ),
        )
        agent_kwargs = {
            "opts": opts,
            "env": test_env[1][0],
            "results_path": "",
            "encoder": encoder,
            "model": model,
            "feedback": opts.feedback,
        }
        agent = PanoSeq2SeqAgent(**agent_kwargs)

        # setup trainer
        trainer = PanoSeq2SeqTrainer(opts, agent, optimizer)
        epoch = opts.start_epoch - 1
        trainer.eval(epoch, test_env)
        return

    # set up R2R environments
    if not opts.train_data_augmentation:
        train_env = R2RPanoBatch(
            opts,
            feature,
            img_spec,
            batch_size=opts.batch_size,
            seed=opts.seed,
            splits=["train"],
            tokenizer=tok,
        )
    else:
        train_env = R2RPanoBatch(
            opts,
            feature,
            img_spec,
            batch_size=opts.batch_size,
            seed=opts.seed,
            splits=["synthetic"],
            tokenizer=tok,
        )

    val_envs = {
        split: (
            R2RPanoBatch(
                opts,
                feature,
                img_spec,
                batch_size=opts.batch_size,
                splits=[split],
                tokenizer=tok,
            ),
            Evaluation([split]),
        )
        for split in ["val_seen", "val_unseen"]
    }

    # create agent
    agent_kwargs = {
        "opts": opts,
        "env": train_env,
        "results_path": "",
        "encoder": encoder,
        "model": model,
        "feedback": opts.feedback,
    }
    agent = PanoSeq2SeqAgent(**agent_kwargs)

    # setup trainer
    trainer = PanoSeq2SeqTrainer(opts, agent, optimizer, opts.train_iters_epoch)

    if opts.eval_beam or opts.eval_only:
        success_rate = []
        for val_env in val_envs.items():
            success_rate.append(
                trainer.eval(opts.start_epoch - 1, val_env, tb_logger=None))
        return

    # set up tensorboard logger
    tb_logger = set_tb_logger(opts.log_dir, opts.exp_name, opts.resume)

    best_success_rate = best_success_rate if opts.resume else 0.0
    for epoch in range(opts.start_epoch, opts.max_num_epochs + 1):
        trainer.train(epoch, train_env, tb_logger)

        if epoch % opts.eval_every_epochs == 0:
            success_rate = []
            for val_env in val_envs.items():
                success_rate.append(trainer.eval(epoch, val_env, tb_logger))
            success_rate_compare = success_rate[1]

            if is_experiment():
                # remember best val_unseen success rate and save checkpoint
                is_best = success_rate_compare >= best_success_rate
                best_success_rate = max(success_rate_compare, best_success_rate)
                print("--> Highest val_unseen success rate: {}".format(best_success_rate))

                # save the model if it is the best so far
                save_checkpoint(
                    {
                        "opts": opts,
                        "epoch": epoch + 1,
                        "state_dict": model.state_dict(),
                        "encoder_state_dict": encoder.state_dict(),
                        "best_success_rate": best_success_rate,
                        "optimizer": optimizer.state_dict(),
                        "max_episode_len": opts.max_episode_len,
                    },
                    is_best,
                    checkpoint_dir=opts.checkpoint_dir,
                    name=opts.exp_name,
                )

        if opts.train_data_augmentation and epoch == opts.epochs_data_augmentation:
            train_env = R2RPanoBatch(
                opts,
                feature,
                img_spec,
                batch_size=opts.batch_size,
                seed=opts.seed,
                splits=["train"],
                tokenizer=tok,
            )

    print("--> Finished training")
def main():
    train_loader = ChatbotDataset('train')
    val_loader = ChatbotDataset('valid')

    # Initialize word embeddings
    embedding = nn.Embedding(voc.num_words, hidden_size)

    # Initialize encoder & decoder models
    encoder = EncoderRNN(hidden_size, embedding, encoder_n_layers, dropout)
    decoder = LuongAttnDecoderRNN(attn_model, embedding, hidden_size, voc.num_words,
                                  decoder_n_layers, dropout)

    # Use appropriate device
    encoder = encoder.to(device)
    decoder = decoder.to(device)

    # Initialize optimizers
    print('Building optimizers ...')
    encoder_optimizer = optim.Adam(encoder.parameters(), lr=learning_rate)
    decoder_optimizer = optim.Adam(decoder.parameters(), lr=learning_rate)

    # Initializations
    print('Initializing ...')
    batch_time = AverageMeter()  # forward prop. + back prop. time
    losses = AverageMeter()  # loss (per word decoded)

    # Epochs
    for epoch in range(start_epoch, epochs):
        # One epoch's training
        # Ensure dropout layers are in train mode
        encoder.train()
        decoder.train()

        start = time.time()

        # Batches
        for i in range(len(train_loader)):
            input_variable, lengths, target_variable, mask, max_target_len = train_loader[i]

            loss = train(input_variable, lengths, target_variable, mask, max_target_len,
                         encoder, decoder, encoder_optimizer, decoder_optimizer)

            # Keep track of metrics
            losses.update(loss, max_target_len)
            batch_time.update(time.time() - start)

            start = time.time()

            if i % print_every == 0:
                print('[{0}] Epoch: [{1}][{2}/{3}]\t'
                      'Batch Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
                          timestamp(), epoch, i, len(train_loader),
                          batch_time=batch_time, loss=losses))

        # One epoch's validation
        val_loss = validate(val_loader, encoder, decoder)
        print('\n * LOSS - {loss:.3f}\n'.format(loss=val_loss))

        # Initialize search module
        searcher = GreedySearchDecoder(encoder, decoder)
        for sentence in pick_n_valid_sentences(10):
            decoded_words = evaluate(searcher, sentence)
            print('Human: {}'.format(sentence))
            print('Bot: {}'.format(''.join(decoded_words)))

        # Save checkpoint
        if epoch % save_every == 0:
            directory = save_dir
            if not os.path.exists(directory):
                os.makedirs(directory)
            torch.save({
                'epoch': epoch,
                'en': encoder.state_dict(),
                'de': decoder.state_dict(),
                'en_opt': encoder_optimizer.state_dict(),
                'de_opt': decoder_optimizer.state_dict(),
                'loss': loss,
                'voc': voc.__dict__
            }, os.path.join(directory, '{}_{}_{}.tar'.format('checkpoint', epoch, val_loss)))
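# The training loop above relies on an AverageMeter helper for batch_time and losses,
# which is not defined in this snippet. The class below is a minimal sketch consistent
# with how .update(), .val and .avg are used here (it mirrors the common utility from
# the PyTorch ImageNet example); the original implementation may differ.
class AverageMeter:
    """Tracks the most recent value and the running (weighted) average of a metric."""

    def __init__(self):
        self.val = 0.0    # last value passed to update()
        self.sum = 0.0    # weighted sum of all values seen so far
        self.count = 0    # total weight seen so far
        self.avg = 0.0    # running average

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count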
def main():
    ap = argparse.ArgumentParser()
    ap.add_argument('--hidden_size', default=256, type=int,
                    help='hidden size of encoder/decoder, also word vector size')
    ap.add_argument('--edge_size', default=20, type=int,
                    help='embedding dimension of edges')
    ap.add_argument('--n_iters', default=100000, type=int,
                    help='total number of examples to train on')
    ap.add_argument('--print_every', default=5000, type=int,
                    help='print loss info every this many training examples')
    ap.add_argument('--checkpoint_every', default=10000, type=int,
                    help='write out checkpoint every this many training examples')
    ap.add_argument('--initial_learning_rate', default=0.001, type=float,
                    help='initial learning rate')
    ap.add_argument('--train_files', default='../amr_anno_1.0/data/split/training/*',
                    help='training files.')
    ap.add_argument('--log_dir', default='./log', help='log directory')
    ap.add_argument('--exp_name', default='experiment', help='experiment name')
    ap.add_argument('--batch_size', default=5, type=int, help='batch size')
    ap.add_argument('--load_checkpoint', action='store_true',
                    help='use existing checkpoint')
    args = ap.parse_args()

    logdir = args.log_dir
    exp_dir = logdir + '/' + args.exp_name
    if not os.path.exists(logdir):
        os.makedirs(logdir)
    if not os.path.exists(exp_dir):
        os.makedirs(exp_dir)

    load_state_file = None
    if args.load_checkpoint:
        # pick the checkpoint with the highest iteration number
        max_iter = 0
        state_files = glob.glob(exp_dir + '/*')
        for sf in state_files:
            iter_num = int(sf.split('_')[1].split('.')[0])
            if iter_num > max_iter:
                max_iter = iter_num
                load_state_file = sf

    # Create vocab from training data
    iter_num = 0
    train_files = glob.glob(args.train_files)
    train_pairs = AMR.read_AMR_files(train_files, True)
    amr_vocab, en_vocab = None, None
    state = None
    batch_size = args.batch_size
    hidden_size = args.hidden_size
    edge_size = args.edge_size
    drop = DROPOUT_P
    mlength = MAX_LENGTH

    if load_state_file is not None:
        state = torch.load(load_state_file)
        iter_num = state['iter_num']
        amr_vocab = state['amr_vocab']
        en_vocab = state['en_vocab']
        hidden_size = state['hidden_size']
        edge_size = state['edge_size']
        drop = state['dropout']
        mlength = state['max_length']
        logging.info('loaded checkpoint %s', load_state_file)
    else:
        amr_vocab, en_vocab = make_vocabs(train_pairs)

    encoder = EncoderRNN(amr_vocab.n_nodes, hidden_size).to(device)
    child_sum = ChildSum(amr_vocab.n_edges, edge_size, hidden_size).to(device)
    decoder = AttnDecoderRNN(hidden_size, en_vocab.n_words, dropout_p=drop,
                             max_length=mlength).to(device)

    # load checkpoint
    if state is not None:
        encoder.load_state_dict(state['enc_state'])
        child_sum.load_state_dict(state['sum_state'])
        decoder.load_state_dict(state['dec_state'])

    # set up optimization/loss
    params = list(encoder.parameters()) + list(child_sum.parameters()) + list(
        decoder.parameters())  # .parameters() returns a generator
    optimizer = optim.Adam(params, lr=args.initial_learning_rate)
    criterion = nn.NLLLoss()

    # load checkpoint
    if state is not None:
        optimizer.load_state_dict(state['opt_state'])

    start = time.time()
    print_loss_total = 0  # Reset every args.print_every

    while iter_num < args.n_iters:
        # shrink the batch if needed so checkpoint/print boundaries are hit exactly
        num_samples = batch_size
        remaining = args.checkpoint_every - (iter_num % args.checkpoint_every)
        remaining2 = args.print_every - (iter_num % args.print_every)
        if remaining < batch_size:
            num_samples = remaining
        elif remaining2 < batch_size:
            num_samples = remaining2
        iter_num += num_samples

        random_pairs = random.sample(train_pairs, num_samples)
        target_snt = tensors_from_batch(en_vocab, random_pairs)

        loss = train(random_pairs, target_snt, amr_vocab, encoder, child_sum, decoder,
                     optimizer, criterion)
        print_loss_total += loss

        if iter_num % args.checkpoint_every == 0:
            state = {
                'iter_num': iter_num,
                'enc_state': encoder.state_dict(),
                'sum_state': child_sum.state_dict(),
                'dec_state': decoder.state_dict(),
                'opt_state': optimizer.state_dict(),
                'amr_vocab': amr_vocab,
                'en_vocab': en_vocab,
                'hidden_size': hidden_size,
                'edge_size': edge_size,
                'dropout': drop,
                'max_length': mlength
            }
            filename = 'state_%010d.pt' % iter_num
            save_file = exp_dir + '/' + filename
            torch.save(state, save_file)
            logging.debug('wrote checkpoint to %s', save_file)

        if iter_num % args.print_every == 0:
            print_loss_avg = print_loss_total / args.print_every
            print_loss_total = 0
            logging.info(
                'time since start:%s (iter:%d iter/n_iters:%d%%) loss_avg:%.4f',
                time.time() - start, iter_num, iter_num / args.n_iters * 100,
                print_loss_avg)
n_iters = 1000000
training_pairs = [
    tensorsFromPair(random.choice(pairs)) for i in range(n_iters)
]
print_every = 100
save_every = 1000
print_loss_total = 0

start = time.time()
for iter in range(1, n_iters + 1):
    training_pair = training_pairs[iter - 1]
    input_tensor = training_pair[0]
    output_tensor = training_pair[1]

    loss = loss_func(input_tensor, output_tensor, encoder, decoder,
                     encoder_optimizer, decoder_optimizer, criterion)
    print_loss_total += loss

    if iter % print_every == 0:
        print_loss_avg = print_loss_total / print_every
        print_loss_total = 0
        print("{},{},{},{}".format(timeSince(start, iter / n_iters), iter,
                                   iter / n_iters * 100, print_loss_avg))

    if iter % save_every == 0:
        torch.save(encoder.state_dict(), "models/encoder_{}.pth".format(iter))
        torch.save(decoder.state_dict(), "models/decoder_{}.pth".format(iter))

    # step the LR schedulers every 10000 iterations
    if iter % 10000 == 0:
        scheduler_encoder.step()
        scheduler_decoder.step()
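# timeSince(start, fraction) is used above for progress reporting but is not defined in
# this snippet. The helpers below are a minimal sketch of what it presumably looks like,
# matching the well-known PyTorch seq2seq tutorial utility; the original may differ.
import math
import time

def asMinutes(s):
    m = math.floor(s / 60)
    s -= m * 60
    return '%dm %ds' % (m, s)

def timeSince(since, percent):
    now = time.time()
    s = now - since          # elapsed time
    es = s / percent         # estimated total time
    rs = es - s              # estimated remaining time
    return '%s (- %s)' % (asMinutes(s), asMinutes(rs))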
def main(): """ Main function for the translation RNN """ args = parse_args() eng_prefixes = ( "i am ", "i m ", "he is", "he s ", "she is", "she s ", "you are", "you re ", "we are", "we re ", "they are", "they re " ) input_lang, output_lang, pairs = \ prepare_data('eng', 'fra', reverse=True, max_length=args.max_length, prefixes=eng_prefixes) # print(random.choice(pairs)) encoder = EncoderRNN(input_lang.num_words, args.hidden_size).to(args.device) decoder = AttentionDecoderRNN( args.hidden_size, output_lang.num_words, args.max_length, args.dropout ).to(args.device) if args.train: train_iters( encoder, decoder, pairs, args.max_length, input_lang, output_lang, args.num_iters, device=args.device, print_every=args.print_every, teacher_forcing_ratio=args.teacher_forcing_ratio) torch.save(encoder.state_dict(), 'encoder.pth') torch.save(decoder.state_dict(), 'decoder.pth') encoder.load_state_dict(torch.load('encoder.pth')) decoder.load_state_dict(torch.load('decoder.pth')) encoder.eval() decoder.eval() evaluate_randomly( encoder, decoder, pairs, input_lang, output_lang, args.max_length, args.device, n=10 ) # visualizing attention _, attentions = \ evaluate( encoder, decoder, 'je suis trop froid .', input_lang, output_lang, args.max_length, args.device ) plt.matshow(attentions.cpu().numpy()) input_sentences = ['elle a cinq ans de moins que moi .', 'elle est trop petit .', 'je ne crains pas de mourir .', 'c est un jeune directeur plein de talent .'] for input_sentence in input_sentences: evaluate_and_show_attention( encoder, decoder, input_sentence, input_lang, output_lang, args.max_length, args.device )
def main(args):
    torch.cuda.set_device(6)

    model_path = args.model_path
    if not os.path.exists(model_path):
        os.makedirs(model_path)

    # load vocabulary
    with open(args.vocab_path, 'rb') as f:
        vocab = pickle.load(f)

    img_path = args.img_path
    factual_cap_path = args.factual_caption_path
    humorous_cap_path = args.humorous_caption_path

    # import data_loader
    data_loader = get_data_loader(img_path, factual_cap_path, vocab,
                                  args.caption_batch_size)
    styled_data_loader = get_styled_data_loader(humorous_cap_path, vocab,
                                                args.language_batch_size)

    # import models
    emb_dim = args.emb_dim
    hidden_dim = args.hidden_dim
    factored_dim = args.factored_dim
    vocab_size = len(vocab)
    encoder = EncoderRNN(voc_size=vocab_size, emb_size=emb_dim, hidden_size=emb_dim)
    decoder = FactoredLSTM(emb_dim, hidden_dim, factored_dim, vocab_size)

    if torch.cuda.is_available():
        encoder = encoder.cuda()
        decoder = decoder.cuda()

    # loss and optimizer
    criterion = masked_cross_entropy
    cap_params = list(decoder.parameters()) + list(encoder.parameters())
    lang_params = list(decoder.S_hc.parameters()) + list(decoder.S_hf.parameters()) \
        + list(decoder.S_hi.parameters()) + list(decoder.S_ho.parameters())
    optimizer_cap = torch.optim.Adam(cap_params, lr=args.lr_caption)
    optimizer_lang = torch.optim.Adam(lang_params, lr=args.lr_language)

    # train
    total_cap_step = len(data_loader)
    total_lang_step = len(styled_data_loader)
    epoch_num = args.epoch_num
    for epoch in range(epoch_num):
        # caption
        for i, (messages, m_lengths, targets, t_lengths) in enumerate(data_loader):
            messages = to_var(messages.long())
            targets = to_var(targets.long())

            # forward, backward and optimize
            decoder.zero_grad()
            encoder.zero_grad()
            output, features = encoder(messages, list(m_lengths))
            outputs = decoder(targets, features, mode="factual")
            loss = criterion(outputs[:, 1:, :].contiguous(),
                             targets[:, 1:].contiguous(), t_lengths - 1)
            loss.backward()
            optimizer_cap.step()

            # print log
            if i % args.log_step_caption == 0:
                print("Epoch [%d/%d], CAP, Step [%d/%d], Loss: %.4f"
                      % (epoch + 1, epoch_num, i, total_cap_step, loss.data[0]))
                eval_outputs(outputs, vocab)

        # language
        for i, (captions, lengths) in enumerate(styled_data_loader):
            captions = to_var(captions.long())

            # forward, backward and optimize
            decoder.zero_grad()
            outputs = decoder(captions, mode='humorous')
            loss = criterion(outputs, captions[:, 1:].contiguous(), lengths - 1)
            loss.backward()
            optimizer_lang.step()

            # print log
            if i % args.log_step_language == 0:
                print("Epoch [%d/%d], LANG, Step [%d/%d], Loss: %.4f"
                      % (epoch + 1, epoch_num, i, total_lang_step, loss.data[0]))

        # save models
        torch.save(decoder.state_dict(),
                   os.path.join(model_path, 'decoder-%d.pkl' % (epoch + 1,)))
        torch.save(encoder.state_dict(),
                   os.path.join(model_path, 'encoder-%d.pkl' % (epoch + 1,)))