# Evaluation entry point for the video-captioning model.
# NOTE(review): this chunk was whitespace-mangled (newlines stripped, so the
# inline comments swallowed the code after them); the block below is the
# reconstructed, properly formatted form of the same statements.
if __name__ == '__main__':
    # get configs
    args = get_eval_args()
    print(args)

    # load data
    dm = DataManager(args)

    # prepare model
    model = init_model(args, dm)
    model = model.cuda()  # NOTE(review): assumes a CUDA device is available
    # Restore trained weights from the checkpoint path supplied via the CLI.
    model.load_state_dict(torch.load(args.model_path))

    # split data -- only the test split is needed for evaluation
    _, _, test_data = dm.split()
    test_loader = DataLoader(
        test_data,
        batch_size=args.batch_size,
        num_workers=args.num_workers,
        # partial() pins the split name and padding index so the DataLoader
        # can invoke collate_fn(batch) with one positional argument.
        collate_fn=partial(collate_fn, split='test',
                           padding_idx=dm.word2idx['<pad>']))

    print('Start Video-Captioning Evaluation')
    # First positional argument (0) is presumably an epoch index -- confirm
    # against evaluate()'s signature.
    # NOTE(review): the original chunk was truncated inside this call (it
    # ended at "split='Test',"); any further keyword arguments live beyond
    # this view -- verify the closing of this call against the full file.
    test_score = evaluate(0, model, test_loader, dm,
                          maxlen=args.max_sent_len, split='Test',
                          )
# Training setup for the video-captioning model.
# NOTE(review): this chunk was whitespace-mangled (newlines stripped, so the
# leading '#' turned the whole fragment into one comment); the block below is
# the reconstructed, properly formatted form of the same statements.
# load data
dm = DataManager(args)

# prepare model
model = init_model(args, dm)
model = model.cuda()  # NOTE(review): assumes a CUDA device is available

# prepare training
optimizer = Adam(lr=args.lr, params=model.parameters())
# Multiplies the LR by args.lr_decay each time schedule.step() is called.
schedule = ExponentialLR(optimizer, args.lr_decay)
# Length-aware NLL loss; padding positions are excluded via ignore_index.
loss_fn = NLLLossWithLength(ignore_index=dm.word2idx['<pad>'])
# loss_fn = nn.NLLLoss(ignore_index=dm.word2idx['<pad>'])  # plain-NLL alternative kept for reference

# split data
train_data, val_data, test_data = dm.split()
# partial() pins the split name and padding index so the DataLoader can
# invoke collate_fn(batch) with one positional argument. Only the training
# loader shuffles.
train_loader = DataLoader(train_data, batch_size=args.batch_size, shuffle=True,
                          num_workers=args.num_workers,
                          collate_fn=partial(collate_fn, split='train',
                                             padding_idx=dm.word2idx['<pad>']))
val_loader = DataLoader(val_data, batch_size=args.batch_size,
                        num_workers=args.num_workers,
                        collate_fn=partial(collate_fn, split='val',
                                           padding_idx=dm.word2idx['<pad>']))
test_loader = DataLoader(test_data, batch_size=args.batch_size,
                         num_workers=args.num_workers,
                         collate_fn=partial(collate_fn, split='test',
                                            padding_idx=dm.word2idx['<pad>']))

print('Start Video-Captioning Training')

# Best-so-far trackers across epochs (CIDEr appears to be the model-selection
# metric, judging by the variable name -- confirm against the loop below).
best_cider = 0
best_epoch = -1
best_score = None
max_epoch = args.n_epoch
for i in range(max_epoch):
    # Linearly increase the scheduled-sampling probability each epoch.
    model.schedule_sample_prob = args.schedule_sample_prob + i * args.schedule_sample_ratio
    # NOTE(review): the source chunk is truncated here -- the remainder of
    # the training loop continues beyond this view.