Example #1
def main(loader, vocab, opt, model=None):
    if model is None:
        vocab_size = len(vocab)
        model = MultimodalAtt(vocab_size, opt['max_len'], opt['dim_hidden'],
                              opt['dim_word'])

        model = nn.DataParallel(model)

        if opt['beam']:
            bw = opt['beam_size']
            print(f'Using beam search with beam width = {bw}')
        # Evaluate every saved checkpoint (*.pth) found in the checkpoint directory.
        model_path = opt['checkpoint_path']
        for i in os.listdir(model_path):
            if i.endswith('.pth'):
                print(i)
                path = os.path.join(model_path, i)
                model.load_state_dict(torch.load(path))
                crit = NLUtils.LanguageModelCriterion()

                # eval is the project's evaluation routine, not the Python builtin.
                eval(model, crit, loader, vocab, opt)
    else:
        # Running from inside train.py: evaluate the model that was passed in
        # and return its scores to the caller.
        crit = NLUtils.LanguageModelCriterion()
        scores = eval(model, crit, loader, vocab, opt)
        return scores
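A minimal sketch of how the standalone path of Example #1 might be driven (assuming os, torch, torch.nn as nn, and the project's MultimodalAtt, NLUtils, and eval modules are already imported, as in the repository's own scripts). The opt keys mirror the ones the function reads above; get_test_loader and get_vocab are hypothetical stand-ins for the project's data utilities, and every value shown is illustrative only.

# Hypothetical driver for the standalone path of Example #1.
opt = {
    'max_len': 28,               # assumed maximum caption length
    'dim_hidden': 512,           # assumed RNN hidden size
    'dim_word': 512,             # assumed word-embedding size
    'beam': False,               # set True to report beam-search decoding
    'beam_size': 5,
    'checkpoint_path': 'save/',  # assumed directory holding *.pth checkpoints
}
loader = get_test_loader(opt)    # hypothetical helper: evaluation DataLoader
vocab = get_vocab(opt)           # hypothetical helper: index-to-word mapping
main(loader, vocab, opt)         # model=None, so every checkpoint in save/ is evaluated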
Example #2
def main(opt):
    dataset = VideoAudioDataset(opt, 'val')
    opt['vocab_size'] = dataset.get_vocab_size()
    model = MultimodalAtt(opt['vocab_size'], opt['max_len'], opt['dim_hidden'],
                          opt['dim_word'], dim_vid=opt['dim_vid'],
                          n_layers=opt['num_layers'], rnn_cell=opt['rnn_type'],
                          rnn_dropout_p=opt['rnn_dropout_p']).cuda()
    model = nn.DataParallel(model)
    crit = NLUtils.LanguageModelCriterion()
    # Sweep every *.pth checkpoint in the model directory and evaluate each one.
    for model_path in tqdm(glob.glob(os.path.join(opt['model_directory'], '*.pth'))):
        model.load_state_dict(torch.load(model_path))
        eval(model, crit, dataset, dataset.get_vocab(), opt, model_path)
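One practical note on the sweep above: glob.glob returns paths in arbitrary, OS-dependent order, so the per-checkpoint scores are not guaranteed to come out in training order. A small sketch, reusing the model, crit, dataset, and opt already set up in Example #2, that sorts the checkpoints by modification time first:

import glob
import os

# Evaluate checkpoints oldest-first so the results follow training order.
checkpoints = sorted(
    glob.glob(os.path.join(opt['model_directory'], '*.pth')),
    key=os.path.getmtime,
)
for model_path in checkpoints:
    model.load_state_dict(torch.load(model_path))
    eval(model, crit, dataset, dataset.get_vocab(), opt, model_path)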
Example #3
def main(opt):
    dataset = VideoAudioDataset(opt, 'test')
    opt['vocab_size'] = dataset.get_vocab_size()
    model = MultimodalAtt(opt['vocab_size'],
                          opt['max_len'],
                          opt['dim_hidden'],
                          opt['dim_word'],
                          dim_vid=opt['dim_vid'],
                          n_layers=opt['num_layers'],
                          rnn_dropout_p=opt['rnn_dropout_p']).cuda()
    model = nn.DataParallel(model)
    # Load the single checkpoint specified by opt['model_path'] and evaluate it once.
    model.load_state_dict(torch.load(opt['model_path']))
    crit = NLUtils.LanguageModelCriterion()

    eval(model, crit, dataset, dataset.get_vocab(), opt)
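Because the model is wrapped in nn.DataParallel before load_state_dict is called, the checkpoint at opt['model_path'] is expected to carry 'module.'-prefixed parameter names, which is what saving from a DataParallel-wrapped model produces. If a checkpoint was instead saved from the bare, unwrapped model, a sketch like the following (reusing model and opt from Example #3) loads it into the inner module; map_location='cpu' simply avoids device mismatches when the checkpoint was saved on a different GPU.

import torch

state = torch.load(opt['model_path'], map_location='cpu')
if any(k.startswith('module.') for k in state):
    # Saved from a DataParallel-wrapped model: keys already match the wrapper.
    model.load_state_dict(state)
else:
    # Saved from the bare model: load into the wrapped module directly.
    model.module.load_state_dict(state)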