# Thin wrapper around the CIDEr-D scorer.
from ciderD.ciderD import CiderD


class CIDErEvalCap:
    def __init__(self, df):
        # if 'idxs' in df:
        #     _gts = gts
        #     _res = res
        # else:
        #     print 'tokenization...'
        #     tokenizer = PTBTokenizer('gts')
        #     _gts = tokenizer.tokenize(gts)
        #     print 'tokenized refs'
        #     tokenizer = PTBTokenizer('res')
        #     _res = tokenizer.tokenize(res)
        #     print 'tokenized cands'
        #
        # self.gts = _gts
        # self.res = _res
        self.df = df
        self.scorer = CiderD(df=self.df)

    def evaluate(self, gts, res):
        # Score the candidate captions (res) against the references (gts).
        score, scores = self.scorer.compute_score(gts, res)
        return score, scores

    def method(self):
        return "Cider"
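# --- Usage sketch (not from the original file) ---
# A minimal, hypothetical example of driving CIDErEvalCap. The caption data is
# made up, and the input format assumed here (gts: image id -> list of reference
# strings, res: list of {'image_id', 'caption'} dicts) follows the ciderD package
# this wrapper builds on; the exact expected format may differ by version.
gts = {0: ['a dog runs across a grassy field',
           'a brown dog running through the grass']}
res = [{'image_id': 0, 'caption': ['a dog running in a field']}]

evaluator = CIDErEvalCap(df='coco-train-words')
mean_score, per_image_scores = evaluator.evaluate(gts, res)
print('mean CIDEr-D: %0.3f' % mean_score)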
# Alternative version of the CIDErEvalCap methods: here evaluate() builds the
# scorer list itself and scores the captions stored on the instance
# (self.gts / self.res), which the commented-out tokenization block in
# __init__ would populate.
def __init__(self, df):
    # if 'idxs' in df:
    #     _gts = gts
    #     _res = res
    # else:
    #     print 'tokenization...'
    #     tokenizer = PTBTokenizer('gts')
    #     _gts = tokenizer.tokenize(gts)
    #     print 'tokenized refs'
    #     tokenizer = PTBTokenizer('res')
    #     _res = tokenizer.tokenize(res)
    #     print 'tokenized cands'
    #
    # self.gts = _gts
    # self.res = _res
    self.df = df
    self.scorer = CiderD(df=self.df)


def evaluate(self):
    # =================================================
    # Set up scorers
    # =================================================
    print('setting up scorers...')
    scorers = [(CiderD(df=self.df), "CIDErD")]

    # =================================================
    # Compute scores
    # =================================================
    metric_scores = {}
    for scorer, method in scorers:
        print('computing %s score...' % scorer.method())
        score, scores = scorer.compute_score(self.gts, self.res)
        print("Mean %s score: %0.3f" % (method, score))
        metric_scores[method] = list(scores)
    return metric_scores
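# --- Background sketch (not from the original file) ---
# The per-image scores returned above are what self-critical sequence training
# (used below via get_self_critical_reward_with_bleu) turns into rewards: the
# score of the greedily decoded caption acts as a baseline and is subtracted
# from the score of the sampled caption. A generic, hypothetical illustration
# of that idea (the real misc.reward implementation, which also mixes in BLEU,
# is not reproduced here):
import numpy as np

def scst_reward_sketch(sample_scores, greedy_scores, seq_len):
    # reward = score(sampled caption) - score(greedy caption), repeated over
    # the generated tokens so it can weight each token's log-probability.
    advantage = np.asarray(sample_scores) - np.asarray(greedy_scores)
    return np.repeat(advantage[:, np.newaxis], seq_len, axis=1)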
import torch
from torch.utils.data import DataLoader
import torch.optim as optim
from tqdm import tqdm
import time
import os

from Model_enc_no_attr.Model import TransformerCap
from coco_loader_with_val import CocoDataset, collate_fn
from ciderD.ciderD import CiderD
import opts
from train_utils import *
from misc.reward import get_self_critical_reward_with_bleu
import json

# CIDEr-D scorer initialised with the pre-computed COCO training-set
# document frequencies.
cider_D = CiderD(df='coco-train-words')


def search_pos(att_feat, model, max_len):
    # Greedy decoding: repeatedly feed the sequence generated so far back into
    # the model and append the most likely next token.
    model.eval()
    batch_size = att_feat.size(0)
    # Seed every sequence with the start token (index 9488 in this vocabulary).
    seq = torch.Tensor([[9488]] * batch_size).long().cuda()
    pos = get_seq_position(seq)
    with torch.no_grad():
        for _ in range(max_len + 1):
            preds = model(seq, pos, att_feat, return_attn=False)
            preds = preds.view(batch_size, seq.size(1), -1)
            # Take the arg-max token of the last step and append it.
            preds = preds[:, -1, :].max(1)[1].unsqueeze(1)
            seq = torch.cat([seq, preds], dim=1)
            pos = get_seq_position(seq)
    # Drop the start token; the decoded sequences are max_len + 1 tokens long.
    preds = seq[:, 1:]
    assert preds.size(1) == max_len + 1
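# --- Helper sketch (not from the original file) ---
# get_seq_position comes from the wildcard import of train_utils above and is
# not shown in this snippet. A plausible minimal implementation, assuming it
# returns 1-based position indices for non-padded tokens (and 0 for padding);
# the actual train_utils version may differ:
def get_seq_position_sketch(seq, pad_idx=0):
    positions = torch.arange(1, seq.size(1) + 1, device=seq.device)
    positions = positions.unsqueeze(0).expand(seq.size(0), -1)
    # Zero out positions that correspond to padding tokens.
    return positions * seq.ne(pad_idx).long()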