Example No. 1
from collections import Counter

import nltk
from pycocotools.coco import COCO


def build_vocab(json, threshold):
    """Build a simple vocabulary wrapper."""
    coco = COCO(json)
    counter = Counter()
    ids = coco.anns.keys()
    for i, ann_id in enumerate(ids):
        caption = str(coco.anns[ann_id]['caption'])
        tokens = nltk.tokenize.word_tokenize(caption.lower())
        counter.update(tokens)

        if i % 1000 == 0:
            print("[%d/%d] Tokenized the captions." % (i, len(ids)))

    # If the word frequency is less than 'threshold', then the word is discarded.
    words = [word for word, cnt in counter.items() if cnt >= threshold]

    # Create a vocab wrapper and add the special tokens.
    vocab = Vocabulary()
    vocab.add_word('<pad>')
    vocab.add_word('<start>')
    vocab.add_word('<end>')
    vocab.add_word('<unk>')

    # Add the remaining words to the vocabulary.
    for word in words:
        vocab.add_word(word)
    return vocab
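
Vocabulary is a project-local wrapper that this snippet does not define. A minimal sketch consistent with how it is used above (an assumption, not the original class):

class Vocabulary:
    """Minimal word <-> id mapping with an <unk> fallback."""

    def __init__(self):
        self.word2idx = {}
        self.idx2word = {}
        self.idx = 0

    def add_word(self, word):
        # Ignore words that are already present.
        if word not in self.word2idx:
            self.word2idx[word] = self.idx
            self.idx2word[self.idx] = word
            self.idx += 1

    def __call__(self, word):
        # Out-of-vocabulary words fall back to the <unk> id.
        return self.word2idx.get(word, self.word2idx['<unk>'])

    def __len__(self):
        return len(self.word2idx)

A typical call then builds and pickles the vocabulary for later use (the paths are placeholders):

import pickle

vocab = build_vocab('annotations/captions_train2014.json', threshold=4)
with open('vocab.pkl', 'wb') as f:
    pickle.dump(vocab, f)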
Example No. 2
import json
import os

import torch
from pycocotools.coco import COCO
from pycocoevalcap.eval import COCOEvalCap


class Evaluator:
    def __init__(self, dataloader, path, cfg, reference_file):

        self.cfg = cfg
        self.prediction_filepath = path.prediction_filepath
        self.dataloader = dataloader
        self.coco = COCO(reference_file)
        self.scores = {}
        self.bleu4 = 0.206  # BLEU-4 threshold for deciding when to save the best model

    def prediction_list(self, model):
        result = []
        ide_list = []
        caption_list = []
        model.eval()
        with torch.no_grad():
            for data in self.dataloader:
                features, targets, mask, max_length, ides = data
                cap, cap_txt, _ = model.Greedy_Decoding(
                    features.to(self.cfg.device))
                ide_list += list(ides.cpu().numpy())
                caption_list += cap_txt
        for ide, caption in zip(ide_list, caption_list):
            result.append({'image_id': ide.item(), 'caption': caption.strip()})

        return result

    def prediction_file_generation(self, result, prediction_filename):

        self.predicted_file = os.path.join(self.prediction_filepath,
                                           prediction_filename)
        with open(self.predicted_file, 'w') as fp:
            json.dump(result, fp)

    def evaluate(self, model, epoch):
        prediction_filename = self.cfg.model_name + str(epoch) + '.json'
        result = self.prediction_list(model)
        self.prediction_file_generation(result, prediction_filename)

        cocoRes = self.coco.loadRes(self.predicted_file)
        cocoEval = COCOEvalCap(self.coco, cocoRes)
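        # Note: stock pycocoevalcap's COCOEvalCap.evaluate() returns None and
        # stores its results in cocoEval.eval; this snippet assumes a variant
        # that returns the scores directly.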
        scores = cocoEval.evaluate()
        self.scores[epoch] = scores
        #         if scores[0][1][3] > self.bleu4:
        #             self.bleu4 = scores[0][1][3]
        #             self.save_model(model,epoch)
        return scores

    def save_model(self, model, epoch):
        print('Better result; saving models...')
        encoder_filename = 'Save/' + self.cfg.model_name + 'encoder_' + str(
            epoch) + '.pt'
        decoder_filename = 'Save/' + self.cfg.model_name + 'decoder_' + str(
            epoch) + '.pt'
        torch.save(model.encoder.state_dict(), encoder_filename)
        torch.save(model.decoder.state_dict(), decoder_filename)
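
A hypothetical way to drive this evaluator from a training loop; val_loader, paths, cfg, the annotation path, and train_one_epoch are placeholders for objects defined elsewhere in the project:

evaluator = Evaluator(val_loader, paths, cfg, 'annotations/captions_val.json')
for epoch in range(cfg.num_epochs):
    train_one_epoch(model, train_loader)  # assumed project training step
    scores = evaluator.evaluate(model, epoch)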
Example No. 3
def __init__(self, root, json, vocab, transform=None):
    """Set the paths for images, captions, and the vocabulary wrapper.

    Args:
        root: image directory.
        json: coco annotation file path.
        vocab: vocabulary wrapper.
        transform: image transformer.
    """
    self.root = root
    self.coco = COCO(json)
    self.ids = list(self.coco.anns.keys())
    self.vocab = vocab
    self.transform = transform
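
This constructor belongs to a torch.utils.data.Dataset subclass; the __getitem__ that usually accompanies it is not shown. A sketch of the typical COCO-captioning version (an illustration, not this project's code; assumes import os, nltk, torch and from PIL import Image):

def __getitem__(self, index):
    """Return one (image, caption) pair; the caption is a tensor of word ids."""
    ann_id = self.ids[index]
    caption = self.coco.anns[ann_id]['caption']
    img_id = self.coco.anns[ann_id]['image_id']
    path = self.coco.loadImgs(img_id)[0]['file_name']

    image = Image.open(os.path.join(self.root, path)).convert('RGB')
    if self.transform is not None:
        image = self.transform(image)

    # Tokenize the caption and map tokens to ids via the vocabulary wrapper.
    tokens = nltk.tokenize.word_tokenize(str(caption).lower())
    ids = [self.vocab('<start>')]
    ids.extend(self.vocab(token) for token in tokens)
    ids.append(self.vocab('<end>'))
    return image, torch.Tensor(ids)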
Example No. 4
import json
import pickle

import torch
from torchvision import transforms
from pycocotools.coco import COCO
from pycocoevalcap.eval import COCOEvalCap

# CocoImageFolder and to_var are project-local helpers assumed to be in scope.


def coco_eval(model, args, epoch):
    """
    Evaluate a trained captioning model on the MS-COCO validation set.

    model: trained model to be evaluated
    args: pre-set parameters
    epoch: epoch number, used for display and for naming the results file
    """

    model.eval()

    # Validation images must already be resized to 224x224
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
    ])

    # Load the vocabulary
    with open(args.vocab_path, 'rb') as f:
        vocab = pickle.load(f)

    # Wrap the COCO validation dataset
    eval_data_loader = torch.utils.data.DataLoader(
        CocoImageFolder(args.val_dir, args.caption_val_path, transform),
        batch_size=args.eval_size,
        shuffle=False,
        num_workers=args.num_workers,
        drop_last=False)

    # Generated captions to be compared with the ground truth
    results = []
    print('---------------------Start evaluation on MS-COCO dataset-----------------------')
    for i, (images, image_ids, _) in enumerate(eval_data_loader):

        images = to_var(images)
        generated_captions, _, _ = model.sampler(images)

        captions = generated_captions.cpu().data.numpy()

        # Build caption based on Vocabulary and the '<end>' token
        for image_idx in range(captions.shape[0]):

            sampled_ids = captions[image_idx]
            sampled_caption = []

            for word_id in sampled_ids:

                word = vocab.idx2word[word_id]
                if word == '<end>':
                    break
                else:
                    sampled_caption.append(word)

            sentence = ' '.join(sampled_caption)

            temp = {'image_id': int(image_ids[image_idx]), 'caption': sentence}
            results.append(temp)

        # Display evaluation progress
        if (i + 1) % 100 == 0:
            print('[%d/%d]' % ((i + 1), len(eval_data_loader)))

    print('------------------------Caption Generated-------------------------------------')

    # Evaluate the results based on the COCO API
    resFile = 'results/adaptive-' + str(epoch) + '.json'
    with open(resFile, 'w') as f:
        json.dump(results, f)

    annFile = args.caption_val_path
    coco = COCO(annFile)
    cocoRes = coco.loadRes(resFile)

    cocoEval = COCOEvalCap(coco, cocoRes)
    cocoEval.params['image_id'] = cocoRes.getImgIds()
    cocoEval.evaluate()

    # Get CIDEr score for validation evaluation
    cider = 0.
    print('-----------Evaluation performance on MS-COCO validation dataset for Epoch %d----------'
          % epoch)
    for metric, score in cocoEval.eval.items():

        print('%s: %.4f' % (metric, score))
        if metric == 'CIDEr':
            cider = score

    return cider
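
A hypothetical driver around coco_eval that keeps the checkpoint with the best CIDEr; num_epochs, the training step, and the save path are placeholders:

best_cider = 0.0
for epoch in range(num_epochs):
    # ... train for one epoch ...
    cider = coco_eval(model, args, epoch)
    if cider > best_cider:
        best_cider = cider
        torch.save(model.state_dict(), 'models/best-adaptive.pt')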
Example No. 5
import os
import sys

from pycocotools.coco import COCO
from pycocoevalcap.eval import COCOEvalCap

# data_config is assumed to be defined elsewhere (a dict with a "result_dir" key).
ref_file = os.path.join(data_config["result_dir"], "reference.json")
gen_file = os.path.join(data_config["result_dir"], "generated.json")

if not os.path.isfile(ref_file):
    print("reference.json not found in %s\nExiting" %
          (data_config["result_dir"]))
    sys.exit()

if not os.path.isfile(gen_file):
    print("generated_caption.json not found in %s\nExiting" %
          (data_config["result_dir"]))
    sys.exit()

# create coco object and cocoRes object
coco = COCO(ref_file)
cocoRes = coco.loadRes(gen_file)

# create cocoEval object by taking coco and cocoRes
cocoEval = COCOEvalCap(coco, cocoRes)

# evaluate only on the images present in the results file by setting
# cocoEval.params['image_id'] = cocoRes.getImgIds()
# remove the following line to evaluate on the full validation set
cocoEval.params['image_id'] = cocoRes.getImgIds()

# evaluate results
cocoEval.evaluate()

# print output evaluation scores
for metric, score in cocoEval.eval.items():
    print('%s: %.3f' % (metric, score))
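
For reference, coco.loadRes() expects the generated file to be a JSON list of records with "image_id" and "caption" keys; a minimal sketch of writing one (the id and caption are made up):

import json

example_results = [
    {"image_id": 42, "caption": "a man riding a wave on a surfboard"},
]
with open(gen_file, 'w') as fp:
    json.dump(example_results, fp)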