# Print the parameters
print "Running CIDEr with the following settings"
print "*****************************"
print "Reference File:%s" % (refName)
print "Candidate File:%s" % (candName)
print "Result File:%s" % (resultFile)
print "IDF:%s" % (df_mode)
print "*****************************"

# In[2]:

# load reference and candidate sentences
loadDat = LoadData(pathToData)
gts, res = loadDat.readJson(refName, candName)
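# Note (assumption, inferred from how Example #4 below builds the same inputs):
# gts is expected to map each image_id to its list of reference captions, and
# res to hold the candidate caption produced for each image_id.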

# In[3]:

# calculate cider scores
scorer = ciderEval(gts, res, 'corpus')
# scores: dict of lists with key = metric and value = the list of scores,
# one per candidate
scores = scorer.evaluate()

# In[7]:

# scores['CIDEr'] contains CIDEr scores in a list for each candidate
# scores['CIDErD'] contains CIDEr-D scores in a list for each candidate
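# Putting the two comments above together, the scores dict (and hence the
# dumped JSON) is assumed to look roughly like
#   {"CIDEr": [s_1, s_2, ...], "CIDErD": [s_1, s_2, ...]}
# with one entry per candidate.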

with open(resultFile, 'w') as outfile:
    json.dump(scores, outfile)
# coding: utf-8

# In[1]:

# demo script for running CIDEr
import json
from pydataformat.loadData import LoadData
from pyciderevalcap.eval import CIDErEvalCap as ciderEval

# load the configuration file
config = json.loads(open('params.json', 'r').read())

pathToData = config['pathToData']
refName = config['refName']
candName = config['candName']
resultFile = config['resultFile']
df_mode = config['idf']
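
# For reference, params.json is expected to supply the five keys read above.
# A minimal sketch with placeholder file names (not taken from the repo):
# {
#     "pathToData": "data",
#     "refName": "refs.json",
#     "candName": "cands.json",
#     "resultFile": "results.json",
#     "idf": "corpus"
# }
# 'idf' selects the document-frequency mode; 'corpus' is used in the first
# example above and 'coco-val-df' in Example #3 below.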

# load reference and candidate sentences
loadDat = LoadData(pathToData)
gts, res = loadDat.readJson(refName, candName)

# calculate cider scores
scorer = ciderEval(gts, res, df_mode)

scores = scorer.evaluate()

with open(resultFile, 'w') as outfile:
    json.dump(scores, outfile)
Example #3
    def generate_cidre_rewards(self,
                               sess,
                               data,
                               path,
                               keys,
                               captions_GT,
                               num_samples=10):
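        # Roll-out reward estimation (summary inferred from the code below):
        # for every timestep i, keep the first i + 1 generated tokens, sample
        # num_samples completions from the generator, decode them, score each
        # completed caption with CIDEr against captions_GT, and average the
        # per-sample scores into a (batch_size, n_seq_steps) reward matrix.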
        actions = path["actions"]
        observation_image_features = path["observation_image_features"]
        observation_hidden_state = path["observation_hidden_state"]
        observation_cell_state = path["observation_cell_state"]

        cand_list = []

        for i in range(self.gen_spec.n_seq_steps):
            for j in range(num_samples):
                sampled_actions = np.zeros(
                    (self.gen_spec.batch_size, self.gen_spec.n_seq_steps),
                    dtype=int)
                sampled_actions[:, 0:i + 1] = actions[:, 0:i + 1]

                if i + 1 < self.gen_spec.n_seq_steps:
                    feed_dict = {
                        self.generator.ph_input:
                        actions[:, i:i + 1],
                        self.generator.ph_image_feat_input:
                        observation_image_features[:, i + 1, :],
                        self.generator.ph_hidden_state:
                        observation_hidden_state[:, i + 1, :],
                        self.generator.ph_cell_state:
                        observation_cell_state[:, i + 1, :],
                        self.generator.ph_initial_step:
                        False
                    }

                    for k in range(i + 1, self.gen_spec.n_seq_steps):
                        ac, rs = sess.run(
                            [self.generator.sampled_ac, self.generator.rnn_state],
                            feed_dict=feed_dict)

                        feed_dict[self.generator.ph_hidden_state] = rs[0]
                        feed_dict[self.generator.ph_cell_state] = rs[1]
                        feed_dict[self.generator.ph_input] = ac

                        sampled_actions[:, k] = ac[:, 0]

                candidates = data.decode(sampled_actions)
                assert (len(candidates) == len(keys))
                for idx, cap in enumerate(candidates):
                    cand_list.append({'image_id': keys[idx], 'caption': cap})

        scorer = ciderEval(captions_GT, cand_list, "coco-val-df")
        scores = scorer.evaluate()
        reshaped_scores = np.reshape(
            scores,
            (self.gen_spec.n_seq_steps, num_samples, self.gen_spec.batch_size))
        rewards = np.mean(
            np.swapaxes(np.swapaxes(reshaped_scores, 1, 2), 0, 1), axis=2)
        return rewards
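
The reshape/swapaxes/mean at the end of generate_cidre_rewards turns the flat list of scores returned by the evaluator into a per-example, per-step reward matrix. Assuming the evaluator preserves the order in which cand_list was built (step-major, then sample, then batch element), a minimal standalone sketch of that shape handling, with toy sizes that are not from the original code:

import numpy as np

# toy sizes, for illustration only
n_seq_steps, num_samples, batch_size = 3, 4, 2

# stand-in for the flat per-candidate score list returned by the evaluator
flat_scores = np.arange(n_seq_steps * num_samples * batch_size, dtype=float)

reshaped = np.reshape(flat_scores, (n_seq_steps, num_samples, batch_size))
rewards = np.mean(np.swapaxes(np.swapaxes(reshaped, 1, 2), 0, 1), axis=2)

# one reward per batch element and timestep, averaged over the samples
assert rewards.shape == (batch_size, n_seq_steps)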
Example #4
print("Result File:%s" % (resultFile))
print("IDF:%s" % (df_mode))
print("*****************************")

ref_file = os.path.join(pathToData, refName)
cand_file = os.path.join(pathToData, candName)

ref_list = json.load(open(ref_file))
cand_list = json.load(open(cand_file))

gts = {}
for ref in ref_list:
    if ref['image_id'] in gts:
        gts[ref['image_id']].append(ref['caption'])
    else:
        gts[ref['image_id']] = [ref['caption']]
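
# A stylistic alternative (not in the original script): the same mapping can be
# built with collections.defaultdict, avoiding the explicit membership check:
#
#     from collections import defaultdict
#     gts = defaultdict(list)
#     for ref in ref_list:
#         gts[ref['image_id']].append(ref['caption'])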

# calculate cider scores
scorer = ciderEval(gts, cand_list, df_mode)
# scores: dict of lists with key = metric and value = the list of scores,
# one per candidate
scores = scorer.evaluate()

# In[7]:

# scores['CIDEr'] contains CIDEr scores in a list for each candidate
# scores['CIDErD'] contains CIDEr-D scores in a list for each candidate

with open(resultFile, 'w') as outfile:
    json.dump(scores, outfile)