Example #1
import json
import sys

from vqa import VQA
from vqaEval import VQAEval

# dataDir is assumed to be defined at module level (root of the VQA dataset)


def main(params):
    # set up file names and paths
    taskType = params['task']
    dataType = 'mscoco'  # 'mscoco' for real and 'abstract_v002' for abstract
    dataSubType = 'val2014'
    annFile = '%s/Annotations/%s_%s_annotations.json' % (dataDir, dataType,
                                                         dataSubType)
    quesFile = '%s/Questions/%s_%s_%s_questions.json' % (dataDir, taskType,
                                                         dataType, dataSubType)

    resultPath = params['res_file'].rsplit('/', 1)[0]
    resultPath = '.' if resultPath == params['res_file'] else resultPath
    resultType = params['res_file'].rsplit('_', 1)[0].rsplit('/', 1)[-1]
    fileTypes = ['accuracy', 'evalQA', 'evalQuesType', 'evalAnsType']

    # An example result json file has been provided in './Results' folder.

    resFile = params['res_file']
    [accuracyFile, evalQAFile, evalQuesTypeFile, evalAnsTypeFile] = \
        ['%s/%s_%s_%s_%s_%s.json' % (resultPath, taskType, dataType,
                                     dataSubType, resultType, fileType)
         for fileType in fileTypes]

    # create vqa object and vqaRes object
    vqa = VQA(annFile, quesFile)
    vqaRes = vqa.loadRes(resFile, quesFile)

    # create vqaEval object by taking vqa and vqaRes
    vqaEval = VQAEval(
        vqa, vqaRes, n=2
    )  #n is precision of accuracy (number of places after decimal), default is 2

    # evaluate results
    """
    If you have a list of question ids on which you would like to evaluate your results, pass it as a list to below function
    By default it uses all the question ids in annotation file
    """
    vqaEval.evaluate()

    # print accuracies
    #print "\n"
    print("Overall Accuracy is: %.02f\n" % (vqaEval.accuracy['overall']),
          file=sys.stderr)
    #print "Per Question Type Accuracy is the following:"
    #for quesType in vqaEval.accuracy['perQuestionType']:
    #        print "%s : %.02f" %(quesType, vqaEval.accuracy['perQuestionType'][quesType])
    #print "\n"
    #print "Per Answer Type Accuracy is the following:"
    #for ansType in vqaEval.accuracy['perAnswerType']:
    #        print "%s : %.02f" %(ansType, vqaEval.accuracy['perAnswerType'][ansType])
    #print "\n"

    # save evaluation results to ./Results folder
    print(accuracyFile)
    json.dump(vqaEval.accuracy, open(accuracyFile, 'w'))
    json.dump(vqaEval.evalQA, open(evalQAFile, 'w'))
    json.dump(vqaEval.evalQuesType, open(evalQuesTypeFile, 'w'))
    json.dump(vqaEval.evalAnsType, open(evalAnsTypeFile, 'w'))
Example #2
from vqa import VQA
from vqaEval import VQAEval


class VQA_Evaluator:
    def __init__(self,
                 summary_writer=None,
                 dataDir='/auto/homes/bat34/VQA',
                 versionType='v2_',
                 taskType='OpenEnded',
                 dataType='mscoco',
                 dataSubType='val2014'):
        self.writer = summary_writer
        self.versionType = versionType
        self.taskType = taskType
        self.dataType = dataType
        self.dataSubType = dataSubType
        self.annFile = '%s/Annotations/%s%s_%s_annotations.json' % (
            dataDir, versionType, dataType, dataSubType)
        self.quesFile = '%s/Questions/%s%s_%s_%s_questions.json' % (
            dataDir, versionType, taskType, dataType, dataSubType)
        self.vqa = VQA(self.annFile, self.quesFile)

    def evaluate(self, resFile, epoch):
        vqaRes = self.vqa.loadRes(resFile, self.quesFile)

        # create vqaEval object by taking vqa and vqaRes
        vqaEval = VQAEval(self.vqa, vqaRes, n=2)
        vqaEval.evaluate()
        print("\n")
        print("Overall Accuracy is: %.02f\n" % (vqaEval.accuracy['overall']))
        if self.writer:
            self.writer.add_scalar('validation/overall_accuracy',
                                   vqaEval.accuracy['overall'], epoch)

        print("Per Question Type Accuracy is the following:")
        for quesType in vqaEval.accuracy['perQuestionType']:
            print("%s : %.02f" %
                  (quesType, vqaEval.accuracy['perQuestionType'][quesType]))
            if self.writer:
                self.writer.add_scalar(
                    'validation/%s' % quesType,
                    vqaEval.accuracy['perQuestionType'][quesType], epoch)
        print("\n")
        print("Per Answer Type Accuracy is the following:")
        for ansType in vqaEval.accuracy['perAnswerType']:
            print("%s : %.02f" %
                  (ansType, vqaEval.accuracy['perAnswerType'][ansType]))
            if self.writer:
                self.writer.add_scalar(
                    'validation/%s' % ansType,
                    vqaEval.accuracy['perAnswerType'][ansType], epoch)
        print("\n")
        return float(vqaEval.accuracy['overall'])
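
# A minimal usage sketch, assuming a PyTorch TensorBoard SummaryWriter and
# placeholder paths (both are assumptions, not part of the class above):
if __name__ == '__main__':
    from torch.utils.tensorboard import SummaryWriter
    writer = SummaryWriter('runs/vqa')  # hypothetical log directory
    evaluator = VQA_Evaluator(summary_writer=writer, dataDir='/path/to/VQA')
    overall = evaluator.evaluate('results/val_epoch0.json', epoch=0)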
Example #3
def evaluate(predicted_json_path, ann_path, ques_path):
    """Revised from the official evaluation code.

    Args:
      predicted_json_path: predicted result in json format.
      ann_path: annotation file path.
      ques_path: question file path.

    The evaluation results are saved under "<predicted_json_path>_eval".
    """
    import json
    import os

    from vqa import VQA
    from vqaEval import VQAEval

    vqa = VQA(ann_path, ques_path)
    result = vqa.loadRes(predicted_json_path, ques_path)
    vqa_eval = VQAEval(vqa, result, n=2)
    vqa_eval.evaluate()
    print("\nOverall Accuracy is: %.02f" % (vqa_eval.accuracy['overall']))
    print("Per Question Type Accuracy is the following:")
    for quesType in vqa_eval.accuracy['perQuestionType']:
        print("%s: %.02f" %
              (quesType, vqa_eval.accuracy['perQuestionType'][quesType]))
    print("Per Answer Type Accuracy is the following:")
    for ansType in vqa_eval.accuracy['perAnswerType']:
        print("%s: %.02f" %
              (ansType, vqa_eval.accuracy['perAnswerType'][ansType]))

    result_dir_path = predicted_json_path + "_eval"
    if not os.path.exists(result_dir_path):
        os.makedirs(result_dir_path)
    json.dump(vqa_eval.accuracy,
              open(os.path.join(result_dir_path, 'accuracy'), 'w'))
    json.dump(vqa_eval.evalQA,
              open(os.path.join(result_dir_path, 'evalQA'), 'w'))
    json.dump(vqa_eval.evalQuesType,
              open(os.path.join(result_dir_path, 'evalQuesType'), 'w'))
    json.dump(vqa_eval.evalAnsType,
              open(os.path.join(result_dir_path, 'evalAnsType'), 'w'))
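
# Hypothetical invocation (file names are placeholders):
# evaluate('OpenEnded_mscoco_val2014_fake_results.json',
#          'mscoco_val2014_annotations.json',
#          'OpenEnded_mscoco_val2014_questions.json')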
Example #4
annFile = '%s/raw/annotations/%s_%s_annotations.json' % (args.dirvqa, dataType,
                                                         dataSubType)
quesFile = '%s/raw/annotations/%s_%s_%s_questions.json' % (
    args.dirvqa, taskType, dataType, dataSubType)
#imgDir      ='/local/cadene/data/raw/%s/%s/' %(dataType, dataSubType)

fileTypes = ['results', 'accuracy', 'evalQA', 'evalQuesType', 'evalAnsType']

# An example result json file has been provided in './Results' folder.

[resFile, accuracyFile, evalQAFile, evalQuesTypeFile, evalAnsTypeFile] = \
    ['%s/%s_%s_%s_%s_%s.json' % (args.direpoch, taskType, dataType,
                                 dataSubType, resultType, fileType)
     for fileType in fileTypes]

# create vqa object and vqaRes object
vqa = VQA(annFile, quesFile)
vqaRes = vqa.loadRes(resFile, quesFile)

# create vqaEval object by taking vqa and vqaRes
vqaEval = VQAEval(
    vqa, vqaRes, n=2
)  #n is precision of accuracy (number of places after decimal), default is 2

# evaluate results
"""
If you have a list of question ids on which you would like to evaluate your results, pass it as a list to below function
By default it uses all the question ids in annotation file
"""

# !!!SPECIFY quesIds!!!
# instead, use the evaluate.py I wrote for the TensorFlow code
# quesIds = [list of the question ids for which you have an answer]
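
# A sketch of the call that would follow, assuming quesIds has been filled in:
# vqaEval.evaluate(quesIds=quesIds)
# print("Overall Accuracy is: %.02f" % vqaEval.accuracy['overall'])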
Example #5
annFile     ='%s/Val/%s_%s_annotations.json'%(dataDir, dataType, dataSubType)  # assumed layout, mirroring quesFile
quesFile    ='%s/Val/%s_%s_%s_questions.json'%(dataDir, taskType, dataType, dataSubType)
imgDir      ='%s/val2014/' %(dataDir)
resultType  ='results.json'
fileTypes   = ['results', 'accuracy', 'evalQA', 'evalQuesType', 'evalAnsType']


[resFile, accuracyFile, evalQAFile, evalQuesTypeFile, evalAnsTypeFile] = ['%s.json' % fileType for fileType in fileTypes]


# create vqa object and vqaRes object
vqa = VQA(annFile, quesFile)
vqaRes = vqa.loadRes(resFile, quesFile)


# create vqaEval object by taking vqa and vqaRes
vqaEval = VQAEval(vqa, vqaRes, n=2)   #n is precision of accuracy (number of places after decimal), default is 2


# evaluate results
"""
If you have a list of question ids on which you would like to evaluate your results, pass it as a list to below function
By default it uses all the question ids in annotation file
"""
Example #6
from vqa import VQA
from vqaEval import VQAEval

# dataDir is assumed to be defined at module level (root of the VQA dataset)


def evaluate_model(resFile, quest_ids, subset='val', version='v1'):
    ans_type = None
    # set up file names and paths
    taskType = 'OpenEnded'
    dataType = 'mscoco'  # 'mscoco' for real and 'abstract_v002' for abstract
    dataSubType = '%s2014' % subset
    if version == 'v1':
        annFile = '%s/Annotations/%s_%s_annotations.json' % (dataDir, dataType,
                                                             dataSubType)
        quesFile = '%s/Questions/%s_%s_%s_questions.json' % (
            dataDir, taskType, dataType, dataSubType)
    elif version == 'v2':
        anno_dir = '/import/vision-ephemeral/fl302/data/VQA2.0'
        annFile = '%s/v2_%s_%s_annotations.json' % (anno_dir, dataType,
                                                    dataSubType)
        quesFile = '%s/v2_%s_%s_%s_questions.json' % (anno_dir, taskType,
                                                      dataType, dataSubType)
    else:
        raise ValueError("unknown version: expected 'v1' or 'v2'")
    imgDir = '%s/Images/%s/%s/' % (dataDir, dataType, dataSubType)
    resultType = 'fake'
    fileTypes = ['accuracy', 'evalQA', 'evalQuesType', 'evalAnsType']

    # An example result json file has been provided in './Results' folder.

    [accuracyFile, evalQAFile, evalQuesTypeFile, evalAnsTypeFile] = \
        ['%s/Results/%s_%s_%s_%s_%s.json' % (dataDir, taskType, dataType,
                                             dataSubType, resultType, fileType)
         for fileType in fileTypes]

    # create vqa object and vqaRes object
    vqa = VQA(annFile, quesFile)
    vqaRes = vqa.loadRes(resFile, quesFile)

    # create vqaEval object by taking vqa and vqaRes
    vqaEval = VQAEval(
        vqa, vqaRes, n=2
    )  # n is precision of accuracy (number of places after decimal), default is 2

    # evaluate results
    """
    If you have a list of question ids on which you would like to evaluate your results, pass it as a list to below function
    By default it uses all the question ids in annotation file
    """
    vqaEval.evaluate(quesIds=quest_ids)

    # print accuracies
    print("\n")
    print("Overall Accuracy is: %.02f\n" % (vqaEval.accuracy['overall']))
    print("Per Question Type Accuracy is the following:")
    for quesType in vqaEval.accuracy['perQuestionType']:
        print("%s : %.02f" % (quesType,
                              vqaEval.accuracy['perQuestionType'][quesType]))
    print("\n")
    print("Per Answer Type Accuracy is the following:")
    for ansType in vqaEval.accuracy['perAnswerType']:
        print("%s : %.02f" % (ansType,
                              vqaEval.accuracy['perAnswerType'][ansType]))
    print("\n")

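    # ans_type is hardcoded to None above, so the first branch below always runs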
    if ans_type is None:
        return vqaEval.accuracy['overall'], vqaEval.accuracy['perAnswerType']
    else:
        return vqaEval.accuracy['overall'], vqaEval.accuracy['perAnswerType'][
            ans_type]
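
# Hypothetical call (quest_ids would come from your result file):
# overall, per_answer_type = evaluate_model('results.json', quest_ids,
#                                           subset='val', version='v1')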
Example #7
file_template = "{data_dir}/Results/{version}_{task}_{data}_{data_subtype}_{file_type}.json"
[res_file, accuracyFile, evalQAFile, evalQuesTypeFile, evalAnsTypeFile] = [
    file_template.format(data_dir=data_dir,
                         version=version,
                         task=task,
                         data=data,
                         data_subtype=data_subtype,
                         file_type=file_type) for file_type in file_types
]

res_file = args.results or res_file

# create vqa object and vqaRes object
vqa = VQA(ann_file, ques_file)
vqaRes = vqa.loadRes(res_file, ques_file)

# create vqaEval object by taking vqa and vqaRes
# n is precision of accuracy (number of places after decimal), default is 2
vqaEval = VQAEval(vqa, vqaRes, n=3)

# evaluate results
"""
If you have a list of question ids on which you would like to evaluate your results, pass it as a list to below function
By default it uses all the question ids in annotation file
"""
vqaEval.evaluate()

# print accuracies
print("\n")
print("Overall Accuracy is: %.02f\n" % (vqaEval.accuracy['overall']))