Example #1
def validateInternalTestSet(args):
    from vqaTools.vqaInternal import VQA
    from vqaTools.vqaEval import VQAEval

    #config = Attention_LapConfig(load=True, args=args)
    config = BOWIMG_GPUConfig(load=True, args=args)

    # checkpoint name and directory of the trained model to restore
    restoreModel = config.restoreModel
    restoreModelPath = config.restoreModelPath

    print('Running Validation Test on Model')
    valTestReader = BOWIMGProcessor(config.testAnnotFile,
                                    config.rawQnValTestFile,
                                    config.testImgFile,
                                    config,
                                    is_training=False)

    print('Using BOWIMG model')
    model = BOWIMGModel(config)

    model.loadTrainedModel(restoreModel, restoreModelPath)
    predFile = '{}PredsBOW.csv'.format(restoreModelPath)
    results, strictAcc = model.runPredict(valTestReader, predFile)
    model.destruct()
    valTestReader.destruct()
    print('predictions made')

    vqa = VQA(config.testAnnotFileUnresolved, config.originalValQns)
    vqaRes = vqa.loadRes(results, config.originalValQns)
    vqaEval = VQAEval(vqa, vqaRes, n=2)
    vqaEval.evaluate()

    print('Writing to file...')
    writeToFile(vqaEval, restoreModelPath, vqa, vqaRes, strictAcc)
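For reference, writeToFile is defined elsewhere in the project; the sketch below is a minimal stand-in, assuming it only needs to persist the computed accuracies next to the checkpoint (the real helper likely records more detail, and the vqa/vqaRes arguments it accepts are unused here):

import json

def writeToFile(vqaEval, restoreModelPath, vqa, vqaRes, strictAcc):
    # Hypothetical stand-in: dump overall and per-type accuracies plus the
    # strict accuracy returned by runPredict next to the restored checkpoint.
    outPath = '{}EvalResults.json'.format(restoreModelPath)
    with open(outPath, 'w') as f:
        json.dump({
            'overall': vqaEval.accuracy['overall'],
            'perQuestionType': vqaEval.accuracy['perQuestionType'],
            'perAnswerType': vqaEval.accuracy['perAnswerType'],
            'strictAcc': strictAcc,
        }, f, indent=2)
    print('Results written to {}'.format(outPath))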
Example #2
def internalValTest(args):
    import sys
    # make the project package importable when running outside the repo root
    sys.path.insert(
        0, '/home/jwong/Documents/LinuxWorkspace/Visual-Question-Answering')
    from vqaTools.vqa import VQA
    from vqaTools.vqaEval import VQAEval

    config = BOWIMG_LapConfig(load=False, args=args)
    annFile = config.originalAnnotVal
    quesFile = config.valTestQns
    resFile = 'testResFile.json'

    vqa = VQA(annFile, quesFile)
    vqaRes = vqa.loadRes(resFile, quesFile)

    vqaEval = VQAEval(vqa, vqaRes, n=2)
    vqaEval.evaluate()

    # print accuracies
    print("\n")
    print("Overall Accuracy is: %.02f\n" % vqaEval.accuracy['overall'])
    print("Per Question Type Accuracy is the following:")
    for quesType in vqaEval.accuracy['perQuestionType']:
        print("%s : %.02f" % (quesType,
                              vqaEval.accuracy['perQuestionType'][quesType]))
    print("\n")
    print("Per Answer Type Accuracy is the following:")
    for ansType in vqaEval.accuracy['perAnswerType']:
        print("%s : %.02f" % (ansType,
                              vqaEval.accuracy['perAnswerType'][ansType]))
    print("\n")
Example #3
    def runVal(self, valReader, nEpoch, is_training=True):
        """Evaluates performance on the validation set.
        Args:
            valReader: input processor yielding validation batches
            nEpoch: current epoch index (for logging by the caller)
            is_training: kept for interface symmetry; evaluation runs
                with dropout disabled regardless
        Returns:
            valAcc, correct_predictions, total_predictions, the official
            VQA accuracy ('overall'), and the mean validation loss
        """
        accuracies, res, val_losses = [], [], []
        correct_predictions, total_predictions = 0., 0.
        for qnAsWordIDsBatch, seqLens, img_vecs, labels, rawQns, img_ids, qn_ids in \
            valReader.getNextBatch(self.config.batch_size):
            feed = {
                self.word_ids: qnAsWordIDsBatch,
                self.sequence_lengths: seqLens,
                self.img_vecs: img_vecs,
                self.labels: labels,
                self.dropout: 1.0  # keep probability 1.0: disable dropout at eval
            }
            val_loss, labels_pred = self.sess.run(
                [self.loss, self.labels_pred], feed_dict=feed)

            for lab, labPred, qn, img_id, qn_id in zip(labels, labels_pred,
                                                       rawQns, img_ids,
                                                       qn_ids):
                if lab == labPred:
                    correct_predictions += 1
                total_predictions += 1
                accuracies.append(lab == labPred)

                # record the prediction in the official VQA results format
                res.append({
                    'question_id': qn_id,
                    'answer': self.classToAnsMap[labPred]
                })

            # guard against NaN losses so they don't corrupt the epoch mean
            if not math.isnan(val_loss):
                val_losses.append(val_loss)

        epoch_valLoss = np.mean(val_losses)
        valAcc = np.mean(accuracies)
        vqaRes = self.vqa.loadRes(res, self.config.originalValQns)
        vqaEval = VQAEval(self.vqa, vqaRes, n=2)
        vqaEval.evaluate()
        return (valAcc, correct_predictions, total_predictions,
                vqaEval.accuracy['overall'], epoch_valLoss)
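A typical call site for runVal, assuming a training loop shaped like the other examples; runTrainEpoch and saveModel are illustrative names, not confirmed project APIs:

# Hypothetical per-epoch evaluation: keep the checkpoint with the best
# official VQA accuracy.
bestVqaAcc = 0.0
for nEpoch in range(config.nEpochs):
    model.runTrainEpoch(trainReader)  # assumed training step
    valAcc, nCorrect, nTotal, vqaAcc, valLoss = model.runVal(
        valReader, nEpoch, is_training=False)
    print('Epoch {}: acc={:.4f}, vqaAcc={:.2f}, loss={:.4f}'.format(
        nEpoch, valAcc, vqaAcc, valLoss))
    if vqaAcc > bestVqaAcc:
        bestVqaAcc = vqaAcc
        model.saveModel()  # assumed checkpoint helper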
Example #4
def validateInternalTestSet(args, model=None, restoreModelPath=None):
    from vqaTools.vqaInternal import VQA
    from vqaTools.vqaEval import VQAEval
    
    #config = Attention_LapConfig(load=True, args=args)
    config = Attention_GPUConfig(load=True, args=args)
    
    print('Running Validation Test on Model')
    valTestReader = AttModelInputProcessor(config.testAnnotFile,
                                           config.rawQnValTestFile,
                                           config.valImgFile,
                                           config,
                                           is_training=False)
    if restoreModelPath is None:
        restoreModel = config.restoreModel
        restoreModelPath = config.restoreModelPath
    else:
        # an explicit path is only meaningful with a caller-supplied model
        restoreModel = None

    if model is None:
        if restoreModel is None:
            raise ValueError(
                'model must be provided when restoreModelPath is overridden')
        if args.att == 'qn':
            print('Attention over question and image model')
            model = QnAttentionModel(config)
        elif args.att == 'im':
            print('Attention over image model')
            model = ImageAttentionModel(config)
        else:
            raise ValueError('Unknown attention type: {}'.format(args.att))
        model.loadTrainedModel(restoreModel, restoreModelPath)
        
    predFile = '{}PredsAtt{}.csv'.format(restoreModelPath, args.att)
    results, strictAcc = model.runPredict(valTestReader, predFile)
    model.destruct()
    valTestReader.destruct()
    print('predictions made')
    
    vqa = VQA(config.testAnnotFileUnresolved, config.originalValQns)
    vqaRes = vqa.loadRes(results, config.originalValQns)
    vqaEval = VQAEval(vqa, vqaRes, n=2)
    vqaEval.evaluate() 
    
    print('Writing to file...')
    writeToFile(vqaEval, restoreModelPath, vqa, vqaRes, args, strictAcc)
    print('Internal test complete')
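Wiring this up from the command line might look like the following; only --att is confirmed by the code above (it must be 'qn' or 'im'), and any further flags the config classes read are assumptions:

import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # 'qn' = attention over question and image, 'im' = attention over image
    parser.add_argument('--att', choices=['qn', 'im'], default='im')
    args = parser.parse_args()
    validateInternalTestSet(args)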
Example #5
def internalValTest(args):
    import sys
    #sys.path.insert(0, '/home/jwong/Documents/LinuxWorkspace/Visual-Question-Answering')
    from vqaTools.vqaInternal import VQA
    from vqaTools.vqaEval import VQAEval
    
    config = Attention_LapConfig(load=False, args=args)
    annFile = config.originalAnnotVal
    quesFile = config.valTestQns
    resFile = 'testResFile.json'
    
    vqa = VQA(annFile, quesFile)
    vqaRes = vqa.loadRes(resFile, quesFile)
    
    vqaEval = VQAEval(vqa, vqaRes, n=2)
    vqaEval.evaluate() 
    
    # print accuracies
    print("\n")
    print("Overall Accuracy is: %.02f\n" % vqaEval.accuracy['overall'])
    print("Per Question Type Accuracy is the following:")
    for quesType in vqaEval.accuracy['perQuestionType']:
        print("%s : %.02f" % (quesType,
                              vqaEval.accuracy['perQuestionType'][quesType]))
    print("\n")
    print("Per Answer Type Accuracy is the following:")
    for ansType in vqaEval.accuracy['perAnswerType']:
        print("%s : %.02f" % (ansType,
                              vqaEval.accuracy['perAnswerType'][ansType]))
    print("\n")
    
    # demo: use evalQA to retrieve low-scoring results
    # (evalQA maps question id -> per-question percentage accuracy; 35 is the cutoff)
    evals = [quesId for quesId in vqaEval.evalQA if vqaEval.evalQA[quesId] < 35]
    if len(evals) > 0:
        print('ground truth answers')
        randomEval = random.choice(evals)
        print('RandomEval {}'.format(randomEval))
        randomAnn = vqa.loadQA(randomEval)
        qns, answers = vqa.showQA(randomAnn)
        print(qns)
        print(answers)
        img_ids = vqa.getImgIds(quesIds=[randomEval])
        print(img_ids)

        print('\n')
        print('generated answer (accuracy %.02f)' % vqaEval.evalQA[randomEval])
        ann = vqaRes.loadQA(randomEval)[0]
        print("Answer:   %s\n" % ann['answer'])
    
        #imgId = randomAnn[0]['image_id']
        #imgFilename = 'COCO_' + dataSubType + '_'+ str(imgId).zfill(12) + '.jpg'
        #if os.path.isfile(imgDir + imgFilename):
        #    I = io.imread(imgDir + imgFilename)
        #    plt.imshow(I)
        #    plt.axis('off')
        #    plt.show()
    
    # plot accuracy for various question types
    perQType = vqaEval.accuracy['perQuestionType']
    plt.bar(range(len(perQType)), list(perQType.values()), align='center')
    plt.xticks(range(len(perQType)), list(perQType.keys()),
               rotation=0, fontsize=10)
    plt.title('Per Question Type Accuracy', fontsize=10)
    plt.xlabel('Question Types', fontsize=10)
    plt.ylabel('Accuracy', fontsize=10)
    plt.show()
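On a headless GPU box plt.show() will fail or block; a common alternative is to select a non-interactive backend and save the figure instead:

import matplotlib
matplotlib.use('Agg')  # must run before importing pyplot
import matplotlib.pyplot as plt

# ...build the bar chart as above, then:
plt.savefig('perQuestionTypeAccuracy.png', bbox_inches='tight')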