Example #1
def visQnImgAtt():
    # Visualise question and image attention over the validation/test split
    print('Running qn Visuals')
    config = Attention_LapConfig(load=True, args=args)
    reader = AttModelInputProcessor(config.testAnnotFile,
                                    config.rawQnValTestFile,
                                    config.valImgFile,
                                    config,
                                    is_training=False)
    
    # Alternative: read from the train split instead
    #reader = AttModelInputProcessor(config.trainAnnotFile,
    #                                config.rawQnTrain,
    #                                config.trainImgFile,
    #                                config,
    #                                is_training=False)
    
    model = QnAttentionModel(config)
    
    saveData = True
    
    model.loadTrainedModel(config.restoreQuAttSigmoidModel,
                           config.restoreQuAttSigmoidModelPath)
    
    # Alternative checkpoint:
    #model.loadTrainedModel(config.restoreQnImAttModel,
    #                       config.restoreQnImAttModelPath)
    qnAlphas, alphas, img_ids, qns, preds, topk, labs = model.runPredict(
        reader, config.csvResults, 200, mini=True, chooseBatch=30)
    model.destruct()
    
    out = OutputGenerator(config.valImgPaths)
    #out = OutputGenerator(config.trainImgPaths)
    #out.displayQnImgAttention(qnAlphas, alphas, img_ids, qns, preds, topk, labs, saveData)
    out.displayQnImgAttSaveSplit(qnAlphas, alphas, img_ids, qns, preds, topk, labs, saveData)
Example #2
def runValTest(args):
    # Val set's test split
    print('Running Val Test')
    config = Attention_LapConfig(load=True, args=args)
    
    valTestReader = TestProcessor(qnFile=config.valTestQns,
                                  imgFile=config.valImgFile,
                                  config=config)
    
    #valTestReader = TrainProcessors(config.testAnnotFile,
    #                                config.rawQnValTestFile,
    #                                config.valImgFile,
    #                                config,
    #                                is_training=False)
    
    if args.att == 'qn':
        print('Attention over question and image model')
        model = QnAttentionModel(config)
    elif args.att == 'im':
        print('Attention over image model')
        model = ImageAttentionModel(config)
    else:
        raise ValueError('Unknown --att value: {}'.format(args.att))
    model.loadTrainedModel(config.restoreQnImAttModel,
                           config.restoreQnImAttModelPath)
    model.runTest(valTestReader, 'testResFile.json')
    model.destruct()
    valTestReader.destruct()
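Note: model.runTest writes its predictions to testResFile.json, the same file that internalValTest (Example #8 below) loads and scores with the VQA evaluation tools.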
Example #3
def solve():
    print('Running solve')
    config = Attention_LapConfig(load=True, args=args)
    out = OutputGenerator(config.trainImgPaths)
    #img_id = raw_input('Img_id--> ')
    img_id = str(262415)
    img = Image.open(out.convertIDtoPath(str(img_id)))
    img.show()
    
    qn = raw_input('Question--> ')
    print(qn)
    model = ImageAttentionModel(config)
    model.loadTrainedModel(config.restoreModel, config.restoreModelPath) 
    alpha, pred = model.solve(qn, img_id)
    out.displaySingleOutput(alpha, img_id, qn, pred)
    
    # Usage: -a otest -r ./results/Att21Mar1334/att21Mar1334.meta -p ./results/Att21Mar1334/
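The usage string above suggests these entry points are selected from the command line; every example also reads a module-level args object that is passed into Attention_LapConfig. A minimal driver sketch, assuming argparse and treating the long flag names --action, --restorefile and --restorepath (plus the --att switch read by runValTest) as hypothetical:

import argparse

# Hypothetical flag names inferred from the usage string; the real parser may differ.
parser = argparse.ArgumentParser()
parser.add_argument('-a', '--action', help="entry point to run, e.g. 'otest'")
parser.add_argument('-r', '--restorefile', help='path to the saved .meta graph file')
parser.add_argument('-p', '--restorepath', help='directory holding the checkpoint')
parser.add_argument('--att', choices=['qn', 'im'], default='im',
                    help='attention variant used by runValTest')
args = parser.parse_args()

if args.action == 'otest':
    solve()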
Example #4
def solveqn():
    print('Running solve')
    config = Attention_LapConfig(load=True, args=args)
    out = OutputGenerator(config.valImgPaths)
    #img_id = raw_input('Img_id--> ')
    img_id = str(337826) #214587
    img = Image.open(out.convertIDtoPath(str(img_id)))
    img.show()
    model = QnAttentionModel(config)
    model.loadTrainedModel(config.restoreQnImAttModel, 
                           config.restoreQnImAttModelPath)
    processor = OnlineProcessor(config.valImgFile, config)
    
    for i in range(5):
        qn = raw_input('Question--> ')
        print(qn)
        qnalpha, alpha, pred = model.solve(qn, img_id, processor)
        out.displaySingleOutput(alpha, img_id, qn, pred)
Example #5
def runVisualise():
    print('Running Visuals')
    config = Attention_LapConfig(load=True, args=args)
    reader = AttModelInputProcessor(config.trainAnnotFile,
                                    config.rawQnTrain,
                                    config.trainImgFile,
                                    config,
                                    is_training=False)
    
    model = ImageAttentionModel(config)
    model.loadTrainedModel(config.restoreModel, config.restoreModelPath)
    alphas, img_ids, qns, preds = model.runPredict(
        reader, config.csvResults, 5, mini=True)
    model.destruct()
    reader.destruct()
    
    out = OutputGenerator(config.trainImgPaths)
    out.displayOutput(alphas, img_ids, qns, preds)
Example #6
def runVisualiseVal():
    print('Running Visuals')
    config = Attention_LapConfig(load=True, args=args)
    reader = AttModelInputProcessor(config.testAnnotFile,
                                    config.rawQnValTestFile,
                                    config.valImgFile,
                                    config,
                                    is_training=False)
    
    model = ImageAttentionModel(config)
    model.loadTrainedModel(config.restoreModel, config.restoreModelPath)
    alphas, img_ids, qns, preds, labels = model.runPredict(
        reader, config.csvResults, 180, mini=True, chooseBatch=0)
    model.destruct()
    reader.destruct()
    
    out = OutputGenerator(config.valImgPaths)
    out.displayEachSample(alphas, img_ids, qns, preds, labels, saveData=True)
Example #7
def visQnImgAtt():
    print('Running qn Visuals')
    config = Attention_LapConfig(load=True, args=args)
    reader = AttModelInputProcessor(config.testAnnotFile,
                                    config.rawQnValTestFile,
                                    config.valImgFile,
                                    config,
                                    is_training=False)
    
    model = QnAttentionModel(config)
    model.loadTrainedModel(config.restoreQnImAttModel, 
                           config.restoreQnImAttModelPath)
    qnAlphas, alphas, img_ids, qns, preds, topk = model.runPredict(
        reader, config.csvResults, 5, mini=True)
    model.destruct()
    
    out = OutputGenerator(config.valImgPaths)
    out.displayQnImgAttention(qnAlphas, alphas, img_ids, qns, preds, topk)
Example #8
def internalValTest(args):
    import sys
    import random
    import matplotlib.pyplot as plt
    #sys.path.insert(0, '/home/jwong/Documents/LinuxWorkspace/Visual-Question-Answering')
    from vqaTools.vqaInternal import VQA
    from vqaTools.vqaEval import VQAEval
    
    config = Attention_LapConfig(load=False, args=args)
    annFile = config.originalAnnotVal
    quesFile = config.valTestQns
    resFile = 'testResFile.json'
    
    vqa = VQA(annFile, quesFile)
    vqaRes = vqa.loadRes(resFile, quesFile)
    
    vqaEval = VQAEval(vqa, vqaRes, n=2)
    vqaEval.evaluate() 
    
    # print accuracies
    print "\n"
    print "Overall Accuracy is: %.02f\n" %(vqaEval.accuracy['overall'])
    print "Per Question Type Accuracy is the following:"
    for quesType in vqaEval.accuracy['perQuestionType']:
        print "%s : %.02f" %(quesType, vqaEval.accuracy['perQuestionType'][quesType])
    print "\n"
    print "Per Answer Type Accuracy is the following:"
    for ansType in vqaEval.accuracy['perAnswerType']:
        print "%s : %.02f" %(ansType, vqaEval.accuracy['perAnswerType'][ansType])
    print "\n"
    
    # demo: use evalQA to retrieve a low-scoring result
    evals = [quesId for quesId in vqaEval.evalQA if vqaEval.evalQA[quesId] < 35]   # 35 is per-question percentage accuracy
    if len(evals) > 0:
        print('ground truth answers')
        randomEval = random.choice(evals)
        print('RandomEval {}'.format(randomEval))
        randomAnn = vqa.loadQA(randomEval) 
        qns, answers = vqa.showQA(randomAnn) 
        print(qns)
        print(answers)
        img_ids = vqa.getImgIds(quesIds=[randomEval])
        print(img_ids)
    
        print('\n')
        print('generated answer (accuracy %.02f)' % (vqaEval.evalQA[randomEval]))
        ann = vqaRes.loadQA(randomEval)[0]
        print("Answer:   %s\n" % (ann['answer']))
    
        #imgId = randomAnn[0]['image_id']
        #imgFilename = 'COCO_' + dataSubType + '_'+ str(imgId).zfill(12) + '.jpg'
        #if os.path.isfile(imgDir + imgFilename):
        #    I = io.imread(imgDir + imgFilename)
        #    plt.imshow(I)
        #    plt.axis('off')
        #    plt.show()
    
    # plot accuracy for various question types
    plt.bar(range(len(vqaEval.accuracy['perQuestionType'])), vqaEval.accuracy['perQuestionType'].values(), align='center')
    plt.xticks(range(len(vqaEval.accuracy['perQuestionType'])), vqaEval.accuracy['perQuestionType'].keys(), rotation='0',fontsize=10)
    plt.title('Per Question Type Accuracy', fontsize=10)
    plt.xlabel('Question Types', fontsize=10)
    plt.ylabel('Accuracy', fontsize=10)
    plt.show()
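For reference, vqa.loadRes expects testResFile.json in the standard VQA results format: a JSON list with one {"question_id", "answer"} entry per question. A minimal sketch of producing such a file by hand, where predictions is a hypothetical mapping from question id to answer string:

import json

# predictions: hypothetical {question_id: answer} mapping produced by a model
predictions = {428395: 'yes', 428396: '2'}

results = [{'question_id': qid, 'answer': ans} for qid, ans in predictions.items()]
with open('testResFile.json', 'w') as f:
    json.dump(results, f)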