def runValTest(args): #Val set's split -- test
    print('Running Val Test')
    config = Attention_LapConfig(load=True, args=args)

    valTestReader = TestProcessor(qnFile=config.valTestQns,
                                  imgFile=config.valImgFile,
                                  config=config)

    #valTestReader = TrainProcessors(config.testAnnotFile,
    #                                config.rawQnValTestFile,
    #                                config.valImgFile,
    #                                config,
    #                                is_training=False)

    if args.att == 'qn':
        print('Attention over question and image model')
        model = QnAttentionModel(config)
    elif args.att == 'im':
        print('Attention over image model')
        model = ImageAttentionModel(config)

    model.loadTrainedModel(config.restoreQnImAttModel,
                           config.restoreQnImAttModelPath)
    model.runTest(valTestReader, 'testResFile.json')
    model.destruct()
    valTestReader.destruct()
def runtrain(args):
    #config = Attention_LapConfig(load=True, args)
    config = Attention_GPUConfig(load=True, args=args)

    trainReader = AttModelInputProcessor(config.trainAnnotFile,
                                         config.rawQnTrain,
                                         config.trainImgFile,
                                         config,
                                         is_training=True)

    valReader = AttModelInputProcessor(config.valAnnotFile,
                                       config.rawQnValTestFile,
                                       config.valImgFile,
                                       config,
                                       is_training=False)

    if args.att == 'qn':
        print('Attention over question and image model')
        model = QnAttentionModel(config)
    elif args.att == 'im':
        print('Attention over image model')
        model = ImageAttentionModel(config)

    model.construct()
    model.train(trainReader, valReader, config.logFile)
    model.destruct()
    trainReader.destruct()
    valReader.destruct()

    return config
def visQnImgAtt(args): #args added: the function body reads args but took no parameter
    print('Running qn Visuals')
    config = Attention_LapConfig(load=True, args=args)

    reader = AttModelInputProcessor(config.testAnnotFile,
                                    config.rawQnValTestFile,
                                    config.valImgFile,
                                    config,
                                    is_training=False)

    #reader = AttModelInputProcessor(config.trainAnnotFile,
    #                                config.rawQnTrain,
    #                                config.trainImgFile,
    #                                config,
    #                                is_training=False)

    model = QnAttentionModel(config)
    saveData = True

    model.loadTrainedModel(config.restoreQuAttSigmoidModel,
                           config.restoreQuAttSigmoidModelPath)
    #model.loadTrainedModel(config.restoreQnImAttModel,
    #                       config.restoreQnImAttModelPath)

    qnAlphas, alphas, img_ids, qns, preds, topk, labs = model.runPredict(
        reader, config.csvResults, 200, mini=True, chooseBatch=30)
    model.destruct()

    out = OutputGenerator(config.valImgPaths)
    #out = OutputGenerator(config.trainImgPaths)
    #out.displayQnImgAttention(qnAlphas, alphas, img_ids, qns, preds, topk, labs, saveData)
    out.displayQnImgAttSaveSplit(qnAlphas, alphas, img_ids, qns, preds, topk, labs, saveData)
def predAnalysis(args):
    print('Running Val Test')
    predFile = 'Pred_QnAtt47.9.csv'
    config = Attention_GPUConfig(load=True, args=args)

    valTestReader = AttModelInputProcessor(config.testAnnotFile,
                                           config.rawQnValTestFile,
                                           config.valImgFile,
                                           config,
                                           is_training=False)

    model = QnAttentionModel(config)
    model.loadTrainedModel(config.restoreModel, config.restoreModelPath)
    model.runPredict(valTestReader, predFile)
    model.destruct()
    valTestReader.destruct()
def visQnImgAtt(args): #args added; NOTE: this redefinition shadows the visQnImgAtt defined above
    print('Running qn Visuals')
    config = Attention_LapConfig(load=True, args=args)

    reader = AttModelInputProcessor(config.testAnnotFile,
                                    config.rawQnValTestFile,
                                    config.valImgFile,
                                    config,
                                    is_training=False)

    model = QnAttentionModel(config)
    model.loadTrainedModel(config.restoreQnImAttModel,
                           config.restoreQnImAttModelPath)

    qnAlphas, alphas, img_ids, qns, preds, topk = model.runPredict(
        reader, config.csvResults, 5, mini=True)
    model.destruct()

    out = OutputGenerator(config.valImgPaths)
    out.displayQnImgAttention(qnAlphas, alphas, img_ids, qns, preds, topk)
def runMetricsForInternalTestSet(args, restoreModel, restoreModelPath):
    print('Running metrics for model: {}'.format(restoreModel))
    #config = Attention_LapConfig(load=True, args)
    config = Attention_GPUConfig(load=True, args=args)
    print('Running Validation Test on Model')

    valTestReader = AttModelInputProcessor(config.testAnnotFile,
                                           config.rawQnValTestFile,
                                           config.valImgFile,
                                           config,
                                           is_training=False)

    #restoreModel = config.restoreModel
    #restoreModelPath = config.restoreModelPath

    if args.att == 'qn':
        print('Attention over question and image model')
        model = QnAttentionModel(config)
    elif args.att == 'im':
        print('Attention over image model')
        model = ImageAttentionModel(config)

    model.loadTrainedModel(restoreModel, restoreModelPath)
    lab, pred, classToAnsMap = model.runEvaluationMetrics(valTestReader)
    model.destruct()
    valTestReader.destruct()

    #run metrics & get stats
    listOfStats = runMetrics(lab, pred, classToAnsMap, restoreModelPath)

    #save to pickle
    data = {}
    data['labels'] = lab
    data['preds'] = pred
    data['classToAnsMap'] = classToAnsMap
    dateID = restoreModelPath.split('/')[-2]
    saveToPickle(data, 'labpreds{}.pkl'.format(dateID))

    print('Metrics Completed.')
    return listOfStats
def validateInternalTestSet(args, model=None, restoreModelPath=None):
    from vqaTools.vqaInternal import VQA
    from vqaTools.vqaEval import VQAEval

    #config = Attention_LapConfig(load=True, args)
    config = Attention_GPUConfig(load=True, args=args)
    print('Running Validation Test on Model')

    valTestReader = AttModelInputProcessor(config.testAnnotFile,
                                           config.rawQnValTestFile,
                                           config.valImgFile,
                                           config,
                                           is_training=False)

    if restoreModelPath is None:
        restoreModel = config.restoreModel
        restoreModelPath = config.restoreModelPath

    if model is None:
        if args.att == 'qn':
            print('Attention over question and image model')
            model = QnAttentionModel(config)
        elif args.att == 'im':
            print('Attention over image model')
            model = ImageAttentionModel(config)
        model.loadTrainedModel(restoreModel, restoreModelPath)

    predFile = '{}PredsAtt{}.csv'.format(restoreModelPath, args.att)
    results, strictAcc = model.runPredict(valTestReader, predFile)
    model.destruct()
    valTestReader.destruct()
    print('predictions made')

    vqa = VQA(config.testAnnotFileUnresolved, config.originalValQns)
    vqaRes = vqa.loadRes(results, config.originalValQns)
    vqaEval = VQAEval(vqa, vqaRes, n=2)
    vqaEval.evaluate()

    print('Writing to file..')
    writeToFile(vqaEval, restoreModelPath, vqa, vqaRes, args, strictAcc)
    print('Internal test complete')
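# A minimal sketch of a command-line entry point for the functions above, assuming
# argparse is used. Only '--att' is taken from the functions themselves (they branch
# on args.att == 'qn' / 'im'); the '--mode' flag and its choices are hypothetical
# illustration only, and the real config classes may read further attributes from args.
if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser(description='Attention VQA: train / test / predict')
    parser.add_argument('--att', choices=['qn', 'im'], default='qn',
                        help='Attention variant: question+image (qn) or image-only (im)')
    parser.add_argument('--mode', choices=['train', 'valtest', 'pred'], default='train',
                        help='Hypothetical dispatch flag, for illustration only')
    args = parser.parse_args()

    if args.mode == 'train':
        runtrain(args)
    elif args.mode == 'valtest':
        runValTest(args)
    elif args.mode == 'pred':
        predAnalysis(args)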