def get_model_api():
    config = Attention_LapConfig(load=True)
    model = ImageAttentionModel(config)
    model.loadTrainedModel(config.restoreModel, config.restoreModelPath)
    processor = OnlineProcessor(config.trainImgFile, config)
    outputGenerator = OutputGenerator(
        '/media/jwong/Transcend/VQADataset/TrainSet/trainImgPaths.txt')

    def model_api(input_qn, img_path, usePreprocessedImg=False, img_id=214587):
        if usePreprocessedImg:
            # e.g. /media/jwong/Transcend/VQADataset/TrainSet/Images_train2014/COCO_train2014_000000214587.jpg
            # call model predict function
            alpha, pred = model.solve(input_qn, str(img_id), processor)
            alp_imgName = outputGenerator.getSingleOutput(alpha, img_id, input_qn, pred)
            # e.g. /media/jwong/Transcend/VQADataset/TrainSet/Images_train2014/COCO_train2014_000000299333.jpg
            output_data = {"input": input_qn,
                           "ans": pred,
                           "alpha": alp_imgName}
            return output_data
        else:
            # call model predict function
            alpha, pred = model.solve(input_qn, img_path, processor)
            print('Prediction made: {}'.format(pred))
            alp_imgName = outputGenerator.getSingleOutput(alpha, img_path, input_qn, pred)
            print('Alpha map made: {}'.format(alp_imgName))
            output_data = {"input": 'Question: ' + input_qn,
                           "ans": 'Prediction: ' + pred,
                           "alpha": alp_imgName}
            return output_data

    # return lambda func
    return model_api
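# Example (illustrative sketch, not part of the original script): because
# get_model_api() returns a closure over an already-loaded model, it can be
# wired into a small web server so the model loads once at startup. This
# assumes Flask is installed and that this module is importable; the route
# and JSON field names below are hypothetical.
#
# from flask import Flask, request, jsonify
#
# app = Flask(__name__)
# model_api = get_model_api()  # load model once, reuse per request
#
# @app.route('/predict', methods=['POST'])
# def predict():
#     body = request.get_json()
#     output_data = model_api(body['question'], body['img_path'])
#     return jsonify(output_data)
#
# if __name__ == '__main__':
#     app.run(host='0.0.0.0', port=5000)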
def runtrain(args):
    #config = Attention_LapConfig(load=True, args)
    config = Attention_GPUConfig(load=True, args=args)

    trainReader = AttModelInputProcessor(config.trainAnnotFile,
                                         config.rawQnTrain,
                                         config.trainImgFile,
                                         config,
                                         is_training=True)
    valReader = AttModelInputProcessor(config.valAnnotFile,
                                       config.rawQnValTestFile,
                                       config.valImgFile,
                                       config,
                                       is_training=False)

    if args.att == 'qn':
        print('Attention over question and image model')
        model = QnAttentionModel(config)
    elif args.att == 'im':
        print('Attention over image model')
        model = ImageAttentionModel(config)

    model.construct()
    model.train(trainReader, valReader, config.logFile)
    model.destruct()
    trainReader.destruct()
    valReader.destruct()

    return config
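# Example (illustrative): runtrain expects an argparse-style namespace with at
# least an `att` flag ('qn' or 'im'); the real CLI parser likely defines more
# flags (e.g. the -a/-r/-p options in the usage string further down), so this
# minimal driver is an assumption:
#
# import argparse
# parser = argparse.ArgumentParser()
# parser.add_argument('--att', choices=['qn', 'im'], default='im')
# args = parser.parse_args()
# config = runtrain(args)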
def loadOfficialTest(args, restoreModel=None, restoreModelPath=None):
    #config = Attention_LapConfig(load=True, args)
    config = Attention_GPUConfig(load=True, args=args)

    testReader = TestProcessor(qnFile=config.testOfficialDevQns,
                               imgFile=config.testOfficialImgFeatures,
                               config=config)

    if args.att == 'qn':
        print('Attention over question and image model')
        model = QnAttentionModel(config)
    elif args.att == 'im':
        print('Attention over image model')
        model = ImageAttentionModel(config)

    if restoreModel is None:
        model.loadTrainedModel(config.restoreModel, config.restoreModelPath)
    else:
        model.loadTrainedModel(restoreModel, restoreModelPath)

    if restoreModelPath is None:
        testOfficialResultFile = config.testOfficialResultFile
    else:
        testOfficialResultFile = '{}AttSubmission.json'.format(restoreModelPath)

    model.runTest(testReader, testOfficialResultFile)
    testReader.destruct()
    print('Official test complete')
    return model
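# Illustrative call with an explicit checkpoint, mirroring the
# '-a otest -r <meta> -p <dir>' usage string shown after solve():
#
# model = loadOfficialTest(args,
#                          restoreModel='./results/Att21Mar1334/att21Mar1334.meta',
#                          restoreModelPath='./results/Att21Mar1334/')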
def runValTest(args):
    # Val set's split -- test
    print('Running Val Test')
    config = Attention_LapConfig(load=True, args=args)

    valTestReader = TestProcessor(qnFile=config.valTestQns,
                                  imgFile=config.valImgFile,
                                  config=config)
    #valTestReader = TrainProcessors(config.testAnnotFile,
    #                                config.rawQnValTestFile,
    #                                config.valImgFile,
    #                                config,
    #                                is_training=False)

    if args.att == 'qn':
        print('Attention over question and image model')
        model = QnAttentionModel(config)
    elif args.att == 'im':
        print('Attention over image model')
        model = ImageAttentionModel(config)

    model.loadTrainedModel(config.restoreQnImAttModel,
                           config.restoreQnImAttModelPath)
    model.runTest(valTestReader, 'testResFile.json')
    model.destruct()
    valTestReader.destruct()
def solve():
    print('Running solve')
    # NB: relies on a module-level `args` namespace parsed at script entry
    config = Attention_LapConfig(load=True, args=args)
    out = OutputGenerator(config.trainImgPaths)

    #img_id = raw_input('Img_id--> ')
    img_id = str(262415)
    img = Image.open(out.convertIDtoPath(str(img_id)))
    img.show()

    qn = raw_input('Question--> ')
    print(qn)

    model = ImageAttentionModel(config)
    model.loadTrainedModel(config.restoreModel, config.restoreModelPath)
    alpha, pred = model.solve(qn, img_id)
    out.displaySingleOutput(alpha, img_id, qn, pred)

'''
Usage:
-a otest -r ./results/Att21Mar1334/att21Mar1334.meta -p ./results/Att21Mar1334/
'''
def runVisualise():
    print('Running Visuals')
    config = Attention_LapConfig(load=True, args=args)

    reader = AttModelInputProcessor(config.trainAnnotFile,
                                    config.rawQnTrain,
                                    config.trainImgFile,
                                    config,
                                    is_training=False)

    model = ImageAttentionModel(config)
    model.loadTrainedModel(config.restoreModel, config.restoreModelPath)
    alphas, img_ids, qns, preds = model.runPredict(
        reader, config.csvResults, 5, mini=True)
    model.destruct()
    reader.destruct()

    out = OutputGenerator(config.trainImgPaths)
    out.displayOutput(alphas, img_ids, qns, preds)
def runVisualiseVal():
    print('Running Visuals (val set)')
    config = Attention_LapConfig(load=True, args=args)

    reader = AttModelInputProcessor(config.testAnnotFile,
                                    config.rawQnValTestFile,
                                    config.valImgFile,
                                    config,
                                    is_training=False)

    model = ImageAttentionModel(config)
    model.loadTrainedModel(config.restoreModel, config.restoreModelPath)
    alphas, img_ids, qns, preds, labels = model.runPredict(
        reader, config.csvResults, 180, mini=True, chooseBatch=0)
    model.destruct()
    reader.destruct()

    out = OutputGenerator(config.valImgPaths)
    out.displayEachSample(alphas, img_ids, qns, preds, labels, saveData=True)
def runMetricsForInternalTestSet(args, restoreModel, restoreModelPath):
    print('Running metrics for model: {}'.format(restoreModel))
    #config = Attention_LapConfig(load=True, args)
    config = Attention_GPUConfig(load=True, args=args)

    print('Running Validation Test on Model')
    valTestReader = AttModelInputProcessor(config.testAnnotFile,
                                           config.rawQnValTestFile,
                                           config.valImgFile,
                                           config,
                                           is_training=False)
    #restoreModel = config.restoreModel
    #restoreModelPath = config.restoreModelPath

    if args.att == 'qn':
        print('Attention over question and image model')
        model = QnAttentionModel(config)
    elif args.att == 'im':
        print('Attention over image model')
        model = ImageAttentionModel(config)

    model.loadTrainedModel(restoreModel, restoreModelPath)
    lab, pred, classToAnsMap = model.runEvaluationMetrics(valTestReader)
    model.destruct()
    valTestReader.destruct()

    # run metrics & get stats
    listOfStats = runMetrics(lab, pred, classToAnsMap, restoreModelPath)

    # save labels and predictions to pickle for later analysis
    data = {}
    data['labels'] = lab
    data['preds'] = pred
    data['classToAnsMap'] = classToAnsMap
    dateID = restoreModelPath.split('/')[-2]
    saveToPickle(data, 'labpreds{}.pkl'.format(dateID))

    print('Metrics Completed.')
    return listOfStats
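# Illustrative sweep (sketch): runMetricsForInternalTestSet can be called once
# per checkpoint to compare runs; the checkpoint paths below are placeholders.
#
# checkpoints = [
#     ('./results/Att21Mar1334/att21Mar1334.meta', './results/Att21Mar1334/'),
# ]
# allStats = [runMetricsForInternalTestSet(args, meta, path)
#             for meta, path in checkpoints]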
def validateInternalTestSet(args, model=None, restoreModelPath=None):
    from vqaTools.vqaInternal import VQA
    from vqaTools.vqaEval import VQAEval

    #config = Attention_LapConfig(load=True, args)
    config = Attention_GPUConfig(load=True, args=args)

    print('Running Validation Test on Model')
    valTestReader = AttModelInputProcessor(config.testAnnotFile,
                                           config.rawQnValTestFile,
                                           config.valImgFile,
                                           config,
                                           is_training=False)

    if restoreModelPath is None:
        restoreModel = config.restoreModel
        restoreModelPath = config.restoreModelPath

    if model is None:
        if args.att == 'qn':
            print('Attention over question and image model')
            model = QnAttentionModel(config)
        elif args.att == 'im':
            print('Attention over image model')
            model = ImageAttentionModel(config)
        # NOTE: restoreModel is only bound when restoreModelPath was None above,
        # so callers passing restoreModelPath should also pass a loaded model.
        model.loadTrainedModel(restoreModel, restoreModelPath)

    predFile = '{}PredsAtt{}.csv'.format(restoreModelPath, args.att)
    results, strictAcc = model.runPredict(valTestReader, predFile)
    model.destruct()
    valTestReader.destruct()
    print('predictions made')

    vqa = VQA(config.testAnnotFileUnresolved, config.originalValQns)
    vqaRes = vqa.loadRes(results, config.originalValQns)
    vqaEval = VQAEval(vqa, vqaRes, n=2)
    vqaEval.evaluate()

    print('Writing to file..')
    writeToFile(vqaEval, restoreModelPath, vqa, vqaRes, args, strictAcc)
    print('Internal test complete')
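# Illustrative end-to-end flow (sketch): train, then score the internal test
# split with the official VQA accuracy metric. With no model or path supplied,
# validateInternalTestSet falls back to config.restoreModel/restoreModelPath.
#
# config = runtrain(args)
# validateInternalTestSet(args)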