def _test_RCNN2Labels():
    inDir = '/mnt/Ext/data/'
    db.connect()
    # convert to frames and store in a temp directory
    imProd = imc.File2Im()
    bgr = imc.RGB2BGR()
    # run rcnn on these frames
    prms = cfg.set_rcnn_prms(trainDataSet='coco', netName='vgg16-coco-rcnn')
    print cfg.get_rcnn_prms(**prms)
    rcnn = cc.Im2RCNNDet(prms)
    # glue to Jpkg
    jpkg = RCNN2Labels()
    for session in NSVideoSession.fetch_many(query={'excluded_status': 'MANUAL_STEP_1'}, count=3):
        vidName = osp.join(inDir, session.storage_path)
        # tmp directory to store the frames
        tmpDir = '/tmp/NSBackEnd/alg_output'
        hashStr = hashlib.sha256(vidName).hexdigest()
        hashDir = osp.join(tmpDir, hashStr)
        vid2im = imc.Video2ImNames({'op_dir': hashDir, 'vidName': vidName})
        # run chain
        chain1 = ch.Chainer([vid2im, imProd, bgr, rcnn])
        chain1Out = chain1.produce(vidName)
        # separate chain to obtain the aggregated output of chain1
        chain2 = ch.Chainer([jpkg])
        allDet = []
        while chain1Out:
            allDet.append(chain1Out)
            chain1Out = chain1.produce(vidName)
        chain2Out = chain2.produce(allDet)
        # save output to database
        session.save_annotations(chain2Out, {}, NSExecutorType.ALGORITHM,
                                 'rcnn_' + cfg.get_rcnn_prms()['trainDataSet'],
                                 NSProcessStage.AUTO_STEP_1)
        os.system('rm -r ' + tmpDir + '/*')
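
# Both this test and _test_mAP_pipeline below drain chain1 by calling
# produce() until it returns a falsy value. A minimal sketch of that pattern
# as a shared helper, assuming Chainer.produce() returns a falsy value once
# the chain is exhausted (as the while-loops imply); _drain_chain is a
# hypothetical name, not part of the existing API.
def _drain_chain(chain, token):
    # collect every output the chain produces for `token`
    outs = []
    out = chain.produce(token)
    while out:
        outs.append(out)
        out = chain.produce(token)
    return outs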

def _test_mAP_pipeline():
    inDir = '/mnt/Ext/data/'
    db.connect()
    # convert to frames and store in a temp directory
    imProd = imc.File2Im()
    bgr = imc.RGB2BGR()
    # run rcnn on these frames
    prms = cfg.set_rcnn_prms(trainDataSet='coco', netName='vgg16-coco-rcnn')
    print cfg.get_rcnn_prms(**prms)
    rcnn = cc.Im2RCNNDet(prms)
    # glue to Jpkg
    jlbl = RCNN2Labels()
    fmt = MetricList2FormatMetricList()
    scoredImDir = []
    sessionQuery = {'required_status': 'MANUAL_STEP_1|CHECKED'}
    for session in NSVideoSession.fetch_many(query=sessionQuery, count=12):
        # wrapped in a mutable object because python2 has no nonlocal keyword
        seshCheck = [session]
        # refetch until we get a session that actually has annotations
        while not seshCheck[0].annotations:
            seshCheck[0] = NSVideoSession.fetch_one(query=sessionQuery)
        session = seshCheck[0]
        vidName = osp.join(inDir, session.storage_path)
        # tmp directory to store the frames
        tmpDir = '/tmp/NSBackEnd/alg_output'
        hashStr = hashlib.sha256(vidName).hexdigest()
        hashDir = osp.join(tmpDir, hashStr)
        vid2im = imc.Video2ImNames({'op_dir': hashDir, 'vidName': vidName})
        # chain 1: vid --> rcnn detections
        chain1 = ch.Chainer([vid2im, imProd, bgr, rcnn])
        chain1Out = chain1.produce(vidName)
        # chain 2: detections --> mAP format
        # (separate chains to obtain the aggregated output of chain1)
        pred = Labels2MetricList()
        chain2 = ch.Chainer([jlbl, pred])
        allDet = []
        while chain1Out:
            allDet.append(chain1Out)
            chain1Out = chain1.produce(vidName)
        chain2Out = chain2.produce(allDet)
        # chain 3: annotations --> mAP format
        act = Labels2MetricList()
        chain3 = ch.Chainer([act])
        chain3Out = chain3.produce(session.annotations)
        # chain 4: mAP calculation
        mapget = mc.Labels2mAP({'object': 'Person', 'occlusion': None})
        chain4 = ch.Chainer([fmt, mapget])
        chain4Out = chain4.produce([chain3Out, chain2Out])
        print 'mAP at {0}:\n{1}'.format(vidName, chain4Out)
        scoredImDir.append((hashDir, chain2Out, chain4Out))
    # visualize the best and worst scored videos across all sessions
    viser = vc.VismAP({'outDir': 'successes_failures', 'n': 3})
    viser.produce(scoredImDir)
    os.system('rm -r ' + tmpDir + '/*')
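
# The hashed frame directory and the `rm -r` cleanup recur in both tests
# above. Two minimal helper sketches under the same assumptions (tmpDir holds
# only the per-video hash directories of extracted frames); _frame_dir and
# _clear_tmp are hypothetical names, not part of the existing API.
import shutil

def _frame_dir(vidName, tmpDir='/tmp/NSBackEnd/alg_output'):
    # per-video frame directory, keyed by a hash of the video path
    return osp.join(tmpDir, hashlib.sha256(vidName).hexdigest())

def _clear_tmp(tmpDir='/tmp/NSBackEnd/alg_output'):
    # pure-python equivalent of os.system('rm -r ' + tmpDir + '/*'):
    # empties tmpDir without deleting tmpDir itself
    for name in os.listdir(tmpDir):
        path = osp.join(tmpDir, name)
        if osp.isdir(path):
            shutil.rmtree(path)
        else:
            os.remove(path)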

def _test_Labels2RCNNTrainTxt():
    inDir = '/mnt/Ext/data/'
    prms = cfg.set_rcnn_prms(trainDataSet='coco', netName='vgg16-coco-rcnn')
    print cfg.get_rcnn_prms(**prms)
    # rcnn = cc.Im2RCNNDet(prms)
    txtProd = Labels2RCNNTrainTxt(prms)
    db.connect()
    # add a count argument to avoid creating the text file from all labeled videos
    for session in NSVideoSession.fetch_many(query={'required_status': 'MANUAL_STEP_1|CHECKED'}):
        print txtProd._convert_one(vidName=osp.join(inDir, session.storage_path),
                                   labels=session.annotations,
                                   height=session.height,
                                   width=session.width)
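
# The comment in _test_Labels2RCNNTrainTxt notes that a count argument limits
# how many labeled videos feed the train txt. A sketch of that variant, using
# the same fetch_many(query=..., count=...) signature as the other tests;
# nSessions=5 is an arbitrary illustrative value.
def _test_Labels2RCNNTrainTxt_sample(nSessions=5):
    inDir = '/mnt/Ext/data/'
    prms = cfg.set_rcnn_prms(trainDataSet='coco', netName='vgg16-coco-rcnn')
    txtProd = Labels2RCNNTrainTxt(prms)
    db.connect()
    for session in NSVideoSession.fetch_many(query={'required_status': 'MANUAL_STEP_1|CHECKED'},
                                             count=nSessions):
        print txtProd._convert_one(vidName=osp.join(inDir, session.storage_path),
                                   labels=session.annotations,
                                   height=session.height,
                                   width=session.width)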

prmsFp = 'input.sweep'
fileDict = yaml.safe_load(open(prmsFp, 'r'))
inDir = '/mnt/Ext/data/'  # root directory of the stored videos

## Set up chains which only need one-time setup
# convert to frames and store in a temp directory
imProd = imc.File2Im()
bgr = imc.RGB2BGR()
# set up RCNN
prms = cfg.set_rcnn_prms(**fileDict['input'])
print cfg.get_rcnn_prms(**prms)
rcnn = cc.Im2RCNNDet(prms)
# glue to Jpkg
jlbl = jc.RCNN2Labels()
fmt = jc.MetricList2FormatMetricList()

## Iterate through a sufficiently large sample of videos to make a good estimate
db.connect()
pPred = []
pAct = []
files = []
count = 0
for session in NSVideoSession.fetch_many(query={'required_status': 'MANUAL_STEP_1'}, count=30):
    if count == 15:
        break
    if not session.annotations:
        continue
    count += 1
    vidName = osp.join(inDir, session.storage_path)
    # tmp directory to store the frames
    tmpDir = '/tmp/NSBackEnd/alg_output'
    hashStr = hashlib.sha256(vidName).hexdigest()
    hashDir = osp.join(tmpDir, hashStr)