Example #1
def _test_RCNN2Labels():
    inDir = '/mnt/Ext/data/'
    db.connect()
    #convert to frames and store in a temp directory
    imProd = imc.File2Im()
    bgr    = imc.RGB2BGR()
    #run rcnn on these frames
    prms   = cfg.set_rcnn_prms(trainDataSet='coco', netName='vgg16-coco-rcnn')
    print cfg.get_rcnn_prms(**prms)
    rcnn   = cc.Im2RCNNDet(prms)
    # glue to Jpkg
    jpkg   = RCNN2Labels()
        
    for session in NSVideoSession.fetch_many(query={'excluded_status': 'MANUAL_STEP_1'}, count=3):
        vidName = osp.join(inDir, session.storage_path)
        #tmp directory to store the frames
        tmpDir  = '/tmp/NSBackEnd/alg_output'
        hashStr = hashlib.sha256(vidName).hexdigest()
        hashDir = osp.join(tmpDir, hashStr)
        vid2im = imc.Video2ImNames({'op_dir': hashDir, 'vidName': vidName})
        # run chain
        chain1    = ch.Chainer([vid2im, imProd, bgr, rcnn])
        chain1Out = chain1.produce(vidName)
        chain2    = ch.Chainer([jpkg]) # separate to obtain aggregated output of chain1
        allDet = []
        while chain1Out:
            allDet.append(chain1Out)
            chain1Out = chain1.produce(vidName)
        chain2Out = chain2.produce(allDet)
        # save output to database
        session.save_annotations(chain2Out, {}, NSExecutorType.ALGORITHM, 'rcnn_' + cfg.get_rcnn_prms()['trainDataSet'], NSProcessStage.AUTO_STEP_1)
        
    os.system('rm -r ' + tmpDir + '/*')
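
The while-loop above drains chain1 by calling produce() repeatedly until it returns a falsy value. A minimal sketch of that pattern as a standalone helper (the helper name is hypothetical, not part of the chainer API):

def drain_chain(chain, inp):
    # hypothetical helper: collect every output a Chainer yields for one input,
    # stopping when produce() returns a falsy value (as the loop above does)
    outputs = []
    out = chain.produce(inp)
    while out:
        outputs.append(out)
        out = chain.produce(inp)
    return outputs

# allDet = drain_chain(chain1, vidName) would replace the while-loop above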
Example #2
def _test_mAP_pipeline():
    inDir = '/mnt/Ext/data/'
    db.connect()
    #convert to frames and store in a temp directory
    imProd = imc.File2Im()
    bgr    = imc.RGB2BGR()
    #run rcnn on these frames
    prms   = cfg.set_rcnn_prms(trainDataSet='coco', netName='vgg16-coco-rcnn')
    print cfg.get_rcnn_prms(**prms)
    rcnn   = cc.Im2RCNNDet(prms)
    # glue to Jpkg
    jlbl   = RCNN2Labels()
    fmt    = MetricList2FormatMetricList()
    scoredImDir = []
    sessionQuery={'required_status': 'MANUAL_STEP_1|CHECKED'}
        
    for session in NSVideoSession.fetch_many(query=sessionQuery, count=12):
        seshCheck = [session] # inside mutable object because no nonlocal keyword in python2
        while not seshCheck[0].annotations:
            seshCheck[0] = NSVideoSession.fetch_one(query=sessionQuery) 
        session = seshCheck[0]
        vidName = osp.join(inDir, session.storage_path)
        #tmp directory to store the frames
        tmpDir  = '/tmp/NSBackEnd/alg_output'
        hashStr = hashlib.sha256(vidName).hexdigest()
        hashDir = osp.join(tmpDir, hashStr)
        vid2im  = imc.Video2ImNames({'op_dir': hashDir, 'vidName': vidName})
        # chain 1: vid --> rcnn detections
        chain1    = ch.Chainer([vid2im, imProd, bgr, rcnn])
        chain1Out = chain1.produce(vidName)
        # separate chains to obtain aggregated output of chain1
        # chain2: detections --> mAP format
        pred      = Labels2MetricList()
        chain2    = ch.Chainer([jlbl, pred]) 
        allDet = []
        while chain1Out:
            allDet.append(chain1Out)
            chain1Out = chain1.produce(vidName)
        chain2Out = chain2.produce(allDet)
        # chain3: annotations to mAP format
        act       = Labels2MetricList()
        chain3    = ch.Chainer([act])
        chain3Out = chain3.produce(session.annotations)
        
        # chain4: mAP calc
        mapget    = mc.Labels2mAP({'object': 'Person', 'occlusion': None})
        chain4    = ch.Chainer([fmt, mapget])
        chain4Out = chain4.produce([chain3Out, chain2Out])
        print 'mAP at {0}:\n{1}'.format(vidName, chain4Out)
        scoredImDir.append((hashDir, chain2Out, chain4Out))

    viser   = vc.VismAP({'outDir': 'successes_failures', 'n': 3})
    viser.produce(scoredImDir)
    os.system('rm -r ' + tmpDir + '/*')
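
The seshCheck loop above keeps re-fetching until it finds a session that has annotations. Read as a small helper, the same logic looks like this (a sketch using only the fetch_one/annotations interface already shown; the helper name is hypothetical):

def fetch_annotated_session(query, candidate):
    # hypothetical helper: re-fetch until a session with annotations turns up
    session = candidate
    while not session.annotations:
        session = NSVideoSession.fetch_one(query=query)
    return session

# session = fetch_annotated_session(sessionQuery, session) would replace the
# seshCheck workaround above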
Example #3
def _test_Labels2RCNNTrainTxt():
    inDir = '/mnt/Ext/data/'
    prms   = cfg.set_rcnn_prms(trainDataSet='coco', netName='vgg16-coco-rcnn')
    print cfg.get_rcnn_prms(**prms)
    # rcnn   = cc.Im2RCNNDet(prms)
    txtProd = Labels2RCNNTrainTxt(prms)
    db.connect()
    # pass a count argument if you do not wish to build the text file from every labeled video
    for session in NSVideoSession.fetch_many(query={'required_status': 'MANUAL_STEP_1|CHECKED'}):
        print txtProd._convert_one(vidName=osp.join(inDir, session.storage_path),
                                   labels=session.annotations,
                                   height=session.height,
                                   width=session.width)
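
Concretely, the count argument mentioned in the comment above caps how many labeled videos go into the text file; a sketch with an arbitrary cap of 5:

for session in NSVideoSession.fetch_many(query={'required_status': 'MANUAL_STEP_1|CHECKED'},
                                         count=5):
    print txtProd._convert_one(vidName=osp.join(inDir, session.storage_path),
                               labels=session.annotations,
                               height=session.height,
                               width=session.width)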
Example #4
    def __init__(self, prms={}):
        # define location for output file and image folder
        try:
            self.outFile = prms['outFile']
        except KeyError:
            outFolder = '/mnt/Ext/training_data/rcnn' 
            dateExt = cuu.get_datetime_ext()
            self.outFile = osp.join(outFolder, dateExt, 'rcnn_' + dateExt + '.txt')
            os.system('mkdir -p ' + osp.dirname(self.outFile))
            prms['outFile'] = self.outFile
        try:
            self.imageFolder = prms['imageFolder']
        except KeyError:
            self.imageFolder = osp.join(osp.dirname(self.outFile), 'imageFolder')
            os.system('mkdir -p ' + self.imageFolder)
            prms['imageFolder'] = self.imageFolder

        # define dataset used for pretraining
        self.pretrainDataSet = cfg.get_rcnn_prms(**prms)['trainDataSet']
        
        # match available labels with available classes from the pretraining dataset
        clsNames = cfg.dataset2classnames(self.pretrainDataSet)
        labeledObjs = cfg.get_label_types(category='object')
        self.clsLookup = {}
        for obj in labeledObjs:
            try:
                self.clsLookup[obj] = clsNames.index(cfg.label2dataset(obj, dataset=self.pretrainDataSet))
            except ValueError as e:
                print e
        ch.ChainObject.__init__(self, prms)
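
This constructor appears to back the Labels2RCNNTrainTxt object used in Example #3; a usage sketch that overrides the date-stamped defaults (the /tmp paths are placeholders, not the project's conventions):

# when the paths are supplied explicitly, the constructor does not mkdir them
# (see the except branches above), so they must already exist
txtProd = Labels2RCNNTrainTxt({'outFile': '/tmp/rcnn_train/train.txt',
                               'imageFolder': '/tmp/rcnn_train/images',
                               'trainDataSet': 'coco'})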
Example #5
	def __init__(self, prms={}):
		prms = edict(cfg.get_rcnn_prms(**prms))
		ch.ChainObject.__init__(self, prms)
		self.setup_net()
Example #6
File: sweep.py  Project: pulkitag/chainer
import yaml
import metric_chains as mc
import config as cfg
from ns_backend import db, NSVideoSession

# setup input/output
inDir  = '/mnt/Ext/data/'
outDir = 'output/'
prmsFp = 'input.sweep'
fileDict = yaml.safe_load(open(prmsFp, 'r'))
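# the sweep parameters are splatted into cfg.set_rcnn_prms below, so a minimal
# input.sweep would parse to something like (values assumed here, mirroring the
# explicit calls in the earlier examples):
#   {'input': {'trainDataSet': 'coco', 'netName': 'vgg16-coco-rcnn'}}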
## Set up chains which only need 1-time setup 
#convert to frames and store in a temp directory
imProd = imc.File2Im()
bgr    = imc.RGB2BGR()
# set up RCNN
prms   = cfg.set_rcnn_prms(**fileDict['input'])
print cfg.get_rcnn_prms(**prms)
rcnn   = cc.Im2RCNNDet(prms)
# glue to Jpkg
jlbl   = jc.RCNN2Labels()
fmt    = jc.MetricList2FormatMetricList()

## Iterate through a sufficiently large sample of videos to make a good estimate
db.connect()
pPred  = []
pAct   = []
files  = []
count  = 0
for session in NSVideoSession.fetch_many(query={'required_status': 'MANUAL_STEP_1'}, count=30):
    if count == 15:
        break
    if not session.annotations:
Example #7
    def __init__(self, prms={}):
        prms = cfg.get_rcnn_prms(**prms)
        ch.ChainObject.__init__(self, prms)
        self.setup_net()
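
Examples #5 and #7 show the minimal constructor shared by the chain elements: resolve the RCNN parameters, initialize the ChainObject base, then build the net. Below is a sketch of the smallest possible chain element following that shape; it assumes elements expose a produce() method, which is inferred from how Chainer instances are driven in the earlier examples rather than confirmed by them:

class PassThrough(ch.ChainObject):
    # hypothetical no-op element; produce() as the element interface is an assumption
    def __init__(self, prms={}):
        ch.ChainObject.__init__(self, prms)

    def produce(self, ip):
        # hand the input through unchanged
        return ip

# ch.Chainer([PassThrough()]) would then simply echo whatever it is given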