def learnModel(data):
    """Learn an action-recognition model from the training samples in ``data``.

    Pipeline (each stage's result is cached in a .mat file and reloaded on
    later runs):
      1. Pass 1 -- for every annotated action interval (plus synthetic
         "no action" intervals) in every ``*.zip`` sample, run the dense
         trajectory extractor on the pre-cut action video and collect the
         per-feature HOG descriptors  -> ``Features.mat``.
      2. Quantize all descriptors into a bag-of-words vocabulary
         -> ``BOWFeatures.mat``.
      3. Pass 2 -- build one normalized word histogram per action interval
         and train a chi-squared-kernel SVM on (histogram, actionID) pairs.

    Parameters
    ----------
    data : str
        Directory containing the ``*.zip`` training samples.

    Returns
    -------
    The fitted ``SVC`` classifier (the learnt model).

    NOTE(review): this function relies on module-level globals that are not
    defined inside it (``descHOG``, ``D``, ``bowTraj``, ``denseTrajectoryExe``,
    ``trainingVideos``, ``denseFeatures``, ``NumActions``, ``getNoActionList``,
    ``ActionSample``) -- confirm they are initialized before calling.
    """
    print("Learning the model");
    # Get the list of training samples
    samples=os.listdir(data);
    #samples = ['Seq01.zip', 'Seq03.zip', 'Seq04.zip']; # Hard coded for experiments
    # Initialize the model
    #model=[];
    model = None
    yy = []                # class labels, one per action interval (filled in pass 2)
    ff = []                # leftover from commented-out frame-number bookkeeping; unused
    wordIDs = None         # BoW word id assigned to every feature vector
    words = None           # BoW vocabulary (cluster centroids)
    t1 = time()
    dataHOG = None         # all HOG descriptors stacked row-wise
    fMap = {}
    fMapS = []             # flat (actionID, start, end, featureIdx) rows, for saving to .mat
    featureVectorNum = 0   # running global index of the next feature vector
    # (actionID, startFrame, endFrame) -> list of feature-vector indices
    # (rebinds the plain dict created just above)
    fMap = defaultdict(list)
    print 'Training Set: ', samples

    # ------------------------------------------------------------------
    # Pass 1: dense-trajectory feature extraction.
    # Skipped entirely when the Features.mat cache already exists.
    # ------------------------------------------------------------------
    if not os.path.exists("Features.mat"):
        # Access to each sample
        for file in samples:   # NOTE(review): shadows the Py2 builtin ``file``
            if not file.endswith(".zip"):
                continue;
            print("\t Processing file " + file)
            # Create the object to access the sample
            smp=ActionSample(os.path.join(data,file));
            # ###############################################
            # USE Ground Truth information to learn the model
            # ###############################################
            # Get the list of actions for this frame
            actionList=smp.getActions();
            # Iterate for each action in this sample
            proc = {}      # Popen handle per actionID
            stdout = {}    # captured extractor stdout per actionID
            stderr = {}    # captured extractor stderr per actionID
            print actionList
            # Append synthetic "no action" intervals (label NumActions+1)
            # covering the gaps between annotated actions.
            sortedActionList = sorted(actionList)
            noActionList = getNoActionList(sortedActionList)
            for nal in noActionList:
                actionList.append([NumActions+1,nal[0],nal[1]])
            # continue
            for action in actionList:
                # Get the action ID, and start and end frames for the action
                actionID,startFrame,endFrame=action;
                print 'Action: ', actionID, '\t', 'startFrame: ', startFrame, '\t', 'endFrame:', endFrame
                #output = subprocess.check_output('/bin/ps')
                #print output
                #print denseTrajectoryExe, os.path.splitext(file)[0], startFrame, endFrame
                #cmd = []
                #cmd.append(denseTrajectoryExe)
                #seqn = os.path.splitext(file)[0];
                #cmd.append('training/train/%s/%s_color.mp4' % (seqn,seqn))
                #cmd.append('-S')
                #cmd.append(str(startFrame))
                #cmd.append('-E')
                #cmd.append(str(endFrame))
                #print cmd
                #proc[actionID] = Popen(cmd, stdout=PIPE, stderr=PIPE)
                #stdout[actionID], stderr[actionID] = proc[actionID].communicate()
                #for line in stdout[actionID]:
                #    print line,
                # NOTE: We use random predictions on this example, therefore,
                # nothing is done with the image. No model is learnt.
                # (stale note from the challenge skeleton -- features ARE
                #  extracted and a model IS trained below)
                # Iterate frame by frame to get the information to learn the model
                seqn = os.path.splitext(file)[0]
                # Pre-cut per-action video produced by an earlier step, and the
                # text file where the dense-trajectory features are cached.
                actionFileName = "output_%s_%d_frame%d-%d.avi" % (seqn,actionID,startFrame,endFrame)
                actionFileFullName = "%s%s%s%s%s" % (trainingVideos,os.path.sep,seqn, os.path.sep, actionFileName)
                featureFileName = "densetr_%s_%d_frame%d-%d.txt" % (seqn,actionID,startFrame,endFrame)
                featureFileFullName = "%s%s%s%s%s" % (denseFeatures,os.path.sep,seqn, os.path.sep, featureFileName)
                #if not os.path.exists(featureFileFullName):
                #    continue
                if not os.path.exists(actionFileFullName):
                    print actionFileFullName + ' not present'
                    continue
                if os.path.exists(featureFileFullName) and os.path.getsize(featureFileFullName) > 0:
                    # Features already extracted on a previous run.
                    print featureFileFullName, ' exists'
                elif endFrame - startFrame + 1 <= 15:
                    # Dense trajectories need a minimum clip length; skip extraction.
                    print featureFileFullName, 'too small'
                else:
                    # Run the external extractor and capture its stdout into the cache file.
                    fout = open(featureFileFullName,"w")
                    cmd = []
                    cmd.append(denseTrajectoryExe)
                    seqn = os.path.splitext(file)[0];
                    cmd.append(actionFileFullName)
                    print cmd
                    proc[actionID] = Popen(cmd, stdout=PIPE, stderr=PIPE)
                    #proc[actionID].stdout.flush()
                    #proc[actionID].stderr.flush()
                    stdout[actionID], stderr[actionID] = proc[actionID].communicate()
                    fout.write(stdout[actionID])
                    fout.close()
                #if not os.path.exists('Features.mat'):
                # Parse the cached feature file: one feature vector per line.
                # NOTE(review): in the 'too small' branch above the feature file
                # may never have been created, so this open() can raise IOError
                # -- confirm intended.
                fin = open(featureFileFullName,"r")
                for line in fin:
                    D.read(line)   # parse one dense-trajectory line into global D
                    #print 'featureVectorNum: ', featureVectorNum
                    # Remember which global feature indices belong to this interval.
                    fMap[(actionID, startFrame, endFrame)].append(featureVectorNum)
                    descHOG.append(D.HOG)
                    fMapSTuple = (actionID, startFrame, endFrame, featureVectorNum)
                    fMapS.append(fMapSTuple)
                    featureVectorNum = featureVectorNum + 1
                    #yy.append(actionID)
                    #ff.append(D.frameNum)
                    #break
                fin.close()
                #y = numpy.array(yy)
                #frameNum = numpy.array(ff)
                #break # TODO: remove
                #for numFrame in range(startFrame,endFrame):
                    # Get the image for this frame
                    #image=smp.getRGB(numFrame);
                    #img = cv2.cv.fromarray(image)
                    #print type(img)
                    #print actionName
                    #cv2.imshow(actionName,image)
                    #cv2.waitKey(10)
            # ###############################################
            # Remove the sample object
            del smp;
            #break # TODO: remove

    # Save pass-1 output to the cache, or load it from a previous run.
    if not os.path.exists('Features.mat'):
        if descHOG:
            dataHOG = scipy.vstack(tuple(descHOG))
            fMapSr = scipy.vstack(tuple(fMapS))
            #scipy.io.savemat('Features.mat', {'frameNum':frameNum,'y':y, 'HOG':dataHOG}, format='5')
            scipy.io.savemat('Features.mat', {'fMap':fMapSr, 'HOG':dataHOG}, format='5')
    else:
        dct = {}
        print 'Loading pre calculated features'
        scipy.io.loadmat('Features.mat',dct)
        #y = dct['y']
        dataHOG = dct['HOG']
        fMapSr = dct['fMap']
        # Rebuild the interval -> feature-index map from the flat saved rows.
        for t in fMapSr:
            fMap[(t[0],t[1],t[2])].append(t[3])
        #frameNum = dct['frameNum']
    t2 = time()
    print 'Dense Trajectory Feature Extraction: %f seconds' % (t2-t1)

    # ------------------------------------------------------------------
    # Bag-of-words quantization (cached in BOWFeatures.mat).
    # ------------------------------------------------------------------
    # Extract words
    if not os.path.exists("BOWFeatures.mat"):
        bowTraj.build(dataHOG,None,None,None)
        wordIDs = bowTraj.bowHOG.pred_labels   # nearest centroid per feature
        words = bowTraj.bowHOG.centroids       # vocabulary centroids
        #print wordIDs # nearest centroid for word
        #print words # centroids
        #t3 = time()
        #$print 'BoW build : %f seconds' % (t3-t2)
        #X = bowTraj.calcFeatures(dataHOG,None,None,None)
        #t4 = time()
        scipy.io.savemat('BOWFeatures.mat', {'words':words,'wordIDs':wordIDs}, format='5')
    else:
        dct2 = {}
        dct2 = scipy.io.loadmat('BOWFeatures.mat')
        wordIDs = dct2['wordIDs']
        words = dct2['words'] #centroids
    print 'words.shape', words.shape
    print 'wordIDs.shape', wordIDs.shape
    t3 = time()
    print 'Quantization into words : %f seconds' % (t3-t2)

    # ------------------------------------------------------------------
    # Pass 2: one normalized BoW histogram (and label) per action interval.
    # ------------------------------------------------------------------
    # Now we create feature vectors
    print 'Creating feature vectors'
    XX = []   # histogram rows, one per interval
    yy = []   # matching action labels
    print 'Training Set: ', samples
    for file in samples:
        if not file.endswith(".zip"):
            continue;
        print("\t Processing file " + file)
        # Create the object to access the sample
        smp=ActionSample(os.path.join(data,file));
        # Get the list of actions for this frame
        actionList=smp.getActions();
        # Same synthetic "no action" intervals as in pass 1, so the interval
        # keys match the fMap entries built there.
        noActionList = getNoActionList(sorted(actionList))
        for nal in noActionList:
            actionList.append([NumActions+1,nal[0],nal[1]])
        cnt = 0
        for action in actionList:
            cnt = cnt + 1
            # Get the action ID, and start and end frames for the action
            actionID,startFrame,endFrame=action;
            print 'PASS 2: ', cnt, ' : ', 'Action: ', actionID, '\t', 'startFrame: ', startFrame, '\t', 'endFrame:', endFrame
            h = numpy.zeros(bowTraj.vocszHOG)   # word histogram for this interval
            seqn = os.path.splitext(file)[0]
            featureFileName = "densetr_%s_%d_frame%d-%d.txt" % (seqn,actionID,startFrame,endFrame)
            featureFileFullName = "%s%s%s%s%s" % (denseFeatures,os.path.sep,seqn, os.path.sep, featureFileName)
            if not os.path.exists(featureFileFullName):
                # No features for this interval: keep the all-zero histogram
                # so X and y stay aligned.
                print featureFileFullName, ' does not exist'
                yy.append(actionID)
                XX.append(h)
                continue
            htot = 0
            if (actionID,startFrame,endFrame) in fMap:
                # print (actionID,startFrame,endFrame), fMap[(actionID,startFrame,endFrame)]
                # Count the BoW word of every feature extracted for this interval.
                for fID in fMap[(actionID,startFrame,endFrame)]:
                    idx = wordIDs[fID]
                    h[idx] = h[idx] + 1
                    htot = htot + 1
            if htot > 0:
                # L1-normalize the histogram.
                h = (1.0 / float(htot)) * h
            #print h
            yy.append(actionID)
            XX.append(h)

    # ------------------------------------------------------------------
    # Train the chi-squared-kernel SVM on (histogram, label) pairs.
    # ------------------------------------------------------------------
    X = scipy.vstack(tuple(XX))
    y = numpy.array(yy)
    #print X
    #print y
    #X = bowTraj.calcFeatures(dataActionHOG,None,None,None)
    t4 = time()
    print 'BoW histogram creation for training samples', (t4-t3)
    # sys.exit(0)
    # Create chi squared SVM kernel model
    clf = SVC(kernel=chi2_kernel)
    clf.fit(X,y)
    print clf
    t5 = time()
    # NOTE(review): the comma makes this print a (format-string, value) tuple;
    # '%' was probably intended.
    print 'SVM train : %f seconds', (t5-t4)
    #numpy.savez('model', X=X, y=y, clf=clf)
    #scipy.io.savemat('model.mat', {'X':X,'y':y,'clf':clf}, format='5')
    model = clf;
    #
    # Return the model
    return model;
NumActions = 11 data='./data/'; samples=os.listdir(data); # Initialize the model model=[]; #fourcc = cv2.VideoWriter_fourcc(*'XVID') # Access to each sample for file in samples: # Create the object to access the sample print file smp=ActionSample(os.path.join(data,file)); # Get the list of actions for this frame actionList=smp.getActions(); noActionList = getNoActionList(sorted(actionList)) print smp.getNumFrames() seqn=os.path.splitext(file)[0] name='trainingVideos/'+seqn if os.path.exists(name): pass else: os.mkdir(name) for action in noActionList: # Get the action ID, and start and end frames for the action actionID = NumActions + 1 startFrame,endFrame=action; print startFrame,endFrame # fourcc=int(smp.rgb.get(cv2.cv.CV_CAP_PROP_FOURCC)) w=int(smp.rgb.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH))