import glob
import sys
from datetime import datetime, timedelta

import cv2
import numpy as np

import imageTools

numberofwells = 96  # 8 rows x 12 columns

def main(pixThreshold, frameRate, videoStream):
    expDuration = 600000  # duration of experiment, in seconds; only relevant for live feed
    saveFreq = 4500  # how often to save data, in frames
    i, m = imageTools.loadImageAndMask()

    # convert mask to integer values for bincount weights
    m, w = imageTools.convertMaskToWeights(m)

    # start camera or open video
    videoType, displayDiffs = imageTools.getVideoType(videoStream)
    cap = cv2.VideoCapture(videoStream)

    # adjust video resolution if necessary (sized to mask)
    print 'Camera resolution is %s x %s' % (str(m.shape[1]), str(m.shape[0]))
    cap.set(3, m.shape[1])
    cap.set(4, m.shape[0])

    # set pixel threshold from the first frame
    ret, frame = cap.read()
    storedFrame = imageTools.grayBlur(frame)
    pixThreshold = int(np.floor(pixThreshold * storedFrame.shape[0]))
    print 'PixelThreshold is %i' % pixThreshold

    pixData = np.zeros([saveFreq, len(np.unique(w)) + 1])
    i = 0  # a counter for saving chunks of data
    totalFrames = 0
    startTime = datetime.now()
    oldTime = startTime
    elapsed = 0
    print 'Analyzing motion data...'

    moviedeq = []
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            print 'End of Video'
            break
        currentFrame = imageTools.grayBlur(frame)
        moviedeq.append(currentFrame)

        # record pixel differences in all of the ROIs
        diff = imageTools.diffImage(storedFrame, currentFrame, pixThreshold, displayDiffs)
        timeDiff = 1. / frameRate
        elapsed = elapsed + timeDiff
        counts = np.bincount(w, weights=diff.ravel())
        pixData[i, :] = np.hstack((elapsed, counts))
        totalFrames += 1
        storedFrame = currentFrame  # comment out if nothing is in first frame
        i += 1

    # done recording: drop unused rows, then the timing and background columns
    pixData = pixData[:i, :]
    pixData = pixData[:, 2:]

    outfile = open(videoStream + ".motion2", 'w')
    outfile.write("12/8/2015" + '\r')
    for x in range(0, 285):
        for y in range(0, 96):
            outfile.write(str(int(pixData[x, :][y])) + '\n')
    outfile.close()

    cap.release()
    cv2.destroyAllWindows()
    vidInfo = {}
    return vidInfo
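# The .motion2 file written above is a flat text file: a date-stamp header
# followed by 285 frames x 96 wells of integer motion counts, one value per
# line. Below is a minimal reader sketch; the helper name read_motion2 and
# the default shape are assumptions taken from the write loop above, not
# part of the original scripts.
def read_motion2(path, n_frames=285, n_wells=96):
    # 'rU' (universal newlines) so the date stamp, which the writer ends
    # with a bare carriage return, is treated as its own line
    with open(path, 'rU') as f:
        f.readline()  # skip the date-stamp header
        values = [int(line) for line in f if line.strip()]
    return np.array(values).reshape(n_frames, n_wells)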
def main(pixThreshold, frameRate, videoStream):
    expDuration = 600000  # duration of experiment, in seconds; only relevant for live feed
    saveFreq = 4500  # how often to save data, in frames
    i, m = imageTools.loadImageAndMask()

    # convert mask to integer values for bincount weights
    m, w = imageTools.convertMaskToWeights(m)

    # start camera or open video
    videoType, displayDiffs = imageTools.getVideoType(videoStream)
    cap = cv2.VideoCapture(videoStream)

    # adjust video resolution if necessary (sized to mask)
    print 'Camera resolution is %s x %s' % (str(m.shape[1]), str(m.shape[0]))
    cap.set(3, m.shape[1])
    cap.set(4, m.shape[0])

    # set pixel threshold from the first frame
    ret, frame = cap.read()
    storedFrame = imageTools.grayBlur(frame)
    pixThreshold = int(np.floor(pixThreshold * storedFrame.shape[0]))
    print 'PixelThreshold is %i' % pixThreshold

    # acquire data; use the shorter of expDuration vs. saveFreq
    if saveFreq / frameRate > expDuration:
        saveFreq = expDuration * frameRate

    pixData = np.zeros([saveFreq, len(np.unique(w)) + 1])
    i = 0  # a counter for saving chunks of data
    totalFrames = 0
    startTime = datetime.now()
    oldTime = startTime
    elapsed = 0
    print 'Analyzing motion data...'

    moviedeq = []
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            print 'End of Video'
            break
        currentFrame = imageTools.grayBlur(frame)
        moviedeq.append(currentFrame)
        # (chunked saving of pixData to timestamped .npy files is disabled in this version)

        # stop experiment if user presses 'q' or if experiment duration is up
        if (cv2.waitKey(1) & 0xFF == ord('q') or
                len(sys.argv) == 1 and datetime.now() > startTime + timedelta(seconds=expDuration)):
            break

        # record pixel differences in all of the ROIs
        diff = imageTools.diffImage(storedFrame, currentFrame, pixThreshold, displayDiffs)
        timeDiff = 1. / frameRate
        elapsed = elapsed + timeDiff

        # calculate and record pixel differences per ROI
        counts = np.bincount(w, weights=diff.ravel())
        pixData[i, :] = np.hstack((elapsed, counts))
        totalFrames += 1
        storedFrame = currentFrame  # comment out if nothing is in first frame
        i += 1

    # done recording: remove empty rows (those bigger than i) from pixData,
    # then drop the timing column and the background column
    pixData = pixData[:i, :]
    pixData = pixData[:, 2:]

    outfile = open(videoStream + ".motion2", 'w')
    outfile.write("12/8/2015" + '\r')
    for x in range(0, 285):
        for y in range(0, 96):
            outfile.write(str(int(pixData[x, :][y])) + '\n')
    outfile.close()

    # save info (frame count, threshold, resolution) for later use
    vidInfo = {}
    vidInfo['TotalFrames'] = totalFrames
    vidInfo['pixThreshold'] = pixThreshold
    vidInfo['CameraResolution'] = '%s x %s' % (str(m.shape[1]), str(m.shape[0]))
    vidInfo['NamePrefix'] = videoStream
    print 'Motion threshold is %i pixels' % int(pixThreshold)
    print 'Camera resolution is %s' % vidInfo['CameraResolution']

    # release camera
    cap.release()
    cv2.destroyAllWindows()
    return vidInfo
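# A minimal command-line entry point sketch for the main() variants in this
# file; the argument order (pixThreshold, frameRate, videoStream) matches the
# function signature, but the parsing itself is an assumption, not taken from
# the original scripts. Note that the live-feed timeout branch inside main()
# only triggers when no arguments are passed (len(sys.argv) == 1).
if __name__ == '__main__':
    # usage: python thisScript.py <pixThreshold> <frameRate> <videoStream>
    main(float(sys.argv[1]), int(sys.argv[2]), sys.argv[3])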
def main(pixThreshold, frameRate, videoStream):
    rowold = {1: 0, 2: 12, 3: 24, 4: 36, 5: 48, 6: 60, 7: 72, 8: 84}
    row = {0: 0, 1: 12, 2: 24, 3: 36, 4: 48, 5: 60, 6: 72, 7: 84}
    expDuration = 600000  # duration of experiment, in seconds; only relevant for live feed
    saveFreq = 4500  # how often to save data, in frames

    i, m = imageTools.loadImageAndMask()
    e = imageTools.loadModeImage()

    # build a rectangular ROI mask from the per-well bounding boxes
    roimask = np.zeros((660, 1088))
    (maxxysnp, minxysnp) = max_min()
    print "maxxysnp: ", maxxysnp
    print "minxysnp: ", minxysnp
    maxxs = []
    minxs = []
    maxys = []
    minys = []
    for j in range(0, numberofwells * 2, 2):
        maxx = maxxysnp[j]
        maxxs.append(maxxysnp[j])
        maxy = maxxysnp[j + 1]
        maxys.append(maxxysnp[j + 1])
        minx = minxysnp[j]
        minxs.append(minxysnp[j])
        miny = minxysnp[j + 1]
        minys.append(minxysnp[j + 1])
        roimask[miny:maxy, minx:maxx] = j + 1
    maxxs.sort()
    maxys.sort()
    minxs.sort()
    minys.sort()

    np.set_printoptions(threshold=np.nan)  # print entire arrays
    print roimask

    # convert masks to integer values for bincount weights
    m, w = imageTools.convertMaskToWeights(m)
    rm, roimaskweights = imageTools.convertMaskToWeights(roimask)
    unique = np.unique(m)
    print "unique: ", unique
    unique2 = np.unique(rm)
    print "unique2: ", unique2

    # bounding rows/columns of each ROI in the rectangular mask
    rminr = set()
    rminc = set()
    rmaxr = set()
    rmaxc = set()
    for x in unique2:
        rmaxc.add(np.amax(np.where(rm == x)[0]))
        rmaxr.add(np.amax(np.where(rm == x)[1]))
        rminc.add(np.amin(np.where(rm == x)[0]))
        rminr.add(np.amin(np.where(rm == x)[1]))
    rlminx = sorted(rminr)
    rlmaxx = sorted(rmaxr)
    rlminy = sorted(rminc)
    rlmaxy = sorted(rmaxc)
    print rlminx, rlmaxx, rlminy, rlmaxy

    # bounding rows/columns of each well in the real mask
    minr = set()
    minc = set()
    maxr = set()
    maxc = set()
    for x in unique:
        if x == 0:
            continue
        maxc.add(np.amax(np.where(m == x)[0]))
        maxr.add(np.amax(np.where(m == x)[1]))
        minc.add(np.amin(np.where(m == x)[0]))
        minr.add(np.amin(np.where(m == x)[1]))
    lminx = sorted(minr)
    lmaxx = sorted(maxr)
    lminy = sorted(minc)
    lmaxy = sorted(maxc)
    print "real mask: ", lminx, lmaxx, lminy, lmaxy

    # start camera or open video
    videoType, displayDiffs = imageTools.getVideoType(videoStream)
    cap = cv2.VideoCapture(videoStream)

    # adjust video resolution if necessary (sized to mask)
    print 'Camera resolution is %s x %s' % (str(m.shape[1]), str(m.shape[0]))
    cap.set(3, m.shape[1])
    cap.set(4, m.shape[0])

    # set pixel threshold from the first frame
    ret, frame = cap.read()
    storedImage = np.array(e * 255, dtype=np.uint8)  # convert the float32 mode image to uint8
    storedMode = imageTools.Blur(storedImage)
    storedFrame = imageTools.grayBlur(frame)
    pixThreshold = int(np.floor(pixThreshold * storedFrame.shape[0]))
    print 'PixelThreshold is %i' % pixThreshold

    cenData = np.zeros([saveFreq, len(np.unique(w)) * 2 - 2])
    pixData = np.zeros([saveFreq, len(np.unique(w)) + 1])
    i = 0  # a counter for saving chunks of data
    totalFrames = 0
    startTime = datetime.now()
    oldTime = startTime
    elapsed = 0
    print 'Analyzing motion data...'

    frame_roi = []
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            print 'End of Video'
            break
        currentFrame = imageTools.grayBlur(frame)
        currentFrame2 = imageTools.grayBlur(frame)

        # per-well pixel differences against the previous frame (660x1088)
        diffpix = imageTools.diffImage(storedFrame, currentFrame2, pixThreshold, displayDiffs)

        # difference against the stored mode image, used for contour tracking
        diff = imageTools.trackdiffImage(storedMode, currentFrame, pixThreshold, displayDiffs)
        diff.dtype = np.uint8
        _, contours, hierarchy = cv2.findContours(diff, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)

        MIN_THRESH = 20.0
        MIN_THRESH_P = 20.0

        # keep, for each well, the largest contour centroid seen in this frame
        roi_dict = {}
        for r in range(0, numberofwells):
            roi_dict[r + 1] = []
        for cs in range(0, len(contours)):
            if cv2.contourArea(contours[cs]) > MIN_THRESH or cv2.arcLength(contours[cs], True) > MIN_THRESH_P:
                M = cv2.moments(contours[cs])
                cX = int(M["m10"] / M["m00"])
                cY = int(M["m01"] / M["m00"])
                print i, " cX and cY:", cX, cY

                # map the centroid to a well using the real-mask bounds
                r = 1
                c = 1
                for x in range(0, len(lmaxx)):
                    if cX > lmaxx[x]:
                        r = x + 2
                        print "x, cX, lmaxx[x], lminx[x]: ", x, cX, lmaxx[x], lminx[x]
                for y in range(0, len(lmaxy)):
                    if cY > lmaxy[y]:
                        c = y + 2
                        print "y, cY, lmaxy[y], lminy[y], r, c: ", y, cY, lmaxy[y], lminy[y], r, c
                area = cv2.contourArea(contours[cs])
                perim = cv2.arcLength(contours[cs], True)
                print "r, c, rowold[c]: ", r, c, rowold[c], " final well: ", r + rowold[c]
                if not roi_dict[r + rowold[c]]:
                    roi_dict[r + rowold[c]].append((area * perim, cX, cY))
                else:
                    if roi_dict[r + rowold[c]][0][0] < area * perim:
                        roi_dict[r + rowold[c]][0] = (area * perim, cX, cY)

                # alternative mapping using the rectangular ROI bounds
                print len(maxxs), maxxs, maxys, minxs, minys
                for x in range(0, len(maxxs)):
                    if cX > maxxs[x]:
                        r = x + 1
                        print "x, cX, maxxs[x], minxs[x], r, c: ", x, cX, maxxs[x], minxs[x], r, c
                for y in range(0, len(maxys)):
                    if cY > maxys[y]:
                        c = y + 1
                        print "y, cY, maxys[y], minys[y], r, c: ", y, cY, maxys[y], minys[y], r, c
                area = cv2.contourArea(contours[cs])
                perim = cv2.arcLength(contours[cs], True)
                print "r, c, r/8+1, c/12: ", r, c, r / 8 + 1, c / 12, " row[c/12]: ", row[c / 12], " final well: ", r / 8 + 1 + row[c / 12]
                if not roi_dict[r / 8 + 1 + row[c / 12]]:
                    roi_dict[r / 8 + 1 + row[c / 12]].append((area * perim, cX, cY))
                else:
                    if roi_dict[r / 8 + 1 + row[c / 12]][0][0] < area * perim:
                        roi_dict[r / 8 + 1 + row[c / 12]][0] = (area * perim, cX, cY)

        frame_roi.append(roi_dict)

        timeDiff = 1. / frameRate
        elapsed = elapsed + timeDiff

        # per-well motion counts
        pixcounts = np.bincount(w, weights=diffpix.ravel())
        pixData[i, :] = np.hstack((elapsed, pixcounts))

        # per-well centroids (-10000 marks wells with no detection this frame)
        counts = []
        keys = roi_dict.keys()
        keys.sort()
        for k in keys:
            x = -10000
            y = -10000
            if roi_dict[k]:
                x = roi_dict[k][0][1]
                y = roi_dict[k][0][2]
            counts.append(x)
            counts.append(y)
            cv2.line(storedImage, (x, y), (x, y), (255, 255, 255), 2)  # mark centroid on the mode image
        if i == 284:
            cv2.imwrite('trackedimagewithlines_' + str(i) + ".png", storedImage)
        cenData[i, :] = np.asarray(counts)

        totalFrames += 1
        storedFrame = currentFrame
        i += 1

    # write centroid coordinates (x,y per well, per frame)
    cenfile = open(videoStream + ".centroid2", 'w')
    for x in range(0, 285):
        for y in range(0, 192):
            cenfile.write(str(int(cenData[x, :][y])) + '\n')
    cenfile.close()

    # write per-well motion counts, dropping the timing and background columns
    pixData = pixData[:i, :]
    pixData = pixData[:, 2:]
    outfile = open(videoStream + ".motion2", 'w')
    outfile.write("12/8/2015" + '\r')
    for x in range(0, 285):
        for y in range(0, numberofwells):
            outfile.write(str(int(pixData[x, :][y])) + '\n')
    outfile.close()

    vidInfo = {}

    # release camera
    cap.release()
    cv2.destroyAllWindows()
    return vidInfo
def main(pixThreshold, frameRate, videoStream):
    col = {1: 0, 2: 12, 3: 24, 4: 36, 5: 48, 6: 60, 7: 72, 8: 84}
    expDuration = 600000  # duration of experiment, in seconds; only relevant for live feed
    saveFreq = 4500  # how often to save data, in frames

    i, m = imageTools.loadImageAndMask()
    e = imageTools.loadModeImage()

    # convert mask to integer values for bincount weights
    np.set_printoptions(threshold=np.nan)  # print entire arrays
    m, w = imageTools.convertMaskToWeights(m)
    unique = np.unique(m)

    # bounding rows/columns of each well in the mask
    minr = set()
    minc = set()
    maxr = set()
    maxc = set()
    for x in unique:
        if x == 0:
            continue
        maxc.add(np.amax(np.where(m == x)[0]))
        maxr.add(np.amax(np.where(m == x)[1]))
        minc.add(np.amin(np.where(m == x)[0]))
        minr.add(np.amin(np.where(m == x)[1]))
    lminx = sorted(minr)
    lmaxx = sorted(maxr)
    lminy = sorted(minc)
    lmaxy = sorted(maxc)

    # start camera or open video
    videoType, displayDiffs = imageTools.getVideoType(videoStream)
    cap = cv2.VideoCapture(videoStream)

    # adjust video resolution if necessary (sized to mask)
    print 'Camera resolution is %s x %s' % (str(m.shape[1]), str(m.shape[0]))
    cap.set(3, m.shape[1])
    cap.set(4, m.shape[0])

    # set pixel threshold from the first frame
    ret, frame = cap.read()
    storedImage = np.array(e * 255, dtype=np.uint8)  # convert the float32 mode image to uint8
    storedFrame = imageTools.Blur(storedImage)
    pixThreshold = int(np.floor(pixThreshold * storedFrame.shape[0]))
    print 'PixelThreshold is %i' % pixThreshold

    # acquire data; use the shorter of expDuration vs. saveFreq
    if saveFreq / frameRate > expDuration:
        saveFreq = expDuration * frameRate

    pixData = np.zeros([saveFreq, len(np.unique(w)) * 2 - 2])
    i = 0  # a counter for saving chunks of data
    totalFrames = 0
    startTime = datetime.now()
    oldTime = startTime
    elapsed = 0
    print 'Analyzing motion data...'

    moviedeq = []
    frame_roi = []
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            print 'End of Video'
            break
        currentFrame = imageTools.grayBlur(frame)
        moviedeq.append(currentFrame)

        # stop experiment if user presses 'q' or if experiment duration is up
        if (cv2.waitKey(1) & 0xFF == ord('q') or
                len(sys.argv) == 1 and datetime.now() > startTime + timedelta(seconds=expDuration)):
            break

        # difference against the stored mode image, used for contour tracking
        diff = imageTools.trackdiffImage(storedFrame, currentFrame, pixThreshold, displayDiffs)
        diff.dtype = np.uint8
        _, contours, hierarchy = cv2.findContours(diff, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)

        MIN_THRESH = 20.0
        MIN_THRESH_P = 20.0

        # keep, for each well, the largest contour centroid seen in this frame
        roi_dict = {}
        for r in range(0, 96):
            roi_dict[r + 1] = []
        for cs in range(0, len(contours)):
            if cv2.contourArea(contours[cs]) > MIN_THRESH or cv2.arcLength(contours[cs], True) > MIN_THRESH_P:
                M = cv2.moments(contours[cs])
                cX = int(M["m10"] / M["m00"])
                cY = int(M["m01"] / M["m00"])

                # map the centroid to a well using the mask bounds
                r = 1
                c = 1
                for x in range(0, len(lmaxx)):
                    if cX > lmaxx[x]:
                        r = x + 2
                for y in range(0, len(lmaxy)):
                    if cY > lmaxy[y]:
                        c = y + 2
                area = cv2.contourArea(contours[cs])
                perim = cv2.arcLength(contours[cs], True)
                if not roi_dict[r + col[c]]:
                    roi_dict[r + col[c]].append((area * perim, cX, cY))
                else:
                    if roi_dict[r + col[c]][0][0] < area * perim:
                        roi_dict[r + col[c]][0] = (area * perim, cX, cY)

        frame_roi.append(roi_dict)

        timeDiff = 1. / frameRate
        elapsed = elapsed + timeDiff

        # per-well centroids (-10000 marks wells with no detection this frame)
        counts = []
        keys = roi_dict.keys()
        keys.sort()
        for k in keys:
            x = -10000
            y = -10000
            if roi_dict[k]:
                x = roi_dict[k][0][1]
                y = roi_dict[k][0][2]
            counts.append(x)
            counts.append(y)
            cv2.line(storedImage, (x, y), (x, y), (255, 255, 255), 2)  # mark centroid on the mode image
        cv2.imwrite('withlines' + str(i) + ".png", storedImage)
        pixData[i, :] = np.asarray(counts)
        totalFrames += 1
        i += 1

    # write centroid coordinates (x,y per well, per frame)
    cenfile = open(videoStream + ".centroid2", 'w')
    for x in range(0, 285):
        for y in range(0, 192):
            cenfile.write(str(int(pixData[x, :][y])) + '\n')
    cenfile.close()

    # save info (elapsed time and framerate) for later use
    timeStamp = datetime.now()
    vidInfo = {}
    analysisTime = timeStamp - startTime
    vidInfo['analysisTime'] = float(str(analysisTime.seconds) + '.' + str(analysisTime.microseconds))
    vidInfo['TotalFrames'] = totalFrames
    vidInfo['fps'] = int(totalFrames / vidInfo['analysisTime'])
    vidInfo['pixThreshold'] = pixThreshold
    vidInfo['CameraResolution'] = '%s x %s' % (str(m.shape[1]), str(m.shape[0]))
    vidInfo['NamePrefix'] = videoStream
    print 'Analyzed %i frames in %f seconds' % (vidInfo['TotalFrames'], vidInfo['analysisTime'])
    print 'FrameRate is about %i fps' % vidInfo['fps']
    print 'Motion threshold is %i pixels' % int(pixThreshold)
    print 'Camera resolution is %s' % vidInfo['CameraResolution']

    # release camera
    cap.release()
    cv2.destroyAllWindows()
    return vidInfo
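# The centroid-to-well mapping used in the contour loop above, pulled out as a
# standalone helper for clarity; the name well_from_centroid is mine. lmaxx
# and lmaxy are the sorted per-well maximum x/y bounds and col maps a row
# index to its well-number offset, exactly as computed in main().
def well_from_centroid(cX, cY, lmaxx, lmaxy, col):
    r = 1
    c = 1
    for x in range(len(lmaxx)):
        if cX > lmaxx[x]:
            r = x + 2  # centroid lies to the right of well-column bound x
    for y in range(len(lmaxy)):
        if cY > lmaxy[y]:
            c = y + 2  # centroid lies below well-row bound y
    return r + col[c]  # well number in the range 1..96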
def main(pixThreshold, frameRate, videoStream):
    expDuration = 600000  # duration of experiment, in seconds; only relevant for live feed
    saveFreq = 4500  # how often to save data, in frames
    i, m = imageTools.loadImageAndMask()

    # convert mask to integer values for bincount weights
    m, w = imageTools.convertMaskToWeights(m)

    # collect frames from up to 15 matching videos, then compute their mode image
    moviedeq = []
    i2 = 0
    for videoFile in glob.glob("*_4*avi"):
        if i2 == 15:
            break

        # start camera or open video
        videoType, displayDiffs = imageTools.getVideoType(videoStream)
        cap = cv2.VideoCapture(videoFile)

        ret, frame = cap.read()
        storedFrame = imageTools.grayBlur(frame)

        # acquire data; use the shorter of expDuration vs. saveFreq
        if saveFreq / frameRate > expDuration:
            saveFreq = expDuration * frameRate
        pixData = np.zeros([saveFreq, len(np.unique(w)) + 1])

        totalFrames = 0
        startTime = datetime.now()
        oldTime = startTime
        elapsed = 0
        print 'Analyzing motion data...'

        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                print 'End of Video'
                break
            currentFrame = imageTools.grayBlur(frame)
            moviedeq.append(currentFrame)

            # stop experiment if user presses 'q' or if experiment duration is up
            if (cv2.waitKey(1) & 0xFF == ord('q') or
                    len(sys.argv) == 1 and datetime.now() > startTime + timedelta(seconds=expDuration)):
                break

            timeDiff = 1. / frameRate
            elapsed = elapsed + timeDiff
            totalFrames += 1
            storedFrame = currentFrame  # comment out if nothing is in first frame

        i2 += 1

    # compute the pixel-wise mode across the collected frames and save it
    testing = calc_mode(moviedeq, np.zeros([660, 1088]))
    print "saving mode.png"
    cv2.imwrite('mode.png', testing)

    vidInfo = {}

    # release camera
    cap.release()
    cv2.destroyAllWindows()
    return vidInfo
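# calc_mode() is referenced above but not shown in this excerpt. The sketch
# below is one possible implementation, under the assumption that it returns,
# for every pixel, the most common intensity across the collected frames
# (consistent with its use here to build a background "mode" image); the real
# helper may differ.
from scipy import stats

def calc_mode(frames, out):
    stack = np.dstack(frames)  # shape: (rows, cols, n_frames)
    # per-pixel modal intensity across frames
    out[:, :] = stats.mode(stack, axis=2)[0][:, :, 0]
    return out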