def compute_flow_opencv(alpha, iterations, ifile1, ifile2):
    """Compute Horn-Schunck optical flow between two images in ./flow.

    Uses the legacy OpenCV ``cv`` API.  The smoothness weight passed to
    CalcOpticalFlowHS is ``1 / alpha**2`` and iteration count is the sole
    termination criterion.

    Returns a pair of float64 numpy arrays ``(u, v)``, the horizontal and
    vertical flow components.
    """
    import cv

    first = cv.LoadImageM(os.path.join("flow", ifile1), iscolor=False)
    second = cv.LoadImageM(os.path.join("flow", ifile2), iscolor=False)

    # Zero-initialised 32-bit float output buffers, one per component.
    flow_u = cv.CreateMat(first.rows, first.cols, cv.CV_32F)
    flow_v = cv.CreateMat(first.rows, first.cols, cv.CV_32F)
    cv.SetZero(flow_u)
    cv.SetZero(flow_v)

    smoothness = 1.0 / (alpha ** 2)
    cv.CalcOpticalFlowHS(first, second, 0, flow_u, flow_v, smoothness,
                         (cv.CV_TERMCRIT_ITER, iterations, 0))

    # return blitz arrays
    return numpy.array(flow_u, 'float64'), numpy.array(flow_v, 'float64')
def horn_schunck(self):
    """Yield one Flow object per consecutive frame pair of self.path.

    Runs legacy-API Horn-Schunck optical flow (smoothness 0.001, 100
    iterations) on each grayscale frame pair and stacks the x/y velocity
    planes into a single (h, w, 2) array.
    """
    capture = cv.CaptureFromFile(self.path)
    stop_criteria = (cv.CV_TERMCRIT_ITER, 100, 0)

    # The first frame only fixes the image size for the flow buffers.
    reference = cv_compat.get_gray_frame(capture)
    size = cv.GetSize(reference)
    flow_x = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
    flow_y = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)

    for prev_frame, curr_frame, curr_frame_color in self._iter_frames(capture):
        cv.CalcOpticalFlowHS(prev_frame, curr_frame, False,
                             flow_x, flow_y, 0.001, stop_criteria)
        flow = np.dstack((np.asarray(cv.GetMat(flow_x)),
                          np.asarray(cv.GetMat(flow_y))))
        yield Flow(flow, curr_frame, prev_frame, curr_frame_color)
##using horn schunk if args.algorithm == 'HS': dst_im1 = cv.LoadImage(args.im2, cv.CV_LOAD_IMAGE_COLOR) dst_im2 = dst_im1 #size is tuple type cols = src_im1.width rows = src_im1.height velx = cv.CreateMat(rows, cols, cv.CV_32FC1) vely = cv.CreateMat(rows, cols, cv.CV_32FC1) cv.SetZero(velx) cv.SetZero(vely) cv.CalcOpticalFlowHS(src_im1, src_im2, 0, velx, vely, 100.0, (cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 64, 0.01)) #cv.CalcOpticalFlowLK(src_im1, src_im2, (10,10), velx, vely) print velx print vely for i in range(0, (cols - 1), 5): for j in range(0, (rows - 1), 5): dx = cv.GetReal2D(velx, j, i) dy = cv.GetReal2D(vely, j, i) cv.Line(dst_im1, (i, j), (int(i + dx), int(j + dy)), cv.CV_RGB(255, 0, 0), 1, cv.CV_AA, 0) cv.NamedWindow("w", cv.CV_WINDOW_AUTOSIZE) cv.ShowImage("w", dst_im1) cv.WaitKey()
# desImageHS = cv.LoadImage(sys.argv[1], cv.CV_LOAD_IMAGE_COLOR) # desImageLK = cv.LoadImage(sys.argv[1], cv.CV_LOAD_IMAGE_COLOR) desImageHS = cv.LoadImage('./A/8.0/shuibo_9.jpg', cv.CV_LOAD_IMAGE_COLOR) desImageLK = cv.LoadImage('./A/8.0/shuibo_9.jpg', cv.CV_LOAD_IMAGE_COLOR) cols = inputImageFirst.width rows = inputImageFirst.height velx = cv.CreateMat(rows, cols, cv.CV_32FC1) vely = cv.CreateMat(rows, cols, cv.CV_32FC1) cv.SetZero(velx) cv.SetZero(vely) cv.CalcOpticalFlowHS(inputImageFirst, inputImageSecond, False, velx, vely, 100.0, (cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 64, 0.01)) f = open('./A/8.0/shuibo_8_HS(x1,y1,x1,y2).txt', 'w') count = 0 for i in range(0, cols, FLOWSKIP): for j in range(0, rows, FLOWSKIP): dx = int(cv.GetReal2D(velx, j, i)) dy = int(cv.GetReal2D(vely, j, i)) cv.Line(desImageHS, (i, j), (i + dx, j + dy), (0, 0, 255), 1, cv.CV_AA, 0) f.writelines( [str(i), ' ', str(j), ' ', str(i + dx), ' ', str(j + dy), '\n']) # count+=1
def main():
    """Play a video with several live motion-analysis debug views.

    Opens five windows (video / threshold / flow / edges / combined) and,
    per frame: background subtraction, HSV colour thresholding,
    Horn-Schunck optical flow, Canny edge detection, and blob extraction.
    Expects the video path as argv[1] and a 'bg.png' background image in
    the working directory.  ESC quits; 'p' is intended to toggle pause
    (see review note at the bottom).
    """
    if len(sys.argv) == 1:
        print 'Usage: %s [inputfile]' % sys.argv[0]
        sys.exit(1)

    # initialize window
    cv.NamedWindow('video', cv.CV_WINDOW_AUTOSIZE)
    cv.MoveWindow('video', 10, 10)
    cv.NamedWindow('threshold', cv.CV_WINDOW_AUTOSIZE)
    cv.MoveWindow('threshold', 10, 500)
    cv.NamedWindow('flow', cv.CV_WINDOW_AUTOSIZE)
    cv.MoveWindow('flow', 500, 10)
    cv.NamedWindow('edges', cv.CV_WINDOW_AUTOSIZE)
    cv.MoveWindow('edges', 500, 500)
    cv.NamedWindow('combined', cv.CV_WINDOW_AUTOSIZE)
    cv.MoveWindow('combined', 1000, 10)

    capture = cv.CreateFileCapture(sys.argv[1])
    if not capture:
        print 'Error opening capture'
        sys.exit(1)

    # Load bg image
    bg = cv.LoadImage('bg.png')

    # Discard some frames
    # NOTE(review): skips a hard-coded 2300 frames — presumably to jump
    # past an uninteresting intro in this specific clip; confirm.
    for i in xrange(2300):
        cv.GrabFrame(capture)

    frame = cv.QueryFrame(capture)
    frame_size = cv.GetSize(frame)

    # vars for playback
    fps = 25
    play = True

    # Reusable scratch images: flow components (float), grayscale frame
    # pair, background-subtracted colour frame, edge map, etc.
    velx = cv.CreateImage(frame_size, cv.IPL_DEPTH_32F, 1)
    vely = cv.CreateImage(frame_size, cv.IPL_DEPTH_32F, 1)
    combined = cv.CreateImage(frame_size, cv.IPL_DEPTH_8U, 1)
    prev = cv.CreateImage(frame_size, cv.IPL_DEPTH_8U, 1)
    curr = cv.CreateImage(frame_size, cv.IPL_DEPTH_8U, 1)
    frame_sub = cv.CreateImage(frame_size, cv.IPL_DEPTH_8U, 3)
    edges = cv.CreateImage(frame_size, cv.IPL_DEPTH_8U, 1)
    prev_edges = None
    storage = cv.CreateMemStorage(0)
    # Blob mask uses the older cv0 binding; all pixels enabled.
    blob_mask = cv0.cvCreateImage(frame_size, cv.IPL_DEPTH_8U, 1)
    cv0.cvSet(blob_mask, 1)
    hough_in = cv.CreateImage(frame_size, cv.IPL_DEPTH_8U, 1)
    hough_storage = cv.CreateMat(100, 1, cv.CV_32FC3)

    '''
    cv.CvtColor(frame, prev, cv.CV_BGR2GRAY)
    frame = cv.QueryFrame(capture)
    cv.CvtColor(frame, curr, cv.CV_BGR2GRAY)
    # winSize can't have even numbers
    cv.CalcOpticalFlowLK(prev, curr, (3,3), velx, vely)
    cv.ShowImage('video', frame)
    cv.ShowImage('flow', velx)
    cv.WaitKey(0)
    '''

    while True:
        if play:
            frame = cv.QueryFrame(capture)
            cv.Sub(frame, bg, frame_sub)

            '''#detect people
            found = list(cv.HOGDetectMultiScale(frame, storage, win_stride=(8,8),
                padding=(32,32), scale=1.05, group_threshold=2))
            for r in found:
                (rx, ry), (rw, rh) = r
                tl = (rx + int(rw*0.1), ry + int(rh*0.07))
                br = (rx + int(rw*0.9), ry + int(rh*0.87))
                cv.Rectangle(frame, tl, br, (0, 255, 0), 3)
            '''

            #color thresholding
            # Range is specified as fractions of 256 per HSV channel.
            hsv = cv.CreateImage(frame_size, cv.IPL_DEPTH_8U, 3)
            cv.CvtColor(frame, hsv, cv.CV_BGR2HSV)
            mask = cv.CreateMat(frame_size[1], frame_size[0], cv.CV_8UC1)
            cv.InRangeS(hsv, (0.06 * 256, 0.2 * 256, 0.6 * 256, 0),
                        (0.16 * 256, 1.0 * 256, 1.0 * 256, 0), mask)
            cv.ShowImage('threshold', mask)

            #optical flow method
            # store previous frame
            prev, curr = curr, prev
            # convert next frame to single channel grayscale
            cv.CvtColor(frame_sub, curr, cv.CV_BGR2GRAY)
            #cv.CalcOpticalFlowLK(prev, curr, (3,3), velx, vely)
            #cv.Threshold(velx, velx, 8.0, 0, cv.CV_THRESH_TOZERO)
            cv.CalcOpticalFlowHS(prev, curr, 1, velx, vely, 0.5,
                                 (cv.CV_TERMCRIT_ITER, 10, 0))
            # Zero out sub-threshold responses, thin vely, then merge the
            # two components into vely for display.
            cv.Threshold(velx, velx, 0.5, 0, cv.CV_THRESH_TOZERO)
            cv.Threshold(vely, vely, 0.5, 0, cv.CV_THRESH_TOZERO)
            cv.Erode(
                vely, vely,
                cv.CreateStructuringElementEx(2, 2, 0, 0, cv.CV_SHAPE_ELLIPSE))
            cv.Add(vely, velx, vely)
            cv.ShowImage('flow', vely)

            #edge detection
            cv.Canny(curr, edges, 50, 100)
            cv.Dilate(
                edges, edges,
                cv.CreateStructuringElementEx(7, 7, 0, 0, cv.CV_SHAPE_ELLIPSE))
            cv.ShowImage('edges', edges)

            # NOTE(review): 'edges' is a single reused buffer, so after the
            # first iteration prev_edges IS edges (same object) — this flow
            # call compares an image against itself.  A cv.CloneImage of
            # edges is probably intended; confirm.
            if prev_edges:
                cv.CalcOpticalFlowHS(prev_edges, edges, 1, velx, vely, 0.5,
                                     (cv.CV_TERMCRIT_ITER, 10, 0))
                cv.Threshold(velx, velx, 0.5, 0, cv.CV_THRESH_TOZERO)
                cv.Threshold(vely, vely, 0.5, 0, cv.CV_THRESH_TOZERO)
                cv.ShowImage('flow', vely)
            prev_edges = edges

            # Combined view: binary motion mask intersected with the edges.
            cv.Threshold(vely, combined, 0.5, 255, cv.CV_THRESH_BINARY)
            cv.Min(combined, edges, combined)
            cv.ShowImage('combined', combined)

            # blobs
            myblobs = CBlobResult(edges, blob_mask, 100, False)
            myblobs.filter_blobs(10, 10000)
            blob_count = myblobs.GetNumBlobs()
            for i in range(blob_count):
                my_enumerated_blob = myblobs.GetBlob(i)
                # print "%d: Area = %d" % (i, my_enumerated_blob.Area())
                # Colour each blob with a distinct hue.
                my_enumerated_blob.FillBlob(frame,
                                            hsv2rgb(i * 180.0 / blob_count),
                                            0, 0)
            cv.ShowImage('video', frame)

            ''' crashes
            #hough transform on dilated image
            #http://wiki.elphel.com/index.php?
            # title=OpenCV_Tennis_balls_recognizing_tutorial&redirect=no
            cv.Copy(edges, hough_in)
            cv.Smooth(hough_in, hough_in, cv.CV_GAUSSIAN, 15, 15, 0, 0)
            cv.HoughCircles(hough_in, hough_storage, cv.CV_HOUGH_GRADIENT,
                            4, frame_size[1]/10, 100, 40, 0, 0)
            print hough_storage
            '''

        k = cv.WaitKey(1000 / fps)
        if k == 27:  # ESC key
            break
        # NOTE(review): cv.WaitKey returns an int, so comparing against the
        # string 'p' is always false — likely should be ord('p'); confirm.
        elif k == 'p':  # play/pause
            play = not play
def calcOpticalFlow(self, curImageGray, method="BlockMatching"):
    """Compute optical flow between the previously seen frame and curImageGray.

    :param curImageGray: single-channel (grayscale) legacy-OpenCV image.
    :param method: one of "BlockMatching", "LucasKanade", "HornSchunck".
    :return: tuple (opticalFlowArrayX, opticalFlowArrayY) of float32 numpy
        arrays with shape (storageHeight, storageWidth).
    :raises Exception: if the image is not grayscale or the method is unknown.

    Side effect: curImageGray is stored as the reference frame for the next
    call.  On the very first call the flow is computed against curImageGray
    itself, so it is (approximately) zero.
    """
    if curImageGray.channels != 1:
        raise Exception("Only able to process gray-scale images")

    # First frame ever seen: compare the image against itself.
    # (Fixed: identity comparison with None, not '== None'.)
    if self.lastImageGray is None:
        lastImageGray = curImageGray
    else:
        lastImageGray = self.lastImageGray

    # Create storage for the optical flow (one sample per block).
    storageWidth = self.calcOpticalFlowWidth(lastImageGray.width)
    storageHeight = self.calcOpticalFlowHeight(lastImageGray.height)

    def sampleDenseFlow(denseX, denseY):
        # Subsample a dense (per-pixel) flow field at block centres so the
        # result matches the BlockMatching output grid.  Shared by the LK
        # and HS branches (previously duplicated verbatim).
        indexGrid = np.mgrid[0:storageHeight, 0:storageWidth]
        indexGrid[0] = (indexGrid[0] * self.opticalFlowBlockHeight
                        + self.opticalFlowBlockHeight / 2)
        # NOTE(review): the column step uses opticalFlowRangeWidth while
        # the row step uses opticalFlowBlockHeight — opticalFlowBlockWidth
        # may have been intended; kept as-is to preserve behaviour.
        indexGrid[1] = (indexGrid[1] * self.opticalFlowRangeWidth
                        + self.opticalFlowRangeWidth / 2)
        return (denseX[indexGrid[0], indexGrid[1]],
                denseY[indexGrid[0], indexGrid[1]])

    if method == "BlockMatching":
        # BM produces the coarse grid directly.
        opticalFlowArrayX = np.ndarray(shape=(storageHeight, storageWidth),
                                       dtype=np.float32)
        opticalFlowArrayY = np.ndarray(shape=(storageHeight, storageWidth),
                                       dtype=np.float32)
        cv.CalcOpticalFlowBM(
            lastImageGray, curImageGray,
            (self.opticalFlowBlockWidth, self.opticalFlowBlockHeight),
            (self.opticalFlowBlockWidth, self.opticalFlowBlockHeight),
            (self.opticalFlowRangeWidth, self.opticalFlowRangeHeight), 0,
            cv.fromarray(opticalFlowArrayX), cv.fromarray(opticalFlowArrayY))
    elif method == "LucasKanade":
        denseShape = (lastImageGray.height, lastImageGray.width)
        largeOpticalFlowArrayX = np.ndarray(shape=denseShape, dtype=np.float32)
        largeOpticalFlowArrayY = np.ndarray(shape=denseShape, dtype=np.float32)
        cv.CalcOpticalFlowLK(
            lastImageGray, curImageGray,
            (15, 15),  #( self.opticalFlowBlockWidth, self.opticalFlowBlockHeight ),
            cv.fromarray(largeOpticalFlowArrayX),
            cv.fromarray(largeOpticalFlowArrayY))
        opticalFlowArrayX, opticalFlowArrayY = sampleDenseFlow(
            largeOpticalFlowArrayX, largeOpticalFlowArrayY)
    elif method == "HornSchunck":
        denseShape = (lastImageGray.height, lastImageGray.width)
        largeOpticalFlowArrayX = np.ndarray(shape=denseShape, dtype=np.float32)
        largeOpticalFlowArrayY = np.ndarray(shape=denseShape, dtype=np.float32)
        # lambda=1.0 smoothness; stop after 10 iterations or eps < 0.01.
        cv.CalcOpticalFlowHS(
            lastImageGray, curImageGray, 0,
            cv.fromarray(largeOpticalFlowArrayX),
            cv.fromarray(largeOpticalFlowArrayY), 1.0,
            (cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 10, 0.01))
        opticalFlowArrayX, opticalFlowArrayY = sampleDenseFlow(
            largeOpticalFlowArrayX, largeOpticalFlowArrayY)
    else:
        raise Exception("Unhandled method")

    # Save the current image as the reference for the next call.
    self.lastImageGray = curImageGray

    return (opticalFlowArrayX, opticalFlowArrayY)