def calc_flow_old(img0, img1, p0):
    # convert the point array to a list of (x, y) tuples for the legacy cv API
    p0 = [(x, y) for x, y in p0.reshape(-1, 2)]
    h, w = img0.shape[:2]
    # wrap the numpy images in legacy cv matrices
    img0_cv = cv.CreateMat(h, w, cv.CV_8U)
    img1_cv = cv.CreateMat(h, w, cv.CV_8U)
    np.asarray(img0_cv)[:] = img0
    np.asarray(img1_cv)[:] = img1
    t = clock()
    features, status, error = cv.CalcOpticalFlowPyrLK(
        img0_cv, img1_cv, None, None, p0,
        lk_params['winSize'], lk_params['maxLevel'],
        (cv.CV_TERMCRIT_EPS | cv.CV_TERMCRIT_ITER, 10, 0.03), 0, p0)
    return np.float32(features), status, error, clock() - t
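# For comparison, roughly the same measurement with the modern cv2 API. This is
# an illustrative sketch, not part of the original code: the name calc_flow_new
# is hypothetical, and it assumes the same lk_params dict (winSize, maxLevel)
# and clock() timer used above are available in the module.
import cv2

def calc_flow_new(img0, img1, p0):
    # cv2 expects an N x 1 x 2 float32 array of input points
    p0 = np.float32(p0).reshape(-1, 1, 2)
    t = clock()
    p1, status, error = cv2.calcOpticalFlowPyrLK(
        img0, img1, p0, None,
        winSize=lk_params['winSize'], maxLevel=lk_params['maxLevel'],
        criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
    return p1.reshape(-1, 2), status, error, clock() - t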
def compute(playerList, video):
    videoName = video
    capture = cv.CaptureFromFile(videoName)
    count = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_COUNT))
    fps = cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FPS)
    width = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH))
    height = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT))

    # store the last frame
    preFrame = cv.CreateImage((width, height), 8, 1)
    # store the current frame
    curFrame = cv.CreateImage((width, height), 8, 1)
    # single-channel pyramid buffers for CalcOpticalFlowPyrLK,
    # sized (width + 8, height / 3) as the legacy API expects
    prePyr = cv.CreateImage((width + 8, height / 3), 8, 1)
    curPyr = cv.CreateImage((width + 8, height / 3), 8, 1)

    numOfPlayers = len(playerList)
    # store each player's accumulated moving distance
    players = np.zeros(numOfPlayers)
    # store players' positions from the last frame
    prePlayers = playerList
    # store players' positions from the current frame
    curPlayers = []

    img = cv.CreateImage((width, height), 8, 1)
    # flag for saving the player info image once
    flagInfo = True

    for f in xrange(count):
        frame = cv.QueryFrame(capture)
        if flagInfo:
            # label each player on the first frame and save the image
            cv.CvtColor(frame, img, cv.CV_BGR2GRAY)
            for i in range(numOfPlayers):
                font = cv.InitFont(cv.CV_FONT_HERSHEY_SCRIPT_SIMPLEX,
                                   0.4, 0.4, 0, 2, 3)
                cv.PutText(img, str(i),
                           (int(prePlayers[i][0][0]), int(prePlayers[i][0][1])),
                           font, (255, 255, 255))
            cv.SaveImage(playerInfo, img)
            # initialise the previous frame so the first flow call has valid input
            cv.CvtColor(frame, preFrame, cv.CV_BGR2GRAY)
            flagInfo = False

        # convert to gray
        cv.CvtColor(frame, curFrame, cv.CV_BGR2GRAY)

        # calculate the movement between the previous and the current frame
        # using the previous points
        curPlayers, status, err = cv.CalcOpticalFlowPyrLK(
            preFrame, curFrame, prePyr, curPyr, prePlayers, (10, 10), 3,
            (cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 20, 0.03), 0)

        ###temp = frame
        # add the new distance for each player
        for i in range(numOfPlayers):
            players[i] += getDistance(prePlayers[i], curPlayers[i])
            ###cv.Line(temp, (int(prePlayers[i][0]), int(prePlayers[i][1])),
            ###        (int(curPlayers[i][0]), int(curPlayers[i][1])), (255, 122, 122), 3)
        ###cv.ShowImage("test", temp)
        ###cv2.waitKey(20)

        # the current frame becomes the previous frame
        cv.Copy(curFrame, preFrame)
        prePlayers = curPlayers
    ###cv2.destroyAllWindows()

    # print the distances
    i = 0
    f = open(recordFile, 'w')
    for player in players:
        i += 1
        print "player", i, "running distance: ", player, "\n"
        f.write("player" + str(i) + " running distance: " + str(player) + " meters\n")
    f.close()
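# getDistance() is referenced above but not defined in this snippet. A minimal
# sketch under stated assumptions: each player position reduces to an (x, y)
# pair in pixels, and a hypothetical PIXELS_PER_METER calibration constant
# converts the pixel displacement to metres (both are assumptions, not part of
# the original code).
import math

PIXELS_PER_METER = 10.0  # assumed calibration constant

def getDistance(prevPoint, curPoint):
    # Euclidean distance between the previous and current positions
    dx = curPoint[0] - prevPoint[0]
    dy = curPoint[1] - prevPoint[1]
    return math.sqrt(dx * dx + dy * dy) / PIXELS_PER_METER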
# the default parameters
quality = 0.01
min_distance = 10

# search for good points to track
features = cv.GoodFeaturesToTrack(grey, eig, temp, MAX_COUNT,
                                  quality, min_distance, None, 3, 0, 0.04)

# refine the corner locations to sub-pixel accuracy
features = cv.FindCornerSubPix(
    grey, features, (win_size, win_size), (-1, -1),
    (cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 20, 0.03))

# calculate the optical flow
features, status, track_error = cv.CalcOpticalFlowPyrLK(
    prev_grey, grey, prev_pyramid, pyramid, features,
    (win_size, win_size), 3,
    (cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 20, 0.03), 0)

# keep only the points that were successfully tracked
features = [p for (st, p) in zip(status, features) if st]

# draw the points as green circles
for the_point in features:
    cv.Circle(image, (int(the_point[0]), int(the_point[1])), 3,
              (0, 255, 0, 0), -1, 8, 0)

# swap the buffers for the next iteration
prev_grey, grey = grey, prev_grey
prev_pyramid, pyramid = pyramid, prev_pyramid
# we can now display the image
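# For reference, the same detect / refine / track pipeline with the modern cv2
# API looks roughly like the sketch below. This is an illustrative equivalent,
# not part of the original sample; the detect_and_track name is hypothetical,
# and the parameter values mirror the legacy code above.
import cv2

def detect_and_track(prev_grey, grey, max_count=500, win_size=10):
    # search for good points to track in the previous frame
    features = cv2.goodFeaturesToTrack(prev_grey, maxCorners=max_count,
                                       qualityLevel=0.01, minDistance=10,
                                       blockSize=3)
    # refine the corner locations to sub-pixel accuracy
    criteria = (cv2.TERM_CRITERIA_MAX_ITER | cv2.TERM_CRITERIA_EPS, 20, 0.03)
    features = cv2.cornerSubPix(prev_grey, features,
                                (win_size, win_size), (-1, -1), criteria)
    # calculate the optical flow from prev_grey to grey
    new_features, status, err = cv2.calcOpticalFlowPyrLK(
        prev_grey, grey, features, None,
        winSize=(win_size, win_size), maxLevel=3, criteria=criteria)
    # keep only the successfully tracked points
    return new_features[status.ravel() == 1]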
cv.Copy(frame, output)

if len(prev_points) <= 10:  # try to get more points
    # detect points on the image
    features = cv.GoodFeaturesToTrack(gray, None, None, max_count, qLevel, minDist)
    prev_points.extend(features)  # add the new points to the list
    initial.extend(features)      # idem

if begin:
    cv.Copy(gray, prev_gray)  # now we have two frames to compare
    begin = False

# compute the movement
curr_points, status, err = cv.CalcOpticalFlowPyrLK(
    prev_gray, gray, prevPyr, currPyr, prev_points, (10, 10), 3,
    (cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 20, 0.03), 0)

# keep a point only if its status is ok and its displacement is not negligible
k = 0
for i in range(len(curr_points)):
    nb = abs(int(prev_points[i][0]) - int(curr_points[i][0])) + \
         abs(int(prev_points[i][1]) - int(curr_points[i][1]))
    if status[i] and nb > 2:
        initial[k] = initial[i]
        curr_points[k] = curr_points[i]
        k += 1
curr_points = curr_points[:k]
initial = initial[:k]
# at the end only interesting points are kept
)

# Call the Lucas-Kanade algorithm
#
# features_found = [ MAX_CORNERS ]
# feature_errors = [ MAX_CORNERS ]
pyr_sz = (imgA.width + 8, imgB.height / 3)
pyrA = cv.CreateImage(pyr_sz, cv.IPL_DEPTH_32F, 1)
pyrB = cv.CreateImage(pyr_sz, cv.IPL_DEPTH_32F, 1)
cornersB = []
cornersB, features_found, feature_errors = cv.CalcOpticalFlowPyrLK(
    imgA, imgB, pyrA, pyrB, cornersA,
    # corner_count,
    (win_size, win_size), 5,
    (cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 20, 0.03), 0)

# Now make some image of what we are looking at:
#
for i in range(100):
    if features_found[i] == 0 or feature_errors[i] > 550:
        # printf("Error is %f\n", feature_errors[i])
        continue
    print("Got it")
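# The loop above only reports which corners were tracked. A minimal sketch of
# drawing the flow vectors as well, assuming a hypothetical imgC display image
# (e.g. a colour copy of imgB) that is not defined in the original snippet:
for i in range(100):
    if features_found[i] == 0 or feature_errors[i] > 550:
        continue
    p0 = (int(cornersA[i][0]), int(cornersA[i][1]))
    p1 = (int(cornersB[i][0]), int(cornersB[i][1]))
    # draw a green line from the old corner position to the tracked one
    cv.Line(imgC, p0, p1, (0, 255, 0), 2)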
def sd_loop(self):
    """ The main seizure detector loop - call this function to start
    the seizure detector.
    """
    self.timeSeries = []  # array of times that data points were collected.
    self.maxFreq = None

    if self.X11:
        cv.NamedWindow('Seizure_Detector', cv.CV_WINDOW_AUTOSIZE)
        cv.CreateTrackbar('FeatureTrackbar', 'Seizure_Detector', 0,
                          self.MAX_COUNT, self.onTrackbarChanged)
    font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 0.5, 0.5, 0, 1, 8)

    # Initialise the video input source
    # ('camera' - may be a file or network stream though).
    #camera = cv.CaptureFromFile("rtsp://192.168.1.18/live_mpeg4.sdp")
    #camera = cv.CaptureFromFile("../testcards/testcard.mpg")
    #camera = cv.CaptureFromFile("/home/graham/laura_sample.mpeg")
    camera = cv.CaptureFromCAM(0)

    # Set up the VideoWriter that produces the output video file.
    frameSize = (640, 480)
    videoFormat = cv.FOURCC('p', 'i', 'm', '1')
    # videoFormat = cv.FOURCC('l','m','p','4')
    vw = cv.CreateVideoWriter(self.videoOut, videoFormat,
                              self.outputfps, frameSize, 1)
    if vw is None:
        print "ERROR - Failed to create VideoWriter...."

    # Get the first frame.
    last_analysis_time = datetime.datetime.now()
    last_feature_search_time = datetime.datetime.now()
    last_frame_time = datetime.datetime.now()
    frame = cv.QueryFrame(camera)
    print "frame="
    print frame

    # Main loop - repeat forever
    while 1:
        # Carry out initialisation, memory allocation etc. if necessary
        if self.image is None:
            self.image = cv.CreateImage(cv.GetSize(frame), 8, 3)
            self.image.origin = frame.origin
            grey = cv.CreateImage(cv.GetSize(frame), 8, 1)
            prev_grey = cv.CreateImage(cv.GetSize(frame), 8, 1)
            pyramid = cv.CreateImage(cv.GetSize(frame), 8, 1)
            prev_pyramid = cv.CreateImage(cv.GetSize(frame), 8, 1)
            # self.features = []

        # copy the captured frame to our self.image object.
        cv.Copy(frame, self.image)
        # create a grey version of the image
        cv.CvtColor(self.image, grey, cv.CV_BGR2GRAY)

        # Look for features to track.
        if self.need_to_init:
            #cv.ShowImage('loop_grey', grey)
            self.initFeatures(grey)
            self.timeSeries = []
            self.maxFreq = None
            last_analysis_time = datetime.datetime.now()
            self.need_to_init = False

        # Now track the features, if we have some.
        if self.features != []:
            # we have points to track, so track them and add them to
            # our time series of positions.
            self.features, status, track_error = cv.CalcOpticalFlowPyrLK(
                prev_grey, grey, prev_pyramid, pyramid, self.features,
                (self.win_size, self.win_size), 3,
                (cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 20, 0.03),
                self.flags)
            self.timeSeries.append((last_frame_time, self.features))
            print "Features..."
            for featNo in range(len(self.features)):
                if status[featNo] == 0:
                    self.features[featNo] = (-1, -1)
                print status[featNo], self.features[featNo]

            # and plot them.
            for featNo in range(len(self.features)):
                pointPos = self.features[featNo]
                cv.Circle(self.image, (int(pointPos[0]), int(pointPos[1])),
                          3, (0, 255, 0, 0), -1, 8, 0)
                if self.alarmActive[featNo] == 2:
                    cv.Circle(self.image, (int(pointPos[0]), int(pointPos[1])),
                              10, (0, 0, 255, 0), 5, 8, 0)
                if self.alarmActive[featNo] == 1:
                    cv.Circle(self.image, (int(pointPos[0]), int(pointPos[1])),
                              10, (0, 0, 255, 0), 2, 8, 0)
                # there will be no maxFreq data until we have
                # run doAnalysis for the first time.
                if self.maxFreq is not None:
                    msg = "%d-%3.1f" % (featNo, self.maxFreq[featNo])
                    cv.PutText(self.image, msg,
                               (int(pointPos[0] + 5), int(pointPos[1] + 5)),
                               font, (255, 255, 255))
            # end of for loop over features
        else:
            #print "Oh no, no features to track, and you haven't told me to look for more."
            # no features, so better look for some more...
            self.need_to_init = True

        # Is it time to analyse the captured time series?
        if ((datetime.datetime.now() - last_analysis_time)
                .total_seconds() > self.Analysis_Period):
            if len(self.timeSeries) > 0:
                self.doAnalysis()
                self.doAlarmCheck()
                last_analysis_time = datetime.datetime.now()
            else:
                # print "Not doing analysis - no time series data..."
                a = True  # no-op placeholder so the else branch is not empty

        # Is it time to re-acquire the features to track?
        if ((datetime.datetime.now() - last_feature_search_time)
                .total_seconds() > self.Feature_Search_Period):
            print "resetting..."
            last_feature_search_time = datetime.datetime.now()
            self.need_to_init = True

        # save current data for use next time around.
        prev_grey, grey = grey, prev_grey
        prev_pyramid, pyramid = pyramid, prev_pyramid

        # we can now display the image
        if self.X11:
            cv.ShowImage('Seizure_Detector', self.image)
        cv.WriteFrame(vw, self.image)

        # handle events
        c = cv.WaitKey(10)
        if c == 27:
            # the user has pressed the ESC key, so exit
            break

        # Control the frame rate by pausing if we are going too fast.
        frameTime = (datetime.datetime.now() - last_frame_time)\
            .total_seconds()
        actFps = 1.0 / frameTime
        if frameTime < 1.0 / self.inputfps:
            cv.WaitKey(1 + int(1000. * (1. / self.inputfps - frameTime)))

        # Grab the next frame
        last_frame_time = datetime.datetime.now()
        frame = cv.QueryFrame(camera)
    # refine the corner locations
    points[1] = cv.FindCornerSubPix(
        grey, points[1], (win_size, win_size), (-1, -1),
        (cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 20, 0.03))

elif len(points[0]) > 0:
    # we have points, so display them

    # calculate the optical flow
    points[1], status, track_error = cv.CalcOpticalFlowPyrLK(
        prev_grey, grey, prev_pyramid, pyramid, points[0],
        (win_size, win_size), 3,
        (cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 20, 0.03), flags)

    # initializations
    point_counter = -1
    new_points = []

    for the_point in points[1]:
        # go through all the points

        # increment the counter
        point_counter += 1

        if add_remove_pt: