def getFeatures(self, grey): """ Returns a list of features generated by the OpenCV GoodFeaturesToTrack() function in the gray scale image 'gray'. """ #cv.ShowImage ('getFeatures() grey',grey) eig = cv.CreateImage(cv.GetSize(grey), 32, 1) temp = cv.CreateImage(cv.GetSize(grey), 32, 1) mask = cv.CreateImage(cv.GetSize(grey), 8, 1) # Create a mask image to hide the top 10% of the image (which contains text) (w, h) = cv.GetSize(grey) cv.Rectangle(mask, (0, 0), (w, h), cv.Scalar(255, 0, 0), -1) cv.Rectangle(mask, (0, 0), (w, int(0.1 * h)), cv.Scalar(0, 0, 0), -1) # cv.ShowImage ('mask',mask) # search for the good points feat = cv.GoodFeaturesToTrack(grey, eig, temp, self.MAX_COUNT, self.quality, self.min_distance, mask, 3, 0, 0.04) print "found %d features (MAX_COUNT=%d)" % (len(feat), self.MAX_COUNT) # refine the corner locations feat = cv.FindCornerSubPix( grey, feat, (self.win_size, self.win_size), (-1, -1), (cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 20, 0.03)) return (feat)
def GoodFeaturesToTrack(image, max_count=100, quality=0.1, min_distance=1):
    """Convenience wrapper around cv.GoodFeaturesToTrack.

    Converts *image* to greyscale (via the module's toGreyScale helper)
    and returns the detected corner list, with no mask and default
    block size / Harris settings.
    """
    grey = toGreyScale(image)
    size = cv.GetSize(grey)
    # Temporary 32-bit float buffers required by the detector.
    eig_buf = cv.CreateImage(size, 32, 1)
    tmp_buf = cv.CreateImage(size, 32, 1)
    return cv.GoodFeaturesToTrack(grey, eig_buf, tmp_buf, max_count,
                                  quality, min_distance, None, 3, 0, 0.04)
def test(self):
    """GoodFeaturesToTrack: leaves its input untouched, is deterministic,
    and raising the quality threshold only truncates the result list."""
    arr = cv.LoadImage("../samples/c/lena.jpg", 0)
    original = cv.CloneImage(arr)
    size = cv.GetSize(arr)
    eig_image = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
    temp_image = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
    threshes = [t / 100. for t in range(1, 10)]

    def run():
        # Map each quality threshold to the corner list it produces.
        return dict([(t, cv.GoodFeaturesToTrack(arr, eig_image, temp_image,
                                                20000, t, 2, useHarris=1))
                     for t in threshes])

    results = run()
    # Check that GoodFeaturesToTrack has not modified the input image.
    self.assert_(arr.tostring() == original.tostring())
    # Check for repeatability.
    for _ in range(10):
        self.assert_(run() == results)
    for t0, t1 in zip(threshes, threshes[1:]):
        r0, r1 = results[t0], results[t1]
        # Increasing the threshold must make the result list shorter...
        self.assert_(len(r0) > len(r1))
        # ...and must only truncate it, never reorder it.
        self.assert_(r0[:len(r1)] == r1)
def get_corners(self): eig_image = cv.CreateMat(self.grid.rows, self.grid.cols, cv.CV_32FC1) temp_image = cv.CreateMat(self.grid.rows, self.grid.cols, cv.CV_32FC1) features_x_y_vector = cv.GoodFeaturesToTrack(self.grid, eig_image, temp_image, 10, 0.025, 1.0, useHarris=True) print "get corners finished" return features_x_y_vector
def features(image):
    """Detect up to 10 Harris corners in *image* and mark each with a
    green rectangle drawn onto *image* in place.

    The image is converted to greyscale and histogram-equalized before
    detection.
    """
    image_size = cv.GetSize(image)
    # to grayscale
    grayscale = cv.CreateImage(image_size, 8, 1)
    cv.CvtColor(image, grayscale, cv.CV_RGB2GRAY)
    # equalize
    cv.EqualizeHist(grayscale, grayscale)
    # BUG FIX: temp_eigen / temp_image were never created, so the call
    # below raised NameError. GoodFeaturesToTrack needs two 32-bit float
    # scratch buffers of the same size as the input.
    temp_eigen = cv.CreateImage(image_size, 32, 1)
    temp_image = cv.CreateImage(image_size, 32, 1)
    # detections
    features = cv.GoodFeaturesToTrack(grayscale, temp_eigen, temp_image,
                                      10, 0.04, 1.0, useHarris=True)
    if features:
        for (x, y) in features:
            # Corner coordinates come back as floats; drawing wants ints.
            cv.Rectangle(image, (int(x), int(y)), (int(x) + 4, int(y) + 4),
                         cv.RGB(0, 255, 0), 3, 8, 0)
def getplayers(frame):
    """Detect up to numOfPlayers corner features in *frame* and return
    them (numOfPlayers is a module-level setting)."""
    corners = cv.GoodFeaturesToTrack(frame, None, None,
                                     numOfPlayers, 0.1, 1)
    return corners
# copy the frame, so we can draw on it
cv.Copy(frame, image)
# create a grey version of the image
cv.CvtColor(image, grey, cv.CV_BGR2GRAY)
# scratch buffers required by GoodFeaturesToTrack
eig = cv.CreateImage(cv.GetSize(grey), 32, 1)
temp = cv.CreateImage(cv.GetSize(grey), 32, 1)
# the default parameters
quality = 0.01
min_distance = 10
# search the good points
features = cv.GoodFeaturesToTrack(grey, eig, temp, MAX_COUNT,
                                  quality, min_distance, None, 3, 0, 0.04)
# refine the corner locations to sub-pixel accuracy
features = cv.FindCornerSubPix(
    grey, features, (win_size, win_size), (-1, -1),
    (cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 20, 0.03))
# calculate the optical flow from the previous frame
features, status, track_error = cv.CalcOpticalFlowPyrLK(
    prev_grey, grey, prev_pyramid, pyramid, features,
    (win_size, win_size), 3,
    (cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 20, 0.03), 0)
# BUG FIX: CalcOpticalFlowPyrLK sets status to non-zero for points whose
# flow WAS found; the original filter (`if not st`) kept only the LOST
# points and threw away every successfully tracked one.
features = [p for (st, p) in zip(status, features) if st]
# draw the points as green circles
# Per-frame optical-flow tracking loop.
# NOTE(review): this chunk is truncated mid-loop — the body of the final
# `for` statement continues beyond this view.
initial = []       # first-seen position of every tracked point
features = []      # points returned by the detector this frame
prev_points = []   # point positions at t-1
curr_points = []   # point positions at t
for f in xrange(nbFrames):
    frame = cv.QueryFrame(capture)
    cv.CvtColor(frame, gray, cv.CV_BGR2GRAY)  #Convert to gray
    cv.Copy(frame, output)
    if (len(prev_points) <= 10):  #Try to get more points
        #Detect points on the image
        features = cv.GoodFeaturesToTrack(gray, None, None, max_count, qLevel, minDist)
        prev_points.extend(features)  #Add the new points to list
        initial.extend(features)  #Idem
    if begin:
        cv.Copy(gray, prev_gray)  #Now we have two frames to compare
        begin = False
    #Compute movement between the previous and current frame
    curr_points, status, err = cv.CalcOpticalFlowPyrLK(
        prev_gray, gray, prevPyr, currPyr, prev_points, (10, 10), 3,
        (cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 20, 0.03), 0)
    #If points status are ok and distance not negligible keep the point
    k = 0
    for i in range(len(curr_points)):
import cv2.cv as cv
import math

im = cv.LoadImage("../img/build.png", cv.CV_LOAD_IMAGE_GRAYSCALE)
im2 = cv.CloneImage(im)

# --- GoodFeaturesToTrack corner detection ---
# BUG FIX: cv.CreateMat takes a matrix *type* (CV_32FC1), not an
# IplImage depth flag (IPL_DEPTH_32F); the detector's scratch buffers
# must be single-channel 32-bit float matrices.
eigImage = cv.CreateMat(im.height, im.width, cv.CV_32FC1)
tempImage = cv.CloneMat(eigImage)
cornerCount = 500
quality = 0.01
minDistance = 10
corners = cv.GoodFeaturesToTrack(im, eigImage, tempImage,
                                 cornerCount, quality, minDistance)
# Mark each detected corner with a small white circle.
radius = 3
thickness = 2
for (x, y) in corners:
    cv.Circle(im, (int(x), int(y)), radius, (255, 255, 255), thickness)
cv.ShowImage("GoodfeaturesToTrack", im)

# --- SURF keypoint extraction ---
hessthresh = 1500  # 400 500
dsize = 0  # 1
layers = 1  # 3 10
keypoints, descriptors = cv.ExtractSURF(im2, None, cv.CreateMemStorage(),
                                        (dsize, hessthresh, 3, layers))
# The first thing we need to do is get the features we want to track.
eig_image = cv.CreateImage(img_sz, cv.IPL_DEPTH_32F, 1)
tmp_image = cv.CreateImage(img_sz, cv.IPL_DEPTH_32F, 1)
corner_count = MAX_CORNERS
# In the Python API, GoodFeaturesToTrack *returns* the corner list
# (there is no output-array argument as in the original C version).
cornersA = cv.GoodFeaturesToTrack(
    imgA,          # image
    eig_image,     # temporary floating-point 32-bit image
    tmp_image,     # another temporary image
    corner_count,  # number of corners to detect
    0.01,          # quality level
    5.0,           # minDistance
    useHarris=0,
)
# BUG FIX: the refined corners were assigned to a misspelled name
# (`cornerA`), silently discarding the sub-pixel refinement. Store the
# refined positions back in cornersA; cornerA is kept as an alias in
# case later code still references it.
cornerA = cv.FindCornerSubPix(
    imgA, cornersA, (win_size, win_size), (-1, -1),
    (cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 20, 0.03))
cornersA = cornerA
# Call the Lucas Kanade algorithm
# features_found = [ MAX_CORNERS ];
cv.Merge(img_b, img_g, img_r, img_a, rgb_img) """Precorner detect""" corners = cv.CreateMat(float_img.rows, float_img.cols, float_img.type) cv.PreCornerDetect(float_img, corners, 3) """Canny""" edges = cv.CreateImage((img.cols, img.rows), 8, 1) print img.rows, img.cols, edges.height cv.Canny(img, edges, 20.0, 160.0) disp2 = edges """Good features to track""" eig_image = cv.CreateMat(img.rows, img.cols, cv.CV_32FC1) temp_image = cv.CreateMat(img.rows, img.cols, cv.CV_32FC1) features_x_y_vector = cv.GoodFeaturesToTrack(img, eig_image, temp_image, 10, 0.002, 1.0, useHarris=True) disp3 = cv.CreateMat(img.rows, img.cols, cv.CV_8UC1) cv.Set(disp3, 0) for (x, y) in features_x_y_vector: disp3[y, x] = 255 """Visualization""" tmp = 1.0 * np.asarray(corners) tmp = 255 * (tmp / np.max(tmp)) disp1 = cv.fromarray(tmp.astype(np.uint8)) cv.ShowImage("mi grid2", rgb_img) cv.WaitKey(1000) cv.ShowImage("mi grid", disp1)
# Lucas-Kanade tracking parameters.
max_count = 500  # maximum number of corners to detect per frame
qLevel = 0.01    # minimal accepted corner quality
minDist = 10     # minimum distance between detected corners
prev_points = []  #Points at t-1
curr_points = []  #Points at t
lines = []  #To keep all the lines overtime
# NOTE(review): this chunk is truncated — the outer loop body appears to
# continue beyond this view.
for f in xrange(nbFrames):
    frame = cv.QueryFrame(capture)  #Take a frame of the video
    cv.CvtColor(frame, gray, cv.CV_BGR2GRAY)  #Convert to gray
    output = cv.CloneImage(frame)
    prev_points = cv.GoodFeaturesToTrack(gray, None, None, max_count, qLevel, minDist)  #Find points on the image
    #Calculate the movement using the previous and the current frame using the previous points
    curr_points, status, err = cv.CalcOpticalFlowPyrLK(
        prev_gray, gray, prevPyr, currPyr, prev_points, (10, 10), 3,
        (cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 20, 0.03), 0)
    #If points status are ok and distance not negligible keep the point
    k = 0
    for i in range(len(curr_points)):
        # Manhattan distance between the point's previous and current position.
        nb = abs(int(prev_points[i][0]) - int(curr_points[i][0])) + abs(
            int(prev_points[i][1]) - int(curr_points[i][1]))
        if status[i] and nb > 2:
            # Compact the kept points toward the front of both lists.
            prev_points[k] = prev_points[i]
            curr_points[k] = curr_points[i]
            k += 1
def get_corners(self): eig_image = cv.CreateMat(self.map_img.height, self.map_img.width, cv.CV_32FC1) temp_image = cv.CreateMat(self.map_img.height, self.map_img.width, cv.CV_32FC1) features_x_y_vector = cv.GoodFeaturesToTrack(self.map_img, eig_image, temp_image, self.map_img.width, 0.15, 10, useHarris=True) print "get corners finished" return features_x_y_vector
#!/usr/bin/python import cv2 import cv2.cv as cv import numpy as np imcolor = cv.LoadImage('field3.jpg ') image = cv.LoadImage('field3.jpg ', cv.CV_LOAD_IMAGE_GRAYSCALE) img = cv2.imread('field3.jpg') img = cv2.GaussianBlur(img, (5, 5), 0) gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) eig_image = cv.CreateImage(cv.GetSize(image), 8, 1) temp_image = cv.CreateImage(cv.GetSize(image), 8, 1) cornerMap = cv.CreateMat(image.height, image.width, cv.CV_32FC1) cornerMap = cv.GoodFeaturesToTrack(image, eig_image, temp_image, 4, 0.04, 1, useHarris=True) src = np.array([[114, 56], [885, 292], [0, 292], [751, 74]], np.float32) print src dst = np.array( [[0, 0], [image.width, image.height], [0, image.height], [image.width, 0]], np.float32) print dst retval = cv2.getPerspectiveTransform(src, dst) warp = cv2.warpPerspective(gray, retval, (image.width, image.height)) cv2.imshow('a_window', warp) cv.ShowImage('image', image) cv.WaitKey()