def test(self):
    """GoodFeaturesToTrack: leaves input untouched, is repeatable, and
    results at increasing thresholds are prefixes of each other."""
    arr = cv.LoadImage("../samples/c/lena.jpg", 0)
    original = cv.CloneImage(arr)
    size = cv.GetSize(arr)
    # Two 32F scratch images required by GoodFeaturesToTrack.
    eig_image = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
    temp_image = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
    threshes = [ x / 100. for x in range(1,10) ]
    results = dict([(t, cv.GoodFeaturesToTrack(arr, eig_image, temp_image, 20000, t, 2, use_harris = 1)) for t in threshes])
    # Check that GoodFeaturesToTrack has not modified input image
    self.assert_(arr.tostring() == original.tostring())
    # Check for repeatability
    for i in range(10):
        results2 = dict([(t, cv.GoodFeaturesToTrack(arr, eig_image, temp_image, 20000, t, 2, use_harris = 1)) for t in threshes])
        self.assert_(results == results2)
    for t0,t1 in zip(threshes, threshes[1:]):
        r0 = results[t0]
        r1 = results[t1]
        # Increasing thresh should make result list shorter
        self.assert_(len(r0) > len(r1))
        # Increasing thresh should only truncate result list
        self.assert_(r0[:len(r1)] == r1)
def get_image():
    """Grab a camera frame, store its corner features (x mirrored) in the
    global ``corners`` list, and return the frame as a horizontally
    flipped pygame surface."""
    global corners
    corners = []
    im = cv.QueryFrame(camera)
    im_rgb = cv.GetMat(im)
    cv.CvtColor(im, im_rgb, cv.CV_BGR2RGB)  # getting the rgb image
    # Grayscale taken as the Y (luma) plane of a YCrCb conversion.
    yuv = cv.CreateImage(cv.GetSize(im), 8, 3)
    gray_image = cv.CreateImage(cv.GetSize(im), 8, 1)
    cv.CvtColor(im, yuv, cv.CV_BGR2YCrCb)
    cv.Split(yuv, gray_image, None, None, None)
    # Scratch buffers required by GoodFeaturesToTrack.
    eig_image = cv.CreateImage(cv.GetSize(gray_image), cv.IPL_DEPTH_32F, 1)
    temp_image = cv.CreateImage(cv.GetSize(gray_image), cv.IPL_DEPTH_32F, 1)
    for (x, y) in cv.GoodFeaturesToTrack(gray_image, eig_image, temp_image, 300, 0.01, 1.0, useHarris=True):
        # Mirror x so corner coordinates match the flipped surface below.
        corners.append([WIDTH - x, y])
    return pygame.transform.flip(
        pygame.image.frombuffer(im_rgb.tostring(), cv.GetSize(im_rgb), "RGB"),
        True, False)
def getFeatures(self, count=10, quality=None, dist=1.0, use_harris=True):
    """Get good features to track in the current frame.

    count      -- maximum number of corners to return
    quality    -- minimal accepted corner quality (defaults to 0.01)
    dist       -- minimum distance between returned corners
    use_harris -- use the Harris detector instead of the eigenvalue one

    Returns (tmp, results) where results is a list of (x, y) corner
    positions and tmp is the scratch image used by the detector.
    """
    if quality is None:
        quality = 0.01  # OpenCV's customary default quality level
    # BUG FIX: the original passed an undefined name `img` as the input
    # image, `self.frame` into the eigImage slot, `use_harris`
    # positionally into the `mask` slot, and created the scratch image
    # with the source depth/channels. GoodFeaturesToTrack requires two
    # single-channel 32F scratch images and the frame as input.
    eig = cv.CreateImage((self.width, self.height), cv.IPL_DEPTH_32F, 1)
    tmp = cv.CreateImage((self.width, self.height), cv.IPL_DEPTH_32F, 1)
    results = cv.GoodFeaturesToTrack(self.frame, eig, tmp, count, quality,
                                     dist, useHarris=1 if use_harris else 0)
    return (tmp, results)
def _selectTrackingPoints(self, frame):
    '''
    This uses the OpenCV get good features to track to initialize a set of
    tracking points_b. The frame is divided into a grid of cells and any
    cell holding fewer than ``self.min_points`` tracks is reseeded.
    '''
    quality = 0.01       # minimum accepted corner quality (fraction of best)
    min_distance = 15    # minimum pixel spacing between selected corners
    w, h = self.tile_size
    tw = w // self.grid  # width of one grid cell
    th = h // self.grid  # height of one grid cell
    for i in range(self.grid):
        for j in range(self.grid):
            ul = pv.Point(i * tw, j * th)  # upper-left corner of this cell
            rect = pv.Rect(i * tw, j * th, tw, th)
            # Count the tracks already falling inside this cell.
            count = 0
            for pt in self.tracks:
                if rect.containsPoint(pt):
                    count += 1
            # Only reseed cells that are under-populated.
            if count < self.min_points:
                gray = cv.CreateImage((tw, th), 8, 1)
                faceim = cv.GetSubRect(frame, rect.asOpenCV())
                cv.Resize(faceim, gray)
                # 32F scratch images required by GoodFeaturesToTrack.
                eig = cv.CreateImage((tw, th), 32, 1)
                temp = cv.CreateImage((tw, th), 32, 1)
                # search the good points_b
                points_b = cv.GoodFeaturesToTrack(gray, eig, temp, 2 * self.min_points, quality, min_distance, None, 3, 0, 0.04)
                for pt in points_b:
                    # Points are cell-relative; shift back to frame coords.
                    self.tracks.append(ul + pv.Point(pt))
def find_features(self, frame, debug_image): '''Find features in an image.''' # Get Channels hsv = cv.CreateImage(cv.GetSize(frame), 8, 3) cv.CvtColor(frame, hsv, cv.CV_BGR2HSV) grey = libvision.misc.get_channel(hsv, 2) # Feature Detection eigimage = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_32F, 1) tmpimage = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_32F, 1) cornercount = 4 qualitylevel = .2 mindistance = 40 blockSize = 7 corners = cv.GoodFeaturesToTrack(grey, eigimage, tmpimage, cornercount, qualitylevel, mindistance, None, blockSize, 0, 0.4) # determine if three corners are in a reasonable orientation if debug_image: for corner in corners: corner_color = (0, 0, 255) cv.Circle(debug_image, (int(corner[0]), int(corner[1])), 15, corner_color, 2, 8, 0) return corners
def detect(self, callback):
    """Detect good-feature focal points on the engine's image, append
    them to the request, and invoke *callback*; falls through to the
    next detector when no point is found."""
    engine = self.context.modules.engine
    sz = engine.size
    # Wrap the raw engine bytes in an 8-bit 3-channel header, then
    # convert to grayscale for the detector.
    image = cv.CreateImageHeader(sz, cv.IPL_DEPTH_8U, 3)
    cv.SetData(image, engine.get_image_data())
    gray_image = cv.CreateImage(engine.size, 8, 1)
    convert_mode = getattr(cv, 'CV_%s2GRAY' % engine.get_image_mode())
    cv.CvtColor(image, gray_image, convert_mode)
    image = gray_image
    # NOTE(review): engine.size is usually (width, height); using sz[0]
    # as rows and sz[1] as cols looks transposed — confirm against engine.
    rows = sz[0]
    cols = sz[1]
    eig_image = cv.CreateMat(rows, cols, cv.CV_32FC1)
    temp_image = cv.CreateMat(rows, cols, cv.CV_32FC1)
    points = cv.GoodFeaturesToTrack(image, eig_image, temp_image, 20, 0.04, 1.0, useHarris=False)
    if points:
        for x, y in points:
            self.context.request.focal_points.append(FocalPoint(x, y, 1))
        callback()
    else:
        self.next(callback)
def corners(im, max_corners=100, quality=0.1, min_dist=5, block_size=3, use_harris=False, mask=None, k=0.04):
    """Return good-feature corner positions detected in *im*.

    The image is grayscaled first; two single-channel 32F scratch
    buffers are allocated for the OpenCV call.
    """
    scratch_eig = new_from(im, depth=cv.IPL_DEPTH_32F, nChannels=1)
    scratch_tmp = new_from(im, depth=cv.IPL_DEPTH_32F, nChannels=1)
    return cv.GoodFeaturesToTrack(rgb2gray(im), scratch_eig, scratch_tmp,
                                  max_corners, quality, min_dist,
                                  mask, block_size, use_harris, k)
def harris(image_gray):
    """Return up to 300 Harris corners of *image_gray* as (x, y) pairs."""
    size = cv.GetSize(image_gray)
    # Scratch 32F images required by the detector.
    scratch_a = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
    scratch_b = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
    return cv.GoodFeaturesToTrack(image_gray, scratch_a, scratch_b,
                                  300, .1, 1.0, useHarris=True)
def add_features(self, cv_image, face):
    """Look for any new features around the current feature cloud."""
    # Create the ROI mask, beginning with all black pixels.
    roi = cv.CreateImage(cv.GetSize(cv_image), 8, 1)
    cv.Zero(roi)
    # Get the coordinates and dimensions of the current track box.
    # BUG FIX: narrowed the bare except — only unpacking failures
    # (track_box missing/None/mis-shaped) should take the early return.
    try:
        ((x, y), (w, h), a) = face.track_box
    except (TypeError, ValueError):
        logger.info("Track box has shrunk to zero...")
        return
    # Expand the track box to look for new features.
    w = int(face.expand_roi * w)
    h = int(face.expand_roi * h)
    roi_box = ((x, y), (w, h), a)
    # Create a filled white ellipse within the track_box to define the ROI.
    cv.EllipseBox(roi, roi_box, cv.CV_RGB(255, 255, 255), cv.CV_FILLED)
    # Create the temporary scratchpad images.
    eig = cv.CreateImage(cv.GetSize(self.grey), 32, 1)
    temp = cv.CreateImage(cv.GetSize(self.grey), 32, 1)
    if self.feature_type == 0:
        # Get the new features using Good Features to Track.
        features = cv.GoodFeaturesToTrack(
            self.grey, eig, temp, self.max_count,
            self.quality, self.good_feature_distance,
            mask=roi, blockSize=3, useHarris=0, k=0.04)
    elif self.feature_type == 1:
        # Get the new features using SURF.
        features = []
        (surf_features, descriptors) = cv.ExtractSURF(
            self.grey, roi, cv.CreateMemStorage(0),
            (0, self.surf_hessian_quality, 3, 1))
        for feature in surf_features:
            features.append(feature[0])
    # Append new features to the current list if they are not too far
    # from the current cluster.
    for new_feature in features:
        try:
            distance = self.distance_to_cluster(new_feature, face.features)
            if distance > self.add_feature_distance:
                face.features.append(new_feature)
        except Exception:
            # Best effort: skip features whose distance can't be computed.
            pass
    # Remove duplicate features.
    face.features = list(set(face.features))
def detect_gftt(image, equalize=False, cornerCount=500, qualityLevel=0.005, minDistance=30):
    """Detect good features in *image*, draw them on it, and return it.

    image        -- single-channel 8-bit input, modified in place
    equalize     -- histogram-equalize the image before detection
    cornerCount  -- maximum number of corners to find
    qualityLevel -- minimal accepted corner quality
    minDistance  -- minimum pixel spacing between corners
    """
    if equalize:
        cv.EqualizeHist(image, image)
    # Two 32F scratch buffers required by GoodFeaturesToTrack.
    eigImage = cv.CreateImage(cv.GetSize(image), cv.IPL_DEPTH_32F, 1)
    tempImage = cv.CreateImage(cv.GetSize(image), cv.IPL_DEPTH_32F, 1)
    # Positional tail: mask=None, blockSize=2, useHarris=False.
    cornerMem = cv.GoodFeaturesToTrack(image, eigImage, tempImage, cornerCount, qualityLevel, minDistance, None, 2, False)
    # Mark each detected corner with a small circle.
    for point in cornerMem:
        x, y = int(point[0]), int(point[1])
        cv.Circle(image, (x, y), 1, cv.RGB(0, 255, 0), 3, 8, 0)
    return image
def get_features(self, frame, target_points):
    """Return up to *target_points* corner positions (as int tuples)
    detected in a raw single-channel frame with the Harris detector."""
    # Wrap the raw frame bytes in an 8-bit single-channel image header;
    # frame.size[0] is used as the row stride.
    arr = cv.CreateImage(frame.size, cv.IPL_DEPTH_8U, 1)
    cv.SetData(arr, frame.rawdata, frame.size[0])
    # Scratch 32F buffers required by GoodFeaturesToTrack.
    eig_image = cv.CreateImage(frame.size, cv.IPL_DEPTH_32F, 1)
    temp_image = cv.CreateImage(frame.size, cv.IPL_DEPTH_32F, 1)
    pts = cv.GoodFeaturesToTrack(arr, eig_image, temp_image, target_points, self.thresh, 2, use_harris=1)
    # Corner positions come back as floats; truncate to pixel coords.
    return [(int(x), int(y)) for (x, y) in pts]
def detect_features(self):
    """Return up to 20 good-feature points from the grayscale image,
    or an empty list when OpenCV is unavailable or nothing is found."""
    if opencv_available:
        image = self.opencv_grey_image()
        # NOTE(review): image_size is typically (width, height); using
        # [0] as rows and [1] as cols may be transposed — confirm.
        rows = self.image_size[0]
        cols = self.image_size[1]
        eig_image = cv.CreateMat(rows, cols, cv.CV_32FC1)
        temp_image = cv.CreateMat(rows, cols, cv.CV_32FC1)
        points = cv.GoodFeaturesToTrack(image, eig_image, temp_image, 20, 0.04, 1.0, useHarris=False)
        if points:
            return points
    return []
def detect_features(self):
    """Return up to 20 good-feature points detected on ``self.image``."""
    cv = _cv()
    n_rows, n_cols = self.size
    # Pair of 32F scratch matrices required by the corner detector.
    scratch = [cv.CreateMat(n_rows, n_cols, cv.CV_32FC1) for _ in range(2)]
    return cv.GoodFeaturesToTrack(self.image, scratch[0], scratch[1],
                                  20, 0.04, 1.0, useHarris=False)
def puntosParaTemplate (imagen, template):
    """Match *template* against *imagen* (SQDIFF) and return the sorted
    (x, y) corner points detected on the match-score map."""
    # Score map dimensions for template matching.
    res_width = imagen.width - template.width + 1;
    res_height = imagen.height - template.height + 1;
    resultado = cv.CreateImage( ( res_width, res_height ), cv.IPL_DEPTH_32F, 1 )
    cv.MatchTemplate(imagen,template,resultado, cv.CV_TM_SQDIFF)
    pos = []
    # Scratch matrices for GoodFeaturesToTrack.
    eig_image = cv.CreateMat(imagen.rows, imagen.cols, cv.CV_32FC1)
    temp_image = cv.CreateMat(imagen.rows, imagen.cols, cv.CV_32FC1)
    # cornerCount=0, quality=0.2, min distance = template width.
    for (x,y) in cv.GoodFeaturesToTrack(resultado, eig_image, temp_image, 0, 0.2, template.width, useHarris = True):
        pos.append((x,y))
    pos = sorted(pos)
    # NOTE(review): mixes the new-style `cv` API with legacy `opencv`
    # release calls; cvReleaseImage on CreateMat results looks wrong, and
    # the cv module manages memory itself — confirm these are needed.
    opencv.cvReleaseImage(resultado)
    opencv.cvReleaseImage(eig_image)
    opencv.cvReleaseImage(temp_image)
    return pos
def find_features(img, num_corners=MAX_CORNERS, quality=QUALITY, min_dist=MIN_DISTANCE, mask=None, block_size=BLOCK_SIZE, use_harris=False, harris_param=HARRIS_PARAM):
    """Detect good features to track in *img* and return their positions."""
    sz = cv.GetSize(img)
    # The detector requires a pair of 32F scratch images of the same size.
    eig_buf = cv.CreateImage(sz, cv.IPL_DEPTH_32F, 1)
    tmp_buf = cv.CreateImage(sz, cv.IPL_DEPTH_32F, 1)
    harris_flag = 1 if use_harris else 0
    return cv.GoodFeaturesToTrack(img, eig_buf, tmp_buf, num_corners,
                                  quality, min_dist, mask, block_size,
                                  harris_flag, harris_param)
def detect(self, image):
    """Return up to 20 good-feature points as [x, y, 1, 1] entries,
    or None when no feature is found."""
    # NOTE(review): image.size is commonly (width, height); using it as
    # (rows, cols) may be transposed — confirm against the image type.
    rows, cols = image.size
    eig_image = cv.CreateMat(rows, cols, cv.CV_32FC1)
    temp_image = cv.CreateMat(rows, cols, cv.CV_32FC1)
    points = cv.GoodFeaturesToTrack(image.image, eig_image, temp_image, 20, 0.04, 1.0, useHarris=False)
    if points:
        return [[x, y, 1, 1] for x, y in points]
    return None
def find_corners(frame, pf):
    """Detect wall lines and strong corners in *frame*, feed up to four
    line observations into the particle filter *pf*, and show debug
    windows with the annotated frame and its edge map."""
    # Resize to 640x480
    frame_small = cv.CreateMat(480, 640, cv.CV_8UC3)
    cv.Resize(frame, frame_small)
    frame_size = cv.GetSize(frame_small)
    frame_gray = cv.CreateImage(frame_size, cv.IPL_DEPTH_8U, 1)
    edges = cv.CreateImage(frame_size, cv.IPL_DEPTH_8U, 1)
    cv.CvtColor(frame_small, frame_gray, cv.CV_BGR2GRAY)
    cv.Canny(frame_gray, edges, 400, 400)
    cv.Dilate(edges, edges)
    # Probabilistic Hough transform: each entry is a (pt1, pt2) segment.
    line_storage = cv.CreateMemStorage()
    lines = cv.HoughLines2(edges, line_storage, cv.CV_HOUGH_PROBABILISTIC, 1,
                           cv.CV_PI / 180.0, 300, 100, 40)
    print len(lines), 'lines found'
    for i in range(len(lines)):
        line = lines[i]
        # Color each segment by its index (hue sweep) for the debug view.
        cv.Line(frame_small, line[0], line[1],
                hv2rgb(360.0 * i / len(lines), 1.0), 3, 8)
        print line
        # Generate an observation: (dist, heading) to line
        if i < 4:
            p1 = util.pixelToDistance(line[0])
            p2 = util.pixelToDistance(line[1])
            dist = util.pointLineDistance((0, 0), (p1, p2))
            pf.observeLine((dist, 0))
    # Find corners
    eig_image = cv.CreateImage(frame_size, cv.IPL_DEPTH_32F, 1)
    temp_image = cv.CreateImage(frame_size, cv.IPL_DEPTH_32F, 1)
    corners = cv.GoodFeaturesToTrack(frame_gray, eig_image, temp_image, 10, 0.04, 1.0, useHarris=True)
    # Take 2 strongest corners
    for pt in corners[:2]:
        print "good feature at", pt[0], pt[1]
        cv.Circle(frame_small, pt, 5, cv.CV_RGB(255, 0, 0), 2, 5, 0)
    cv.ShowImage('frame', frame_small)
    cv.ShowImage('edges', edges)
def detect_features(backend):
    """Return up to 20 good-feature points from the backend's grayscale
    image."""
    cv = backend.get_opencv()
    image = backend.opencv_grey_image()
    # NOTE(review): image_size[0] as rows / [1] as cols may be transposed
    # if image_size is (width, height) — confirm against the backend.
    rows = backend.image_size[0]
    cols = backend.image_size[1]
    eig_image = cv.CreateMat(rows, cols, cv.CV_32FC1)
    temp_image = cv.CreateMat(rows, cols, cv.CV_32FC1)
    points = cv.GoodFeaturesToTrack(image, eig_image, temp_image, 20, 0.04, 1.0, useHarris=False)
    return points
def add_good_features(source_image, destination_image, MAX_COUNT=50):
    """Find good features in *source_image* and draw them as small green
    filled circles on *destination_image* (modified in place).

    MAX_COUNT -- maximum number of corners to detect.
    """
    # GoodFeatures..() can only track 1 channel, use grayscale
    image_gray = cv.CreateImage(cv.GetSize(source_image), 8, 1)
    cv.CvtColor(source_image, image_gray, cv.CV_RGB2GRAY)
    # create temps used by algorithm images
    eig = cv.CreateImage(cv.GetSize(source_image), cv.IPL_DEPTH_32F, 1)
    temp = cv.CreateImage(cv.GetSize(source_image), cv.IPL_DEPTH_32F, 1)
    # the default parameters
    quality = 0.01
    min_distance = 10
    # search the good points
    features = cv.GoodFeaturesToTrack(image_gray, eig, temp, MAX_COUNT, quality, min_distance)
    for (x, y) in features:
        # BUG FIX: corner positions are floats; Circle expects integer
        # pixel coordinates, so truncate them.
        cv.Circle(destination_image, (int(x), int(y)), 3, (0, 255, 0), -1, 8, 0)
def GoodFeaturesToTrack(image, mask):
    """Detect up to 25 good features on *image* (restricted to *mask*),
    refine them to sub-pixel accuracy, and return a tuple
    (points, weights, existence) — or (None, None, None) when no corner
    is found. weights and existence are parallel lists of 1s, one per
    returned point.
    """
    # 32F scratch matrices required by the detector.
    eig_image = cv.CreateMat(image.height, image.width, cv.CV_32FC1)
    temp_image = cv.CreateMat(image.height, image.width, cv.CV_32FC1)
    gfttar = cv.GoodFeaturesToTrack(image, eig_image, temp_image, 25, 0.01,
                                    5.0, mask, 3, 0, 0.04)
    # Refine the detected corners to sub-pixel accuracy.
    gfttar = cv.FindCornerSubPix(
        image, gfttar, (10, 10), (-1, -1),
        (cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 20, 0.03))
    # Bail out early when nothing survived (original built the parallel
    # lists first and also carried two unused locals).
    if len(gfttar) == 0:
        return None, None, None
    weights = [1] * len(gfttar)
    existence = [1] * len(gfttar)
    return gfttar, weights, existence
def process_frame(self, frame):
    """Detect rectangular 'bin' targets in *frame*: threshold the
    saturation channel, find corners, refresh previously confirmed bins,
    generate candidate bins from corner 4-tuples, then publish debug
    images and per-bin angles through self.output."""
    self.debug_frame = cv.CreateImage(cv.GetSize(frame), 8, 3)
    self.test_frame = cv.CreateImage(cv.GetSize(frame), 8, 3)
    cv.Copy(frame, self.debug_frame)
    cv.Copy(frame, self.test_frame)
    cv.Smooth(frame, frame, cv.CV_MEDIAN, 7, 7)
    # Set binary image to have saturation channel
    hsv = cv.CreateImage(cv.GetSize(frame), 8, 3)
    binary = cv.CreateImage(cv.GetSize(frame), 8, 1)
    cv.CvtColor(frame, hsv, cv.CV_BGR2HSV)
    cv.SetImageCOI(hsv, 1)
    cv.Copy(hsv, binary)
    cv.SetImageCOI(hsv, 0)
    # Adaptive Threshold
    cv.AdaptiveThreshold(binary, binary,
                         255,
                         cv.CV_ADAPTIVE_THRESH_MEAN_C,
                         cv.CV_THRESH_BINARY_INV,
                         self.adaptive_thresh_blocksize,
                         self.adaptive_thresh,
                         )
    # Morphology
    kernel = cv.CreateStructuringElementEx(5, 5, 3, 3, cv.CV_SHAPE_ELLIPSE)
    cv.Erode(binary, binary, kernel, 1)
    cv.Dilate(binary, binary, kernel, 1)
    cv.CvtColor(binary, self.debug_frame, cv.CV_GRAY2RGB)
    # Find Corners
    temp1 = cv.CreateImage(cv.GetSize(frame), 8, 1)
    temp2 = cv.CreateImage(cv.GetSize(frame), 8, 1)
    self.corners = cv.GoodFeaturesToTrack(binary, temp1, temp2, self.max_corners, self.quality_level, self.min_distance, None, self.good_features_blocksize, 0, 0.4)
    # Display Corners
    for corner in self.corners:
        corner_color = (0, 0, 255)
        text_color = (0, 255, 0)
        font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, .6, .6, 0, 1, 1)
        cv.Circle(self.debug_frame, (int(corner[0]), int(corner[1])), 15, corner_color, 2, 8, 0)
    # Find Candidates: refresh each confirmed bin's four corners from
    # nearby newly-detected corners (within MaxCornerTrans pixels).
    for confirmed in self.confirmed:
        confirmed.corner1_repl_check = 0
        confirmed.corner2_repl_check = 0
        confirmed.corner3_repl_check = 0
        confirmed.corner4_repl_check = 0
        for corner in self.corners:
            if math.fabs(confirmed.corner1[0] - corner[0]) < self.MaxCornerTrans and \
               math.fabs(confirmed.corner1[1] - corner[1]) < self.MaxCornerTrans:
                confirmed.corner1_repl_check = 1
                confirmed.corner1_repl = corner
            elif math.fabs(confirmed.corner2[0] - corner[0]) < self.MaxCornerTrans and \
                 math.fabs(confirmed.corner2[1] - corner[1]) < self.MaxCornerTrans:
                confirmed.corner2_repl_check = 1
                confirmed.corner2_repl = corner
            elif math.fabs(confirmed.corner3[0] - corner[0]) < self.MaxCornerTrans and \
                 math.fabs(confirmed.corner3[1] - corner[1]) < self.MaxCornerTrans:
                confirmed.corner3_repl_check = 1
                confirmed.corner3_repl = corner
            elif math.fabs(confirmed.corner4[0] - corner[0]) < self.MaxCornerTrans and \
                 math.fabs(confirmed.corner4[1] - corner[1]) < self.MaxCornerTrans:
                confirmed.corner4_repl_check = 1
                confirmed.corner4_repl = corner
        # Only update the bin when all four corners found a replacement.
        if confirmed.corner4_repl_check == 1 and confirmed.corner3_repl_check == 1 and confirmed.corner2_repl_check == 1 and confirmed.corner1_repl_check == 1:
            confirmed.corner1 = confirmed.corner1_repl
            confirmed.corner2 = confirmed.corner2_repl
            confirmed.corner3 = confirmed.corner3_repl
            confirmed.corner4 = confirmed.corner4_repl
            confirmed.midx = rect_midpointx(confirmed.corner1, confirmed.corner2, confirmed.corner3, confirmed.corner4)
            confirmed.midy = rect_midpointy(confirmed.corner1, confirmed.corner2, confirmed.corner3, confirmed.corner4)
            if confirmed.last_seen < self.last_seen_max:
                confirmed.last_seen += 5
    # Candidate generation: brute-force over all corner 4-tuples.
    for corner1 in self.corners:
        for corner2 in self.corners:
            for corner3 in self.corners:
                for corner4 in self.corners:
                    # Checks that corners are not the same and are in the proper orientation
                    if corner4[0] != corner3[0] and corner4[0] != corner2[0] and corner4[0] != corner1[0] and \
                       corner3[0] != corner2[0] and corner3[0] != corner1[0] and corner2[0] != corner1[0] and \
                       corner4[1] != corner3[1] and corner4[1] != corner2[1] and corner4[1] != corner1[1] and \
                       corner3[1] != corner2[1] and corner3[1] != corner1[1] and corner2[1] != corner1[1] and \
                       corner2[0] >= corner3[0] and corner1[1] >= corner4[1] and corner2[0] >= corner1[0]:
                        # Checks that the side ratios are correct
                        # NOTE(review): `and ... or ...` here mixes
                        # precedence — the `or` binds last; confirm the
                        # grouping is intended.
                        if math.fabs(line_distance(corner1, corner3) - line_distance(corner2, corner4)) < self.size_threshold and \
                           math.fabs(line_distance(corner1, corner2) - line_distance(corner3, corner4)) < self.size_threshold and \
                           math.fabs(line_distance(corner1, corner3) / line_distance(corner1, corner2)) < self.ratio_threshold or \
                           math.fabs(line_distance(corner1, corner2) / line_distance(corner1, corner3)) < self.ratio_threshold:
                            # Checks that angles are roughly 90 degrees
                            angle_cnr_2 = math.fabs(angle_between_lines(line_slope(corner1, corner2), line_slope(corner2, corner4)))
                            if self.angle_min < angle_cnr_2 < self.angle_max:
                                angle_cnr_3 = math.fabs(angle_between_lines(line_slope(corner1, corner3), line_slope(corner3, corner4)))
                                if self.angle_min2 < angle_cnr_3 < self.angle_max2:
                                    new_bin = Bin(corner1, corner2, corner3, corner4)
                                    self.match_bins(new_bin)
    self.sort_bins()

    '''
    #START SHAPE PROCESSING

    #TODO load these ONCE somewhere
    samples = np.loadtxt('generalsamples.data',np.float32)
    responses = np.loadtxt('generalresponses.data',np.float32)
    responses = responses.reshape((responses.size,1))
    model = cv2.KNearest()
    model.train(samples,responses)

    for bin in self.confirmed:
        try:
            bin.speedlimit
        except:
            continue
        transf = cv.CreateMat(3, 3, cv.CV_32FC1)
        corner_orders = [
            [bin.corner1, bin.corner2, bin.corner3, bin.corner4], #0 degrees
            [bin.corner4, bin.corner3, bin.corner2, bin.corner1], #180 degrees
            [bin.corner2, bin.corner4, bin.corner1, bin.corner3], #90 degrees
            [bin.corner3, bin.corner1, bin.corner4, bin.corner2], #270 degrees
            [bin.corner3, bin.corner4, bin.corner1, bin.corner2], #0 degrees and flipped X
            [bin.corner2, bin.corner1, bin.corner4, bin.corner3], #180 degrees and flipped X
            [bin.corner1, bin.corner3, bin.corner2, bin.corner4], #90 degrees and flipped X
            [bin.corner4, bin.corner2, bin.corner3, bin.corner1]] #270 degrees andf flipped X
        for i in range(0, 8):
            cv.GetPerspectiveTransform(
                corner_orders[i],
                [(0, 0), (0, 256), (128, 0), (128, 256)],
                transf
            )
            shape = cv.CreateImage([128, 256], 8, 3)
            cv.WarpPerspective(frame, shape, transf)
            shape_thresh = np.zeros((256-104,128,1), np.uint8)
            j = 104
            while j<256:
                i = 0
                while i<128:
                    pixel = cv.Get2D(shape, j, i)
                    if int(pixel[2]) > (int(pixel[1]) + int(pixel[0])) * 0.7:
                        shape_thresh[j-104,i] = 255
                    else:
                        shape_thresh[j-104,i] = 0
                    i = i+1
                j = j+1
            cv2.imshow("Bin " + str(i), shape_thresh)

            contours,hierarchy = cv2.findContours(thresh,cv2.RETR_LIST,cv2.CHAIN_APPROX_NONE)
            for cnt in contours:
                if cv2.contourArea(cnt)>50:
                    [x,y,w,h] = cv2.boundingRect(cnt)
                    if h>54 and w>36:
                        roi = thresh[y:y+h,x:x+w]
                        roismall = cv2.resize(roi,(10,10))
                        roismall = roismall.reshape((1,100))
                        roismall = np.float32(roismall)
                        retval, results, neigh_resp, dists = model.find_nearest(roismall, k = 1)
                        digit_tuples.append( (x, int((results[0][0]))) )

            if len(digit_tuples) == 2:
                digit_tuples_sorted = sorted(digit_tuples, key=lambda digit_tuple: digit_tuple[0])
                speedlimit = 0
                for i in range(0, len(digit_tuples_sorted)):
                    speedlimit = speedlimit * 10 + digit_tuples_sorted[i][1]
                bin.speedlimit = speedlimit
                print "Found speed limit: " + str(speedlimit)
                break
            else:
                print "Unable to determine speed limit"

    #... TODO more
    #END SHAPE PROCESSING
    '''

    svr.debug("Bins", self.debug_frame)
    svr.debug("Bins2", self.test_frame)

    # Output bins
    self.output.bins = self.confirmed
    anglesum = 0
    for bins in self.output.bins:
        bins.theta = (bins.midx - frame.width / 2) * 37 / (frame.width / 2)
        bins.phi = -1 * (bins.midy - frame.height / 2) * 36 / (frame.height / 2)
        bins.shape = bins.object
        anglesum += bins.angle
        # bins.orientation = bins.angle
    if len(self.output.bins) > 0:
        self.output.orientation = anglesum / len(self.output.bins)
    else:
        self.output.orientation = None
    self.return_output()
# (Script fragment: `i`, `rows`, `velx`, `vely`, `dst_im1`, `src_im1`,
# `args` and `capture` context are defined earlier, outside this chunk.)
# Draw the dense flow vector at every 5th row of the current column.
for j in range(0, (rows - 1), 5):
    dx = cv.GetReal2D(velx, j, i)
    dy = cv.GetReal2D(vely, j, i)
    cv.Line(dst_im1, (i, j), (int(i + dx), int(j + dy)), cv.CV_RGB(255, 0, 0), 1, cv.CV_AA, 0)
cv.NamedWindow("w", cv.CV_WINDOW_AUTOSIZE)
cv.ShowImage("w", dst_im1)
cv.WaitKey()
## using Lucas Kanade
if args.algorithm == 'LK':
    dst_img = cv.LoadImage(args.im2, cv.CV_LOAD_IMAGE_COLOR)
    eign_img = cv.CreateImage(cv.GetSize(src_im1), cv.IPL_DEPTH_32F, 1)
    temp_img = cv.CreateImage(cv.GetSize(src_im1), cv.IPL_DEPTH_32F, 1)
    # NOTE(review): `None, True` fills the mask and blockSize slots, so
    # blockSize=True(=1) here — confirm useHarris was not intended.
    features = cv.GoodFeaturesToTrack(src_im1, eign_img, temp_img, 5000, 0.1, 10, None, True)
    #features = []
    #for i in range(1, dst_img.width, 1):
    #    for j in range(1, dst_img.height, 1):
    #        features.append((i,j))
    #cornerMap = cv.CreateMat(src_im1.height, src_im1.width, cv.CV_32FC1)
    #cv.CornerHarris(src_im1,cornerMap,3)
    #features = []
    #for y in range(0, src_im1.height):
    #    for x in range(0, src_im1.width):
    #        harris = cv.Get2D(cornerMap, y, x)
    #        if harris[0] > 10e-6:
    #            features.append((x, y))
import cv
import numpy

IMAGE = "data/fish_ss.png"

# Load the image as grayscale and allocate the two 32F scratch matrices
# required by GoodFeaturesToTrack.
img = cv.LoadImageM(IMAGE, cv.CV_LOAD_IMAGE_GRAYSCALE)
eig_image = cv.CreateMat(img.rows, img.cols, cv.CV_32FC1)
temp_image = cv.CreateMat(img.rows, img.cols, cv.CV_32FC1)
# Detect up to 15 corners (quality level 0.54) and circle each one.
for (x, y) in cv.GoodFeaturesToTrack(img, eig_image, temp_image, 15, 0.54, 1.0, useHarris=False):
    print "good feature at", x, y
    cv.Circle(img, (int(x), int(y)), 7, cv.RGB(250, 7, 10), 2)
cv.ShowImage("foo", img)
cv.WaitKey()
def run(self):
    """Main capture loop: seed points with Good Features to Track when
    none are held, then track them frame-to-frame with pyramidal
    Lucas-Kanade, optionally overlaying Canny edges."""
    image = None
    MAX_COUNT = 500
    win_size = (32, 32)
    line_draw = 2  # 0: off, 1: edges replace image, >1: edge/history merge
    frame = cv.QueryFrame(self.capture)
    image = cv.CreateImage(cv.GetSize(frame), 8, 3)
    image.origin = frame.origin
    grey = cv.CreateImage(cv.GetSize(frame), 8, 1)
    edges = cv.CreateImage(cv.GetSize(frame), 8, 1)
    prev_grey = cv.CreateImage(cv.GetSize(frame), 8, 1)
    prev_grey2 = cv.CreateImage(cv.GetSize(frame), 8, 1)
    prev_grey3 = cv.CreateImage(cv.GetSize(frame), 8, 1)
    pyramid = cv.CreateImage(cv.GetSize(frame), 8, 1)
    prev_pyramid = cv.CreateImage(cv.GetSize(frame), 8, 1)
    points = []
    prev_points = []
    count = 0
    criteria = (cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 20, 0.03)
    while True:
        frame = cv.QueryFrame(self.capture)
        # cv.Rectangle( frame, self.last_rect[0], self.last_rect[1], cv.CV_RGB(255,0,0), 3, cv.CV_AA, 0 )
        # cv.Smooth(frame, frame, cv.CV_GAUSSIAN, 15, 0)
        cv.Copy(frame, image)
        cv.CvtColor(image, grey, cv.CV_BGR2GRAY)
        if count == 0:
            # (Re)seed tracking points and refine to sub-pixel accuracy.
            eig = cv.CreateImage(cv.GetSize(grey), 32, 1)
            temp = cv.CreateImage(cv.GetSize(grey), 32, 1)
            quality = 0.01
            min_distance = 10
            points = cv.GoodFeaturesToTrack(grey, eig, temp, MAX_COUNT, quality, min_distance, None, 3, 0, 0.04)
            points = cv.FindCornerSubPix(grey, points, win_size, (-1, -1), criteria)
        else:
            flags = 0
            points, status, track_error = cv.CalcOpticalFlowPyrLK(
                prev_grey, grey, prev_pyramid, pyramid, prev_points,
                win_size, 2, criteria, flags)
            # Collect the points that actually moved since last frame.
            diff_points = []
            for i, j in enumerate(points):
                print j
                if not j == prev_points[i]:
                    diff_points.append(j)
            print 'len %d' % len(diff_points)
        # NOTE(review): `==` below is a comparison, not an assignment —
        # this statement has no effect (prev_points is assigned further
        # down anyway); looks like a leftover typo.
        prev_points == points
        count = len(points)
        print count
        # NOTE(review): these rebind names instead of copying pixels, so
        # prev_grey aliases grey from here on — confirm this is intended.
        prev_grey = grey
        prev_pyramid = pyramid
        prev_points = points
        if line_draw:
            cv.Canny(grey, edges, 30, 150, 3)
            if line_draw == 1:
                cv.CvtColor(edges, image, cv.CV_GRAY2BGR)
            elif line_draw > 1:
                # Merge current edges with the two previous edge frames.
                cv.Merge(edges, prev_grey2, prev_grey3, None, image)
                cv.Copy(prev_grey2, prev_grey3, None)
                cv.Copy(edges, prev_grey2, None)
        cv.ShowImage("Target", image)
        # Listen for ESC key
        c = cv.WaitKey(7) % 0x100
        if c == 27:
            break
def track_lk(self, cv_image, face):
    """Track the feature cloud of *face* with pyramidal Lucas-Kanade.

    Seeds features (Good Features to Track or SURF, per feature_type)
    inside the track box when none exist, otherwise propagates them via
    optical flow, then returns the best-fit ellipse (CvBox2D) around the
    surviving features, or None.
    """
    feature_box = None
    """ Initialize intermediate images if necessary """
    if not face.pyramid:
        face.grey = cv.CreateImage(cv.GetSize(cv_image), 8, 1)
        face.prev_grey = cv.CreateImage(cv.GetSize(cv_image), 8, 1)
        face.pyramid = cv.CreateImage(cv.GetSize(cv_image), 8, 1)
        face.prev_pyramid = cv.CreateImage(cv.GetSize(cv_image), 8, 1)
        face.features = []
    """ Create a grey version of the image """
    cv.CvtColor(cv_image, face.grey, cv.CV_BGR2GRAY)
    """ Equalize the histogram to reduce lighting effects """
    cv.EqualizeHist(face.grey, face.grey)
    if face.track_box and face.features != []:
        """ We have feature points, so track and display them """
        """ Calculate the optical flow """
        face.features, status, track_error = cv.CalcOpticalFlowPyrLK(
            face.prev_grey, face.grey, face.prev_pyramid, face.pyramid,
            face.features,
            (self.win_size, self.win_size), 3,
            (cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 20, 0.01),
            self.flags)
        """ Keep only high status points """
        face.features = [p for (st, p) in zip(status, face.features) if st]
    elif face.track_box and self.is_rect_nonzero(face.track_box):
        """ Get the initial features to track """
        """ Create a mask image to be used to select the tracked points """
        mask = cv.CreateImage(cv.GetSize(cv_image), 8, 1)
        """ Begin with all black pixels """
        cv.Zero(mask)
        """ Get the coordinates and dimensions of the track box """
        try:
            x, y, w, h = face.track_box
        except:
            return None
        if self.auto_face_tracking:
            # """ For faces, the detect box tends to extend beyond the actual object so shrink it slightly """
            # x = int(0.97 * x)
            # y = int(0.97 * y)
            # w = int(1 * w)
            # h = int(1 * h)
            """ Get the center of the track box (type CvRect) so we can create
            the equivalent CvBox2D (rotated rectangle) required by EllipseBox
            below. """
            center_x = int(x + w / 2)
            center_y = int(y + h / 2)
            roi_box = ((center_x, center_y), (w, h), 0)
            """ Create a filled white ellipse within the track_box to define
            the ROI. """
            cv.EllipseBox(mask, roi_box, cv.CV_RGB(255, 255, 255), cv.CV_FILLED)
        else:
            """ For manually selected regions, just use a rectangle """
            pt1 = (x, y)
            pt2 = (x + w, y + h)
            cv.Rectangle(mask, pt1, pt2, cv.CV_RGB(255, 255, 255), cv.CV_FILLED)
        """ Create the temporary scratchpad images """
        eig = cv.CreateImage(cv.GetSize(self.grey), 32, 1)
        temp = cv.CreateImage(cv.GetSize(self.grey), 32, 1)
        if self.feature_type == 0:
            """ Find keypoints to track using Good Features to Track """
            face.features = cv.GoodFeaturesToTrack(
                face.grey, eig, temp, self.max_count,
                self.quality, self.good_feature_distance,
                mask=mask, blockSize=self.block_size,
                useHarris=self.use_harris, k=0.04)
        elif self.feature_type == 1:
            """ Get the new features using SURF """
            (surf_features, descriptors) = cv.ExtractSURF(
                face.grey, mask, cv.CreateMemStorage(0),
                (0, self.surf_hessian_quality, 3, 1))
            for feature in surf_features:
                face.features.append(feature[0])
        # if self.auto_min_features:
        """ Since the detect box is larger than the actual face or desired
        patch, shrink the number of features by 10% """
        face.min_features = int(len(face.features) * 0.9)
        face.abs_min_features = int(0.5 * face.min_features)
    """ Swapping the images """
    face.prev_grey, face.grey = face.grey, face.prev_grey
    face.prev_pyramid, face.pyramid = face.pyramid, face.prev_pyramid
    """ If we have some features... """
    if len(face.features) > 0:
        """ The FitEllipse2 function below requires us to convert the feature
        array into a CvMat matrix """
        try:
            self.feature_matrix = cv.CreateMat(1, len(face.features), cv.CV_32SC2)
        except:
            pass
        """ Draw the points as green circles and add them to the features
        matrix """
        i = 0
        for the_point in face.features:
            if self.show_features:
                cv.Circle(self.marker_image, (int(the_point[0]), int(the_point[1])), 2, (0, 255, 0, 0), cv.CV_FILLED, 8, 0)
            try:
                cv.Set2D(self.feature_matrix, 0, i, (int(the_point[0]), int(the_point[1])))
            except:
                pass
            i = i + 1
        """ Draw the best fit ellipse around the feature points """
        if len(face.features) > 6:
            feature_box = cv.FitEllipse2(self.feature_matrix)
        else:
            feature_box = None
        """ Publish the ROI for the tracked object """
        # try:
        #     (roi_center, roi_size, roi_angle) = feature_box
        # except:
        #     logger.info("Patch box has shrunk to zeros...")
        #     feature_box = None
        # if feature_box and not self.drag_start and self.is_rect_nonzero(face.track_box):
        #     self.ROI = RegionOfInterest()
        #     self.ROI.x_offset = min(self.image_size[0], max(0, int(roi_center[0] - roi_size[0] / 2)))
        #     self.ROI.y_offset = min(self.image_size[1], max(0, int(roi_center[1] - roi_size[1] / 2)))
        #     self.ROI.width = min(self.image_size[0], int(roi_size[0]))
        #     self.ROI.height = min(self.image_size[1], int(roi_size[1]))
        #     self.pubROI.publish(self.ROI)
    if feature_box is not None and len(face.features) > 0:
        return feature_box
    else:
        return None
# create the wanted images import cv image=cv.LoadImage('picture.png', cv.CV_LOAD_IMAGE_COLOR) grey=cv.CreateImage((100,100),8,1) eig = cv.CreateImage (cv.GetSize (grey), 32, 1) temp = cv.CreateImage (cv.GetSize (grey), 32, 1) # the default parameters quality = 0.01 min_distance = 10 # search the good points features = cv.GoodFeaturesToTrack ( grey, eig, temp, 1000, quality, min_distance, None, 3, 0, 0.04) for (x,y) in features: x) + ',' + str(y) cv.Circle (image, (int(x), int(y)), 3, (0, 255, 0), -1, 8, 0) cv.ResetImageROI(image) W,H=cv.GetSize(image) w,h=cv.GetSize(template) width=W-w+1 height=H-h+1 result=cv.CreateImage((width,height),32,1) cv.MatchTemplate(frame,template, result,cv.CV_TM_SQDIFF) (min_x,max_y,minloc,maxloc)=cv.MinMaxLoc(result) (x,y)=minloc cv.Rectangle(image2,(int(x),int(y)),(int(x)+w,int(y)+h),(255,255,255),1,0)
#hardcoded optimizable params, might be implemented with a slider afterwards quality = 0.1 #cvGoodFeaturesTrack Quality factor corner_count = 50 #Maximum corners to find with GoodFeaturesToTrack min_distance = 10 #minimum distance between two corners threshold_limit1_upper = 40 #Threshold for difference image calculation threshold_limit1_lower = 20 fading_factor = 40 #Fading factor for sum image threshold_limit2_lower = 100 #Threshold for sum image calculation threshold_limit2_upper = 255 skip = 10 #Good Feature Skipper param1 = 1.5 #Rotation parameter #Primary initialization cv.CvtColor(img, gray_image, cv.CV_RGB2GRAY) cv.Copy(gray_image, prev_image) corners = cv.GoodFeaturesToTrack(gray_image, eigen_image, temp_image, cornerCount = corner_count, qualityLevel = quality, minDistance = min_distance) #Good features to track #Initializing GoodFeatureToTrack execution skip counter counter = 0 #misc initialization flag = False #GoodFeatureToTrack execution flag while True: #Main loop #Acquiring the image img = cv.QueryFrame(capture) #Showing image if flag_true_image: cv.ShowImage(window1, gray_image) else: cv.ShowImage(window1, render_image)
# Live demo: show Harris good features circled on each captured frame.
cv.NamedWindow("Good Features to Track", 1)
while True:
    frame = cv.QueryFrame(capture)
    frame_size = cv.GetSize(frame)
    #print frame_size[1], frame_size[0]
    # CreateMat takes (rows, cols) = (height, width), hence the swap.
    eig_image = cv.CreateMat(frame_size[1], frame_size[0], cv.CV_32FC1)
    temp_image = cv.CreateMat(frame_size[1], frame_size[0], cv.CV_32FC1)
    grayframe = cv.CreateImage(cv.GetSize(frame), 8, 1)
    cv.CvtColor(frame, grayframe, cv.CV_RGB2GRAY)
    if frame:
        for (x, y) in cv.GoodFeaturesToTrack(grayframe, eig_image, temp_image, 20, 0.08, 1.0, blockSize=6, useHarris=True):
            #print "good feature at", x,y
            #Circle(img, center, radius, color, thickness=1, lineType=8, shift=0)
            cv.Circle(frame, (x, y), 6, (255, 0, 0), 1, cv.CV_AA, 0)
    cv.ShowImage("Good Features to Track", frame)
    if cv.WaitKey(10) != -1:
        break
cv.DestroyWindow("Laplacian")

"""
GoodFeaturesToTrack(image, eigImage, tempImage, cornerCount, qualityLevel, minDistance, mask=NULL, blockSize=3, useHarris=0, k=0.04) corners
import cv filename = "burke94.jpg" # replace w/ your filename grayImage = cv.LoadImage(filename, 2) eigImage = cv.CreateImage(cv.GetSize(grayImage), cv.IPL_DEPTH_32F, 1) tempImage = cv.CreateImage(cv.GetSize(grayImage), cv.IPL_DEPTH_32F, 1) cornerMem = [] cornerCount = 300 qualityLevel = 0.1 minDistance = 5 cornerMem = cv.GoodFeaturesToTrack(grayImage, eigImage, tempImage, cornerCount, qualityLevel, minDistance, None, 3, False) print len(cornerMem), " corners found" print cornerMem for point in cornerMem: center = int(point[0]), int(point[1]) cv.Circle(colorImage, (center), 2, (0, 255, 255)) cv.SaveImage("savedcolor.jpg", colorImage)
# (Script fragment: goldencorners2d, d, sqrt, img_gs, eig_image,
# temp_image and GetCornerType come from earlier, outside this chunk.)
# Project each 2-D golden corner into 3-D and mirror its signs to fill
# the four quadrant lists.
goldencorners = [[], [], [], []]
for (xt, yt) in goldencorners2d:
    x = -xt / sqrt(2)
    y = yt
    z = d - xt / sqrt(2)
    goldencorners[0].append((x, y, z))
    goldencorners[1].append((-x, y, z))
    goldencorners[2].append((-x, -y, z))
    goldencorners[3].append((x, -y, z))

# Detect corners in the grayscale image and bucket them by the type
# (0-3) reported by GetCornerType.
goodcorners = [[], [], [], []]
for (x, y) in cv.GoodFeaturesToTrack(img_gs, eig_image, temp_image, 50, 0.04, 3.0, blockSize=5):
    ptype = GetCornerType(x, y, img_gs)
    if (ptype[0] in [0, 1, 2, 3]):
        goodcorners[ptype[0]].append((x, y, ptype[1]))

# Order each bucket along x (alternating direction per type) so the
# matching loop below walks corners consistently.
goodcorners[0].sort(key=lambda point: point[0], reverse=True)
goodcorners[1].sort(key=lambda point: point[0])
goodcorners[2].sort(key=lambda point: point[0])
goodcorners[3].sort(key=lambda point: point[0], reverse=True)

for typ in range(4):
    for n in range(len(goodcorners[typ])):
        (x, y, t1) = goodcorners[typ][n]