def _improve_corners(self, new_corners):
    '''
    New corners are made only from small rectangles, which means the corner
    arms are very short; expand them as far as we can, then re-refine each
    surviving corner to sub-pixel accuracy at full image scale.

    @param new_corners: list of corners (entries may be None)
    @return: list of improved corners (same length; entries may be None)
    '''
    # fixed: deprecated '<>' operator replaced with '!='; removed unused
    # local 'L' (the length is not needed here)
    ncorners = list(new_corners)
    crit = (cv.CV_TERMCRIT_EPS + cv.CV_TERMCRIT_ITER, 30, 0.1)
    cv.ResetImageROI(self.gray_img)
    for i, cor in enumerate(new_corners):
        if cor is not None:
            #TODO Check efficiency, maybe we should do it once for all corners?
            ncorners[i] = self._improve_corner(new_corners, i)
            if ncorners[i] is None and i > 0:
                #this corner is not valid
                #previous corner was already improved by wrong data
                #we have to correct that
                ncorners[i - 1] = self._improve_corner(new_corners, i - 1)
    if self.m_d != 0:
        scale_factor = 1 << self.m_d.scale
        for cor in ncorners:
            if cor is not None:
                cor.scale_up(self.m_d)
                # window grows with the downsampling factor so the true
                # full-scale corner stays inside the search area
                cor.p = cv.FindCornerSubPix(
                    self.gray_img, [cor.p],
                    (scale_factor + 1, scale_factor + 1), (0, 0), crit)[0]
    return ncorners
def detect(image, config):
    """Detect a 5x4 chessboard in a BGR image and return its angle.

    @param image: BGR input image (legacy cv IplImage)
    @param config: dict; key "EqualizeHist" toggles histogram equalization
        of the grayscale image before detection
    @return: angle computed by corners_to_angle(), or None when the full
        board was not found
    """
    angle = None
    image_size = cv.GetSize(image)
    # create grayscale version
    grayscale = cv.CreateImage(image_size, 8, 1)
    cv.CvtColor(image, grayscale, cv.CV_BGR2GRAY)
    if config["EqualizeHist"]:
        cv.EqualizeHist(grayscale, grayscale)
    # board geometry is fixed for this detector
    pattern_width = 5
    pattern_height = 4
    found, corners = cv.FindChessboardCorners(
        grayscale, (pattern_width, pattern_height))
    if found:
        # refine to sub-pixel accuracy before computing the angle
        new_corners = cv.FindCornerSubPix(
            grayscale, corners, (11, 11), (-1, -1),
            (cv.CV_TERMCRIT_EPS + cv.CV_TERMCRIT_ITER, 30, 0.1))
        angle = corners_to_angle(new_corners)
        #print "angle: ", angle

        def to_int(t):
            # helper used only by the commented-out debug drawing below
            return (int(t[0]), int(t[1]))
        #nc = [to_int(corner) for corner in new_corners]
        #cv.Line( grayscale, nc[0], nc[4], (255,0,0), 2)
        #cv.Line( grayscale, nc[4], nc[19], (0,255,0),2)
        #cv.Line( grayscale, nc[19], nc[15], (0,0,255),2)
        #cv.Line( grayscale, nc[15], nc[0], (255,255,0),2)
        #cv.ShowImage('Processed', grayscale)
    return angle
def detect(self, image):
    """Detect the configured chessboard in *image*.

    @param image: input image (legacy cv image/mat)
    @return: (corners_cv, object_points) as 2xN / 3xN CV_32FC1 matrices,
        or (None, None) when the board was not found
    """
    # fixed: removed unreachable trailing 'return' after both branches
    # already return; renamed loop variable 'next' (shadowed the builtin)
    corners_x = self.chess_size[0]
    corners_y = self.chess_size[1]
    #Here, we'll actually call the openCV detector
    found, corners = cv.FindChessboardCorners(
        image, (corners_x, corners_y), cv.CV_CALIB_CB_ADAPTIVE_THRESH)
    if found:
        board_corners = (corners[0], corners[corners_x - 1],
                         corners[(corners_y - 1) * corners_x],
                         corners[len(corners) - 1])
        #find the perimeter of the checkerboard
        perimeter = 0.0
        for i in range(len(board_corners)):
            j = (i + 1) % 4
            xdiff = board_corners[i][0] - board_corners[j][0]
            ydiff = board_corners[i][1] - board_corners[j][1]
            perimeter += math.sqrt(xdiff * xdiff + ydiff * ydiff)
        #estimate the square size in pixels
        square_size = perimeter / ((corners_x - 1 + corners_y - 1) * 2)
        # refine within roughly half a square so we snap to the right corner
        radius = int(square_size * 0.5 + 0.5)
        corners = cv.FindCornerSubPix(
            image, corners, (radius, radius), (-1, -1),
            (cv.CV_TERMCRIT_EPS | cv.CV_TERMCRIT_ITER, 30, 0.1))
        #uncomment to debug chessboard detection
        #cv.DrawChessboardCorners(image, (corners_x, corners_y), corners, 1)
        #cv.NamedWindow("image")
        #cv.ShowImage("image", image)
        #cv.WaitKey(600)
        #we'll also generate the object points if the user has specified spacing
        object_points = cv.CreateMat(3, corners_x * corners_y, cv.CV_32FC1)
        for y in range(corners_y):
            for x in range(corners_x):
                cv.SetReal2D(object_points, 0, y * corners_x + x, x * self.dim)
                cv.SetReal2D(object_points, 1, y * corners_x + x, y * self.dim)
                cv.SetReal2D(object_points, 2, y * corners_x + x, 0.0)
        #not sure why opencv functions return non opencv compatible datatypes... but they do so we'll convert
        corners_cv = cv.CreateMat(2, corners_x * corners_y, cv.CV_32FC1)
        for i in range(corners_x * corners_y):
            cv.SetReal2D(corners_cv, 0, i, corners[i][0])
            cv.SetReal2D(corners_cv, 1, i, corners[i][1])
        return (corners_cv, object_points)
    else:
        #cv.NamedWindow("image_scaled")
        #cv.ShowImage("image_scaled", image_scaled)
        #cv.WaitKey(600)
        rospy.logwarn("Didn't find checkerboard")
        return (None, None)
def detect(self, cvimage):
    """Find the configured chessboard in *cvimage*.

    @param cvimage: input image (legacy cv image/mat)
    @return: refined corners as a float64 numpy array, or None when the
        board was not found
    """
    # fixed: renamed loop variable 'next' (shadowed the builtin); early
    # return flattens the happy path
    corners_x = self.pattern['corners_x']
    corners_y = self.pattern['corners_y']
    found, corners = cv.FindChessboardCorners(
        cvimage, (corners_x, corners_y), cv.CV_CALIB_CB_ADAPTIVE_THRESH)
    if not found:
        return None
    board_corners = (corners[0], corners[corners_x - 1],
                     corners[(corners_y - 1) * corners_x],
                     corners[len(corners) - 1])
    #find the perimeter of the checkerboard
    perimeter = 0.0
    for i in range(len(board_corners)):
        j = (i + 1) % 4
        xdiff = board_corners[i][0] - board_corners[j][0]
        ydiff = board_corners[i][1] - board_corners[j][1]
        perimeter += math.sqrt(xdiff * xdiff + ydiff * ydiff)
    #estimate the square size in pixels
    square_size = perimeter / ((corners_x - 1 + corners_y - 1) * 2)
    # refine within roughly half a square so we snap to the right corner
    radius = int(square_size * 0.5 + 0.5)
    return array(
        cv.FindCornerSubPix(
            cvimage, corners, (radius, radius), (-1, -1),
            (cv.CV_TERMCRIT_EPS | cv.CV_TERMCRIT_ITER, 30, 0.01)),
        float64)
def find_chessboard(img, ncol=5, nrow=4, useSubPix=True):
    '''
    Wrapper for the OpenCV chessboard detection.

    @param img: color sensor_msgs.Image or cvmat of the image
    @param ncol: number of inner corners per row
    @param nrow: number of inner corners per column
    @param useSubPix: refine corners to sub-pixel accuracy when the full
        board was found
    @return: (success, corners) where corners is a numpy array
    '''
    # fixed: isinstance() instead of type() comparison; truthiness instead
    # of 'success != 0'; dropped stray semicolons
    # convert image msg to cv
    if isinstance(img, sensor_msgs.Image):
        img = msg2cvmat(img)
    # create chessboard size tuple
    chessboardSize = (ncol, nrow)
    # find corners
    (success, chessboard) = cv.FindChessboardCorners(
        img, chessboardSize,
        flags=cv.CV_CALIB_CB_ADAPTIVE_THRESH +
              cv.CV_CALIB_CB_NORMALIZE_IMAGE +
              cv.CV_CALIB_CB_FILTER_QUADS)
    if success and useSubPix:
        # create grayscale
        gray = cv.CreateImage((img.cols, img.rows), cv.IPL_DEPTH_8U, 1)
        cv.CvtColor(img, gray, cv.CV_RGB2GRAY)
        # search window = 1/2 the distance between the first two corners
        c0 = np.array(chessboard[0])
        c1 = np.array(chessboard[1])
        w = int(np.ceil(np.sqrt(np.dot((c1 - c0), (c1 - c0))) / 2.0))
        # sub pixel refinement
        chessboard = cv.FindCornerSubPix(
            gray, chessboard, (w, w), (-1, -1),
            (cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 20, 0.01))
    return (success, np.array(chessboard))
def process(self, gui):
    """Process file, find corners on chessboard and keep points.

    Scans the whole movie once to count frames, then samples roughly
    Calibration.NUMPOINT frames, collecting refined chessboard corners
    into self.points / self.nframe.

    @param gui: object with a setMessage(str) progress callback
    """
    gui.setMessage("Analyzing movie... get frame count")
    #open capture file
    capture = cv.CaptureFromFile(self.filename)
    frame = cv.QueryFrame(capture)
    #count frames the hard way (the container may not expose a count)
    numframes = 1
    while frame:
        frame = cv.QueryFrame(capture)
        numframes += 1
    # BUGFIX: for movies shorter than NUMPOINT frames the integer division
    # yields 0, which crashed the 'f % step' skip below (ZeroDivisionError)
    step = max(1, int(numframes / Calibration.NUMPOINT))
    capture = cv.CaptureFromFile(self.filename)
    frame = cv.QueryFrame(capture)  #grab a frame to get some information
    self.framesize = (frame.width, frame.height)
    gray = cv.CreateImage((frame.width, frame.height), 8, 1)
    points = []
    nframe = 0
    f = 0
    cv.NamedWindow("ChessBoard", cv.CV_WINDOW_NORMAL)
    gui.setMessage("Analyzing movie... find chessCorner for %d frames"
                   % Calibration.NUMPOINT)
    while frame:
        f += 1
        #find corners
        state, found = cv.FindChessboardCorners(
            frame, self.chessSize, flags=cv.CV_CALIB_CB_FILTER_QUADS)
        if state == 1:
            #refine search on the grayscale image
            cv.CvtColor(frame, gray, cv.CV_BGR2GRAY)
            found = cv.FindCornerSubPix(
                gray, found, (11, 11), (-1, -1),
                (cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 30, 0.1))
            #draw corners on image
            cv.DrawChessboardCorners(frame, self.chessSize, found, state)
            #keep the points
            for x, y in found:
                points.append((x, y))
            nframe += 1
            cv.ResizeWindow("ChessBoard", 640, 480)
            cv.ShowImage("ChessBoard", frame)
            cv.WaitKey(4)
        frame = cv.QueryFrame(capture)
        #skip frames to get only NUMPOINT of point to calculate
        while f % step != 0 and frame:
            f += 1
            frame = cv.QueryFrame(capture)
    self.points = points
    self.nframe = nframe
    gui.setMessage("Analyze end, %d points found" % len(self.points))
def detect(self, image):
    """Detect the checkerboard in a scaled copy of *image*.

    @param image: input image (legacy cv image/mat)
    @return: (corners_cv, object_points); object_points is None unless
        spacing was configured. (None, None) when no board was found.
    """
    # fixed: 'is not None' instead of '!= None'; renamed loop variable
    # 'next' (shadowed the builtin)
    #resize the image base on the scaling parameters we've been configured with
    scaled_width = int(.5 + image.width * self.width_scaling)
    scaled_height = int(.5 + image.height * self.height_scaling)
    #in cvMat its row, col so height comes before width
    image_scaled = cv.CreateMat(scaled_height, scaled_width,
                                cv.GetElemType(image))
    cv.Resize(image, image_scaled, cv.CV_INTER_LINEAR)
    #Here, we'll actually call the openCV detector
    found, corners = cv.FindChessboardCorners(
        image_scaled, (self.corners_x, self.corners_y),
        cv.CV_CALIB_CB_ADAPTIVE_THRESH)
    if found:
        board_corners = self.get_board_corners(corners)
        #find the perimeter of the checkerboard
        perimeter = 0.0
        for i in range(len(board_corners)):
            j = (i + 1) % 4
            xdiff = board_corners[i][0] - board_corners[j][0]
            ydiff = board_corners[i][1] - board_corners[j][1]
            perimeter += math.sqrt(xdiff * xdiff + ydiff * ydiff)
        #estimate the square size in pixels
        square_size = perimeter / ((self.corners_x - 1 + self.corners_y - 1) * 2)
        radius = int(square_size * 0.5 + 0.5)
        corners = cv.FindCornerSubPix(
            image_scaled, corners, (radius, radius), (-1, -1),
            (cv.CV_TERMCRIT_EPS | cv.CV_TERMCRIT_ITER, 30, 0.1))
        if self.display:
            #uncomment to debug chessboard detection
            cv.DrawChessboardCorners(
                image_scaled, (self.corners_x, self.corners_y), corners, 1)
            cv.NamedWindow("image_scaled")
            cv.ShowImage("image_scaled", image_scaled)
            cv.WaitKey(5)
        object_points = None
        #we'll also generate the object points if the user has specified spacing
        if self.spacing_x is not None and self.spacing_y is not None:
            object_points = cv.CreateMat(
                3, self.corners_x * self.corners_y, cv.CV_32FC1)
            for y in range(self.corners_y):
                for x in range(self.corners_x):
                    cv.SetReal2D(object_points, 0, y * self.corners_x + x,
                                 x * self.spacing_x)
                    cv.SetReal2D(object_points, 1, y * self.corners_x + x,
                                 y * self.spacing_y)
                    cv.SetReal2D(object_points, 2, y * self.corners_x + x, 0.0)
        #not sure why opencv functions return non opencv compatible datatypes... but they do so we'll convert
        corners_cv = cv.CreateMat(2, self.corners_x * self.corners_y,
                                  cv.CV_32FC1)
        for i in range(self.corners_x * self.corners_y):
            cv.SetReal2D(corners_cv, 0, i, corners[i][0])
            cv.SetReal2D(corners_cv, 1, i, corners[i][1])
        return (corners_cv, object_points)
    else:
        rospy.logdebug("Didn't find checkerboard")
        return (None, None)
def downsample_and_detect(self, img):
    """
    Downsample the input image to approximately VGA resolution and detect the
    calibration target corners in the full-size image.

    Combines these apparently orthogonal duties as an optimization. Checkerboard
    detection is too expensive on large images, so it's better to do detection on
    the smaller display image and scale the corners back up to the correct size.

    Returns (scrib, corners, downsampled_corners, board, (x_scale, y_scale)).
    """
    # fixed: removed stray debug 'print corners' in the circle-grid branch
    # Scale the input image down to ~VGA size
    (width, height) = cv.GetSize(img)
    scale = math.sqrt((width * height) / (640. * 480.))
    if scale > 1.0:
        scrib = cv.CreateMat(int(height / scale), int(width / scale),
                             cv.GetElemType(img))
        cv.Resize(img, scrib)
    else:
        scrib = cv.CloneMat(img)
    # Due to rounding, actual horizontal/vertical scaling may differ slightly
    x_scale = float(width) / scrib.cols
    y_scale = float(height) / scrib.rows
    if self.pattern == Patterns.Chessboard:
        # Detect checkerboard
        (ok, downsampled_corners, board) = self.get_corners(scrib, refine=True)
        # Scale corners back to full size image
        corners = None
        if ok:
            if scale > 1.0:
                # Refine up-scaled corners in the original full-res image
                # TODO Does this really make a difference in practice?
                corners_unrefined = [(c[0] * x_scale, c[1] * y_scale)
                                     for c in downsampled_corners]
                radius = int(math.ceil(scale))
                if img.channels == 3:
                    mono = cv.CreateMat(img.rows, img.cols, cv.CV_8UC1)
                    cv.CvtColor(img, mono, cv.CV_BGR2GRAY)
                else:
                    mono = img
                corners = cv.FindCornerSubPix(
                    mono, corners_unrefined, (radius, radius), (-1, -1),
                    (cv.CV_TERMCRIT_EPS + cv.CV_TERMCRIT_ITER, 30, 0.1))
            else:
                corners = downsampled_corners
    else:
        # Circle grid detection is fast even on large images
        (ok, corners, board) = self.get_corners(img)
        # Scale corners to downsampled image for display
        downsampled_corners = None
        if ok:
            if scale > 1.0:
                downsampled_corners = [(c[0] / x_scale, c[1] / y_scale)
                                       for c in corners]
            else:
                downsampled_corners = corners
    return (scrib, corners, downsampled_corners, board, (x_scale, y_scale))
def get_corners(mono, refine=False):
    """Run chessboard detection on *mono*.

    @param mono: grayscale image
    @param refine: when True and the board was found, refine the corners to
        sub-pixel accuracy
    @return: (ok, corners)
    """
    detect_flags = (cv.CV_CALIB_CB_ADAPTIVE_THRESH |
                    cv.CV_CALIB_CB_NORMALIZE_IMAGE)
    ok, corners = cv.FindChessboardCorners(
        mono, (num_x_ints, num_y_ints), detect_flags)
    if ok and refine:
        term = (cv.CV_TERMCRIT_EPS + cv.CV_TERMCRIT_ITER, 30, 0.1)
        corners = cv.FindCornerSubPix(mono, corners, (5, 5), (-1, -1), term)
    return (ok, corners)
def detect(self, image):
    """Detect the checkerboard in a scaled copy of *image*.

    @param image: input image (legacy cv image/mat)
    @return: (corners, object_points); object_points is a list of (x, y)
        tuples and is None unless spacing was configured. (None, None)
        when no board was found.
    """
    # fixed: 'is not None' instead of '!= None'; renamed loop variable
    # 'next' (shadowed the builtin)
    #resize the image base on the scaling parameters we've been configured with
    scaled_width = int(.5 + image.width * self.width_scaling)
    scaled_height = int(.5 + image.height * self.height_scaling)
    #in cvMat its row, col so height comes before width
    image_scaled = cv.CreateMat(scaled_height, scaled_width,
                                cv.GetElemType(image))
    cv.Resize(image, image_scaled, cv.CV_INTER_LINEAR)
    found, corners = cv.FindChessboardCorners(
        image_scaled, (self.corners_x, self.corners_y),
        cv.CV_CALIB_CB_ADAPTIVE_THRESH)
    if found:
        rospy.logdebug("Found cb")
        board_corners = self.get_board_corners(corners)
        #find the perimeter of the checkerboard
        perimeter = 0.0
        for i in range(len(board_corners)):
            j = (i + 1) % 4
            xdiff = board_corners[i][0] - board_corners[j][0]
            ydiff = board_corners[i][1] - board_corners[j][1]
            perimeter += math.sqrt(xdiff * xdiff + ydiff * ydiff)
        #estimate the square size in pixels
        square_size = perimeter / (
            (self.corners_x - 1 + self.corners_y - 1) * 2)
        radius = int(square_size * 0.5 + 0.5)
        corners = cv.FindCornerSubPix(
            image_scaled, corners, (radius, radius), (-1, -1),
            (cv.CV_TERMCRIT_EPS | cv.CV_TERMCRIT_ITER, 30, 0.1))
        #cv.DrawChessboardCorners(image_scaled, (self.corners_x, self.corners_y), corners, 1)
        #cv.NamedWindow("image_scaled")
        #cv.ShowImage("image_scaled", image_scaled)
        #cv.WaitKey()
        object_points = None
        #we'll also generate the object points if they've been requested
        if self.spacing_x is not None and self.spacing_y is not None:
            object_points = [None] * (self.corners_x * self.corners_y)
            for i in range(self.corners_y):
                for j in range(self.corners_x):
                    object_points[i * self.corners_x + j] = \
                        (j * self.spacing_x, i * self.spacing_y)
        return (corners, object_points)
    else:
        rospy.logdebug("Didn't find checkerboard")
        return (None, None)
def detect_chessboard(image, corners_x, corners_y, spacing_x, spacing_y): #Here, we'll actually call the openCV detector found, corners = cv.FindChessboardCorners(image, (corners_x, corners_y), cv.CV_CALIB_CB_ADAPTIVE_THRESH) if found: board_corners = get_board_corners(corners, corners_x, corners_y) #find the perimeter of the checkerboard perimeter = 0.0 for i in range(len(board_corners)): next = (i + 1) % 4 xdiff = board_corners[i][0] - board_corners[next][0] ydiff = board_corners[i][1] - board_corners[next][1] perimeter += math.sqrt(xdiff * xdiff + ydiff * ydiff) #estimate the square size in pixels square_size = perimeter / ((corners_x - 1 + corners_y - 1) * 2) radius = int(square_size * 0.5 + 0.5) corners = cv.FindCornerSubPix( image, corners, (radius, radius), (-1, -1), (cv.CV_TERMCRIT_EPS | cv.CV_TERMCRIT_ITER, 30, 0.1)) #uncomment to debug chessboard detection print 'Chessboard found' #cv.DrawChessboardCorners(image_scaled, (corners_x, corners_y), corners, 1) #cv.NamedWindow("image_scaled") #cv.ShowImage("image_scaled", image_scaled) #cv.WaitKey(600) object_points = None #we'll also generate the object points if the user has specified spacing if spacing_x != None and spacing_y != None: object_points = cv.CreateMat(1, corners_x * corners_y, cv.CV_32FC3) for y in range(corners_y): for x in range(corners_x): object_points[0, y * corners_x + x] = (x * spacing_x, y * spacing_y, 0.0) #not sure why opencv functions return non opencv compatible datatypes... but they do so we'll convert corners_cv = cv.CreateMat(1, corners_x * corners_y, cv.CV_32FC2) for i in range(corners_x * corners_y): corners_cv[0, i] = (corners[i][0], corners[i][1]) return (corners_cv, object_points) else: #cv.NamedWindow("image_scaled") #cv.ShowImage("image_scaled", image_scaled) #cv.WaitKey(600) rospy.logwarn("Didn't find checkerboard") return (None, None)
def scale_up(self, m_d):
    """Rescale this corner from the detector's downsampled coordinates back
    to full resolution, refreshing its derived measurements.

    @param m_d: marker detector providing the full-res grayscale image
        (gray_img) and the scale_up() coordinate mapping
    """
    self.p = m_d.scale_up(self.p)
    crit = (cv.CV_TERMCRIT_EPS + cv.CV_TERMCRIT_ITER, 30, 0.1)
    if not self.is_predicted:
        # only measured positions get sub-pixel refinement; predicted ones
        # have no image support worth snapping to
        self.p = cv.FindCornerSubPix(m_d.gray_img, [self.p], (5, 5),
                                     (0, 0), crit)[0]
    self.prev = m_d.scale_up(self.prev)
    self.next = m_d.scale_up(self.next)
    # recompute the corner's measurement after moving all three points
    self.measure()
    self.abs_v = m_d.scale_up(self.abs_v)
    self.vps = m_d.scale_up(self.vps)
def set_new_position(self, points_or_corners, offset=True, scale=1):
    '''
    Sets new position for this marker using points (in order)
    @param points_or_corners: list of points or corners
    @param offset: if true, image ROI is checked and points are shifted
    @param scale: current detector scale; widens the sub-pixel search window
    '''
    # fixed: deprecated '<>' operator replaced with '!=' (two occurrences)
    if len(points_or_corners) > 0 and type(points_or_corners[0]) == tuple:
        self.predicted = -1
        points = points_or_corners
        img = self.m_d.img
        (x, y, _, _) = rect = cv.GetImageROI(img)
        if offset and (x, y) != (0, 0):
            # shift points from ROI-local to absolute image coordinates
            points = map(lambda z: add((x, y), z), points)
        cv.ResetImageROI(img)
        crit = (cv.CV_TERMCRIT_EPS + cv.CV_TERMCRIT_ITER, 30, 0.1)
        if (scale > 1):
            points = cv.FindCornerSubPix(
                self.m_d.gray_img, points,
                (scale * 2 + 4, scale * 2 + 4), (-1, -1), crit)
        else:
            points = cv.FindCornerSubPix(
                self.m_d.gray_img, points, (3, 3), (-1, -1), crit)
        ncorners = Corner.get_corners(points, self.m_d.time)
        if len(self.corners) != 0:
            for i, cor in enumerate(ncorners):
                cor.compute_change(self.corners[i])
        cv.SetImageROI(img, rect)
    else:
        ncorners = points_or_corners
    # count corners whose position was only predicted (not measured)
    self.predicted += len(filter(lambda x: x.is_predicted, ncorners))
    for i, c in enumerate(ncorners):
        c.black_inside = self.black_inside
        # if len(self.corners)==4:
        #     if dist_points(c.p, self.corners[i].p)<4:
        #         c.p=self.corners[i].p
    self.corners = ncorners
    self.area = abs(cv.ContourArea(self.points))
    self.last_seen = self.m_d.time
    self.model_view = None
def _get_corners(img, board, refine=True):
    """
    Get corners for a particular chessboard for an image.

    @param img: input image (3-channel BGR or single-channel grayscale)
    @param board: board description with n_cols / n_rows
    @param refine: refine corners to sub-pixel accuracy when found
    @return: (ok, corners)
    """
    w, h = cv.GetSize(img)
    if img.channels == 3:
        mono = cv.CreateMat(h, w, cv.CV_8UC1)
        cv.CvtColor(img, mono, cv.CV_BGR2GRAY)
    else:
        mono = img
    (ok, corners) = cv.FindChessboardCorners(
        mono, (board.n_cols, board.n_rows),
        cv.CV_CALIB_CB_ADAPTIVE_THRESH | cv.CV_CALIB_CB_NORMALIZE_IMAGE |
        cv2.CALIB_CB_FAST_CHECK)
    # If any corners are within BORDER pixels of the screen edge, reject the detection by setting ok to false
    # NOTE: This may cause problems with very low-resolution cameras, where 8 pixels is a non-negligible fraction
    # of the image size. See http://answers.ros.org/question/3155/how-can-i-calibrate-low-resolution-cameras
    BORDER = 8
    if not all([(BORDER < x < (w - BORDER)) and (BORDER < y < (h - BORDER))
                for (x, y) in corners]):
        ok = False
    if refine and ok:
        # Use a radius of half the minimum distance between corners. This should be large enough to snap to the
        # correct corner, but not so large as to include a wrong corner in the search window.
        min_distance = float("inf")
        for row in range(board.n_rows):
            for col in range(board.n_cols - 1):
                # BUGFIX: corners are stored row-major with one row per
                # n_cols entries, so the row stride is n_cols; the original
                # used n_rows here, indexing the wrong corners (and walking
                # off the valid range) on non-square boards
                index = row * board.n_cols + col
                min_distance = min(min_distance,
                                   _pdist(corners[index], corners[index + 1]))
        for row in range(board.n_rows - 1):
            for col in range(board.n_cols):
                index = row * board.n_cols + col
                min_distance = min(
                    min_distance,
                    _pdist(corners[index], corners[index + board.n_cols]))
        radius = int(math.ceil(min_distance * 0.5))
        corners = cv.FindCornerSubPix(
            mono, corners, (radius, radius), (-1, -1),
            (cv.CV_TERMCRIT_EPS + cv.CV_TERMCRIT_ITER, 30, 0.1))
    return (ok, corners)
def GoodFeaturesToTrack(image, mask):
    """Find up to 25 strong corners in *image* and refine them.

    @param image: grayscale input image
    @param mask: optional ROI mask passed to cv.GoodFeaturesToTrack
    @return: (corners, weights, existence) where weights and existence are
        all-ones lists matching corners, or (None, None, None) when no
        corner was found
    """
    # fixed: removed dead locals 'list_gftt' and 'initpoint'; empty check
    # now happens before the bookkeeping lists are built
    eig_image = cv.CreateMat(image.height, image.width, cv.CV_32FC1)
    temp_image = cv.CreateMat(image.height, image.width, cv.CV_32FC1)
    gfttar = cv.GoodFeaturesToTrack(image, eig_image, temp_image, 25,
                                    0.01, 5.0, mask, 3, 0, 0.04)
    gfttar = cv.FindCornerSubPix(
        image, gfttar, (10, 10), (-1, -1),
        (cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 20, 0.03))
    if len(gfttar) == 0:
        return None, None, None
    weights = [1] * len(gfttar)
    existence = [1] * len(gfttar)
    return gfttar, weights, existence
def detect(self, msg):
    """
    Downsample the input image to approximately VGA resolution and detect the
    calibration target corners in the full-size image.

    Combines these apparently orthogonal duties as an optimization. Checkerboard
    detection is too expensive on large images, so it's better to do detection on
    the smaller display image and scale the corners back up to the correct size.

    Returns (ok, corners, mono, (x_scale, y_scale)).
    """
    mono = self.mk_gray(msg)
    # Scale the input image down to ~VGA size
    (width, height) = cv.GetSize(mono)
    #print "width: " + str(width) + " height: " + str(height)
    scale = math.sqrt((width * height) / (640. * 480.))
    if scale > 1.0:
        scrib = cv.CreateMat(int(height / scale), int(width / scale),
                             cv.GetElemType(mono))
        cv.Resize(mono, scrib)
    else:
        scrib = cv.CloneMat(mono)
    # Due to rounding, actual horizontal/vertical scaling may differ slightly
    x_scale = float(width) / scrib.cols
    y_scale = float(height) / scrib.rows
    (ok, downsampled_corners) = self.get_corners(scrib, refine=True)
    # Scale corners back to full size image
    if scale > 1.0:
        # Refine up-scaled corners in the original full-res image
        # TODO Does this really make a difference in practice?
        corners_unrefined = [(c[0] * x_scale, c[1] * y_scale)
                             for c in downsampled_corners]
        radius = int(math.ceil(scale))
        corners = cv.FindCornerSubPix(
            mono, corners_unrefined, (radius, radius), (-1, -1),
            (cv.CV_TERMCRIT_EPS + cv.CV_TERMCRIT_ITER, 30, 0.1))
    else:
        corners = downsampled_corners
    return (ok, corners, mono, (x_scale, y_scale))
def get_corners(self, mono, refine=True):
    """
    Get corners for a particular chessboard for an image.

    @param mono: grayscale input image
    @param refine: refine corners to sub-pixel accuracy when found
    @return: (ok, corners)
    """
    (ok, corners) = cv.FindChessboardCorners(
        mono, (self.n_cols, self.n_rows), self.flags)
    if refine and ok:
        # Use a radius of half the minimum distance between corners. This should be large enough to snap to the
        # correct corner, but not so large as to include a wrong corner in the search window.
        min_distance = float("inf")
        # horizontal neighbors (corners are row-major with stride n_cols)
        for row in range(self.n_rows):
            for col in range(self.n_cols - 1):
                index = row * self.n_cols + col
                min_distance = min(min_distance,
                                   _pdist(corners[index], corners[index + 1]))
        # vertical neighbors
        for row in range(self.n_rows - 1):
            for col in range(self.n_cols):
                index = row * self.n_cols + col
                min_distance = min(
                    min_distance,
                    _pdist(corners[index], corners[index + self.n_cols]))
        radius = int(math.ceil(min_distance * 0.5))
        corners = cv.FindCornerSubPix(
            mono, corners, (radius, radius), (-1, -1),
            (cv.CV_TERMCRIT_EPS + cv.CV_TERMCRIT_ITER, 30, 0.1))
    return (ok, corners)
def _improve_corner(self, new_corners, index):
    """Try to extend the arms of corner *index* using its neighbors.

    Missing neighbors are estimated along the corner's arm directions
    (vp/vn) using the previous frame's side lengths; the corner position is
    then re-refined at full scale.

    @param new_corners: list of corners (entries may be None)
    @param index: which corner to improve
    @return: improved corner, or None when either arm endpoint is missing
    """
    # fixed: 'self.md' attribute typo (the detector is stored as self.m_d
    # everywhere else); deprecated '<>' replaced with '!='; renamed local
    # 'next' (shadowed the builtin)
    cor = new_corners[index]
    if cor is None:
        return None
    L = len(new_corners)
    prev_index = (index + L - 1) % L
    next_index = (index + 1) % L
    if new_corners[prev_index] is None:
        # estimate the previous arm endpoint from the old side length
        side_length = length(vector(self.corners[index].p,
                                    self.corners[prev_index].p))
        prev_pt = self.find_better_point(cor.p, cor.vp, side_length)
    else:
        prev_pt = new_corners[prev_index].p
    if new_corners[next_index] is None:
        side_length = length(vector(self.corners[index].p,
                                    self.corners[next_index].p))
        next_pt = self.find_better_point(cor.p, cor.vn, side_length)
    else:
        next_pt = new_corners[next_index].p
    if prev_pt is None or next_pt is None:
        return None
    scale_f = self.m_d.scale_factor
    if scale_f != 1:
        # map arm endpoints and the corner itself to full-scale coordinates
        cor.prev = self.find_corner_in_full_scale(prev_pt)
        cor.next = self.find_corner_in_full_scale(next_pt)
        cor.p = self.m_d.scale_up(cor.p, scale_f)
    else:
        cor.prev = prev_pt
        cor.next = next_pt
    crit = (cv.CV_TERMCRIT_EPS + cv.CV_TERMCRIT_ITER, 30, 0.1)
    cor.p = cv.FindCornerSubPix(self.m_d.gray_imgs[0], [cor.p],
                                (scale_f + 4, scale_f + 4), (0, 0), crit)[0]
    cor.measure()
    cor.compute_change(self.corners[index])
    return cor
def _decode_rect(self, rect):
    """Decode a candidate marker rectangle and determine its rotation.

    Refines the four rectangle corners, samples six scan lines across the
    marker (two diagonals plus four inset cross lines), validates the
    black/white border, then votes on which corner the pattern starts from.

    @param rect: list of 4 corner points (mutated in place with the refined
        positions)
    @return: (rotation, self.ident) on success, or (self.FAILED, reason)
        when validation fails
    """
    img = self.m_d.draw_img
    cv.ResetImageROI(img)
    # snap the rectangle corners to sub-pixel accuracy
    nrect = cv.FindCornerSubPix(
        self.m_d.gray_img, rect, (2, 2), (-1, -1),
        (cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 10, 0.01))
    for i, p in enumerate(nrect):
        rect[i] = p
    lines = []
    r = rect

    def add_line(a, b, c, d, factor):
        # scan line from corner a (offset toward b) to corner c (offset
        # toward d); also remembers the (a, c) corner pair for voting
        p1 = v.add(r[a], v.vector(r[a], r[b]), factor)
        p2 = v.add(r[c], v.vector(r[c], r[d]), factor)
        lines.append(((p1, p2), (a, c)))
    add_line(0, 0, 2, 2, 0)        # diagonal 0-2
    add_line(1, 1, 3, 3, 0)        # diagonal 1-3
    add_line(0, 1, 3, 2, 0.25)     # inset cross lines
    add_line(1, 0, 2, 3, 0.25)
    add_line(0, 3, 1, 2, 0.25)
    add_line(3, 0, 2, 1, 0.25)
    positions, values = [], []
    for (p1, p2), _ in lines:
        pt, rpt = self.get_line(p1, p2, self.m_d.gray_img)
        values.append(pt)
        positions.append(rpt)
    for pt in values:
        # each scan line must cross enough pixels to be decodable
        if len(pt) < 32 * 1.5:
            return self.FAILED, "to short %d" % len(pt)
    black, white, real_p = self.get_border_points(rect)
    self.threshold_lines(values + [black, white])
    self.draw_lines(img, rect, values + [black, white], positions + [real_p])
    # db.show(img, "draw_lines", 0, 0, 0)
    # border sanity: the inner ring must be black, the outer ring white
    if sum(black) > BORDER_ACCURACY * len(black):
        return self.FAILED, "no black border"
    if sum(white) < (1 - BORDER_ACCURACY) * len(white):
        return self.FAILED, "no white border"
    # vote: each scan line contributes to the two corners it starts/ends at
    res = [0] * 4
    for line, (_, (a, b)) in zip(values, lines):
        comp = self.compress_line(line)
        res[a] += 1 if self.check_begining(comp, len(line)) else 0
        comp.reverse()
        res[b] += 1 if self.check_begining(comp, len(line)) else 0
    # print res
    # look for 3 consecutive above-minimum votes around the (cyclic) corners
    min_s = min(res)
    i = 0
    count = 0
    while i < 7:
        count = count + 1 if res[i % 4] > min_s else 0
        if count == 3:
            break
        i += 1
    if i == 7:
        rotation = self.FAILED
    else:
        rotation = (i + 3) % 4
    # reconcile the computed rotation with the tracker's assumed rotation
    ass_rot = self.get_assumed_rotation(rect)
    if rotation == self.FAILED:
        # print "Assumed rotation:", rotation
        return ass_rot, self.ident
    if ass_rot == self.FAILED:
        return rotation, self.ident
    if ass_rot != rotation:
        # print "Rotation conflict: assumed:%d, calculated: %d"
        # prefer the assumed rotation until conflicts persist long enough
        self.rot_conflict += 1
        if self.rot_conflict <= CONFLICT_THRESHOLD:
            # print "Assumed used"
            rotation = ass_rot
    else:
        self.rot_conflict = 0
    return rotation, self.ident
def calibrate(gridFiles, gridSize, gridBlockSize):
    """Calibrate a camera from images of a chessboard grid.

    Loads each image, finds and refines chessboard corners, normalizes the
    corner ordering so the first point is top-left, then runs
    cv.CalibrateCamera2 and saves the results to camMatrix.xml and
    distCoeffs.xml in the working directory.

    @param gridFiles: iterable of image file paths
    @param gridSize: (cols, rows) inner-corner counts
    @param gridBlockSize: physical size of one grid square
    @raise ValueError: when fewer than 5 usable grids are found
    """
    cpts = []
    imageSize = None
    for gf in gridFiles:
        image = cv.LoadImage(gf, False)
        # NOTE(review): 'success' is never checked before refinement; a
        # failed detection is only caught by the len(corners) test below —
        # confirm FindCornerSubPix tolerates partial corner lists here
        success, corners = cv.FindChessboardCorners(image, gridSize)
        corners = cv.FindCornerSubPix(
            image, corners, (5, 5), (-1, -1),
            (cv.CV_TERMCRIT_EPS + cv.CV_TERMCRIT_ITER, 30, 0.1))
        gridN = gridSize[0] * gridSize[1]
        if len(corners) != gridN:
            logging.debug("File failed: %s" % gf)
            continue
        # fix corners so that the first point is top left
        # compare corner[0] to corner[-1]
        if corners[0][0] > corners[1][0]:
            # flip left/right
            logging.debug("Grid is horizontally flipped")
            flipped = []
            for x in xrange(gridSize[0]):
                for y in xrange(gridSize[1]):
                    cx = gridSize[0] - x - 1
                    cy = y
                    flipped.append(corners[cx + cy * gridSize[0]])
            corners = flipped
        if corners[0][1] > corners[-1][1]:
            # flip top/bottom
            logging.debug("Grid is vertically flipped")
            flipped = []
            for x in xrange(gridSize[0]):
                for y in xrange(gridSize[1]):
                    cx = x
                    cy = gridSize[1] - y - 1
                    flipped.append(corners[cx + cy * gridSize[0]])
            corners = flipped
        cpts.append(corners)
        imageSize = cv.GetSize(image)
    nGrids = len(cpts)
    logging.debug("Found %i grids" % nGrids)
    if nGrids < 7:
        logging.warning("Few grids found: %i" % nGrids)
        if nGrids < 5:
            raise ValueError("Too few grids: %i" % nGrids)
    # identity initial guess for the camera matrix, zero distortion
    camMatrix = cv.CreateMat(3, 3, cv.CV_64FC1)
    cv.SetZero(camMatrix)
    camMatrix[0, 0] = 1.
    camMatrix[1, 1] = 1.
    distCoeffs = cv.CreateMat(5, 1, cv.CV_64FC1)
    cv.SetZero(distCoeffs)
    gridN = gridSize[0] * gridSize[1]
    imPts = cv.CreateMat(nGrids * gridN, 2, cv.CV_64FC1)
    objPts = cv.CreateMat(nGrids * gridN, 3, cv.CV_64FC1)
    ptCounts = cv.CreateMat(nGrids, 1, cv.CV_32SC1)
    # organize self.calibrationImgPts (to imPts) and construct objPts and ptCounts
    for (i, c) in enumerate(cpts):
        for j in xrange(gridN):
            imPts[j + i * gridN, 0] = c[j][0]
            imPts[j + i * gridN, 1] = c[j][1]
            # TODO should thes be actual points? how do I know what they are?
            objPts[j + i * gridN, 0] = j % gridSize[0] * gridBlockSize
            objPts[j + i * gridN, 1] = j / gridSize[0] * gridBlockSize
            objPts[j + i * gridN, 2] = 0.
        ptCounts[i, 0] = len(c)
    cv.CalibrateCamera2(objPts, imPts, ptCounts, imageSize, camMatrix,
                        distCoeffs,
                        cv.CreateMat(nGrids, 3, cv.CV_64FC1),
                        cv.CreateMat(nGrids, 3, cv.CV_64FC1), 0)
    cv.Save("camMatrix.xml", camMatrix)
    cv.Save("distCoeffs.xml", distCoeffs)
def calibrate(imagedir): nimages = 0 datapoints = [] im_dims = (0,0) for f in os.listdir(imagedir): if (f.find('pgm')<0): continue image = imagedir+'/'+f grey = cv.LoadImage(image,cv.CV_LOAD_IMAGE_GRAYSCALE) found,points=cv.FindChessboardCorners(grey,dims,cv.CV_CALIB_CB_ADAPTIVE_THRESH) points=cv.FindCornerSubPix(grey,points,(11,11),(-1,-1),(cv.CV_TERMCRIT_EPS+cv.CV_TERMCRIT_ITER,30,0.1)) if (found): print 'using ', image nimages += 1 datapoints.append(points) im_dims = (grey.width, grey.height) #Number of points in chessboard num_pts = dims[0] * dims[1] #image points ipts = cv.CreateMat(nimages * num_pts, 2, cv.CV_32FC1) #object points opts = cv.CreateMat(nimages * num_pts, 3, cv.CV_32FC1) npts = cv.CreateMat(nimages, 1, cv.CV_32SC1) for i in range(0,nimages): k=i*num_pts squareSize = 1.0 # squareSize is 1.0 (i.e. units of checkerboard) for j in range(num_pts): cv.Set2D(ipts,k,0,datapoints[i][j][0]) cv.Set2D(ipts,k,1,datapoints[i][j][1]) cv.Set2D(opts,k,0,float(j%dims[0])*squareSize) cv.Set2D(opts,k,1,float(j/dims[0])*squareSize) cv.Set2D(opts,k,2,0.0) k=k+1 cv.Set2D(npts,i,0,num_pts) K = cv.CreateMat(3, 3, cv.CV_64FC1) D = cv.CreateMat(5, 1, cv.CV_64FC1) cv.SetZero(K) cv.SetZero(D) # focal lengths have 1/1 ratio K[0,0] = im_dims[0] K[1,1] = im_dims[0] K[0,2] = im_dims[0]/2 K[1,2] = im_dims[1]/2 K[2,2] = 1.0 rcv = cv.CreateMat(nimages, 3, cv.CV_64FC1) tcv = cv.CreateMat(nimages, 3, cv.CV_64FC1) #print 'object' #print array(opts) #print 'image' #print array(ipts) #print 'npts' #print array(npts) size=cv.GetSize(grey) flags = 0 #flags |= cv.CV_CALIB_FIX_ASPECT_RATIO #flags |= cv.CV_CALIB_USE_INTRINSIC_GUESS #flags |= cv.CV_CALIB_ZERO_TANGENT_DIST #flags |= cv.CV_CALIB_FIX_PRINCIPAL_POINT cv.CalibrateCamera2(opts, ipts, npts, size, K, D, rcv, tcv, flags) # storing results using CameraParams C = CameraParams(xresolution=im_dims[0], yresolution=im_dims[1]) print array(K) print array(D) C.setParams(K, D) C.save(imagedir+"/params.json")
def run(self): image = None MAX_COUNT = 500 win_size = (32, 32) line_draw = 2 frame = cv.QueryFrame(self.capture) image = cv.CreateImage(cv.GetSize(frame), 8, 3) image.origin = frame.origin grey = cv.CreateImage(cv.GetSize(frame), 8, 1) edges = cv.CreateImage(cv.GetSize(frame), 8, 1) prev_grey = cv.CreateImage(cv.GetSize(frame), 8, 1) prev_grey2 = cv.CreateImage(cv.GetSize(frame), 8, 1) prev_grey3 = cv.CreateImage(cv.GetSize(frame), 8, 1) pyramid = cv.CreateImage(cv.GetSize(frame), 8, 1) prev_pyramid = cv.CreateImage(cv.GetSize(frame), 8, 1) points = [] prev_points = [] count = 0 criteria = (cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 20, 0.03) while True: frame = cv.QueryFrame(self.capture) # cv.Rectangle( frame, self.last_rect[0], self.last_rect[1], cv.CV_RGB(255,0,0), 3, cv.CV_AA, 0 ) # cv.Smooth(frame, frame, cv.CV_GAUSSIAN, 15, 0) cv.Copy(frame, image) cv.CvtColor(image, grey, cv.CV_BGR2GRAY) if count == 0: eig = cv.CreateImage(cv.GetSize(grey), 32, 1) temp = cv.CreateImage(cv.GetSize(grey), 32, 1) quality = 0.01 min_distance = 10 points = cv.GoodFeaturesToTrack(grey, eig, temp, MAX_COUNT, quality, min_distance, None, 3, 0, 0.04) points = cv.FindCornerSubPix(grey, points, win_size, (-1, -1), criteria) else: flags = 0 points, status, track_error = cv.CalcOpticalFlowPyrLK( prev_grey, grey, prev_pyramid, pyramid, prev_points, win_size, 2, criteria, flags) diff_points = [] for i, j in enumerate(points): print j if not j == prev_points[i]: diff_points.append(j) print 'len %d' % len(diff_points) prev_points == points count = len(points) print count prev_grey = grey prev_pyramid = pyramid prev_points = points if line_draw: cv.Canny(grey, edges, 30, 150, 3) if line_draw == 1: cv.CvtColor(edges, image, cv.CV_GRAY2BGR) elif line_draw > 1: cv.Merge(edges, prev_grey2, prev_grey3, None, image) cv.Copy(prev_grey2, prev_grey3, None) cv.Copy(edges, prev_grey2, None) cv.ShowImage("Target", image) # Listen for ESC key c = cv.WaitKey(7) % 0x100 if c == 27: break
# Top-level calibration capture loop: sample every num_framestep-th frame
# from 'cam', detect the chessboard, and accumulate image/object point
# pairs until num_boards snapshots succeed.
# NOTE(review): this fragment appears truncated — 'successes' is never
# incremented here, and new frames are never queried inside the loop;
# confirm against the full script.
step = 0
frame = 0
image = cv.QueryFrame(cam)
gray_image = cv.CreateImage(cv.GetSize(image), 8, 1)
while (successes < num_boards):
    frame += 1
    if (frame % num_framestep == 0):
        corners = cv.FindChessboardCorners(
            image, board_size,
            cv.CV_CALIB_CB_ADAPTIVE_THRESH | cv.CV_CALIB_CB_FILTER_QUADS)
        # FindChessboardCorners returns (found, corners); keep the corners
        corners = corners[1]
        cv.CvtColor(image, gray_image, cv.CV_BGR2GRAY)
        # NOTE(review): the refined corners returned by FindCornerSubPix
        # are discarded here (result not assigned), so the sub-pixel
        # refinement has no effect — probably should be
        # 'corners = cv.FindCornerSubPix(...)'
        cv.FindCornerSubPix(
            gray_image, corners, (11, 11), (0, 0),
            (cv.CV_TERMCRIT_EPS + cv.CV_TERMCRIT_ITER, 30, 0.1))
        if (len(corners) > 1):
            cv.DrawChessboardCorners(image, board_size, corners, 1)
        if (len(corners) == board_total):
            cv.ShowImage("Snapshot", image)
            # append this board's points at the next free slot
            step = successes * board_total
            i = step
            for j in range(board_total):
                cv.Set2D(image_points, i, 0, corners[j][0])
                cv.Set2D(image_points, i, 1, corners[j][1])
                cv.Set2D(object_points, i, 0, float(j) / num_horizontal)
                cv.Set2D(object_points, i, 1, float(j % num_horizontal))
                cv.Set2D(object_points, i, 2, 0.0)
                i += 1
            cv.Set1D(point_counts, successes, board_total)
def image_filter(self, cv_image, info, copy=None): image = cv_image #Only works on a grayscale image gray_image = cv.CreateImage(cv.GetSize(image), 8, 1) cv.CvtColor(image, gray_image, cv.CV_BGR2GRAY) print "Called with mode: %s" % self.mode if self.mode == "default" or self.mode == "save_h": print "Computing homography matrix from checkerboard" #Get the width and height of the board board_w = self.cols board_h = self.rows #Area of the board = "board_n" board_n = board_w * board_h board_sz = (board_w, board_h) #This needs to be fed with a "height", so it knows how high up the perspective transform should be. #I've found for the wide_stereo cameras, a value of -15 works well. For the prosilica, -40. Don't ask me why init_height = self.height #Uses openCV to find the checkerboard (found, corners) = cv.FindChessboardCorners( image, board_sz, (cv.CV_CALIB_CB_ADAPTIVE_THRESH | cv.CV_CALIB_CB_FILTER_QUADS)) if (not found): print "Couldn't aquire checkerboard, only found 0 of %d corners\n" % board_n gr = CloneImage(image) cv.CvtColor(gray_image, gr, cv.CV_GRAY2BGR) return gr #We need subpixel accuracy, so we tell it where the corners are and it magically does the rest. I forget what (11,11) and (-1,-1) mean. 
cv.FindCornerSubPix( gray_image, corners, (11, 11), (-1, -1), (cv.CV_TERMCRIT_EPS + cv.CV_TERMCRIT_ITER, 30, 0.1)) #Pull out the Image Points (3d location of the checkerboard corners, in the camera frame) #and the Object Points (2d location of the corners on the checkerboard object) objPts = point_array(4) imgPts = point_array(4) objPts[0] = (0, 0) objPts[1] = (board_w - 1, 0) objPts[2] = (0, board_h - 1) objPts[3] = (board_w - 1, board_h - 1) imgPts[0] = corners[0] imgPts[1] = corners[board_w - 1] imgPts[2] = corners[(board_h - 1) * board_w] imgPts[3] = corners[(board_h - 1) * board_w + board_w - 1] #Use GetPerspectiveTransform to populate our Homography matrix H = cv.CreateMat(3, 3, cv.CV_32FC1) cv.GetPerspectiveTransform(objPts, imgPts, H) #Since we don't get any z information from this, we populate H[2,2] with our hard-coded height H[2, 2] = init_height if self.mode == "save_h": print "Saving Homography matrix to %s" % self.matrix_location cv.Save(self.matrix_location, H) else: print "Loading Homography matrix from %s" % self.matrix_location H = cv.Load(self.matrix_location) birds_image = CloneImage(image) #birds_image = cv.CreateImage((image.width*3,image.height*3),8,3) #Uses the homography matrix to warp the perspective. cv.WarpPerspective( image, birds_image, H, cv.CV_INTER_LINEAR + cv.CV_WARP_INVERSE_MAP + cv.CV_WARP_FILL_OUTLIERS) #Note: If you need to undo the transformation, you can simply invert H and call cv.WarpPerspective again. return birds_image
# create the wanted images eig = cv.CreateImage(cv.GetSize(grey), 32, 1) temp = cv.CreateImage(cv.GetSize(grey), 32, 1) # the default parameters quality = 0.01 min_distance = 10 # search the good points features = cv.GoodFeaturesToTrack(grey, eig, temp, MAX_COUNT, quality, min_distance, None, 3, 0, 0.04) # refine the corner locations features = cv.FindCornerSubPix( grey, features, (win_size, win_size), (-1, -1), (cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 20, 0.03)) elif features != []: # we have points, so display them # cv.ShowImage ('prev_grey', prev_grey) # cv.ShowImage ('grey', grey) # calculate the optical flow features, status, track_error = cv.CalcOpticalFlowPyrLK( prev_grey, grey, prev_pyramid, pyramid, features, (win_size, win_size), 3, (cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 20, 0.03), flags) # print "num features: ", sum(status) # print "status: ", status
def refine_features(img, corners, win=WIN, zero_zone=(-1, -1), criteria=REFINE_CRITERIA):
    """Snap *corners* to sub-pixel accuracy on the grayscale image *img*.

    Thin convenience wrapper around ``cv.FindCornerSubPix`` using the
    module-level ``WIN`` and ``REFINE_CRITERIA`` defaults; returns the
    refined corner list.
    """
    refined = cv.FindCornerSubPix(img, corners, win, zero_zone, criteria)
    return refined