Example #1
    def getFeatures(self, grey):
        """ 
        Returns a list of features generated by the OpenCV 
        GoodFeaturesToTrack() function in the gray scale image 'gray'.
        """
        #cv.ShowImage ('getFeatures() grey',grey)
        eig = cv.CreateImage(cv.GetSize(grey), 32, 1)
        temp = cv.CreateImage(cv.GetSize(grey), 32, 1)
        mask = cv.CreateImage(cv.GetSize(grey), 8, 1)

        # Create a mask image to hide the top 10% of the image (which contains text)
        (w, h) = cv.GetSize(grey)
        cv.Rectangle(mask, (0, 0), (w, h), cv.Scalar(255, 0, 0), -1)
        cv.Rectangle(mask, (0, 0), (w, int(0.1 * h)), cv.Scalar(0, 0, 0), -1)
        # cv.ShowImage ('mask',mask)
        # search for the good points
        feat = cv.GoodFeaturesToTrack(grey, eig, temp, self.MAX_COUNT,
                                      self.quality, self.min_distance, mask, 3,
                                      0, 0.04)
        print "found %d features (MAX_COUNT=%d)" % (len(feat), self.MAX_COUNT)
        # refine the corner locations
        feat = cv.FindCornerSubPix(
            grey, feat, (self.win_size, self.win_size), (-1, -1),
            (cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 20, 0.03))

        return (feat)
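The example above uses the legacy cv bindings, which were removed in OpenCV 3. As a rough modern counterpart, a minimal sketch of the same masked-detection-plus-refinement step with the cv2 API could look like this; the 10% mask, quality, distance, and termination values mirror the example, while get_features(), max_count, and win_size are illustrative names rather than part of the original class.

import cv2
import numpy as np

def get_features(grey, max_count=500, quality=0.01, min_distance=10, win_size=10):
    """Detect corners in a greyscale image, ignoring the top 10% (text overlay)."""
    h, w = grey.shape[:2]

    # mask: non-zero pixels mark the search area; zero out the top 10%
    mask = np.full((h, w), 255, dtype=np.uint8)
    mask[:int(0.1 * h), :] = 0

    corners = cv2.goodFeaturesToTrack(grey, max_count, quality, min_distance, mask=mask)
    if corners is None:
        return []

    # refine the corner locations to sub-pixel accuracy
    criteria = (cv2.TERM_CRITERIA_MAX_ITER | cv2.TERM_CRITERIA_EPS, 20, 0.03)
    corners = cv2.cornerSubPix(grey, corners, (win_size, win_size), (-1, -1), criteria)
    return [(float(x), float(y)) for x, y in corners.reshape(-1, 2)]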
Example #2
def get_corners(mono, refine=False):
    (ok, corners) = cv.FindChessboardCorners(
        mono, (num_x_ints, num_y_ints),
        cv.CV_CALIB_CB_ADAPTIVE_THRESH | cv.CV_CALIB_CB_NORMALIZE_IMAGE)
    if refine and ok:
        corners = cv.FindCornerSubPix(
            mono, corners, (5, 5), (-1, -1),
            (cv.CV_TERMCRIT_EPS + cv.CV_TERMCRIT_ITER, 30, 0.1))
    return (ok, corners)
def _get_corners(img, board, refine=True):
    """
    Get corners for a particular chessboard for an image
    """
    w, h = cv.GetSize(img)
    mono = cv.CreateMat(h, w, cv.CV_8UC1)
    cv.CvtColor(img, mono, cv.CV_BGR2GRAY)
    (ok, corners) = cv.FindChessboardCorners(
        mono, (board.n_cols, board.n_rows), cv.CV_CALIB_CB_ADAPTIVE_THRESH
        | cv.CV_CALIB_CB_NORMALIZE_IMAGE | cv2.CALIB_CB_FAST_CHECK)

    # If any corners are within BORDER pixels of the screen edge, reject the detection by setting ok to false
    # NOTE: This may cause problems with very low-resolution cameras, where 8 pixels is a non-negligible fraction
    # of the image size. See http://answers.ros.org/question/3155/how-can-i-calibrate-low-resolution-cameras
    BORDER = 8
    if not all([(BORDER < x < (w - BORDER)) and (BORDER < y < (h - BORDER))
                for (x, y) in corners]):
        ok = False

    if refine and ok:
        # Use a radius of half the minimum distance between corners. This should be large enough to snap to the
        # correct corner, but not so large as to include a wrong corner in the search window.
        min_distance = float("inf")
        for row in range(board.n_rows):
            for col in range(board.n_cols - 1):
                index = row * board.n_cols + col
                min_distance = min(min_distance,
                                   _pdist(corners[index], corners[index + 1]))
        for row in range(board.n_rows - 1):
            for col in range(board.n_cols):
                index = row * board.n_cols + col
                min_distance = min(
                    min_distance,
                    _pdist(corners[index], corners[index + board.n_cols]))
        radius = int(math.ceil(min_distance * 0.5))
        corners = cv.FindCornerSubPix(
            mono, corners, (radius, radius), (-1, -1),
            (cv.CV_TERMCRIT_EPS + cv.CV_TERMCRIT_ITER, 30, 0.1))

    return (ok, corners)
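For comparison, the detect / border-check / sub-pixel-refine sequence of _get_corners() above might be sketched against the cv2 API as follows; get_corners_cv2(), n_cols, n_rows, and border are placeholder names, and the search radius is again half the minimum spacing between neighbouring corners.

import math
import cv2
import numpy as np

def get_corners_cv2(img_bgr, n_cols, n_rows, border=8, refine=True):
    """Find chessboard corners, reject detections too close to the image edge,
    then refine the remaining detection to sub-pixel accuracy."""
    mono = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2GRAY)
    flags = (cv2.CALIB_CB_ADAPTIVE_THRESH | cv2.CALIB_CB_NORMALIZE_IMAGE |
             cv2.CALIB_CB_FAST_CHECK)
    ok, corners = cv2.findChessboardCorners(mono, (n_cols, n_rows), flags=flags)
    if not ok:
        return ok, corners

    h, w = mono.shape[:2]
    pts = corners.reshape(-1, 2)

    # reject the detection if any corner lies within `border` pixels of the edge
    if not all(border < x < w - border and border < y < h - border for x, y in pts):
        return False, corners

    if refine:
        # radius = half the minimum distance between horizontally/vertically
        # neighbouring corners, so the search window cannot reach a wrong corner
        grid = pts.reshape(n_rows, n_cols, 2)
        dx = np.linalg.norm(grid[:, 1:] - grid[:, :-1], axis=2).min()
        dy = np.linalg.norm(grid[1:, :] - grid[:-1, :], axis=2).min()
        radius = int(math.ceil(min(dx, dy) * 0.5))
        criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_MAX_ITER, 30, 0.1)
        corners = cv2.cornerSubPix(mono, corners, (radius, radius), (-1, -1), criteria)

    return True, corners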
Example #4
    # create the wanted images
    eig = cv.CreateImage(cv.GetSize(grey), 32, 1)
    temp = cv.CreateImage(cv.GetSize(grey), 32, 1)

    # the default parameters
    quality = 0.01
    min_distance = 10

    # search the good points
    features = cv.GoodFeaturesToTrack(grey, eig, temp, MAX_COUNT, quality,
                                      min_distance, None, 3, 0, 0.04)

    # refine the corner locations
    features = cv.FindCornerSubPix(
        grey, features, (win_size, win_size), (-1, -1),
        (cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 20, 0.03))

    # calculate the optical flow
    features, status, track_error = cv.CalcOpticalFlowPyrLK(
        prev_grey, grey, prev_pyramid, pyramid, features, (win_size, win_size),
        3, (cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 20, 0.03), 0)

    # set back the points we keep
    features = [p for (st, p) in zip(status, features) if st]

    # draw the points as green circles
    for the_point in features:
        cv.Circle(image, (int(the_point[0]), int(the_point[1])), 3,
                  (0, 255, 0, 0), -1, 8, 0)
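Assuming prev_grey, grey, and image are NumPy images and features is the float32 array returned by cv2.goodFeaturesToTrack(), the same track / filter / draw step could be sketched with the cv2 API as below (win_size as in the snippet above); cv2 builds the pyramids internally, so no prev_pyramid/pyramid buffers are needed.

import cv2

lk_params = dict(winSize=(win_size, win_size), maxLevel=3,
                 criteria=(cv2.TERM_CRITERIA_MAX_ITER | cv2.TERM_CRITERIA_EPS, 20, 0.03))

# calculate the optical flow (pyramids are built internally by cv2)
new_features, status, track_error = cv2.calcOpticalFlowPyrLK(
    prev_grey, grey, features, None, **lk_params)

# keep only the points that were successfully tracked
features = new_features[status.ravel() == 1]

# draw the kept points as green circles
for x, y in features.reshape(-1, 2):
    cv2.circle(image, (int(x), int(y)), 3, (0, 255, 0), -1)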
    def downsample_and_detect(self, rgb):
        """
        Downsample the input image to approximately VGA resolution and detect the
        calibration target corners in the full-size image.

        Combines these apparently orthogonal duties as an optimization. Checkerboard
        detection is too expensive on large images, so it's better to do detection on
        the smaller display image and scale the corners back up to the correct size.

        Returns (scrib, corners, downsampled_corners, board, (x_scale, y_scale)).
        """
        # Scale the input image down to ~VGA size
        (width, height) = cv.GetSize(rgb)
        scale = math.sqrt((width * height) / (640. * 480.))
        if scale > 1.0:
            scrib = cv.CreateMat(int(height / scale), int(width / scale),
                                 cv.GetElemType(rgb))
            cv.Resize(rgb, scrib)
        else:
            scrib = cv.CloneMat(rgb)
        # Due to rounding, actual horizontal/vertical scaling may differ slightly
        x_scale = float(width) / scrib.cols
        y_scale = float(height) / scrib.rows

        if self.pattern == Patterns.Chessboard:
            # Detect checkerboard
            (ok, downsampled_corners, board) = self.get_corners(scrib,
                                                                refine=True)

            # Scale corners back to full size image
            corners = None
            if ok:
                if scale > 1.0:
                    # Refine up-scaled corners in the original full-res image
                    # TODO Does this really make a difference in practice?
                    corners_unrefined = [(c[0] * x_scale, c[1] * y_scale)
                                         for c in downsampled_corners]
                    # TODO It's silly that this conversion is needed, this function should just work
                    #      on the one-channel mono image
                    mono = cv.CreateMat(rgb.rows, rgb.cols, cv.CV_8UC1)
                    cv.CvtColor(rgb, mono, cv.CV_BGR2GRAY)
                    radius = int(math.ceil(scale))
                    corners = cv.FindCornerSubPix(
                        mono, corners_unrefined, (radius, radius), (-1, -1),
                        (cv.CV_TERMCRIT_EPS + cv.CV_TERMCRIT_ITER, 30, 0.1))
                else:
                    corners = downsampled_corners
        else:
            # Circle grid detection is fast even on large images
            (ok, corners, board) = self.get_corners(rgb)
            # Scale corners to downsampled image for display
            downsampled_corners = None
            if ok:
                #                print corners
                if scale > 1.0:
                    downsampled_corners = [(c[0] / x_scale, c[1] / y_scale)
                                           for c in corners]
                else:
                    downsampled_corners = corners

        return (scrib, corners, downsampled_corners, board, (x_scale, y_scale))
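A compressed sketch of the same downsample, detect, rescale, and re-refine idea for the chessboard case with the cv2 API, reusing the hypothetical get_corners_cv2() sketched under Example #2 above:

import math
import cv2

def downsample_and_detect_cv2(rgb, n_cols, n_rows):
    """Detect on a ~VGA-sized copy, then scale the corners back up and refine
    them against the full-resolution image."""
    h, w = rgb.shape[:2]
    scale = math.sqrt((w * h) / (640.0 * 480.0))
    if scale > 1.0:
        scrib = cv2.resize(rgb, (int(w / scale), int(h / scale)))
    else:
        scrib = rgb.copy()
    x_scale = float(w) / scrib.shape[1]
    y_scale = float(h) / scrib.shape[0]

    ok, small_corners = get_corners_cv2(scrib, n_cols, n_rows)
    corners = None
    if ok:
        if scale > 1.0:
            # scale corners back to the full-size image and re-refine them there
            corners = small_corners.copy()
            corners[:, :, 0] *= x_scale
            corners[:, :, 1] *= y_scale
            mono = cv2.cvtColor(rgb, cv2.COLOR_BGR2GRAY)
            radius = int(math.ceil(scale))
            criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_MAX_ITER, 30, 0.1)
            corners = cv2.cornerSubPix(mono, corners, (radius, radius), (-1, -1), criteria)
        else:
            corners = small_corners

    return scrib, corners, small_corners, (x_scale, y_scale)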
Example #6
# cornersA =cvPointTo32f(MAX_CORNERS)

cornersA = cv.GoodFeaturesToTrack(
    imgA,  # image
    eig_image,  # Temporary floating-point 32-bit image
    tmp_image,  # Another temporary image
    #       cornersA,  # number of corners to detect
    corner_count,  # number of corners to detect
    0.01,  # quality level
    5.0,  # minDistance
    useHarris=0,
)
cornersA = cv.FindCornerSubPix(
    imgA,
    cornersA,
    #   corner_count,
    (win_size, win_size),
    (-1, -1),
    (cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 20, 0.03)
)
# Call the Lucas Kanade algorithm
#
# features_found = [ MAX_CORNERS ];
# feature_errors = [ MAX_CORNERS ];
pyr_sz = (imgA.width + 8, imgB.height / 3)
pyrA = cv.CreateImage(pyr_sz, cv.IPL_DEPTH_32F, 1)
pyrB = cv.CreateImage(pyr_sz, cv.IPL_DEPTH_32F, 1)
cornersB = []
cornersB, features_found, feature_errors = cv.CalcOpticalFlowPyrLK(
    imgA,
    imgB,
    pyrA,
Example #7
            cv.SetZero (image)

        if need_to_init:
            # we want to search all the good points
            # create the wanted images
            eig = cv.CreateImage (cv.GetSize (grey), 32, 1)
            temp = cv.CreateImage (cv.GetSize (grey), 32, 1)

            # search the good points
            points [1] = cv.GoodFeaturesToTrack (
                grey, eig, temp,
                MAX_COUNT,
                quality, min_distance, None, 3, 0, 0.04)
            
            # refine the corner locations
            points [1] = cv.FindCornerSubPix (
                grey,
                points [1],
                (win_size, win_size), (-1, -1),
                (cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS,
                                   20, 0.03))
                                               
        elif len (points [0]) > 0:
            # we have points, so display them

            # calculate the optical flow
            points [1], status, something = cv.CalcOpticalFlowPyrLK (
                prev_grey, grey, prev_pyramid, pyramid,
                points [0], (win_size, win_size), 3,
                (cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS,
                                   20, 0.03), flags)