Code example #1
    def findMarkers(self, gray, objpoints, imgpoints):
            # objp = np.zeros((self.marker_size[0]*self.marker_size[1],3), np.float32)
            # objp[:,:2] = np.mgrid[0:self.marker_size[0],0:self.marker_size[1]].T.reshape(-1,2)
            objp = np.zeros((np.prod(self.marker_size), 3), np.float32)
            objp[:, :2] = np.indices(self.marker_size).T.reshape(-1, 2)*self.marker_scale  # make a grid of points

            # Find the chess board corners or circle centers
            if self.marker_type is Markers.checkerboard:
                flags = cv2.CALIB_CB_ADAPTIVE_THRESH | cv2.CALIB_CB_FAST_CHECK | cv2.CALIB_CB_NORMALIZE_IMAGE
                ret, corners = cv2.findChessboardCorners(gray, self.marker_size, flags=flags)
            elif self.marker_type is Markers.circle:
                flags=0
                ret, corners = cv2.findCirclesGrid(gray, self.marker_size, flags=flags)
            elif self.marker_type is Markers.acircle:
                flags=cv2.CALIB_CB_ASYMMETRIC_GRID
                ret, corners = cv2.findCirclesGrid(gray, self.marker_size, flags=flags)
            # elif self.marker_type is Markers.charuco:
            #     corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, self.dictionary)
            #     # ret = True if len(ids) > 0 else False
            #     if len(ids) > 0:
            #         ret, corners, ids = aruco.interpolateCornersCharuco(corners, ids, gray, self.board)
            else:
                raise Exception("invalid marker type: {}".format(self.marker_type))

            if ret:
                # rt = cv2.cornerSubPix(gray_l, corners_l, (11, 11),(-1, -1), self.criteria)
                imgpoints.append(corners.reshape(-1, 2))
                objpoints.append(objp)
            else:
                corners = [] # didn't find any

            return ret, objpoints, imgpoints, corners
Code example #2
def _get_circles(img, board, pattern):
    """
    Get circle centers for a symmetric or asymmetric grid
    """
    h = img.shape[0]
    w = img.shape[1]
    if len(img.shape) == 3 and img.shape[2] == 3:
        mono = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    else:
        mono = img

    flag = cv2.CALIB_CB_SYMMETRIC_GRID
    if pattern == Patterns.ACircles:
        flag = cv2.CALIB_CB_ASYMMETRIC_GRID
    mono_arr = numpy.array(mono)
    (ok, corners) = cv2.findCirclesGrid(mono_arr, (board.n_cols, board.n_rows),
                                        flags=flag)

    # In symmetric case, findCirclesGrid does not detect the target if it's turned sideways. So we try
    # again with dimensions swapped - not so efficient.
    # TODO Better to add as second board? Corner ordering will change.
    if not ok and pattern == Patterns.Circles:
        (ok, corners) = cv2.findCirclesGrid(mono_arr,
                                            (board.n_rows, board.n_cols),
                                            flags=flag)

    return (ok, corners)
Code example #3
def _get_circles(img, board, pattern):
    """
    Get circle centers for a symmetric or asymmetric grid
    """
    w, h = cv.GetSize(img)
    mono = cv.CreateMat(h, w, cv.CV_8UC1)
    cv.CvtColor(img, mono, cv.CV_BGR2GRAY)

    flag = cv2.CALIB_CB_SYMMETRIC_GRID
    if pattern == Patterns.ACircles:
        flag = cv2.CALIB_CB_ASYMMETRIC_GRID
    mono_arr = numpy.array(mono)
    (ok, corners) = cv2.findCirclesGrid(mono_arr, (board.n_cols, board.n_rows), flags=flag)

    # In symmetric case, findCirclesGrid does not detect the target if it's turned sideways. So we try
    # again with dimensions swapped - not so efficient.
    # TODO Better to add as second board? Corner ordering will change.
    if not ok and pattern == Patterns.Circles:
        (ok, corners) = cv2.findCirclesGrid(mono_arr, (board.n_rows, board.n_cols), flags=flag)

    # For some reason findCirclesGrid returns centers as [[x y]] instead of (x y) like FindChessboardCorners
    if corners is not None:
        corners = [(x, y) for [[x, y]] in corners]

    return (ok, corners)
Code example #4
def testImage(imageData):
    retval = False
    print "\tlooking for black on white..."
    if testForSideMarker(imageData, blackOnWhiteBlobDetector):
        print "\tfound black on white!"
        retval = True
    else:
        print "\tdid not find black on white"
    print "\tlooking for white on black..."
    if testForSideMarker(imageData, whiteOnBlackBlobDetector):
        print "\tfound white on black!"
        retval = True
    else:
        print "\tdid not find white on black"

    print "\tlooking for black on white circles grid..."
    found, centers = cv2.findCirclesGrid(
        imageData, (3, 9), flags=cv2.CALIB_CB_ASYMMETRIC_GRID, blobDetector=blackOnWhiteBlobDetector
    )
    if found:
        print "\tfound black on white circles grid!"
        retval = True

    print "\tlooking for white on black circles grid..."
    found, centers = cv2.findCirclesGrid(
        imageData, (3, 9), flags=cv2.CALIB_CB_ASYMMETRIC_GRID, blobDetector=whiteOnBlackBlobDetector
    )
    if found:
        print "\tfound white on black circles grid!"
        retval = True

    return retval
Code example #5
    def findMarkers(self, gray):
        # Find the chess board corners or circle centers
        if self.marker_type is Markers.checkerboard:
            flags = 0
            flags |= cv2.CALIB_CB_ADAPTIVE_THRESH
            flags |= cv2.CALIB_CB_FAST_CHECK
            flags |= cv2.CALIB_CB_NORMALIZE_IMAGE
            ret, corners = cv2.findChessboardCorners(gray,
                                                     self.marker_size,
                                                     flags=flags)
        elif self.marker_type is Markers.circle:
            flags = 0
            ret, corners = cv2.findCirclesGrid(gray,
                                               self.marker_size,
                                               flags=flags)
        elif self.marker_type is Markers.acircle:
            flags = cv2.CALIB_CB_ASYMMETRIC_GRID
            ret, corners = cv2.findCirclesGrid(gray,
                                               self.marker_size,
                                               flags=flags)
        else:
            raise Exception("invalid marker type: {}".format(self.marker_type))

        if not ret:
            corners = []  # didn't find any

        return ret, corners
Code example #6
def _get_circles(img, board, pattern):
    """
    Get circle centers for a symmetric or asymmetric grid
    """
    w, h = cv.GetSize(img)
    if img.channels == 3:
        mono = cv.CreateMat(h, w, cv.CV_8UC1)
        cv.CvtColor(img, mono, cv.CV_BGR2GRAY)
    else:
        mono = img

    flag = cv2.CALIB_CB_SYMMETRIC_GRID
    if pattern == Patterns.ACircles:
        flag = cv2.CALIB_CB_ASYMMETRIC_GRID
    mono_arr = numpy.array(mono)
    (ok, corners) = cv2.findCirclesGrid(mono_arr, (board.n_cols, board.n_rows),
                                        flags=flag)

    # In symmetric case, findCirclesGrid does not detect the target if it's turned sideways. So we try
    # again with dimensions swapped - not so efficient.
    # TODO Better to add as second board? Corner ordering will change.
    if not ok and pattern == Patterns.Circles:
        (ok, corners) = cv2.findCirclesGrid(mono_arr,
                                            (board.n_rows, board.n_cols),
                                            flags=flag)

    # For some reason findCirclesGrid returns centers as [[x y]] instead of (x y) like FindChessboardCorners
    if corners is not None:
        corners = [(x, y) for [[x, y]] in corners]

    return (ok, corners)
Code example #7
	def processImage(self, image_msg):
		if self.lock.testandset():
			vehicle_detected_msg_out = BoolStamped()
			try:
				image_cv=self.bridge.imgmsg_to_cv2(image_msg,"bgr8")
			except CvBridgeError as e:
				print e
			start = rospy.Time.now()
			params = cv2.SimpleBlobDetector_Params()
			params.minArea = self.blobdetector_min_area
			params.minDistBetweenBlobs = self.blobdetector_min_dist_between_blobs
			simple_blob_detector = cv2.SimpleBlobDetector(params)
			(detection, corners) = cv2.findCirclesGrid(image_cv,
					self.circlepattern_dims, flags=cv2.CALIB_CB_SYMMETRIC_GRID,
					blobDetector=simple_blob_detector)
			elapsed_time = (rospy.Time.now() - start).to_sec()
			self.pub_time_elapsed.publish(elapsed_time)
			vehicle_detected_msg_out.data = detection
			self.pub_detection.publish(vehicle_detected_msg_out)
			if self.publish_circles:
				cv2.drawChessboardCorners(image_cv, 
						self.circlepattern_dims, corners, detection)
				image_msg_out = self.bridge.cv2_to_imgmsg(image_cv, "bgr8")
				self.pub_circlepattern_image.publish(image_msg_out)
			self.lock.unlock()
Code example #8
def filter_images(input_pattern, pattern, width, height):
# termination criteria
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
# Arrays to store object points and image points from all the images.
    imgpoints = [] # 2d points in image plane.
    images = glob(input_pattern)
    to_del = []
    for i, fname in enumerate(images):
        #print fname
        img = cv2.imread(fname)
        gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
        # Find the chess board corners
        if pattern=='chessboard':
            ret, interest_points = cv2.findChessboardCorners(gray, (width, height), None)
            if ret == True:
                cv2.cornerSubPix(gray, interest_points, (11, 11), (-1, -1), criteria)
        else:
            flag = cv2.CALIB_CB_SYMMETRIC_GRID if pattern=='circles' else \
                   cv2.CALIB_CB_ASYMMETRIC_GRID
            ret, interest_points = cv2.findCirclesGrid(gray, (width, height), None, flag)
        # If found, add object points, image points (after refining them)
        if ret == True:
            cv2.cornerSubPix(gray, interest_points, (11, 11), (-1, -1), criteria)
            imgpoints.append(interest_points)
        else:
            to_del.append(i)
    while len(to_del) > 0:
        last_item = to_del[-1]
        del images[last_item]
        del to_del[-1]
    return images, imgpoints
Code example #9
    def _find_pattern(self):
        """
        Tries to find pattern from 'self.frame'
        If found, 'self._pattern_found' will be set to "True" and 'self._pattern_points' will be filled with
        coordinates.
        If pattern is not found in the frame 'self._pattern_found' will be set to "False"
        When the recognition has ended, Event 'self._new_data' will be cleared
        """
        gray = cv2.cvtColor(self._frame, cv2.COLOR_BGR2GRAY)
        if self.pattern_type == PatternType.Checkerboard:
            self._pattern_found, self._pattern_points = cv2.findChessboardCorners(
                image=gray,
                patternSize=self.pattern_dims,
                flags=0,  # cv2.CALIB_CB_ADAPTIVE_THRESH + cv2.CALIB_CB_NORMALIZE_IMAGE
            )
            if self._pattern_found:
                # Improve found points' accuracy
                criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.1)
                self._pattern_points = cv2.cornerSubPix(gray, self._pattern_points, (11, 11), (-1, -1), criteria)

        elif self.pattern_type == PatternType.AsymmetricCircles:
            self._pattern_found, self._pattern_points = cv2.findCirclesGrid(
                image=gray,
                patternSize=self.pattern_dims,
                flags=cv2.CALIB_CB_ASYMMETRIC_GRID,  # + cv2.CALIB_CB_CLUSTERING
            )

        self._new_data.clear()
Code example #10
def live_calibrate(camera, pattern_shape, n_matches_needed):
    """ Find calibration parameters as the user moves a checkerboard in front of the camera """
    print("Looking for %s checkerboard" % (pattern_shape,))
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
    example_3d = np.zeros((pattern_shape[0] * pattern_shape[1], 3), np.float32)
    example_3d[:, :2] = np.mgrid[0 : pattern_shape[1], 0 : pattern_shape[0]].T.reshape(-1, 2)
    points_3d = []
    points_2d = []
    while len(points_3d) < n_matches_needed:
        ret, frame = camera.cap.read()
        assert ret
        gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        ret, corners = cv2.findCirclesGrid(
            gray_frame, pattern_shape, flags=cv2.CALIB_CB_ASYMMETRIC_GRID
        )
        cv2.imshow("camera", frame)
        if ret:
            points_3d.append(example_3d.copy())
            points_2d.append(corners)
            print("Found calibration %i of %i" % (len(points_3d), n_matches_needed))
            drawn_frame = cv2.drawChessboardCorners(frame, pattern_shape, corners, ret)
            cv2.imshow("calib", drawn_frame)
        cv2.waitKey(10)
    ret, camera_matrix, distortion_coefficients, _, _ = cv2.calibrateCamera(
        points_3d, points_2d, gray_frame.shape[::-1], None, None
    )
    assert ret
    return camera_matrix, distortion_coefficients
Code example #11
File: utils.py Project: RyusizzSNU/SomaCube
def find_imagepoints_from_images(images,
                                 w,
                                 h,
                                 square=True,
                                 show_imgs=0,
                                 rotate_angle=0):
    # Arrays to store object points and image points from all the images.
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
    read_imgs = []
    points_dict = {}

    for i in range(len(images)):
        img = images[i]
        img = rotate_image(img, rotate_angle)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        # Find the chess board corners
        if square:
            ret, corners = cv2.findChessboardCorners(img, (w, h), None)
        else:
            ret, corners = cv2.findCirclesGrid(img, (w, h), None)

        # If found, add object points, image points (after refining them)
        if ret == True:
            cv2.cornerSubPix(img, corners, (11, 11), (-1, -1), criteria)
            points_dict[i] = corners
            read_imgs.append(img)
            # Draw and display the corners
            img = cv2.drawChessboardCorners(img, (w, h), corners, ret)

            if show_imgs != 0:
                cv2.imshow('img', img)
                cv2.waitKey(0)
    print("succeeded to find points in %d imgs", len(read_imgs))
    return read_imgs, points_dict
Code example #12
    def recent_events(self, events):
        frame = events.get('frame')
        if not frame:
            return
        if self.collect_new:
            img = frame.img
            status, grid_points = cv2.findCirclesGrid(img, (4,11), flags=cv2.CALIB_CB_ASYMMETRIC_GRID)
            if status:
                self.img_points.append(grid_points)
                self.obj_points.append(self.obj_grid)
                self.collect_new = False
                self.count -=1
                self.button.status_text = "{:d} to go".format(self.count)


        if self.count<=0 and not self.calculated:
            self.calculate()
            self.button.status_text = ''

        if self.window_should_close:
            self.close_window()

        if self.show_undistortion:

            adjusted_k,roi = cv2.getOptimalNewCameraMatrix(cameraMatrix= self.camera_intrinsics[0], distCoeffs=self.camera_intrinsics[1], imageSize=self.camera_intrinsics[2], alpha=0.5,newImgSize=self.camera_intrinsics[2],centerPrincipalPoint=1)
            self.undist_img = cv2.undistort(frame.img, self.camera_intrinsics[0], self.camera_intrinsics[1],newCameraMatrix=adjusted_k)
Code example #13
def main():
    args = get_args()
    w = args.width
    h = args.height
    margin = args.margin_size
    block_size = args.block_size
    radius = args.radius
    chessboard = np.ones((block_size * h + margin * 2, block_size * w + margin * 2), dtype=np.uint8) * 255

    for y in range(h):
        for x in range(w):
            cx = int((x + 0.5) * block_size + margin)
            cy = int((y + 0.5) * block_size + margin)
            cv2.circle(chessboard, (cx, cy), radius, 0, thickness=-1)

    #cv2.imwrite("circleboard{}x{}.png".format(w, h), chessboard)

    ch = 1024
    cw = 768
    chessboard = cv2.resize(chessboard,(ch,cw))
    #print chessboard.shape[:2]

    ret2, circles = cv2.findCirclesGrid(chessboard, (10,7), flags = cv2.CALIB_CB_SYMMETRIC_GRID)

    if ret2 == True:
        objpoints.append(objp)
        imgpoints.append(circles)
        cv2.drawChessboardCorners(chessboard, (10, 7), circles, ret2)
        print circles[0][0]
        print circles[9][0]
        print circles[69][0]
        print circles[60][0]
Code example #14
def find_points_from_images(cam, square=True):
    i = 0
    imgs = []
    points_dict = {}
    while (True):
        try:
            with open(args.data_dir + '/images/%s_%d.jpg' % (cam, i)) as f:
                pass
            imgs.append(
                cv2.imread(args.data_dir + '/images/%s_%d.jpg' % (cam, i)))
        except IOError:
            break
        i += 1
    for i in range(len(imgs)):
        img = imgs[i]
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        if square:
            ret, corners = cv2.findChessboardCorners(img, (args.h, args.w),
                                                     None)
        else:
            ret, corners = cv2.findCirclesGrid(img, (args.h, args.w), None)

        if ret:
            corners = cv2.cornerSubPix(img, corners, (11, 11), (-1, -1),
                                       criteria)
            points_dict[i] = np.squeeze(corners)

            img = cv2.drawChessboardCorners(img, (args.h, args.w), corners,
                                            ret)
            if args.show_imgs != 0:
                cv2.imshow('img', img)
                cv2.waitKey(0)
    return imgs, points_dict
Code example #15
def main():

    chessboard = cv2.imread('circleboard10x5.png', 1)
    #ch = 1920
    #cw = 1080
    #chessboard = cv2.resize(chessboard,(ch,cw))
    print chessboard.shape[:2]

    ret2, circles = cv2.findCirclesGrid(chessboard, (10, 5),
                                        flags=cv2.CALIB_CB_SYMMETRIC_GRID)

    if ret2 == True:
        objpoints.append(objp)
        imgpoints.append(circles)
        cv2.drawChessboardCorners(chessboard, (10, 5), circles, ret2)
        print circles[0][0]
        print circles[8][0]
        print circles[49][0]
        print circles[41][0]

    cv2.imshow('img', chessboard)
    #cv2.imshow('inv_img',invgray)
    cv2.waitKey(0)

    cv2.destroyAllWindows()
Code example #16
def image_callback(data):
    frame = bridge.imgmsg_to_cv2(data, desired_encoding="passthrough")
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # For annotation
    for i in range(len(img_points)):
        if (s == 'A' or s == 'a'):
            cv2.drawChessboardCorners(frame, (9, 6), img_points[i],
                                      ret_list[i])
        elif (s == 'B' or s == 'b'):
            cv2.drawChessboardCorners(frame, (7, 7), img_points[i],
                                      ret_list[i])

    cv2.imshow('capture', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        if (s == 'A' or s == 'a'):
            # Interestingly, the function returns False when it is applied twice to the same
            # image. Why?
            ret, corners = cv2.findChessboardCorners(gray, (9, 6), None)
            print ret
        elif (s == 'B' or s == 'b'):
            ret, corners = cv2.findCirclesGrid(gray, (7, 7), None)
            print ret
        if (ret > 0):
            img_points.append(corners)
            ret_list.append(ret)
            file_name = str(len(img_points)) + ".png"
            cv2.imwrite(file_name, gray)
            print "calibration image number: %d" % len(img_points)
Code example #17
def addObjAndImgPoints(path):
    # Arrays to hold all the object and image points from all the calibration images
    objectPoints = [] # Circle center coordinates relative to the circle board
    imagePoints = [] # Circle center coordinates in absolute positioning (relative to whole image).
    imageShape = None
    
    # We need to add data points for evaluating where a circle is on the grid
    # The circle pattern I used has a zig-zagging grid that goes up and down instead of being a straight grid.
    # The circle grid I used can be found here:
    # In order to make sure the data points can fit this geometry, we must remove non-compatible data.
    # Also, the circle grid has a shape of 4 x 11.

    # Create a 3D array for holding circle center coordinates from the circle grid relative to the lower-left circle.
    objp = np.zeros((77, 3), np.float32) # Placeholder 7 x 11 lattice of candidate points; entries that do not exist on the 4 x 11 asymmetric pattern are removed below.

    # Have a placeholder array to hold the indices of incompatible data points which need to be removed
    arraysToRemove = []

    # Replace filler data points with prospective data points to objp,
    # where the z component is zero and the x and y components
    # range from 0 to 3 and 0 to 5 respectively, with an increment of 0.5.
    objp[:,:2] = np.mgrid[0:3:7j, 0:5:11j].T.reshape(-1,2)

    # NOTE: Incompatible data points occur where one of the two components is a whole number,
    # and the other component has a decimal value added (from a whole number) of 0.5.
    # EX: x = 0.5, y = 1 does not exist on the circle pattern, so the array containing these points
    # must be removed.

    # Find and remove incompatible data points
    for i, x in enumerate(objp):
        if (abs(x[1] - x[0]) == 0.5) or (abs(x[1] - x[0]) == 1.5) or (abs(x[1] - x[0]) == 2.5) or (abs(x[1] - x[0]) == 3.5) or (abs(x[1] - x[0]) == 4.5): # Finds the incompatible data points to remove
            arraysToRemove.append(i) # Adds the indices of incompatible data point arrays for later use

    # Since the array we worked with in the for loop is an ndarray (numpy array),
    # we must convert the arraysToRemove array into an ndarray
    # so that we can remove the incompatible data points from objp.
    npArraysToRemove = np.array(arraysToRemove)

    # Remove the arrays whose data points cannot exist with the circle grid image
    objp = np.delete(objp, npArraysToRemove, 0)

    # Load calibration images

    imagePaths = glob.glob("{0}/*.jpeg".format(path))

    # Find object and image points, and grab the shape of the image
    for file in imagePaths:
        img = cv2.imread(file)
        grayimg = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        imageShape = grayimg.shape[::-1]

        # Determine whether circles are found in the image and the coordinates of the centers
        circlesFound, centerCoords = cv2.findCirclesGrid(grayimg, (4, 11))

        # If circles are found, add object and image points
        if circlesFound:
            objectPoints.append(objp)
            imagePoints.append(centerCoords)

    return objectPoints, imagePoints, imageShape
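The comments above build the asymmetric-grid object points by pruning a denser lattice. A more direct construction is sketched below; this is a minimal sketch that assumes unit grid spacing and that the row-major ordering matches what cv2.findCirclesGrid returns for CALIB_CB_ASYMMETRIC_GRID, which is worth verifying with cv2.drawChessboardCorners before calibrating.

import numpy as np

# Hypothetical helper (not taken from the code above): object points for a
# (cols x rows) asymmetric circle grid. Odd rows are shifted by half a period,
# expressed by the "2 * col + row % 2" term; multiply by the real circle
# spacing to get metric units.
def asymmetric_circle_objp(cols=4, rows=11, spacing=1.0):
    objp = np.zeros((cols * rows, 3), np.float32)
    for row in range(rows):
        for col in range(cols):
            objp[row * cols + col, :2] = ((2 * col + row % 2) * spacing,
                                          row * spacing)
    return objp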
Code example #18
File: calib.py Project: dottiey/Calib2DCamera
def findCirclesGrid(array,grid_shape1, grid_shape2):
    img=np.asarray(array,dtype=np.uint8)
    ret, centers = cv2.findCirclesGrid(img, (grid_shape1, grid_shape2), None)
    if ret==True:
        cv2.drawChessboardCorners(img, (grid_shape1, grid_shape2), centers, ret)
    show_img("ff",img)
    return ret
Code example #19
    def update(self, frame, events):
        if self.collect_new:
            img = frame.img
            status, grid_points = cv2.findCirclesGrid(
                img, (4, 11), flags=cv2.CALIB_CB_ASYMMETRIC_GRID)
            if status:
                self.img_points.append(grid_points)
                self.obj_points.append(self.obj_grid)
                self.collect_new = False
                self.count -= 1
                self.button.status_text = "{:d} to go".format(self.count)

        if self.count <= 0 and not self.calculated:
            self.calculate()
            self.button.status_text = ''

        if self.window_should_close:
            self.close_window()

        if self.show_undistortion:

            adjusted_k, roi = cv2.getOptimalNewCameraMatrix(
                cameraMatrix=self.camera_intrinsics[0],
                distCoeffs=self.camera_intrinsics[1],
                imageSize=self.camera_intrinsics[2],
                alpha=0.5,
                newImgSize=self.camera_intrinsics[2],
                centerPrincipalPoint=1)
            self.undist_img = cv2.undistort(frame.img,
                                            self.camera_intrinsics[0],
                                            self.camera_intrinsics[1],
                                            newCameraMatrix=adjusted_k)
Code example #20
 def calibration(images, obj_size=(7, 7), obj_dis=30):
     """ 进行相机标定
     images: RGB格式的一些图片数组
     """
     objp = np.zeros((obj_size[0] * obj_size[1], 3), np.float32)
     objp[:, :2] = np.mgrid[0:obj_size[0], 0:obj_size[1]].T.reshape(-1, 2)
     objp = objp * obj_dis
     objpoints = []
     imgpoints = []
     for img in images:
         # img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
         img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
         ret, centers = cv2.findCirclesGrid(img, obj_size,
                                            cv2.CALIB_CB_SYMMETRIC_GRID)
         if ret:
             objpoints.append(objp)
             imgpoints.append(centers)
     ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(
         objpoints, imgpoints, img.shape[:2][::-1], None, None)
     print(ret)
     print("mtx:\n", mtx)  # 内参数矩阵
     print("dist:\n",
           dist)  # 畸变系数   distortion cofficients = (k_1,k_2,p_1,p_2,k_3)
     # print("rvecs:\n", rvecs[0])   # 旋转向量  # 外参数
     # print("tvecs:\n", tvecs[0])  # 平移向量  # 外参数
     mean_error = 0
     for i in range(len(objpoints)):
         imgpoints2, _ = cv2.projectPoints(objpoints[i], rvecs[i], tvecs[i],
                                           mtx, dist)
         error = cv2.norm(imgpoints[i], imgpoints2,
                          cv2.NORM_L2) / len(imgpoints2)
         mean_error += error
     mean_error = mean_error / len(objpoints)
     print("total error: {:.4f} pixel".format(mean_error))
     return mtx, dist, mean_error
Code example #21
def calibrationCapture(frame, config):
    # A pipeline to capture frames (asymmetric circles) to be used for calibration

    # TODO: Format for use with /data directory
    output_dir = Path('calib_imgs')

    output_dir.mkdir(exist_ok=True)

    pattern_width = 8
    pattern_height = 27
    pattern_size = (pattern_width, pattern_height)

    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    ret, centers = cv2.findCirclesGrid(gray, pattern_size, None,
                                       cv2.CALIB_CB_ASYMMETRIC_GRID)

    if ret:
        cv2.imwrite(str(output_dir / 'frame-{}.png'.format(time.monotonic())),
                    frame)
        cv2.drawChessboardCorners(frame, pattern_size, centers, ret)

        logging.info("Frame captured")

    return (None, frame)
Code example #22
    def recent_events(self, events):
        frame = events.get("frame")
        if not frame:
            return
        if self.collect_new:
            img = frame.img
            status, grid_points = cv2.findCirclesGrid(
                img, (4, 11), flags=cv2.CALIB_CB_ASYMMETRIC_GRID
            )
            if status:
                self.img_points.append(grid_points)
                self.obj_points.append(self.obj_grid)
                self.collect_new = False
                self.count -= 1
                self.button.status_text = "{:d} to go".format(self.count)

        if self.count <= 0 and not self.calculated:
            self.calculate()
            self.button.status_text = ""

        if self.window_should_close:
            self.close_window()

        if self.show_undistortion:
            assert self.g_pool.capture.intrinsics
            # This function is not yet compatible with the fisheye camera model and would have to be manually implemented.
            # adjusted_k,roi = cv2.getOptimalNewCameraMatrix(cameraMatrix= np.array(self.camera_intrinsics[0]), distCoeffs=np.array(self.camera_intrinsics[1]), imageSize=self.camera_intrinsics[2], alpha=0.5,newImgSize=self.camera_intrinsics[2],centerPrincipalPoint=1)
            self.undist_img = self.g_pool.capture.intrinsics.undistort(frame.img)
Code example #23
    def load_img(self, file_name, binary_flag, save_flag, calib_flag):
        # print ("data/" , file_name , ".jpg")
        self.img = cv2.imread("data/" + file_name + ".jpg", cv2.IMREAD_COLOR)
        img = np.copy(self.img)
        # img = np.float32(img)

        if binary_flag == 1:
            orgHeight, orgWidth = img.shape[:2]
            for i in range(orgHeight):
                for j in range(orgWidth):
                    b = img[i][j][0]
                    g = img[i][j][1]
                    r = self.img[i][j][2]
                    if b > self.b and g > self.g and r > self.r:
                        img[i][j][0] = 0
                        img[i][j][1] = 0
                        img[i][j][2] = 0

        if save_flag == 1:
            cv2.imwrite("data2/" + file_name + ".jpg", img)

        if calib_flag == 1:
            ret2, circles = cv2.findCirclesGrid(
                img, (10, 5), flags=cv2.CALIB_CB_SYMMETRIC_GRID)
            if ret2 == True:
                cv2.drawChessboardCorners(img, (10, 5), circles, ret2)

        show_img = cv2.resize(img, (480, 270))
        self.drawing(show_img)
Code example #24
File: camera_calibration.py Project: narahma2/xray
def detectPoints(img, gridSize, objp, useMask):
    # Mask out View 3 for Cameras 1 and 3 only
    if useMask:
        mask = (img == 255).astype(img.dtype)
        img = ~mask * (img > threshold_local(img, 21, method='mean'))
        img = img.astype(np.uint8)

    # Convert to gray scale with dark blobs
    gray = cv2.convertScaleAbs(img)
    gray = cv2.convertScaleAbs(255 - gray)

    # Get the blob
    blobDetector = makeBlob()

    # Detect blobs
    keypoints = blobDetector.detect(gray)

    # Draw detected blobs as red circles
    # This helps cv2.findCirclesGrid()
    flag = cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS
    im_with_keypoints = cv2.drawKeypoints(gray, keypoints, np.array([]),
                                          (255, 0, 0), flag)
    im_with_keypoints_gray = cv2.cvtColor(im_with_keypoints,
                                          cv2.COLOR_BGR2GRAY)

    # Keypoint array
    kypt = np.array([x.pt for x in keypoints])

    # Find the circle grid
    _, corners = cv2.findCirclesGrid(im_with_keypoints_gray,
                                     gridSize,
                                     kypt,
                                     flags=cv2.CALIB_CB_SYMMETRIC_GRID)

    return (corners, objp)
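makeBlob() above is defined elsewhere in that project. A plausible, purely illustrative sketch of such a factory is shown below, assuming OpenCV 3+ and dark circles on a light background; the parameter values are assumptions, not the project's.

import cv2

def makeBlob():
    # Illustrative blob detector for dark circular dots; tune values per setup.
    params = cv2.SimpleBlobDetector_Params()
    params.filterByColor = True
    params.blobColor = 0          # detect dark blobs
    params.filterByArea = True
    params.minArea = 20
    params.maxArea = 10000
    params.filterByCircularity = True
    params.minCircularity = 0.7
    return cv2.SimpleBlobDetector_create(params)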
Code example #25
 def processImage(self, image_msg):
     if self.lock.testandset():
         vehicle_detected_msg_out = BoolStamped()
         try:
             image_cv = self.bridge.imgmsg_to_cv2(image_msg, "bgr8")
         except CvBridgeError as e:
             print e
         start = rospy.Time.now()
         params = cv2.SimpleBlobDetector_Params()
         params.minArea = self.blobdetector_min_area
         params.minDistBetweenBlobs = self.blobdetector_min_dist_between_blobs
         simple_blob_detector = cv2.SimpleBlobDetector_create(params)
         (detection,
          corners) = cv2.findCirclesGrid(image_cv,
                                         self.circlepattern_dims,
                                         flags=cv2.CALIB_CB_SYMMETRIC_GRID,
                                         blobDetector=simple_blob_detector)
         elapsed_time = (rospy.Time.now() - start).to_sec()
         self.pub_time_elapsed.publish(elapsed_time)
         vehicle_detected_msg_out.data = detection
         self.pub_detection.publish(vehicle_detected_msg_out)
         if self.publish_circles:
             cv2.drawChessboardCorners(image_cv, self.circlepattern_dims,
                                       corners, detection)
             image_msg_out = self.bridge.cv2_to_imgmsg(image_cv, "bgr8")
             self.pub_circlepattern_image.publish(image_msg_out)
         self.lock.unlock()
Code example #26
	def findMarkers(self,gray,objpoints,imgpoints):
		#objp = np.zeros((self.marker_size[0]*self.marker_size[1],3), np.float32)
		#objp[:,:2] = np.mgrid[0:self.marker_size[0],0:self.marker_size[1]].T.reshape(-1,2)
		objp = np.zeros( (np.prod(self.marker_size), 3), np.float32 )
		objp[:,:2] = np.indices(self.marker_size).T.reshape(-1, 2)
		
		# Find the chess board corners
		if self.marker_checkerboard == True:
			ret, corners = cv2.findChessboardCorners(gray, self.marker_size)
			if ret: print '[+] chess - found corners: ', corners.size/2
		else:
			ret, corners = cv2.findCirclesGrid(gray, self.marker_size,None,cv2.CALIB_CB_ASYMMETRIC_GRID)
			if ret: print '[+] circles - found corners: ', corners.size/2
		
		# If found, add object points, image points (after refining them)
		if ret == True: 
			term = ( cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_COUNT, 30, 0.1 )
			cv2.cornerSubPix(gray, corners, (5, 5), (-1, -1), term)
			imgpoints.append(corners.reshape(-1, 2))
			objpoints.append(objp)
			
			# Draw the corners
			self.draw(gray,corners)
		
		else:
			print '[-] Couldn\'t find markers'
			
		
		return ret,objpoints,imgpoints
Code example #27
File: camera_calibrate.py Project: dtbinh/soccer2
	def findMarkers(self, gray, objpoints, imgpoints):
		# objp = np.zeros((self.marker_size[0]*self.marker_size[1],3), np.float32)
		# objp[:,:2] = np.mgrid[0:self.marker_size[0],0:self.marker_size[1]].T.reshape(-1,2)
		objp = np.zeros((np.prod(self.marker_size), 3), np.float32)
		objp[:, :2] = np.indices(self.marker_size).T.reshape(-1, 2) # make a grid of points

		# Find the chess board corners or circle centers
		if self.marker_checkerboard is True:
			ret, corners = cv2.findChessboardCorners(gray, self.marker_size)
			if ret: print '[+] chess - found corners: ', corners.size / 2
		else:
			ret, corners = cv2.findCirclesGrid(gray, self.marker_size, flags=cv2.CALIB_CB_ASYMMETRIC_GRID)
			# ret, corners = cv2.findCirclesGrid(gray, self.marker_size, flags=cv2.CALIB_CB_CLUSTERING)
			# print '[+] circles - found corners: ', corners.size / 2, 'ret:', ret
			# print 'corners:', corners
			if ret: print '[+] circles - found corners: ', corners.size / 2

		# If found, add object points, image points (after refining them)
		if ret is True:
			term = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_COUNT, 30, 0.1)
			cv2.cornerSubPix(gray, corners, (5, 5), (-1, -1), term)
			imgpoints.append(corners.reshape(-1, 2))
			objpoints.append(objp)
		else:
			print '[-] Couldn\'t find markers'

		# Draw the corners
		self.draw(gray, corners)

		return ret, objpoints, imgpoints
Code example #28
    def recent_events(self, events):
        frame = events.get("frame")
        if not frame:
            return
        if self.collect_new:
            img = frame.img
            status, grid_points = cv2.findCirclesGrid(
                img, (4, 11), flags=cv2.CALIB_CB_ASYMMETRIC_GRID
            )
            if status:
                self.img_points.append(grid_points)
                self.obj_points.append(self.obj_grid)
                self.collect_new = False
                self.count -= 1
                self.button.status_text = "{:d} to go".format(self.count)

        if self.count <= 0 and not self.calculated:
            self.calculate()
            self.button.status_text = ""

        if self.window_should_close:
            self.close_window()

        if self.show_undistortion:
            assert self.g_pool.capture.intrinsics
            # This function is not yet compatible with the fisheye camera model and would have to be manually implemented.
            # adjusted_k,roi = cv2.getOptimalNewCameraMatrix(cameraMatrix= np.array(self.camera_intrinsics[0]), distCoeffs=np.array(self.camera_intrinsics[1]), imageSize=self.camera_intrinsics[2], alpha=0.5,newImgSize=self.camera_intrinsics[2],centerPrincipalPoint=1)
            self.undist_img = self.g_pool.capture.intrinsics.undistort(frame.img)
Code example #29
File: calibration.py Project: viz3d/viz3d
def mark_calibration_points(image, image_marker=None, type="checkerboard"):
    """
    Draws the checkerboard corners on the image
    :param image: the image to process in grayscale
    :param image_marker: optional. Draws the markers on this image, can be the same as image
    :return: the corners found
    """

    # Mark calibration points
    dimension = cfg["calibration"][type]["dimension"]
    if type == "checkerboard":
        found, corners = cv.findChessboardCorners(image, dimension, None, flags=cv.CALIB_CB_ADAPTIVE_THRESH)
    else:
        found, corners = cv.findCirclesGrid(image, dimension, flags=cv.CALIB_CB_ASYMMETRIC_GRID)

    if found:
        if type == "checkerboard":
            # termination criteria
            criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 30, 0.01)
            corners = cv.cornerSubPix(image, corners, (11, 11), (-1, -1), criteria)

        # Draw corners
        if image_marker is not None:
            cv.drawChessboardCorners(image_marker, dimension, corners, found)

        return found, corners
    return found, None
Code example #30
    def findCirclePatterns(self, camImgUndist, doDebug):
        imgMasked = camImgUndist
        # Create a detector with the parameters
        ver = (cv.__version__).split('.')
        if int(ver[0]) < 3:
            blobDetector = cv.SimpleBlobDetector(self.circleFinderParams)
        else:
            blobDetector = cv.SimpleBlobDetector_create(
                self.circleFinderParams)
        found = True
        point_list = []
        hull_list = []
        while found == True:
            found, points = cv.findCirclesGrid(
                image=imgMasked,
                patternSize=(self.pattern["width"], self.pattern["height"]),
                flags=self.findCirclesGridFlag,
                blobDetector=blobDetector)

            if found == True:
                points = np.squeeze(points, axis=1)
                point_list.append(points)
                mins = np.amin(points, axis=0)
                maxs = np.amax(points, axis=0)
                # Mask image to not see the already detected pattern any more
                hull = np.array([[mins[0], mins[1]], [mins[0], maxs[1]],
                                 [maxs[0], maxs[1]], [maxs[0], mins[1]]],
                                np.int32)
                cv.fillConvexPoly(imgMasked, hull, (255, 255, 255))
                hull_list.append(hull)
                if doDebug == True:
                    cv.imshow("masked image", imgMasked)
                    cv.waitKey(2000)
        return point_list
Code example #31
    def _circulargrid_image_points(self, img, flags, blobDetector):
        found, corners = cv2.findCirclesGrid(
            img, (self.pattern_columns, self.pattern_rows),
            flags=flags,
            blobDetector=blobDetector)

        return (found, corners)
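For context, a short usage sketch of this helper follows. The instance name, image variable, and detector parameters are assumptions for illustration; only the call signature comes from the method above.

params = cv2.SimpleBlobDetector_Params()
params.minArea = 50  # illustrative value
detector = cv2.SimpleBlobDetector_create(params)
found, corners = calibrator._circulargrid_image_points(
    gray_img, cv2.CALIB_CB_ASYMMETRIC_GRID, detector)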
Code example #32
    def cb_image(self, image_msg):
        """
        Callback for processing an image which potentially contains a back pattern. Processes the image only if
        sufficient time has passed since processing the previous image (relative to the chosen processing frequency).

        The pattern detection is performed using OpenCV's `findCirclesGrid <https://docs.opencv.org/2.4/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html?highlight=solvepnp#findcirclesgrid>`_ function.

        Args:
            image_msg (:obj:`sensor_msgs.msg.CompressedImage`): Input image

        """
        now = rospy.Time.now()
        if now - self.last_stamp < self.publish_duration:
            return
        else:
            self.last_stamp = now

        vehicle_centers_msg_out = VehicleCorners()
        detection_flag_msg_out = BoolStamped()
        image_cv = self.bridge.compressed_imgmsg_to_cv2(image_msg, "bgr8")

        (detection, centers) = cv2.findCirclesGrid(
            image_cv,
            patternSize=tuple(self.circlepattern_dims.value),
            flags=cv2.CALIB_CB_SYMMETRIC_GRID,
            blobDetector=self.simple_blob_detector,
        )

        # if the pattern is detected, cv2.findCirclesGrid returns a non-zero result, otherwise it returns 0
        # vehicle_detected_msg_out.data = detection > 0
        # self.pub_detection.publish(vehicle_detected_msg_out)

        vehicle_centers_msg_out.header = image_msg.header
        vehicle_centers_msg_out.detection.data = detection > 0
        detection_flag_msg_out.header = image_msg.header
        detection_flag_msg_out.data = detection > 0

        # if the detection is successful add the information about it,
        # otherwise publish a message saying that it was unsuccessful
        if detection > 0:
            points_list = []
            for point in centers:
                center = Point32()
                center.x = point[0, 0]
                center.y = point[0, 1]
                center.z = 0
                points_list.append(center)
            vehicle_centers_msg_out.corners = points_list
            vehicle_centers_msg_out.H = self.circlepattern_dims.value[1]
            vehicle_centers_msg_out.W = self.circlepattern_dims.value[0]

        self.pub_centers.publish(vehicle_centers_msg_out)
        self.pub_detection_flag.publish(detection_flag_msg_out)
        if self.pub_circlepattern_image.get_num_connections() > 0:
            cv2.drawChessboardCorners(image_cv,
                                      tuple(self.circlepattern_dims.value),
                                      centers, detection)
            image_msg_out = self.bridge.cv2_to_compressed_imgmsg(image_cv)
            self.pub_circlepattern_image.publish(image_msg_out)
Code example #33
def detect_warp(imgs, grid_size, chess_circles, wheelbase_mm):
    # Number of squares
    nos = grid_size[0] * grid_size[1]
    # Termination criteria
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
    camera_points = []
    world_points = []
    for i, img in enumerate(imgs):
        # for img in imgs:
        # Find corners
        if (chess_circles):
            ret, corners = cv2.findChessboardCorners(img, grid_size, None)
            if ret:
                # Refine corner positions only when the pattern was found
                corners = cv2.cornerSubPix(img, corners, (11, 11), (-1, -1),
                                           criteria)
        else:
            ret, corners = cv2.findCirclesGrid(img, grid_size, None)
            # print("corners: ", corners)
            # import pdb; pdb.set_trace()
        if not ret:
            print('pattern was not found on number ', i)
            # Skip this image if pattern was not found
            continue
        # Get better corner positions

        cv2.drawChessboardCorners(img, (grid_size[0], grid_size[1]), corners,
                                  ret)
        show_img("chess", img)

        # Get position of centermost corner
        cent_corn = corners[corners.shape[0] // 2]
        print('Center corner is', cent_corn)

        # Points in 2D camera space
        src = np.array(corners).reshape((nos, 2))

        # Points in 3D world space
        obj_points = np.zeros((nos, 3), dtype=np.float32)
        obj_points[:, :2] = np.indices(grid_size).T.reshape(-1,
                                                            2) * wheelbase_mm

        # import pdb; pdb.set_trace()
        camera_points.append(src)
        world_points.append(obj_points)
        # H = cv2.findHomography(obj_points, src)
        # import pdb; pdb.set_trace()
    h, w = imgs[0].shape
    # Pass a single image
    rms, mtx, d_coef, rvecs, tvecs = cv2.calibrateCamera(
        world_points, camera_points, (w, h), None, None)

    cam_mtx, roi = cv2.getOptimalNewCameraMatrix(mtx, d_coef, (w, h), 1,
                                                 (w, h))

    print('RMS', rms)
    print('mtx', mtx)
    print('camera matrix', cam_mtx)
    print('distorsion coefficients', d_coef)

    return (mtx, d_coef, cam_mtx, rvecs, tvecs)
Code example #34
File: gigapan.py Project: nadiacandoit/gestalt
        def captureUndistortMap(self):
                img = cv.QueryFrame(self.capture)
                cv.ShowImage(self.windowname,img)
                found, corners = cv2.findCirclesGrid(img, self.calibtarget_dim, flags=cv2.CALIB_CB_ASYMMETRIC_GRID)
                cv2.drawChessboardCorners(img, self.calibtarget_dim, corners, found)

                if cv.WaitKey(10) & 0xff == ord('c'):
                        return (corners)
Code example #35
    def _detect_pattern(self):
        gray = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)
        flags = cv2.CALIB_CB_ASYMMETRIC_GRID + cv2.CALIB_CB_CLUSTERING
        ret, pattern = cv2.findCirclesGrid(gray,(self.ndots_xy[0], self.ndots_xy[1]), flags=flags)
        if ret:
            self.pattern = pattern

        return ret
Code example #36
def calibrate():
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
    Nx_cor = 5
    Ny_cor = 5

    objp = np.zeros((Nx_cor * Ny_cor, 3), np.float32)
    objp[:, :2] = np.mgrid[0:Nx_cor, 0:Ny_cor].T.reshape(-1, 2)
    objpoints = []  # 3d points in real world space
    imgpoints = []  # 2d points in image plane.

    count = 0  # count tracks the number of frames in which the pattern was successfully detected
    while (True):

        ret, frame = cap.read()

        if cv2.waitKey(1) & 0xFF == ord(' '):

            # Our operations on the frame come here
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

            ret, corners = cv2.findCirclesGrid(gray, (Nx_cor, Ny_cor),
                                               None)  # Find the corners
            # If found, add object points, image points
            if ret == True:
                corners = cv2.cornerSubPix(gray, corners, (5, 5), (-1, -1),
                                           criteria)
                objpoints.append(objp)
                imgpoints.append(corners)
                cv2.drawChessboardCorners(frame, (Nx_cor, Ny_cor), corners,
                                          ret)
                count += 1

                if count > 20:
                    break

        # Display the resulting frame
        cv2.imshow('frame', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    global mtx, dist

    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints,
                                                       gray.shape[::-1], None,
                                                       None)
    print(mtx, dist)

    mean_error = 0
    for i in range(len(objpoints)):
        imgpoints2, _ = cv2.projectPoints(objpoints[i], rvecs[i], tvecs[i],
                                          mtx, dist)
        error = cv2.norm(imgpoints[i], imgpoints2,
                         cv2.NORM_L2) / len(imgpoints2)
        mean_error += error

    print("total error: ", mean_error / len(objpoints))
    # # When everything done, release the capture

    np.savez('calibrate.npz', mtx=mtx, dist=dist[0:4])
Code example #37
    def processImage(self, image_msg):
        
        if not self.active:
            return
        
        now = rospy.Time.now()
        if now - self.last_stamp < self.publish_duration:
            return
        else:
            self.last_stamp = now
        
        
        vehicle_detected_msg_out = BoolStamped()
        vehicle_corners_msg_out = VehicleCorners()
        try:
            image_cv = self.bridge.compressed_imgmsg_to_cv2(
                image_msg, "bgr8")
        except CvBridgeError as e:
            print e

        start = rospy.Time.now()
        params = cv2.SimpleBlobDetector_Params()
        params.minArea = self.blobdetector_min_area
        params.minDistBetweenBlobs = self.blobdetector_min_dist_between_blobs
        simple_blob_detector = cv2.SimpleBlobDetector_create(params)
        (detection, corners) = cv2.findCirclesGrid(image_cv,
                                                    self.circlepattern_dims, flags=cv2.CALIB_CB_SYMMETRIC_GRID,
                                                    blobDetector=simple_blob_detector)

        # print(corners)

        vehicle_detected_msg_out.data = detection
        self.pub_detection.publish(vehicle_detected_msg_out)
        if detection:
            # print(corners)
            points_list = []
            for point in corners:
                corner = Point32()
                # print(point[0])
                corner.x = point[0, 0]
                # print(point[0,1])
                corner.y = point[0, 1]
                corner.z = 0
                points_list.append(corner)
            vehicle_corners_msg_out.header.stamp = rospy.Time.now()
            vehicle_corners_msg_out.corners = points_list
            vehicle_corners_msg_out.detection.data = detection
            vehicle_corners_msg_out.H = self.circlepattern_dims[1]
            vehicle_corners_msg_out.W = self.circlepattern_dims[0]
            self.pub_corners.publish(vehicle_corners_msg_out)
        elapsed_time = (rospy.Time.now() - start).to_sec()
        self.pub_time_elapsed.publish(elapsed_time)
        if self.publish_circles:
            cv2.drawChessboardCorners(image_cv,
                                        self.circlepattern_dims, corners, detection)
            image_msg_out = self.bridge.cv2_to_imgmsg(image_cv, "bgr8")
            self.pub_circlepattern_image.publish(image_msg_out)
Code example #38
 def AddImgPnts(self, l_img, r_img, drawCHKBD=False):
     gray_l = cv2.cvtColor(l_img, cv2.COLOR_BGR2GRAY)
     gray_r = cv2.cvtColor(r_img, cv2.COLOR_BGR2GRAY)
     self.imshape = gray_r.shape[:2]
     print(self.imshape)
     ret_l, centers_l = cv2.findCirclesGrid(
         gray_l, CHKBD_DIMS, flags=cv2.CALIB_CB_ASYMMETRIC_GRID)
     ret_r, centers_r = cv2.findCirclesGrid(
         gray_r, CHKBD_DIMS, flags=cv2.CALIB_CB_ASYMMETRIC_GRID)
     if ret_l and ret_r:
         self.imgPnts_l.append(centers_l)
         self.imgPnts_r.append(centers_r)
         self.objPnts.append(self.objp)
         if drawCHKBD:
             cv2.drawChessboardCorners(l_img, CHKBD_DIMS, centers_l, ret_l)
             cv2.drawChessboardCorners(r_img, CHKBD_DIMS, centers_r, ret_r)
     print("Num Calib Imgs: {}".format(len(self.objPnts)))
     return l_img, r_img
Code example #39
File: Pattern.py Project: PierreMandas/SIGB
    def FindCirclesGrid(self, image):
        """Finds centers in the grid of circles using an asymmetric pattern."""
        # Processed image
        gray = image.copy()

        # Finds centers in the grid of circles.
        flags = cv2.CALIB_CB_ASYMMETRIC_GRID
        retval, corners = cv2.findCirclesGrid(gray, self.Size, flags=flags)

        # Garbage Collector.
        del gray

        # Return the final result.
        return corners
Code example #40
def _get_circles(img, board, pattern):
    """
    Get circle centers for a symmetric or asymmetric grid
    """
    h = img.shape[0]
    w = img.shape[1]
    if len(img.shape) == 3 and img.shape[2] == 3:
        mono = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    else:
        mono = img

    flag = cv2.CALIB_CB_SYMMETRIC_GRID
    if pattern == Patterns.ACircles:
        flag = cv2.CALIB_CB_ASYMMETRIC_GRID
    mono_arr = numpy.array(mono)
    (ok, corners) = cv2.findCirclesGrid(mono_arr, (board.n_cols, board.n_rows), flags=flag)

    # In symmetric case, findCirclesGrid does not detect the target if it's turned sideways. So we try
    # again with dimensions swapped - not so efficient.
    # TODO Better to add as second board? Corner ordering will change.
    if not ok and pattern == Patterns.Circles:
        (ok, corners) = cv2.findCirclesGrid(mono_arr, (board.n_rows, board.n_cols), flags=flag)

    return (ok, corners)
Code example #41
    def get_corners(self, img):
        """
        Get circle centers for a symmetric or asymmetric grid
        """
        w, h = cv.GetSize(img)
        mono = cv.CreateMat(h, w, cv.CV_8UC1)
        cv.CvtColor(img, mono, cv.CV_BGR2GRAY)
        mono_arr = numpy.array(mono)
        (ok, corners) = cv2.findCirclesGrid(mono_arr, (self.n_cols, self.n_rows), cv2.CALIB_CB_ASYMMETRIC_GRID)

        # For some reason findCirclesGrid returns centers as [[x y]] instead of (x y) like FindChessboardCorners
        if corners is not None:
            corners = [(x, y) for [[x, y]] in corners]

        return (ok, corners)
Code example #42
File: dotCalibrate.py Project: dbaldwin/uavtools
def main():
	image_names =  sys.argv[1:]
	if not image_names:
		print
		print 'No calibration images supplied'
		print 'Usage:'
		print '\tpython dotCalibrate.py [file1 file2 file3 ...]'
		print
		return;

	# termination criteria
	criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)

	# prepare object points for the 4x11 circle grid, like (0,0,0), (1,0,0), ....,(3,10,0)
	objp = np.zeros((4*11,3), np.float32)
	objp[:,:2] = np.mgrid[0:4,0:11].T.reshape(-1,2)

	# Arrays to store object points and image points from all the images.
	objpoints = [] # 3d point in real world space
	imgpoints = [] # 2d points in image plane.

	count = 0
	for fname in image_names:
		print 'testing file: ' + fname + ': ' + str(count) + ' of ' + str(len(image_names))
		img = cv2.imread(fname)
		gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)

	    # Find the circle grid centers
		ret, corners = cv2.findCirclesGrid(gray, (4,11),None)

	    # If found, add object points, image points (after refining them)
		if ret == True:
			print 'Circle grid found in image: ' + fname
			objpoints.append(objp)

			corners2 = cv2.cornerSubPix(gray,corners,(11,11),(-1,-1),criteria)
			imgpoints.append(corners2)

	        # Draw and display the corners
			img = cv2.drawChessboardCorners(img, (4,11), corners2, ret)
			cv2.imshow('img',img)
			cv2.waitKey(500)
		else:
			print 'Circle grid not found in image: ' + fname

		count += 1

	cv2.destroyAllWindows()
Code example #43
    image = cv2.imread(filename, cv2.IMREAD_GRAYSCALE)
    if visualize:
        cv2.imshow("image", image)
        cv2.waitKey()
    assert image is not None, 'Cannot read ' + filename

    params = cv2.SimpleBlobDetector_Params()
    params.minDistBetweenBlobs = 5.0
    params.minArea = 15.0
    blackDetector = cv2.SimpleBlobDetector(params)
    params.blobColor = 255

    whiteDetector = cv2.SimpleBlobDetector(params)
    whiteCenters = np.empty((0, 0))
    blackCenters = np.empty((0, 0))
    isBlackFound, blackCenters = cv2.findCirclesGrid(image, patternSize, blackCenters, cv2.CALIB_CB_ASYMMETRIC_GRID + cv2.CALIB_CB_CLUSTERING, blackDetector)
    isWhiteFound, whiteCenters = cv2.findCirclesGrid(image, patternSize, whiteCenters, cv2.CALIB_CB_ASYMMETRIC_GRID + cv2.CALIB_CB_CLUSTERING, whiteDetector)

    circlesImage = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
    cv2.drawChessboardCorners(circlesImage, patternSize, blackCenters, isBlackFound)
    cv2.drawChessboardCorners(circlesImage, patternSize, whiteCenters, isWhiteFound)
    if visualize:
        cv2.imshow("corners", circlesImage)
        cv2.waitKey()

    assert (isBlackFound and isWhiteFound), 'Cannot find two circles grids'
    whiteCentroid = np.mean(whiteCenters, 0)
    blackCentroid = np.mean(blackCenters, 0)


    mask = np.zeros(image.shape, np.uint8)
Code example #44
File: calib.py Project: ricardojdferreira/pyVision
def computeCalibration(files,size_temp,invalid):
        
    #parse size_temp
    print '\n# PROCESSING'    
    size = np.array([size_temp[0],size_temp[1]])
    
    # create table to display calibration results
    isFound1list = []
    isFound2list = []
    imageref = []
    
    # counter for valid calibration files
    count = 0
    
    # counter for invalid calibration files
    invalid_count = 0
    invalid_flag = 1
    if not invalid:
        invalid_flag = 0
    
    # prepare grid points
    obj = np.zeros((size[0]*size[1],3), np.float32)
    obj[:,:2] = np.mgrid[0:size[0],0:size[1]].T.reshape(-1,2)
    
    # arrays to store correspondent points
    objectpts = []
    image1pts = []
    image2pts = []
    validref = []
    imagesize = 0
    
    # use if a list of invalid images is provided
    if invalid_flag == 1:
        
        # load all images
        for i in range(0,len(files)/2):
            
            print '    IMAGE ' + str(i)            
            im1 = cv2.imread(files[2*i],0)
            im2 = cv2.imread(files[2*i+1],0)
            
            # store size of the image
            if i == 0:
                imagesize = im1.shape[::-1]
                
            # checks for invalid flag
            if i != invalid[invalid_count]:         
            
                # runs findCirclesGrid
                print '        GRID SEARCH'            
                isFound1, centers1 = cv2.findCirclesGrid(im1,(size[0],size[1]),flags = cv2.CALIB_CB_SYMMETRIC_GRID + cv2.CALIB_CB_CLUSTERING)
                isFound2, centers2 = cv2.findCirclesGrid(im2,(size[0],size[1]),flags = cv2.CALIB_CB_SYMMETRIC_GRID + cv2.CALIB_CB_CLUSTERING)
                isFound1list.append(isFound1)        
                isFound2list.append(isFound2)
                
                # stores image reference
                imageref.append(i)
                        
                # adds the processed points to an array
                if isFound1 and isFound2:
        
                    #store valid calibration entries
                    validref.append(i)            
                    objectpts.append(obj)
                    image1pts.append(centers1)
                    image2pts.append(centers2)
                    
                    count += 1
                
            else:
                
                if invalid_count != len(invalid)-1:
                    invalid_count += 1
                isFound1list.append(False)
                isFound2list.append(False)
                imageref.append(i)
    
    # compute for all images, no invalid list provided            
    else:
        
        # load all images
        for i in range(0,len(files)/2):
            
            print '    IMAGE ' + str(i)              
            im1 = cv2.imread(files[2*i],0)
            im2 = cv2.imread(files[2*i+1],0)
            im1 = cv2.blur(im1,(2,2))
            im2 = cv2.blur(im2,(2,2))            
           
            # store size of the image
            if i == 0:
                imagesize = im1.shape[::-1]
            
            # runs findCirclesGrid
            print '        GRID SEARCH'
            isFound1, centers1 = cv2.findCirclesGrid(im1,(size[0],size[1]),flags = cv2.CALIB_CB_SYMMETRIC_GRID + cv2.CALIB_CB_CLUSTERING)
            isFound2, centers2 = cv2.findCirclesGrid(im2,(size[0],size[1]),flags = cv2.CALIB_CB_SYMMETRIC_GRID + cv2.CALIB_CB_CLUSTERING)
            isFound1list.append(isFound1)        
            isFound2list.append(isFound2)
            
            # stores image reference
            imageref.append(i)
                    
            # adds the processed points to an array
            if isFound1 and isFound2:
    
                #store valid calibration entries
                validref.append(i)            
                objectpts.append(obj)
                image1pts.append(centers1)
                image2pts.append(centers2)
            
                count += 1
                
    # calibrate cameras
    print '    PARAMETERS ESTIMATION'
    print '        CAMERA 1'
    _, camera_matrix1, dist_coefs1, rvecs1, tvecs1 = cv2.calibrateCamera(objectpts, image1pts, (imagesize[0],imagesize[1]), None, None) 

    print '        CAMERA 2'
    _, camera_matrix2, dist_coefs2, rvecs2, tvecs2 = cv2.calibrateCamera(objectpts, image2pts, (imagesize[0],imagesize[1]), None, None)

    print '    STEREO CALIBRATION'
    # stereo calibration
    stereo_var = cv2.stereoCalibrate(objectpts,image1pts,image2pts,camera_matrix1,dist_coefs1,camera_matrix2,dist_coefs2,(imagesize[0],imagesize[1]), flags = cv2.CALIB_FIX_INTRINSIC )
#, flags = cv2.CALIB_FIX_INTRINSIC    
    
    # reprojection variables
    calib_var = []
    calib_var.append(objectpts)
    calib_var.append(rvecs1) 
    calib_var.append(tvecs1)
    calib_var.append(rvecs2) 
    calib_var.append(tvecs2)
    calib_var.append(image1pts)
    calib_var.append(image2pts)
    calib_var.append(validref)
    calib_var.append(imageref)
    calib_var.append(isFound1list)
    calib_var.append(isFound2list)
    
    # return object and image points arrays
    return stereo_var,calib_var
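A hedged usage sketch for the function above; the file naming, the grid size and the unpacking of the cv2.stereoCalibrate return tuple (retval, K1, d1, K2, d2, R, T, E, F in OpenCV 3.x) are assumptions rather than part of the source:

import glob

# Alternating camera-1 / camera-2 images on disk (naming is an assumption).
files = sorted(glob.glob('stereo_images/*.png'))
stereo_var, calib_var = computeCalibration(files, (9, 6), [])
retval, K1, d1, K2, d2, R, T, E, F = stereo_var
print 'stereo reprojection error:', retval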
Code example #45
0
File: calibrate.py Project: walchko/opencv_python
    pattern_points[:,:2] = np.indices(pattern_size).T.reshape(-1, 2)
    #pattern_points *= square_size

    obj_points = []
    img_points = []
    h, w = 0, 0
    for fn in img_names:
        print 'processing %s...' % fn,
        img = cv2.imread(fn, 0)
        if img is None:
          print "Failed to load", fn
          continue

        h, w = img.shape[:2]
        #found, corners = cv2.findChessboardCorners(img, pattern_size)
        found, corners = cv2.findCirclesGrid(img, pattern_size, flags = cv2.CALIB_CB_ASYMMETRIC_GRID + cv2.CALIB_CB_CLUSTERING)
        if found:
            term = ( cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_COUNT, 30, 0.1 )
            cv2.cornerSubPix(img, corners, (5, 5), (-1, -1), term)
        if debug_dir:
            vis = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
            cv2.drawChessboardCorners(vis, pattern_size, corners, found)
            path, name, ext = splitfn(fn)
            cv2.imwrite('%s/%s_chess.png' % (debug_dir, name), vis)
        if not found:
            print 'chessboard not found'
            continue
        img_points.append(corners.reshape(-1, 2))
        obj_points.append(pattern_points)

        print 'ok'
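The excerpt stops before the calibration itself; a minimal continuation sketch using the variables collected above (the call is the standard cv2.calibrateCamera API, the variable names follow the snippet):

    rms, camera_matrix, dist_coefs, rvecs, tvecs = cv2.calibrateCamera(
        obj_points, img_points, (w, h), None, None)
    print 'RMS re-projection error:', rms
    print 'camera matrix:\n', camera_matrix
    print 'distortion coefficients:', dist_coefs.ravel()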
Code example #46
0
            r,th_img = cv2.threshold(diff_img, 200, 255, cv2.THRESH_BINARY)
            thimg_, cont, hier = cv2.findContours( th_img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE )
            Amax, xw, yw = 0, 0, 0
            for c in cont:
                A = cv2.contourArea(c)
                if A > Amax:
                    Amax = A  # track the largest contour so far
                    (xw, yw), ax, ang = cv2.fitEllipse(c)
            if (xw,yw)==(0,0):
                raise Exception('Error at point %d,%d:' % (x,y) + 'Cannot find image point')
            pt_warped.append( (xw,yw) )
                    


    # Calibration image (only for test)
    circles = cv2.imread('circles.png')
    r, pts =   cv2.findCirclesGrid( circles, (8,6) )  # in the original image
    
    # Get screen transformation
    cam_to_scr, mask = cv2.findHomography( array(pt_warped), array(pt_orig) )

    # Save transformation matrix
    savetxt('calib_projection.dat', cam_to_scr)


    #---Show feedback image until program is killed---

    cv2.imshow('frame', zeros((H,W,3), dtype=uint8) )
    cv2.waitKey(1)

    nframes = 0       # total number of acquired frames
    t0 = time.time()  # start time
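Once `cam_to_scr` has been estimated and saved, applying it to new camera-space points is a single cv2.perspectiveTransform call; a hedged sketch (the sample points below are assumptions, and the star import of numpy is assumed as in the snippet):

    # Map camera-space points into screen space with the estimated homography.
    pts_cam = array([[100.0, 200.0], [320.0, 240.0]], dtype=float32).reshape(-1, 1, 2)
    pts_scr = cv2.perspectiveTransform(pts_cam, cam_to_scr)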
Code example #47
0
                else:
                    bg = None
            if event.key == pygame.K_q:
                quit = True
                continue
            if delta_etime is not None:
                etime = max(10, min(etime + delta_etime, 30000))
                # c.ExposureTime = etime / 1000.
                print "Set exposure time to %i" % etime

    # print "\tQueuing buffer"
    # c.buffers.queue()
    # im = c.capture()
    im, _ = c.grab("rgb8", stop=False)
    if grab_calibration:
        ret, cir = cv2.findCirclesGrid(im, csize, flags=cv2.CALIB_CB_ASYMMETRIC_GRID)
        grab_calibration = False
        if ret:
            cpts.append(cir)
            print ("Found grid: %s" % len(cpts))
        else:
            print ("Could not find grid")

    if has_cv2 and recording and video is not None:
        video.write(im[:, :, ::-1])
    # print im.min(), im.max(), im.mean(), im.std()
    im = im[::scale, ::scale, :]
    im = numpy.swapaxes(im, 0, 1)
    # print "\tCapture: %s" % r
    # print(im[0, 0])
    # if bg is not None:
Code example #48
0
def calibrate(directory, rows, cols, space, win=11, save=True,
              directory_out='out', visualize=False, circles=False):
    """Calibrate a camera based on the images in directory

    If save is set, then the resulting data (as txt and json files), along with
    undistorted versions of the input images will be saved to directory_out.
    The system only works if rows != cols. Most instructions will be printed to
    the terminal during execution.

    Args:
        directory (str): Where the images are stored
        rows (int): The number of internal corners in the vertical direction
        cols (int): The number of internal corners in the horizontal direction
        space (float): The spacing between squares on the grid.
        win (int): Half of the side length of the search window for subpixel
            accuracy corner detection. For example, if win=5, then a
            5*2+1 x 5*2+1 = 11 x 11 search window is used.
        save (bool): Whether to save output
        directory_out (str): Where to save output
        visualize (bool): Whether to visualize output as the script is running.
        circles (bool): Whether to use a circle calibration grid
    """
    # Based on example at:
    # http://opencv-python-tutroals.readthedocs.org/en/latest/py_tutorials/
    # py_calib3d/py_calibration/py_calibration.html

    # Setup colored output
    init()

    if len(directory_out) and (directory_out[0] == '/' or
                               directory_out[0] == '\\'):
        directory_out = directory_out[1:]
    if len(directory_out) and (directory_out[-1] == '/' or
                               directory_out[-1] == '\\'):
        directory_out = directory_out[:-1]

    if len(directory) and (directory[0] == '/' or directory[0] == '\\'):
        directory = directory[1:]
    if len(directory) and (directory[-1] == '/' or directory[-1] == '\\'):
        directory = directory[:-1]

    print Fore.WHITE + Style.BRIGHT + Back.MAGENTA + "\nWelcome\n"

    # Figure out where the images are and get all file_names for that directory
    target_directory = os.path.join(os.getcwd(), directory)
    directory_out = os.path.join(os.getcwd(), directory_out)
    print "Searching for images in: " + target_directory
    file_names = os.listdir(target_directory)
    print "Found Images:"
    for name in file_names:
        print "\t" + name

    if visualize:
        print ("\nYou have enabled visualizations.\n\tEach visualization will "
               "pause the software for 5 seconds.\n\tTo continue prior to the "
               "5 second time, press any key.")

    # Check grid symmetry
    if rows == cols:
        print (Style.BRIGHT + Back.RED + "It is best to use an asymmetric grid,"
                                         " rows and cols should be different")
        print(Style.RESET_ALL)
        exit()

    # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
    object_point = np.zeros((cols * rows, 3), np.float32)
    object_point[:, :2] = np.mgrid[0:(rows * space):space,
                                   0:(cols * space):space].T.reshape(-1, 2)

    number_found = 0  # How many good images are there?

    # Arrays to store object points and image points from all the images.
    object_points = []  # 3d point in real world space
    image_points = []  # 2d points in image plane.

    # termination criteria for the sub-pixel corner search algorithm
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)

    # Setup windows for visualization
    if visualize:
        cv2.namedWindow("Raw Image", cv2.WINDOW_NORMAL)
        cv2.namedWindow("Undistorted Image", cv2.WINDOW_NORMAL)
        cv2.namedWindow("Image with " + ("Centers" if circles else "Corners"),
                        cv2.WINDOW_NORMAL)

    # Check if output directory exists, if not, make it.
    if save:
        print (Style.BRIGHT + Back.MAGENTA + "\nSaving output to: " +
               directory_out)
        if not os.path.exists(directory_out):
            os.makedirs(directory_out)
            print Style.BRIGHT + Back.GREEN + "\tMade a new output directory"
        print "\n"

    #########################################################################
    image_size = None
    for image_file in file_names:
        image_file = os.path.join(target_directory, image_file)

        # Try to read in image as gray scale
        img = cv2.imread(image_file, 0)

        # If the image_file isn't an image, move on
        if img is not None:
            print Style.BRIGHT + Back.CYAN + "searching image: " + image_file

            if visualize:
                cv2.imshow('Raw Image', img)
                cv2.waitKey(5000)

            if circles:
                # Find circle centers. The first return value is a found flag.
                found, centers = cv2.findCirclesGrid(img, (rows, cols))
            else:
                # Find chessboard corners. The first return value is a found flag.
                found, corners = cv2.findChessboardCorners(
                    img, (rows, cols),
                    flags=cv2.CALIB_CB_FAST_CHECK + cv2.CALIB_CB_ADAPTIVE_THRESH)

            # If we found the pattern, let's work on it
            if found:
                print (Style.BRIGHT + Back.GREEN + "\tfound " +
                       ("centers" if circles else "corners"))
                object_points.append(object_point)

                # Since this is a good image, we will take its size as the
                # image size
                image_size = img.shape[::-1]

                # We found another good image
                number_found += 1

                if circles:
                    image_points.append(centers)

                    # Draw, display, and save the corners
                    color_image = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)

                    new_color_image = cv2.drawChessboardCorners(
                        color_image, (cols, rows), centers, found)
                    # OpenCV 2 vs 3
                    if new_color_image is not None:
                        color_image = new_color_image
                else:
                    # Get subpixel accuracy corners
                    corners2 = cv2.cornerSubPix(img, corners, (win, win),
                                                (-1, -1), criteria)

                    # Draw, display, and save the corners
                    color_image = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)

                    # depending on the version of OpenCV, cv2.cornerSubPix may
                    # return none, in which case, it modified corners (how
                    # un-Pythonic)
                    if corners2 is None:
                        corners2 = corners

                    image_points.append(corners2)
                    print (Style.BRIGHT + Back.GREEN +
                           "\t\tfound sub-pixel corners")
                    new_color_image = cv2.drawChessboardCorners(
                        color_image, (cols, rows), corners2, found)
                    # OpenCV 2 vs 3
                    if new_color_image is not None:
                        color_image = new_color_image

                if save:
                    cv2.imwrite(os.path.join(directory_out, "grid" +
                                             str(number_found) + ".jpg"),
                                color_image)
                if visualize:
                    cv2.imshow("Image with " +
                               ("Centers" if circles else "Corners"),
                               color_image)
                    cv2.waitKey(5000)
            else:
                print (Style.BRIGHT + Back.RED + "\tcould not find " +
                       ("Centers" if circles else "Corners"))
            print "\n"

    # Check how many good images we found
    if number_found >= 10:
        print (Style.BRIGHT + Back.GREEN + "Found " + str(number_found) +
               " calibratable images.")
    elif number_found == 0:
        print (Style.BRIGHT + Back.RED + "Found " + str(number_found) +
               " calibratable images. \nNow Exiting")
        print(Style.RESET_ALL)
        exit()
    else:
        print (Style.BRIGHT + Back.YELLOW + "Found " + str(number_found) +
               " calibratable images.")

    #######################################################################
    print Style.BRIGHT + Back.CYAN + "Beginning Calibration"

    # Execute calibration
    (re_projection_error, camera_matrix, distortion_coefficients,
     rotation_vectors, translation_vectors) = \
        cv2.calibrateCamera(object_points, image_points, image_size, None, None)

    # Get the crop and optimal matrix for the image
    w, h = image_size[:2]
    new_camera_matrix, roi = cv2.getOptimalNewCameraMatrix(
        camera_matrix, distortion_coefficients, (w, h), 1, (w, h))

    # Go through all images and undistort them
    print Style.BRIGHT + Back.CYAN + "Beginning Undistort"
    i = 0
    for image_file in file_names:
        image_file = os.path.join(target_directory, image_file)

        # Try to read in image as gray scale
        img = cv2.imread(image_file, 0)

        # If the image_file isn't an image, move on
        if img is not None:
            print Style.BRIGHT + Back.CYAN + "undistorting image: " + image_file

            if visualize:
                cv2.imshow('Raw Image', img)
                cv2.waitKey(5000)

            # undistort
            dst = cv2.undistort(img, camera_matrix, distortion_coefficients,
                                None, new_camera_matrix)

            # crop the image
            x, y, w, h = roi
            dst = dst[y:y + h, x:x + w]
            if save:
                cv2.imwrite(os.path.join(directory_out, "undistort" + str(i) +
                                         ".jpg"), dst)
            if visualize:
                cv2.imshow('Undistorted Image', dst)
                cv2.waitKey(5000)

            i += 1

    #########################################################################
    print "\n"
    print Fore.WHITE + Style.BRIGHT + Back.MAGENTA + "Intrinsic Matrix:"
    print Fore.WHITE + Style.BRIGHT + Back.MAGENTA + str(camera_matrix)
    print "\n"
    print Fore.WHITE + Style.BRIGHT + Back.MAGENTA + "Distortion Matrix:"
    print (Fore.WHITE + Style.BRIGHT + Back.MAGENTA +
           str(distortion_coefficients))
    print "\n"
    print Fore.WHITE + Style.BRIGHT + Back.MAGENTA + "Optimal Camera Matrix:"
    print Fore.WHITE + Style.BRIGHT + Back.MAGENTA + str(new_camera_matrix)
    print "\n"
    print (Fore.WHITE + Style.BRIGHT + Back.MAGENTA +
           "Optimal Camera Matrix Crop:")
    print Fore.WHITE + Style.BRIGHT + Back.MAGENTA + str(roi)

    # Calculate Re-projection Error
    tot_error = 0
    for i in xrange(len(object_points)):
        image_points_2, _ = cv2.projectPoints(
            object_points[i], rotation_vectors[i], translation_vectors[i],
            camera_matrix, distortion_coefficients)
        error = cv2.norm(image_points[i], image_points_2,
                         cv2.NORM_L2) / len(image_points_2)
        tot_error += error
    mean_error = tot_error / len(object_points)
    print "\n"
    print (Fore.WHITE + Style.BRIGHT + Back.MAGENTA + "Re-projection Error: " +
           str(mean_error))

    if save:
        with open(os.path.join(directory_out, 'result.txt'), 'w') as \
                result_text_file:
            result_text_file.write("Grid: Rows: {}, Cols: {}, Spacing: {}:\n"
                                   .format(rows, cols, space))
            result_text_file.write("Time: {}\n".format(datetime.datetime.now()))
            result_text_file.write("Intrinsic Matrix:\n")
            np.savetxt(result_text_file, camera_matrix, '%E')
            result_text_file.write("\n\n")
            result_text_file.write("Distortion Matrix:\n")
            np.savetxt(result_text_file, distortion_coefficients, '%E')
            result_text_file.write("\n\n")
            result_text_file.write("Optimal Camera Matrix:\n")
            np.savetxt(result_text_file, new_camera_matrix, '%E')
            result_text_file.write("\n\n")
            result_text_file.write("Optimal Camera Matrix Crop:\n")
            np.savetxt(result_text_file, roi, '%i')
            result_text_file.write("\n\n")
            result_text_file.write("Re-projection Error:  ")
            result_text_file.write(str(mean_error))

        json_dict = {"grid": {"rows": rows,
                              "cols": cols,
                              "spacing": space},
                     "time": str(datetime.datetime.now()),
                     "intrinsic": camera_matrix.tolist(),
                     "distortion": distortion_coefficients.tolist(),
                     "optimal": new_camera_matrix.tolist(),
                     "crop": roi,
                     "error": mean_error}
        with open(os.path.join(directory_out, 'result.json'), 'w') as \
                result_json_file:
            json.dump(json_dict, result_json_file, indent=4)

    print(Style.RESET_ALL)
    cv2.destroyAllWindows()
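A hypothetical invocation of the function above; the directory name and grid geometry are assumptions, the keyword arguments mirror the signature shown in the snippet:

calibrate('calibration_images', rows=6, cols=9, space=25.4,
          win=5, save=True, directory_out='out', visualize=False, circles=False)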
Code example #49
0
File: gigapan.py Project: Felicitymecha/pygestalt
                        pos = self.getPosition()
                        cv.SaveImage("images/hi%.03f-%.03f-%.03f.jpg" % pos, img)
                        #cv.SaveImage("images/hi"+str(time.time())[6:]+".jpeg", img)
                return img

        def captureUndistortMap(self):
                img = cv.QueryFrame(self.capture)
                cv.ShowImage(self.windowname,img)
                found, corners = cv2.findCirclesGrid(img, self.calibtarget_dim, flags=cv2.CALIB_CB_ASYMMETRIC_GRID)
                cv2.drawChessboardCorners(img, self.calibtarget_dim, corners, found)

                if cv.WaitKey(10) & 0xff == ord('c'):
                        return (corners)

        def initUndistort(self):
                captured_corners = [cv2.findCirclesGrid(img, self.calibtarget_dim, flags = cv2.CALIB_CB_ASYMMETRIC_GRID) for img in captured_image_list]
                #load camera intrinsic params
                #get optimal camera matrix (cv2 equiv)
                #init undistorm rectify map (cv2 equiv)
                #store it
                pass

        def correctImage(self,img):
                if(not undistortmap):
                        #capture undistort map
                        #init undistort map
                        undistortmap = True
                #remap image
                return img

        def takeGigapan(self):
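The commented placeholders in `initUndistort` map onto standard cv2 calls; a hedged sketch of those steps, where K and dist stand for the intrinsics and distortion coefficients loaded from disk and (w, h) for the frame size (all of these names are assumptions, not part of the source):

newK, roi = cv2.getOptimalNewCameraMatrix(K, dist, (w, h), 1, (w, h))
map1, map2 = cv2.initUndistortRectifyMap(K, dist, None, newK, (w, h), cv2.CV_16SC2)
undistorted = cv2.remap(img, map1, map2, cv2.INTER_LINEAR)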
Code example #50
0
    def _findSymmetricCircles(self, flags=cv2.CALIB_CB_SYMMETRIC_GRID):
        (didFindCorners, corners) = cv2.findCirclesGrid(
            self.img, self.opts['size'],
            flags=flags | cv2.CALIB_CB_CLUSTERING)
        return didFindCorners, corners
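A hedged usage sketch; the class holding this method is not shown, so the lines below only assume an instance (here called `finder`, a hypothetical name) that exposes `img` and `opts['size']` as the attribute names suggest:

finder.img = cv2.imread('grid.png', 0)   # grayscale frame (file name is an assumption)
finder.opts = {'size': (7, 6)}           # symmetric grid: 7 x 6 circles (an assumption)
found, centers = finder._findSymmetricCircles()
if not found:
    # retry with the asymmetric layout flag
    found, centers = finder._findSymmetricCircles(flags=cv2.CALIB_CB_ASYMMETRIC_GRID)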
Code example #51
0
    def findSkewPoints(calibImages):
        '''
        Takes in a series of images of an 11x4 circle grid pattern,
        locates their circle centers,
        and places them in two matrices.

        Args:
            calibImages: list of circle grid photos taken for calibration

        Returns:
            objpoints: 3D points in real world space
            imgpoints: 2D points in image plane
        '''
    
        logging.basicConfig(level=logging.DEBUG, format='%(message)s')
        
        
        if not calibImages:
            raise AttributeError('calibImages is empty')
        
    
        # Initialize arrays
        centers = np.zeros((6*7), np.float32)
    
        pattern_points = np.zeros( (np.prod(DetermineSkew.shape), 3), np.float32)
        pattern_points[:,:2] = np.indices(DetermineSkew.shape).T.reshape(-1, 2)

    
        objpoints = [] # 3d point in real world space
        imgpoints = [] # 2d points in image plane


        x = 0

    
        logging.debug("Starting loop")
        for fname in calibImages[1:]:
            logging.debug("In loop")
            logging.debug(x)
            img = fname
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # Image must be greyscale for center finding to work.
            
            # h, w = gray.shape[:2]
    
            logging.debug("Got images")

        
            # Find circle centers
            [ret, centers] = cv2.findCirclesGrid(gray, DetermineSkew.shape, centers, cv2.CALIB_CB_ASYMMETRIC_GRID + cv2.CALIB_CB_CLUSTERING)
    
    
            logging.debug("Done finding centers")
            logging.debug(" ")
        
            if ret:
        
                criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
                logging.debug("Found centers")

                centers2 = cv2.cornerSubPix(gray, centers, DetermineSkew.shape, (-1,-1), criteria)
            
                imgpoints.append(deepcopy(centers2.reshape(-1,2)))
                objpoints.append(pattern_points)

                # Draw and display the corners
                logging.debug("Drawing corners")
            
                img = cv2.drawChessboardCorners(img, DetermineSkew.shape, centers, ret)
                # cv2.imshow('img', img)
                # cv2.waitKey(0)
                x = x+1
            
            else:
                logging.debug("No corners here, bub.")
                logging.debug(ret)
                logging.debug(centers)
                # cv2.imshow('img', img)
                # cv2.waitKey(0)
            
                x = x+1

        # cv2.destroyAllWindows()
        
        
        return objpoints, imgpoints
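With `objpoints` and `imgpoints` in hand, the usual next step is cv2.calibrateCamera; a hedged follow-up sketch, assuming `calibImages` still holds the BGR frames processed above:

# Derive the image size from the first calibration frame and run the calibration.
h, w = calibImages[0].shape[:2]
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, (w, h), None, None)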