def init_video_cam_and_cv_algorithm(self, create_video_folder=True):
		"""
		Initializes the camera and the CV pipeline: connects to the camera (preferring the device named in
		self.CAMERA_SETTINGS_FILE, falling back to the first available one), loads camera parameter values,
		color-threshold and blob-detector settings (if available), warms up the capture, seeds the PID
		controllers, and optionally (re)creates the folder where processed frames will be saved.
		:param create_video_folder: True to (re)create the folder specified by self.VIDEO_FOLDER (where output
		frames will be saved). NOTE: if it already exists, it is DELETED along with all its contents first.
		:raises Exception: if no camera could be opened at all (neither the configured one nor any fallback).
		"""
		self.video_capture = UvcCapture.new_from_settings(self.CAMERA_SETTINGS_FILE)  # Connect to device specified by settings, and load its desired param values
		if self.video_capture is None:  # If unable to connect to device specified by settings, open first available camera
			self.video_capture = UvcCapture(0)  # Device index 0 = first camera found
			if self.video_capture is None:  # If still unable to connect, raise an exception
				raise Exception("Couldn't open camera! :(")

			# If we're here, we couldn't connect to device specified by settings but were able to open 1st available cam
			if not self.video_capture.load_settings(self.CAMERA_SETTINGS_FILE):  # Try to load frame size & rate from settings
				self.video_capture.select_best_frame_mode(60)  # If loading settings failed, choose best frame size with fps >= 60
		logging.info("Camera opened! :)")

		# Initialize cv algorithm too: load settings for color thresholding and blob detector
		self.load_color_thresh_settings()
		self.load_blob_detector_settings()

		# Sometimes, the first couple of frames take a long time to be obtained; grab (and discard) them now,
		# before the quad goes into flying mode, so the control loop isn't stalled by that initial latency
		self.video_capture.get_frame_robust()
		self.video_capture.get_frame_robust()

		# Initialize PID setpoints: roll tracks the horizontal frame center, thrust the vertical frame center.
		# Pitch setpoint is a hard-coded 40 (presumably a target blob size/depth in px -- TODO confirm units).
		self.cf_PID_roll.setSetPoint(self.video_capture.frame_size[0] / 2)
		self.cf_PID_thrust.setSetPoint(self.video_capture.frame_size[1] / 2)
		self.cf_PID_pitch.setSetPoint(40)
		# Seed each PID's current input with its own setpoint so the initial error (and output) is zero
		self.cf_PID_roll.PIDpos.curr_input = self.cf_PID_roll.getSetPoint()
		self.cf_PID_thrust.PIDpos.curr_input = self.cf_PID_thrust.getSetPoint()
		self.cf_PID_pitch.PIDpos.curr_input = self.cf_PID_pitch.getSetPoint()

		# Prepare the folder self.VIDEO_FOLDER so we can store each frame we processed (for debugging)
		if create_video_folder:
			shutil.rmtree(self.VIDEO_FOLDER, ignore_errors=True)  # Delete the folder and its contents, if it exists (ignore errors if it doesn't)
			os.makedirs(self.VIDEO_FOLDER)  # Now create the folder, which won't throw any exceptions as we made sure it didn't already exist
def estimate_cam_to_world_transform(calibration_file,
                                    is_chessboard=False,
                                    cell_size=0.02):
    """
    Opens a GUI and displays the camera's output. For every frame, it also looks
    for a calibration pattern (eg: OpenCV's chessboard pattern), and computes the
    cam->world transformation matrix if found. The estimated camera position (in
    world coords), distance to the world origin and focal length F are overlaid
    on each frame. Loops until any key is pressed in the window.
    :param calibration_file: Str indicating the name of the calibration file to
    load camera_matrix and dist_coefs from
    :param is_chessboard: True if the calibration pattern is a chessboard.
    False, for asymmetric circle grid
    :param cell_size: Float indicating the distance (in m) between two
    consecutive points in the pattern grid
    """
    cam = UvcCapture.new_from_settings(Spotter.CAMERA_SETTINGS_FILE)
    if cam is None:
        logging.error(
            "Please connect the camera, can't do this without you :P")
        return
    cam.do_undistort = False  # Don't undistort, we're already taking into account camera_matrix in the equations

    win_title = "Estimating camera->world coordinate transform"
    cv2.namedWindow(win_title)
    cv2.moveWindow(win_title, 100, 100)
    while cv2.waitKey(1) < 0:  # waitKey returns -1 while no key is pressed -> keep looping
        t_start = datetime.now()
        img = cam.get_frame_robust().bgr
        world_to_camera_transf, F = auxV.find_world_to_cam_and_F(
            img,
            calibration_file,
            is_chessboard=is_chessboard,
            pattern_grid_or_cell_size=cell_size)
        if world_to_camera_transf is not None:
            # Re-detect the pattern only to overlay its corners on the frame (visual feedback)
            pattern_grid, _ = auxV.get_calib_pattern_info(
                is_chessboard, cell_size)
            found, corners = auxV.find_calib_pattern(img, is_chessboard,
                                                     pattern_grid)
            cv2.drawChessboardCorners(img, pattern_grid, corners, found)
            # Camera position in world coords = the cam origin [0,0,0] mapped through cam->world
            cam_pos = auxV.cam_to_world_coords([0, 0, 0],
                                               world_to_camera_transf)
            # z-component of the world origin in cam coords = distance from the camera to the pattern origin
            d = auxV.world_to_cam_coords([0, 0, 0], world_to_camera_transf)[2]
            cv2.putText(
                img,
                "[x={p[0]:.2f}, y={p[1]:.2f}, z={p[2]:.2f}] cm; dist={d:.2f}cm; F={F:.2f}px"
                .format(p=100 * cam_pos, d=100 * d,  # x100: meters -> cm for display
                        F=F), (50, img.shape[0] - 50), cv2.FONT_HERSHEY_DUPLEX,
                1, (0, 0, 200), 1, cv2.LINE_AA)
        else:
            cv2.putText(img, "CAN'T FIND PATTERN!!", (50, img.shape[0] - 50),
                        cv2.FONT_HERSHEY_DUPLEX, 1, (0, 0, 255), 1,
                        cv2.LINE_AA)

        cv2.imshow(win_title, img)
        t_end = datetime.now()
        # Use logging (like the rest of this module) rather than a bare print for per-frame timing
        logging.info("Frame took %.2fms to process!",
                     (t_end - t_start).total_seconds() * 1000)

    cv2.destroyAllWindows()