Example no. 1
def final_plots(save_dir):
    fig = commons.graph_subplots(nrows=1, ncols=2, figsize=(50, 10))([
        CurvatureParams.left_lane_curvature_radii,
        CurvatureParams.right_lane_curvature_radii
    ], ["Left Lane (curvature radius)", "Right Lane (curvature radius)"])
    commons.save_matplotlib(f'{save_dir}/radius_of_curvature.png', fig)

    fig = commons.graph_subplots(nrows=1, ncols=2, figsize=(15, 6))([
        CurvatureParams.left_line_curr_poly_variance,
        CurvatureParams.right_line_curr_poly_variance
    ], ["left_lane_variance_curvature_change", "right_lane_curvature_change"])
    commons.save_matplotlib(f'{save_dir}/change_in_curvature.png', fig)
Example no. 2
def plot_random_images(generator):
    for i in range(0, len(generator)):
        image, steering_val = generator[i]

        random_images = image[0:3, :, :].astype(np.uint8)
        steering_val = steering_val[0:3, :]
        steering_val = list(np.round(steering_val.flatten(), 2))
        
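        # Crop rows [50:-20] from each frame, i.e. drop the top 50 and bottom 20 rows
        # (likely the sky and the car hood, which carry no steering information)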
        cropped_img_list = [img[50:-20, :, :] for img in random_images]
        fig = commons.image_subplots(nrows=2, ncols=3)(list(random_images)+cropped_img_list, steering_val+steering_val)
        commons.save_matplotlib("./image/input_img.png", fig)

        break
def fetch_start_position_with_hist_dist(preprocessed_bin_image, save_dir=None):
    """
    At this point we should have a binary (0/1) image with the detected lane lines, where detections are annotated with 1.
    This method provides a starting point for finding the curvature of each lane line.
    :param preprocessed_bin_image:
    :param save_dir:
    :return:
        left_lane_start_point_coordinates = (y1, x1)
        right_lane_start_point_coordinates = (y2, x2)
    # TODO: Lane lines are broad, so the values between the two edges of a lane line are 0. It would be good
    practice to apply a kernel to fill up these valleys (see the smoothing sketch after this function).
    # TODO: Remove outliers (bad gradients).
    # TODO: How do we handle a bimodal distribution for a particular lane?
    # TODO: Can we try a weighted method (something like a moving average) with a sliding window?
    # TODO: Use a kernel to compute a weighted value of neighboring columns instead of using just one.
    """
    # preprocessed_image[preprocessed_image > 0] = 1
    assert(set(np.unique(preprocessed_bin_image)) == {0, 1}), (
        f'The preprocessed image should be binary {{0, 1}} but contains values {set(np.unique(preprocessed_bin_image))}'
    )
    # Sum the weighted pixel values along the column axis
    frequency_histogram = np.sum(preprocessed_bin_image*CurvatureParams.hist_weight_matrix, axis=0)
    # Divide the Frequency histogram into two parts to find starting points for Left Lane and Right Lane
    left_lane = frequency_histogram[0: len(frequency_histogram) // 2]
    right_lane = frequency_histogram[len(frequency_histogram) // 2:]
    
    if save_dir:
        fig = commons.graph_subplots(nrows=1, ncols=3, figsize=(50, 10))(
                [frequency_histogram, left_lane, right_lane],
                ["frequency_histogram", "hist_left_lane", "hist_right_lane"]
        )
        fig2 = commons.image_subplots(nrows=1, ncols=1, figsize=(6, 6))(
                [CurvatureParams.hist_weight_matrix], ["histogram_weight_matrix"]
        )
        commons.save_matplotlib(f"{save_dir}/histogram_dist.png", fig)
        commons.save_matplotlib(f"{save_dir}/histogram_weights.png", fig2)
    
    left_lane_start_index = np.argmax(left_lane)
    right_lane_start_index = len(frequency_histogram) // 2 + np.argmax(right_lane)
    # print('Left lane right lane start: ', left_lane_start_index, right_lane_start_index)
    return (preprocessed_bin_image.shape[0], left_lane_start_index), (preprocessed_bin_image.shape[0], right_lane_start_index)
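# Hedged sketch of the first TODO above ("apply a kernel to fill up these valleys"):
# smooth the column histogram with a small moving-average kernel before taking the
# argmax, so a broad lane line gives one peak instead of two edge peaks.
# `smooth_histogram` and `kernel_size` are illustrative, not part of the pipeline above.
import numpy as np

def smooth_histogram(frequency_histogram, kernel_size=15):
    # Box (moving-average) kernel; np.convolve with mode="same" preserves the length
    kernel = np.ones(kernel_size) / kernel_size
    return np.convolve(frequency_histogram, kernel, mode="same")
# Usage would be to pass `frequency_histogram` through this helper right after the
# weighted column sum, before splitting it into left and right halves.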
def fetch_object_and_image_points(distorted_image_paths,
                                  x_corners_cnt,
                                  y_corners_cnt,
                                  plot=False,
                                  dump=True,
                                  force_fetch=False):
    """
    :param distorted_image_paths:
    :param x_corners_cnt:
    :param y_corners_cnt:
    :param plot:
    :param dump:
    :param force_fetch:
    :return:

    Idea:
        To correct for distortion we need to calibrate the camera and find the calibration coefficients.
        One way of doing that is to get the shift of a pixel coordinate from one image to another.

        1. We need to identify pixels that are present in all the images (for a chessboard the best pixel markings
        are the corners). OpenCV provides an easy way to find chessboard corners.
        2. (object_points) We need to give each mark/point an id (in this case it is the ith_x_value, jth_y_value and 0
        for the z axis).
        3. (image_points) We need to find the actual pixel coordinates of the marks/points.
    """
    if not force_fetch and os.path.exists(object_image_points_path):
        data_dict = cm.read_pickle(save_path=object_image_points_path)
        return data_dict["object_points"], data_dict["image_points"]
    object_points = np.zeros((y_corners_cnt * x_corners_cnt, 3), np.float32)

    # Get corner coordinates as a mesh grid: (x_coord, y_coord) pairs, with the z coordinate left at 0
    object_points[:, :2] = np.mgrid[0:x_corners_cnt,
                                    0:y_corners_cnt].T.reshape(-1, 2)
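    # e.g. with x_corners_cnt=3 and y_corners_cnt=2 the first two columns become
    # (0, 0), (1, 0), (2, 0), (0, 1), (1, 1), (2, 1); the z column stays 0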

    # Arrays to store object points and image points from all the images.
    object_points_list = []  # 3d points in real world space
    image_points_list = []  # 2d points in image plane.

    collect_images_for_plot = []
    for idx, fname in enumerate(distorted_image_paths):
        img = cv2.imread(fname)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        # Find the chessboard corners: x_corners_cnt corners in the x direction and y_corners_cnt in the y direction
        is_valid_corner, image_points = cv2.findChessboardCorners(
            gray, (x_corners_cnt, y_corners_cnt), None)

        # If found, add object points, image points
        if is_valid_corner:
            assert (len(object_points) == len(image_points)), (
                f"len(object_points)={len(object_points)} should equal len(image_points)={len(image_points)}"
            )
            object_points_list.append(object_points)
            image_points_list.append(image_points)

            # Draw and display the corners
            if plot:
                cv2.drawChessboardCorners(img, (x_corners_cnt, y_corners_cnt),
                                          image_points, is_valid_corner)
                collect_images_for_plot.append(img)
    if plot:
        fig = cm.image_subplots(nrows=2, ncols=2,
                                figsize=(10, 10))(collect_images_for_plot[0:4],
                                                  None)
        cm.save_matplotlib(image_corner_detection_path, fig)

    if dump:
        cm.write_pickle(save_path=object_image_points_path,
                        data_dict={
                            "object_points": object_points_list,
                            "image_points": image_points_list
                        })
    return object_points_list, image_points_list
    camera_params_path = f"{camera_calibration_dir}/camera_params.pickle"
    image_corner_detection_path = f"{camera_calibration_dir}/chessboard_corner_detection.jpg"
    undistorted_img_save_path = f"{camera_calibration_dir}/undistorted_image.jpg"

    object_points_list, image_points_list = fetch_object_and_image_points(
        distorted_image_paths,
        x_corners_cnt=9,
        y_corners_cnt=6,
        plot=True,
        dump=True,
        force_fetch=True)
    camera_matrix, distortion_coefficients = calibrate_camera(
        cm.read_image(distorted_image_paths[0]).shape, object_points_list,
        image_points_list)

    print("camera_matrix: \n", camera_matrix)
    print("distortion_coefficients: \n", distortion_coefficients)
    undistorted_images = [
        undistort(cm.read_image(img), camera_matrix, distortion_coefficients)
        for img in distorted_image_paths[0:2]
    ]

    fig = cm.image_subplots(nrows=2, ncols=2, figsize=(
        10, 10
    ), facecolor='w')(list(
        itertools.chain(*[
            *zip([cm.read_image(path)
                  for path in distorted_image_paths[0:2]], undistorted_images)
        ])))
    cm.save_matplotlib(undistorted_img_save_path, fig)
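# `calibrate_camera` and `undistort` are called above but not defined in this snippet.
# A minimal sketch, assuming they are thin wrappers around cv2.calibrateCamera and
# cv2.undistort (names and signatures here are assumptions, not the original helpers):
import cv2

def calibrate_camera(image_shape, object_points_list, image_points_list):
    # image_shape is (height, width, channels); OpenCV expects the size as (width, height)
    height, width = image_shape[0], image_shape[1]
    _, camera_matrix, distortion_coefficients, _, _ = cv2.calibrateCamera(
        object_points_list, image_points_list, (width, height), None, None)
    return camera_matrix, distortion_coefficients

def undistort(image, camera_matrix, distortion_coefficients):
    # Remove lens distortion from a single image using the fitted camera model
    return cv2.undistort(image, camera_matrix, distortion_coefficients, None, camera_matrix)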
def plot(self):
    assert (len(self.plot_images) == len(self.plot_names))
    ncol = 3
    nrows = int(np.ceil(len(self.plot_names) / ncol))
    fig = commons.image_subplots(nrows=nrows, ncols=ncol)(self.plot_images, self.plot_names)
    commons.save_matplotlib(f'{self.save_dir}/postprocess.png', fig)
def plot(self, ncol, save_path):
    assert (len(self.plot_images) == len(self.plot_names)), (f'{len(self.plot_images)} != {len(self.plot_names)}')
    nrows = int(np.ceil(len(self.plot_names) / ncol))
    fig = commons.image_subplots(nrows=nrows, ncols=ncol)(self.plot_images, self.plot_names)
    commons.save_matplotlib(save_path, fig)