Example 1
def show_warp(image_file, visualize=False, save_example=False):
    # Read in an image
    img = cv2.imread(image_file)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

    # Undistort
    params = camera_calibration.load_calibration_data()
    img = camera_calibration.undistort_image(img, params)

    # Get mappings to plot on image
    src, dst = create_warp_mapping(img)

    # Warp image
    warped, M, Minv = apply_transform(img)
    f, (ax1, ax2) = plt.subplots(1, 2, figsize=(14, 7))
    ax1.imshow(img)

    for p in src:
        ax1.plot(p[0], p[1], "ro")
    ax1.set_title("Original Image", fontsize=24)
    ax2.imshow(warped)

    for p in dst:
        ax2.plot(p[0], p[1], "ro")
    ax2.set_title("Warped Image", fontsize=24)

    if visualize:
        plt.show(block=True)
    if save_example:
        save_file_name = "warped_{}".format(
            os.path.basename(image_file.replace(".jpg", ".png")))
        save_location = "./output_images/{}".format(save_file_name)
        f.savefig(save_location, bbox_inches="tight")
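A minimal driver for show_warp, assuming the usual test-image layout (the glob pattern and flag values below are illustrative, not part of the original example):

import glob

# Hypothetical driver: run show_warp over every test image and save the annotated output
for image_file in glob.glob("./test_images/*.jpg"):
    show_warp(image_file, visualize=False, save_example=True)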
Example 2
def demo_transform(calibration_image_names,
                   road_image_names,
                   output_folder,
                   shape=(720, 1280),
                   xct=9,
                   yct=6):
    """
    Run all parts of the pipeline and write sample images to the output folder.
    :param calibration_image_names: list of images to use for camera calibration
    :param road_image_names: list of images to apply pipeline to
    :param output_folder: folder to write output images to
    :param shape: expected shape of input images
    :param xct: expected corners in x direction for camera calibration
    :param yct: expected corners in y direction for camera calibration
    """
    import camera_calibration as cc
    import pipeline as pl
    if not path.exists(output_folder):
        os.makedirs(output_folder)

    object_points, image_points, pattern_found = \
        cc.calibration_points(calibration_image_names, xct=xct, yct=yct)
    ret, mtx, dist, rvecs, tvecs = cc.calibrate_camera(object_points,
                                                       image_points, shape)

    for i, file_name in enumerate(road_image_names):
        base_name = path.split(file_name)[1]
        original_image = cv2.imread(file_name)

        # undistort the image
        corrected_image = cc.undistort_image(original_image, mtx, dist, mtx)
        h, w, _ = corrected_image.shape

        # create thresholded binary image
        binary_image = bt.combined_binary(corrected_image)

        src, dst = find_quad_points(binary_image)
        quad_image = convert_binary_to_color(binary_image)
        draw_quad(quad_image, src, color=[255, 0, 0])
        draw_quad(quad_image, dst, color=[0, 255, 0])
        pl.save_single_example('/'.join(
            [output_folder, "srcquad_{}".format(base_name)]),
                               'src quad',
                               quad_image,
                               cmap='jet')

        transformed = perspective_transform(
            corrected_image, cv2.getPerspectiveTransform(src, dst))
        draw_quad(transformed, dst, color=[0, 0, 255])
        pl.save_single_example('/'.join(
            [output_folder, "init_perspective_{}".format(base_name)]),
                               'initializing perspective transform',
                               cv2.cvtColor(transformed, cv2.COLOR_BGR2RGB),
                               cmap='jet')

        save_full_example(
            '/'.join(
                [output_folder, "perspective_process_{}".format(base_name)]),
            cv2.cvtColor(corrected_image, cv2.COLOR_BGR2RGB), quad_image,
            cv2.cvtColor(transformed, cv2.COLOR_BGR2RGB))
Example 3
def process_video_frame(image):

    global prev_fit

    undistorted = undistort_image(image)
    threshold = combined_threshold(undistorted)
    bird_eye = original2bird_eye(threshold)
    lane_line_params = calc_lane_lines(bird_eye)

    result = draw_lane_lines(undistorted, lane_line_params)

    return result
Example 4
def corners_unwarp(img, nx, ny, mtx, dist):
    # Pass in your image into this function

    # Write code to do the following steps
    # 1) Undistort using mtx and dist
    img_undistorted = undistort_image(img, mtx, dist, plot_images=False)

    # 2) Convert to grayscale
    gray = cv2.cvtColor(img_undistorted, cv2.COLOR_BGR2GRAY)

    # 3) Find the chessboard corners
    print("Finding corners in calibration image")
    ret, corners = cv2.findChessboardCorners(gray, (nx, ny), None)

    # 4) If corners found:
    if ret:
        print("corners.size: {}".format(corners.size))
        print("len(corners): {}".format(len(corners)))

        # a) draw corners
        cv2.drawChessboardCorners(img, (nx, ny), corners, ret)

        # b) define 4 source points src = np.float32([[,],[,],[,],[,]])
        # Note: you could pick any four of the detected corners,
        # as long as those four corners define a rectangle.
        # One especially smart way to do this is to use four well-chosen
        # corners that were detected automatically by findChessboardCorners above.
        # We recommend using the automatic detection of corners in your code.
        src = np.float32(
            [corners[0], corners[nx - 1], corners[-1], corners[-nx]])

        # c) define 4 destination points dst = np.float32([[,],[,],[,],[,]])
        offset = 100
        img_size = (gray.shape[1], gray.shape[0])
        dst = np.float32([[offset, offset], [img_size[0] - offset, offset],
                          [img_size[0] - offset, img_size[1] - offset],
                          [offset, img_size[1] - offset]])

        # d) use cv2.getPerspectiveTransform() to get M, the transform matrix
        M = cv2.getPerspectiveTransform(src, dst)

        # e) use cv2.warpPerspective() to warp your image to a top-down view
        warped = cv2.warpPerspective(img_undistorted,
                                     M,
                                     img_size,
                                     flags=cv2.INTER_LINEAR)
    else:
        print("Could not find chessboard corners!")
        M = None
        warped = np.copy(img)

    return warped, M
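A usage sketch for corners_unwarp with a 9x6 chessboard, assuming the camera matrix and distortion coefficients were pickled during calibration; the file name and the "mtx"/"dist" keys are assumptions, not taken from the example:

import pickle
import cv2

# Hypothetical calibration pickle holding the camera matrix and distortion coefficients
with open("calibration.p", "rb") as f:
    calib = pickle.load(f)

img = cv2.imread("./camera_cal/calibration3.jpg")
warped, M = corners_unwarp(img, nx=9, ny=6, mtx=calib["mtx"], dist=calib["dist"])
if M is not None:
    cv2.imwrite("./output_images/calibration3_warped.jpg", warped)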
Example 5
    def initialize_perspective_transform(self, image):
        """
        Initialize the perspective transform matrices by using a straight
        lane-line image to choose the four points of a trapezoid as source points.
        :param image: undistorted front-facing straight lane line image
        """
        t0 = time()
        ci = cc.undistort_image(image, self.mtx, self.dist, self.mtx)
        bi = self.combined_binary_image(ci)
        self.src, self.dst = pt.find_quad_points(bi)
        self.M_transform = cv2.getPerspectiveTransform(self.src, self.dst)
        self.Minv_transform = cv2.getPerspectiveTransform(self.dst, self.src)
        print("Perspective Transform Init Duration: {}s".format(
            round(time() - t0, 3)))
Example 6
def initialize():
    # Calibrate camera
    mtx, dist = calibrate_camera(use_calib_cache=True)

    # Perspective transform
    test_image = cv2.cvtColor(cv2.imread('./test_images/straight_lines1.jpg'),
                              cv2.COLOR_BGR2RGB)

    img_undistorted = undistort_image(test_image, mtx, dist, plot_images=False)

    img_size = (test_image.shape[1], test_image.shape[0])
    perspective_M, perspective_M_inv = get_perspective_transform(img_size)

    return mtx, dist, perspective_M, perspective_M_inv
Example 7
def get_binary_warped(image_file):
    # read in image
    img = mpimg.imread(image_file)

    # undistort image
    params = camera_calibration.load_calibration_data()
    img = camera_calibration.undistort_image(img, params)

    # perform perspective transform
    img, M, Minv = perspective_transform.apply_transform(img)

    # perform thresholding
    processed_image = thresholding.process_imaging(img)

    return img, processed_image
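A quick visual check of get_binary_warped's two return values; the test-image path is illustrative:

import matplotlib.pyplot as plt

warped_img, binary = get_binary_warped("./test_images/test1.jpg")
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(14, 7))
ax1.imshow(warped_img)
ax1.set_title("Warped")
ax2.imshow(binary, cmap="gray")
ax2.set_title("Thresholded")
plt.show()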
Example 8
def test_binarize_frame():
    mtx, dist, perspective_M, perspective_M_inv = initialize()

    # Read in an image
    image = cv2.cvtColor(cv2.imread('./test_images/straight_lines2.jpg'),
                         cv2.COLOR_BGR2RGB)
    img_undistorted = undistort_image(image, mtx, dist, plot_images=False)
    img_binary = binarize_frame(img_undistorted)

    img_size = (image.shape[1], image.shape[0])
    top_down_binary = warp_image_to_top_down_view(img_binary.astype(np.uint8),
                                                  img_size,
                                                  perspective_M).astype(bool)

    # Plot the result
    f, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))
    f.tight_layout()
    ax1.imshow(image)
    ax1.set_title('Original Image', fontsize=50)
    ax2.imshow(top_down_binary, cmap='gray')
    ax2.set_title('Undistorted and Warped Image', fontsize=50)
    plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
Example 9
def process_video_frame(frame):
    # NOTE: output should be a color image (3 channel) for processing video below
    global dict_config_params
    global num_frames_processed
    global mtx, dist, perspective_M, perspective_M_inv
    global left_fit, right_fit, left_fitx, right_fitx
    #global line_left, line_right

    frame_undistorted = undistort_image(frame, mtx, dist, plot_images=False)
    frame_binary = binarize_frame(frame_undistorted)

    frame_size = (frame_binary.shape[1], frame_binary.shape[0])
    top_down_binary = warp_image_to_top_down_view(
        frame_binary.astype(np.uint8), frame_size, perspective_M).astype(bool)

    #print("num_frames_processed: {}".format(num_frames_processed))

    out_img = None
    ploty = None
    if num_frames_processed == 0:
        out_img = detect_lane_lines(top_down_binary,
                                    line_left,
                                    line_right,
                                    plot_image=False)
    else:
        out_img = track_lane_lines(top_down_binary, line_left, line_right)

    num_frames_processed += 1
    #print("num_frames_processed: {}".format(num_frames_processed))

    img_lines_on_road = project_lane_lines_to_road(frame_undistorted, out_img,
                                                   line_left, line_right,
                                                   perspective_M_inv)
    write_curvature_text_to_image(img_lines_on_road, dict_config_params,
                                  line_left, line_right)
    write_lane_offset_text_to_image(img_lines_on_road, dict_config_params,
                                    line_left, line_right)

    return img_lines_on_road
Example 10
def process_image(image):
    
    undistorted = undistort_image(image)
    threshold = combined_threshold(undistorted)
    bird_eye = original2bird_eye(threshold)
    lane_line_params = calc_lane_lines(bird_eye)
    result = draw_lane_lines(undistorted, lane_line_params)

    f, axarr = plt.subplots(2, 2, figsize=(30, 15))
    axarr[0, 0].imshow(undistorted)
    axarr[0, 0].set_title('Undistorted')
    axarr[0, 1].imshow(threshold, cmap='gray')
    axarr[0, 1].set_title('Threshold')
    axarr[1, 0].imshow(lane_line_params['out_img'])
    axarr[1, 0].plot(lane_line_params['left_fit_x'], lane_line_params['plot_y'], color='yellow')
    axarr[1, 0].plot(lane_line_params['right_fit_x'], lane_line_params['plot_y'], color='yellow')
    axarr[1, 0].set_title('Calculated')
    axarr[1, 1].imshow(result)
    axarr[1, 1].set_title('Result')
    plt.show()

    return result
Example 11
def pipeline(img):
    """
    Complete pipeline for lane finding.
    1) The image passed in is first undistorted.
    2) Then the image is transformed (warped).
    3) Next the image passes through Sobel and color filtering.
    4) After that, a histogram finds the left and right lanes.
    5) Next, the lanes are painted and the image warping is reversed.
    6) Finally, text is overlaid to display the radius of curvature and distance from center.
    """
    # Undistort image
    params = camera_calibration.load_calibration_data()
    img = camera_calibration.undistort_image(img, params)

    # Perspective transform
    image, M, Minv = perspective_transform.apply_transform(img)

    # Sobel/color thresholding and gaussian blur
    processed_image = thresholding.process_imaging(image)

    # Find lanes
    histogram_image, left_fit, right_fit = lane_localization.histogram(
        processed_image)
    lane_lines, left_curverad, right_curverad, car_pos = lane_localization.find_lane_lines(
        processed_image, left_fit, right_fit)

    # Undo perspective transform
    inv_warp = cv2.warpPerspective(lane_lines,
                                   Minv, (img.shape[1], img.shape[0]),
                                   flags=cv2.INTER_LINEAR)
    result = cv2.addWeighted(img, 1, inv_warp, 0.4, 0)

    # Put text
    result = lane_localization.put_text(result, left_curverad, right_curverad,
                                        car_pos)

    return result
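Because pipeline() maps a single RGB frame to an annotated RGB frame, it can be handed to moviepy for video processing; a sketch assuming typical project file names:

from moviepy.editor import VideoFileClip

# Apply pipeline() to every frame of the input video (file names are illustrative)
clip = VideoFileClip("project_video.mp4")
processed = clip.fl_image(pipeline)
processed.write_videofile("output_video.mp4", audio=False)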
Example 12
    def process_detection(self, mtx, dist):

        self.undistImage = undistort_image(self.image, mtx, dist)

        # cv2.imwrite('./output_images/undist.png', self.undistImage)
        # cv2.imwrite('./output_images/original.png', self.image)

        # exit(-1)
        # Get Edges with sobel
        sobelImage = applySobel(self.undistImage)

        # cv2.imshow('Sobel Mix', sobelImage)

        colorMaskImage = white_yellow_mask(self.undistImage)

        # cv2.imshow('Color Mx', colorMaskImage)

        colorSobelImage = np.zeros_like(colorMaskImage)

        colorSobelImage[(sobelImage == 1) | (colorMaskImage == 1)] = 1

        self.warp_transformation(colorMaskImage)

        if not self.reset:
            self.calc_fits_from_previous()
        else:
            self.calc_lanes_fits()

        self.calc_fitsx()

        self.calc_curvature()

        self.calc_offset()

        self.draw_search()

        return (self.left_fit, self.right_fit, self.left_curverad,
                self.right_curverad, self.left_fitx, self.right_fitx,
                self.dif_meters, self.left_base, self.right_base)
Example 13
def test_detect_lane_lines():
    mtx, dist, perspective_M, perspective_M_inv = initialize()
    line_left = Line()
    line_right = Line()

    # Read in an image
    image = cv2.cvtColor(cv2.imread('./test_images/straight_lines2.jpg'),
                         cv2.COLOR_BGR2RGB)
    img_undistorted = undistort_image(image, mtx, dist, plot_images=False)
    img_binary = binarize_frame(img_undistorted)

    img_size = (image.shape[1], image.shape[0])
    top_down_binary = warp_image_to_top_down_view(img_binary.astype(np.uint8),
                                                  img_size,
                                                  perspective_M).astype(bool)

    print("top_down_binary.shape = {}".format(top_down_binary.shape))
    out_image = detect_lane_lines(top_down_binary,
                                  line_left,
                                  line_right,
                                  plot_image=True)

    print("left_fit: {}".format(line_left.line_polyfit))
    print("right_fit: {}".format(line_right.line_polyfit))
Example 14
# transform matrix
trans_matrix = cv2.getPerspectiveTransform(src, dst)
invert_matrix = cv2.getPerspectiveTransform(dst, src)

def original2bird_eye(image):        
    bird_eye = cv2.warpPerspective(image, trans_matrix, image.shape[1::-1], flags=cv2.INTER_LINEAR)    
    return bird_eye


if __name__ == '__main__':
   
    images = glob.glob('./test_images/*.jpg')    
    
    # show result on test images
    for idx, filename in enumerate(images):
        image = cv2.imread(filename)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        undistorted = undistort_image(image)
        threshold = combined_threshold(undistorted)
        bird_eye = original2bird_eye(image)
    
        f, (ax1, ax2) = plt.subplots(1, 2, figsize=(20, 10))
        ax1.set_title(os.path.basename(filename)+' Original', fontsize=12)
        ax1.imshow(image, cmap='gray')
        for point in src:
            ax1.plot(*point, '.')
        ax2.set_title(os.path.basename(filename)+' Bird Eye', fontsize=12)
        ax2.imshow(bird_eye, cmap='gray')
        for point in dst:
            ax2.plot(*point, '.')
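The src and dst arrays used above are defined elsewhere in that module; a typical hand-picked trapezoid for a 1280x720 dashcam image might look like the following sketch (the coordinates are assumptions, not taken from the original):

import numpy as np
import cv2

# Illustrative source trapezoid on the road and destination rectangle for a 1280x720 image
src = np.float32([[580, 460], [700, 460], [1040, 680], [260, 680]])
dst = np.float32([[260, 0], [1020, 0], [1020, 720], [260, 720]])

trans_matrix = cv2.getPerspectiveTransform(src, dst)
invert_matrix = cv2.getPerspectiveTransform(dst, src)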
Example 15
    def get_undistorted_image(self, mtx, dist):
        # a bare "if (self.image)" raises ValueError for NumPy arrays,
        # so check explicitly whether an image has been loaded
        if self.image is not None:
            return undistort_image(self.image, mtx, dist)
        else:
            print('Image not loaded')
            return None
Example 16
# %%
import numpy as np
import cv2
import matplotlib.pyplot as plt

import perspective_transform
import camera_calibration


# %%
def cv2_imread(path):
    return cv2.cvtColor(cv2.imread(path), cv2.COLOR_BGR2RGB)


img = cv2_imread("../assets/straight_lines1.jpg")
img = camera_calibration.undistort_image(img, pkl_name="calibration.p")

plt.imshow(img)
_ = plt.title("Original image (undistorted)")

# %%
import masking
img_luv = cv2.cvtColor(img, cv2.COLOR_RGB2LUV)
img_white = masking.mask_white_from_LUV(img_luv, thresholds=(225, 255))

img_lab = cv2.cvtColor(img, cv2.COLOR_RGB2LAB)
img_yellow = masking.mask_yellow_from_LAB(img_lab)

img_hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
img_bright = masking.mask_bright_from_HLS(img_hls)
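A plausible next cell (an assumption; the excerpt stops here) combines the three masks into one binary image, assuming each mask_* helper returns a 0/1 array matching the image's height and width:

# %%
import numpy as np

combined = np.zeros_like(img_white)
combined[(img_white == 1) | (img_yellow == 1) | (img_bright == 1)] = 1

plt.imshow(combined, cmap="gray")
_ = plt.title("Combined binary mask")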
Example 17
    def undistort_image(self, image):
        return cc.undistort_image(image, self.mtx, self.dist, self.mtx)
"""
test camera calibration and image undistortion
"""
import glob
import cv2
import pickle

from camera_calibration import calibrate_camera, undistort_image
from utilities import two_plots


chess_board_images_ = glob.glob('camera_calibration_images/calibration*.jpg')
pattern_size_ = (9, 6)
camera_cali_file = "camera_cali.pkl"

calibrate_camera(
    chess_board_images_, pattern_size=pattern_size_, output=camera_cali_file)

test_img = cv2.imread("camera_calibration_images/calibration1.jpg")
test_img = cv2.cvtColor(test_img, cv2.COLOR_BGR2RGB)

with open(camera_cali_file, "rb") as fp:
    camera_cali_ = pickle.load(fp)
test_img_undistorted = undistort_image(
    test_img, camera_cali_["obj_points"], camera_cali_["img_points"])

two_plots(test_img, test_img_undistorted,
          ('original', 'undistorted', 'camera calibration'), output='')