示例#1
0
def main():
    """Run the lane-detection preview loop on a webcam stream.

    Calibrates the camera once, then repeatedly: grabs a frame,
    undistorts it, keeps only white/yellow (lane-marking) pixels,
    warps the result to a bird's-eye view, and displays the output
    until the user presses 'q'.
    """
    video_capture = cv2.VideoCapture(webcam_path)
    # Camera intrinsics are computed once, from the calibration images.
    ret, mtx, dist, rvecs, tvecs = calibration_utils.calibrate_camera(
        n, m, testdir, fmt)
    try:
        while True:
            ret, frame = video_capture.read()
            if not ret:
                # Stream ended or the camera failed — stop cleanly
                # instead of feeding a None frame into the pipeline.
                break
            # Correct lens distortion before any color/geometry work.
            undist_img = calibration_utils.undistort(frame, mtx, dist)
            # Keep only white and yellow pixels (lane-marking colors).
            hl_white_yellow = binarization.white_yellow(undist_img, yellow_min,
                                                        yellow_max)
            # Perspective-warp the binarized frame to a top-down view.
            birdeye, be_boxes = perspective_karla.birdeye(hl_white_yellow)
            cv2.imshow('frame-birdeye', birdeye)
            cv2.imshow('frame-birdeye-boxes', be_boxes)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
    finally:
        # BUG FIX: the original called cap.release() on an undefined
        # name `cap`; release the capture that was actually opened,
        # even if the loop exits via an exception.
        video_capture.release()
        cv2.destroyAllWindows()
示例#2
0
def process_pipeline(frame, keep_state=True):
    """
    Run the full lane-detection pipeline on one color frame.
    :param frame: input color frame
    :param keep_state: if True, lane-line state is conserved (this permits to average results)
    :return: output blend with detected lane overlaid
    """

    global line_lt, line_rt, processed_frames

    # Remove lens distortion using the precomputed calibration.
    img_undistorted = undistort(frame, mtx, dist, verbose=False)

    # Threshold the frame so lane-line pixels stand out.
    img_binary = binarize(img_undistorted, verbose=False)

    # Warp to a top-down (bird's-eye) view of the road.
    img_birdeye, M, Minv = birdeye(img_binary, verbose=False)

    # Fit a 2-degree polynomial to each line: reuse the previous fits
    # when state is kept and both lines were found on the last frame,
    # otherwise fall back to a sliding-window search.
    can_track = (processed_frames > 0 and keep_state
                 and line_lt.detected and line_rt.detected)
    if can_track:
        line_lt, line_rt, img_fit = get_fits_by_previous_fits(
            img_birdeye, line_lt, line_rt, verbose=False)
    else:
        line_lt, line_rt, img_fit = get_fits_by_sliding_windows(
            img_birdeye, line_lt, line_rt, n_windows=9, verbose=False)

    # Lateral offset of the vehicle from the lane center, in meters.
    offset_meter = compute_offset_from_center(
        line_lt, line_rt, frame_width=frame.shape[1])

    # Paint the detected lane surface back onto the undistorted frame.
    blend_on_road = draw_back_onto_the_road(
        img_undistorted, Minv, line_lt, line_rt, keep_state)

    # Compose the final output with thumbnails of intermediate steps.
    blend_output = prepare_out_blend_frame(
        blend_on_road, img_binary, img_birdeye, img_fit,
        line_lt, line_rt, offset_meter)

    processed_frames += 1

    return blend_output
示例#3
0
def process_pipeline(frame, keep_state=True):
    """
    Apply the whole lane-detection pipeline to an input color frame.
    :param frame: input color frame
    :param keep_state: if True, lane-line state is conserved (this permits to average results)
    :return: output blend with detected lane overlaid
    """

    global line_lt, line_rt, processed_frames

    # Undistort -> binarize -> bird's-eye warp, in that order.
    undistorted = undistort(frame, mtx, dist, verbose=False)
    binary = binarize(undistorted, verbose=False)
    warped, M, Minv = birdeye(binary, verbose=False)

    # Track from the previous polynomial fits when possible; otherwise
    # run a fresh sliding-window search for the lane pixels.
    if processed_frames > 0 and keep_state and line_lt.detected and line_rt.detected:
        line_lt, line_rt, img_fit = get_fits_by_previous_fits(warped, line_lt, line_rt, verbose=False)
    else:
        line_lt, line_rt, img_fit = get_fits_by_sliding_windows(warped, line_lt, line_rt, n_windows=9, verbose=False)

    # How far the vehicle sits from the lane center (meters).
    offset_meter = compute_offset_from_center(line_lt, line_rt, frame_width=frame.shape[1])

    # Overlay the detected lane surface on the undistorted input ...
    blend_on_road = draw_back_onto_the_road(undistorted, Minv, line_lt, line_rt, keep_state)

    # ... and stitch the intermediate-step images on top of it.
    blend_output = prepare_out_blend_frame(blend_on_road, binary, warped, img_fit, line_lt, line_rt, offset_meter)

    processed_frames += 1

    return blend_output
示例#4
0
        for point in src:
            axarray[0].plot(*point, '.')
        axarray[1].set_title('After perspective transform')
        axarray[1].imshow(warped, cmap='gray')
        for point in dst:
            axarray[1].plot(*point, '.')
        for axis in axarray:
            axis.set_axis_off()
        plt.show()

    return warped, m, m_inv


if __name__ == '__main__':
    # Load the camera intrinsics produced by the calibration script.
    with open('calibrate_camera.p', 'rb') as f:
        save_dict = pickle.load(f)
    mtx = save_dict['mtx']
    dist = save_dict['dist']

    # Show the perspective-transform result on each test image.
    for test_img in glob.glob('test_images/*.jpg'):
        img = cv2.imread(test_img)

        # Remove lens distortion using the loaded calibration.
        img_undistorted = undistort(img, mtx, dist, verbose=False)

        img_binary = binarize(img_undistorted, verbose=False)

        # OpenCV loads BGR; convert to RGB before the warp.
        # verbose=True lets perspective_transform do its own plotting.
        # BUG FIX: the original block ended with a mis-pasted function
        # tail — a module-level `return warped, M, Minv` (SyntaxError)
        # and plotting code referencing undefined names (`axarray`,
        # `warped`, `src`, `dst`). That fragment has been removed.
        img_birdeye, M, Minv = perspective_transform(cv2.cvtColor(
            img_undistorted, cv2.COLOR_BGR2RGB),
                                                     verbose=True)


if __name__ == '__main__':

    # Compute camera intrinsics from the chessboard images in camera_cal/.
    ret, mtx, dist, rvecs, tvecs = calibrate_camera(calib_images_dir='camera_cal')

    # show result on test images
    for test_img in glob.glob('test_images/*.jpg'):

        img = cv2.imread(test_img)

        # Remove lens distortion using the calibration just computed.
        img_undistorted = undistort(img, mtx, dist, verbose=False)

        # Binarize to highlight lane pixels (result not used below here).
        img_binary = binarize(img_undistorted, verbose=False)

        # OpenCV loads BGR; convert to RGB before the bird's-eye warp.
        # NOTE(review): verbose=True presumably makes birdeye() display
        # the transform — confirm in its definition.
        img_birdeye, M, Minv = birdeye(cv2.cvtColor(img_undistorted, cv2.COLOR_BGR2RGB), verbose=True)


def process_pipeline(frame, keep_state=True):
    """
    Apply whole lane detection pipeline to an input color frame.
    :param frame: input color frame
    :param keep_state: if True, lane-line state is conserved (this permits to average results)
    :return: output blend with detected lane overlaid
    """

    global line_lt, line_rt, processed_frames

    # Undistort the image using the coefficients found in calibration.
    img_undistorted = undistort(frame, mtx, dist, verbose=False)

    # Threshold the frame so that lane-line pixels are highlighted.
    img_binary = binarize_with_threshold(img_undistorted, verbose=False)

    # Warp the binary image to a bird's-eye view of the road.
    binary_warped, m, m_inv = perspective_transform(img_binary, verbose=False)

    # Fit a 2-degree polynomial to each lane line: track from the
    # previous fits when both lines were detected last frame, otherwise
    # run the slower sliding-window search.
    tracking = (processed_frames > 0 and keep_state
                and line_lt.detected and line_rt.detected)
    if tracking:
        line_lt, line_rt, img_fit = get_fits_by_previous_fits(
            binary_warped, line_lt, line_rt, verbose=False)
    else:
        line_lt, line_rt, img_fit = get_fits_by_sliding_windows(
            binary_warped, line_lt, line_rt, n_windows=9, verbose=False)

    # Offset of the vehicle from the lane center, in meters.
    offset_meter = compute_offset_from_center(
        line_lt, line_rt, frame_width=frame.shape[1])

    # Draw the detected lane surface back onto the undistorted frame.
    result = final_viz(img_undistorted, m_inv, line_lt, line_rt, keep_state)

    # Stitch thumbnails of the intermediate pipeline steps on top.
    blend_output = prepare_out_blend_frame(
        result, img_binary, binary_warped, img_fit,
        line_lt, line_rt, offset_meter)

    processed_frames += 1

    return blend_output