Example #1
0
def main():
    """Run the lane-detection pipeline on the test images and display results.

    Steps: camera calibration -> threshold filtering -> perspective
    (bird's-eye) transform -> sliding-window lane fitting -> curvature /
    center-offset annotation -> interactive display.
    """
    binary_outputs = []
    thresholded_wrapeds = []
    results = []
    checker_path = 'camera_cal'
    imgs_path = 'test_images'
    checker_imgs = utils.get_images_by_dir(checker_path)
    lane_imgs = utils.get_images_by_dir(imgs_path)
    # Camera calibration: undistort every lane image using the checkerboards.
    imgs, undistorts = cam.cam_calibration(checker_imgs, lane_imgs)
    # The perspective matrices are constant for every image, so compute them
    # once here instead of once per loop iteration (was inside the loop).
    M, Minv = perspective_transform.get_M_Minv()
    for undistort in undistorts:
        # Threshold filtering.
        binary_output = threshold.thresholding(undistort)
        binary_outputs.append(binary_output)
        # Perspective transform to a top-down view.
        thresholded_wraped = cv2.warpPerspective(
            binary_output, M, undistort.shape[1::-1], flags=cv2.INTER_LINEAR)
        # This list existed but was never filled in the original.
        thresholded_wrapeds.append(thresholded_wraped)
        # Detect lane boundaries with a sliding-window fit.
        left_fit, right_fit, left_lane_inds, right_lane_inds = \
            find_line1.find_line(thresholded_wraped)
        # Lane curvature and the vehicle's offset from lane center,
        # then draw the lane area and the numeric overlay.
        curvature, distance_from_center = final_process.calculate_curv_and_pos(
            thresholded_wraped, left_fit, right_fit)
        result = final_process.draw_area(
            undistort, thresholded_wraped, Minv, left_fit, right_fit)
        final_process.draw_values(result, curvature, distance_from_center)
        results.append(result)

    # Show each undistorted input next to its annotated result.
    for undistort, result in zip(undistorts, results):
        cv2.imshow('in_image', undistort)
        cv2.imshow('result', result)
        cv2.waitKey(0)
def cam_calibration(chechers_path, imgs_path):
    """Calibrate from checkerboard images, then undistort the target images.

    chechers_path: directory of checkerboard calibration shots.
    imgs_path: directory of images to undistort.
    Returns (original_images, undistorted_images).
    """
    checker_images = utils.get_images_by_dir(chechers_path)
    object_points, img_points = get_obj_img_points(checker_images)

    imgs = utils.get_images_by_dir(imgs_path)
    undistorted = [
        cal_undistort(img, object_points, img_points) for img in imgs
    ]
    return imgs, undistorted
Example #3
0
# -*- coding: utf-8 -*-

import os
import cv2
import utils
import matplotlib.pyplot as plt
import numpy as np

#script mainly use for drawing the demo picture

# Calibrate the camera once, up front, from the chessboard shots.
cal_imgs = utils.get_images_by_dir('camera_cal')
object_points, img_points = utils.calibrate(cal_imgs, grid=(9, 6))

#test_imgs = utils.get_images_by_dir('test_images')
test_imgs = utils.get_images_by_dir('new_test')

# Undistort every test image with the calibration just computed.
undistorted = []
for img in test_imgs:
    img = utils.cal_undistort(img, object_points, img_points)
    undistorted.append(img)

# The source/destination quads are constants, so build the perspective
# matrix once instead of rebuilding it for every image (it was recomputed
# inside the loop in the original).
src = np.float32([[(203, 720), (585, 460), (695, 460), (1127, 720)]])
dst = np.float32([[(320, 720), (320, 0), (960, 0), (960, 720)]])
M = cv2.getPerspectiveTransform(src, dst)

# Warp each undistorted image to the bird's-eye view.
trans_on_test = []
for img in undistorted:
    trans = cv2.warpPerspective(img, M, img.shape[1::-1], flags=cv2.INTER_LINEAR)
    trans_on_test.append(trans)

thresh = []
binary_wrapeds = []
Example #4
0
    #draw the detected laneline and the information
    area_img = utils.draw_area(undist, thresholded_wraped, Minv, left_fit,
                               right_fit)
    curvature, pos_from_center = utils.calculate_curv_and_pos(
        thresholded_wraped, left_fit, right_fit)
    result = utils.draw_values(area_img, curvature, pos_from_center)

    return result


#
#
# Shared lane-line state; the frame-processing lambda below captures these,
# so they persist across video frames.
left_line = line.Line()
right_line = line.Line()
# Camera calibration from the chessboard images.
cal_imgs = utils.get_images_by_dir('camera_cal')
object_points, img_points = utils.calibrate(cal_imgs, grid=(9, 6))
# Forward and inverse perspective matrices for the bird's-eye transform.
M, Minv = utils.get_M_Minv()

# NOTE(review): 'vedio_out' looks like a typo for 'video_out', but it is a
# real output path — renaming it would change where the file is written.
project_outpath = 'vedio_out/project_video_out.mp4'
project_video_clip = VideoFileClip("project_video.mp4")
# Run every frame through the lane-finding pipeline and write the result.
project_video_out_clip = project_video_clip.fl_image(lambda clip: processing(
    clip, object_points, img_points, M, Minv, left_line, right_line))
project_video_out_clip.write_videofile(project_outpath, audio=False)

##draw the processed test image
# test_imgs = utils.get_images_by_dir('test_images')
# undistorted = []
# for img in test_imgs:
#    img = utils.cal_undistort(img,object_points,img_points)
#    undistorted.append(img)
Example #5
0
if __name__ == "__main__":

    # Set up the command-line argument structure.
    ap = argparse.ArgumentParser(description='Image Segmentation')
    # NOTE: the code loads a directory of images, so the help text now says
    # so (the original help claimed a video file).
    ap.add_argument("--pic",
                    "-p",
                    type=str,
                    default='test',
                    metavar='FILE',
                    help="Name of the image directory to parse")
    # Store parsed arguments into a dict.
    args = vars(ap.parse_args())

    # Extract stored arguments for later use in the script.
    _img = args["pic"]
    _imgs, _img_names = utils.get_images_by_dir(_img)

    # Window that will host the threshold trackbars.
    cv2.namedWindow('image')

    # Trackbars for interactively tuning the color-threshold bounds;
    # onChange (defined elsewhere in this file) reacts to every adjustment.
    cv2.createTrackbar('Hmin', 'image', 32, 255, onChange)
    cv2.createTrackbar('Smin', 'image', 52, 255, onChange)
    cv2.createTrackbar('Vmin', 'image', 118, 255, onChange)
    cv2.createTrackbar('Hmax', 'image', 255, 255, onChange)
    cv2.createTrackbar('Smax', 'image', 255, 255, onChange)
    cv2.createTrackbar('Vmax', 'image', 110, 255, onChange)
    cv2.createTrackbar('Ymin', 'image', 0, 255, onChange)
    cv2.createTrackbar('Umin', 'image', 0, 255, onChange)
    cv2.createTrackbar('VVmin', 'image', 0, 255, onChange)
    cv2.createTrackbar('Ymax', 'image', 164, 255, onChange)
Example #6
0
def cam_calibration(chechers_imgs, lane_imgs):
    """Undistort *lane_imgs* using calibration derived from *chechers_imgs*.

    Returns (lane_imgs, undistorted_lane_imgs).
    """
    object_points, img_points = get_obj_img_points(chechers_imgs)
    undistorted = [
        cal_undistort(lane_img, object_points, img_points)
        for lane_img in lane_imgs
    ]
    return lane_imgs, undistorted


def display(in_imgs, out_imgs):
    """Show input/output image pairs side by side in a single figure.

    Column 1 holds the inputs, column 2 the corresponding outputs. The grid
    is fixed at 8 rows, matching the expected number of test images.
    (The original also kept an `i` counter that was never used; removed.)
    """
    plt.figure(figsize=(10, 10))
    for row, (img1, img2) in enumerate(zip(in_imgs, out_imgs)):
        # Subplot indices are 1-based and row-major: two cells per row.
        plt.subplot(8, 2, 2 * row + 1), plt.imshow(img1)
        plt.subplot(8, 2, 2 * row + 2), plt.imshow(img2)
    plt.show()


if __name__ == '__main__':
    # Calibrate with the checkerboard shots, undistort the lane images,
    # then show before/after pairs.
    checker_imgs = utils.get_images_by_dir('camera_cal')
    lane_imgs = utils.get_images_by_dir('test_images')
    imgs, undistort = cam_calibration(checker_imgs, lane_imgs)
    display(imgs, undistort)
Example #7
0
import utils
from camera_calibration import *
from threasholding import *
from perspective_transform import *
from detect_lane import *
from cal_curv_pos import *
from show import *

if __name__ == '__main__':
    # Load the chessboard calibration shots.
    # NOTE(review): 'img\\chessboard' is a Windows-only path separator mixed
    # into os.path.join — confirm before running on another OS.
    cal_imgs = utils.get_images_by_dir(
        os.path.join(os.getcwd(), 'img\\chessboard'))
    # cv2.imshow('img',cal_imgs[0])
    # cv2.waitKey(0)
    object_points, img_points = calibrate(cal_imgs, grid=(9, 6))
    # print(len(cal_imgs), object_points, img_points)
    test_imgs = utils.get_images_by_dir(os.path.join(os.getcwd(), 'img\\test'))
    # Work on a single image while experimenting.
    test_imgs = [test_imgs[0]]
    undistorted_imgs = []
    for img in test_imgs:
        # Undistortion is disabled below, so this list currently holds the
        # raw test images unchanged — presumably intentional while debugging.
        #img = cal_undistort(img, object_points, img_points)
        undistorted_imgs.append(img)
    # cv2.imshow("", undistorted_imgs[0])
    # cv2.waitKey(0)

    # Commented-out thresholding experiments kept for reference.
    # tmp = undistorted_imgs[0]
    # tmp = abs_sobel_thresh(tmp, orient='x', thresh_min=10, thresh_max=230)
    # tmp = mag_thresh(tmp, sobel_kernel=9, mag_thresh=(30, 150))
    # tmp=dir_threshold(img,sobel_kernel=3,thresh=(0.7,1.3))
    # tmp=hls_select(tmp,channel='s',thresh=(180,255))
    # tmp=lab_select(tmp,(180,255))
Example #8
0

# Shared lane-line state consumed by processing() for each image below.
left_line = line.Line()
right_line = line.Line()
#cal_imgs = utils.get_images_by_dir('camera_cal')
#object_points,img_points = utils.calibrate(cal_imgs,grid=(9,6))
# Forward and inverse perspective matrices for the bird's-eye transform.
M, Minv = utils.get_M_Minv()
'''
project_outpath = 'vedio_out/Rail_detection.mp4'
project_video_clip = VideoFileClip("Rail.mp4")
project_video_out_clip = project_video_clip.fl_image(lambda clip: processing(clip,M,Minv,left_line,right_line))
project_video_out_clip.write_videofile(project_outpath, audio=False)
'''

#draw the processed test image
test_imgs = utils.get_images_by_dir('pic')
# Undistortion is commented out, so "undistorted" currently holds the raw
# images unchanged — presumably intentional for this rail-detection demo.
undistorted = []
for img in test_imgs:
    #img = utils.cal_undistort(img,object_points,img_points)
    undistorted.append(img)

result = []
t2 = []
c = 1
for img in undistorted:
    prev_time = time.time()
    res, t1 = processing(img, M, Minv, left_line, right_line)
    curr_time = time.time()
    exec_time = curr_time - prev_time
    info = "time: %.2f ms" % (1000 * exec_time)
    print(info)