Example #1
import cv2
import LaneFinder


def process_single_image(filename, mtx, dist):
    print(filename)
    laneFinder = LaneFinder.LaneFinder(mtx, dist)

    #read image
    img = cv2.imread("test_images/" + filename)
    #convert to rgb
    rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    #undistort
    undist = cv2.undistort(rgb, mtx, dist, None, mtx)
    cv2.imwrite("output_images/undist_" + filename,
                cv2.cvtColor(undist, cv2.COLOR_RGB2BGR))

    # threshold to a binary lane image and save a viewable 0/255 copy
    binary = laneFinder.image2LaneBinary(undist)
    gray = binary * 255
    cv2.imwrite("output_images/bin_" + filename, gray)
    # plt.imshow(gray, cmap='gray')
    trans = laneFinder.perspectiveTransform(binary)

    #plt.imshow(rgb)

    # locate lane-line pixels in the warped binary image and save the visualization
    leftx, lefty, rightx, righty, transLane = laneFinder.findLanePixels(trans)
    cv2.imwrite("output_images/tr_" + filename, transLane)

    # run the complete pipeline on the original RGB image and save the annotated result
    outimg = laneFinder.processImage(rgb)
    cv2.imwrite("output_images/lane_" + filename,
                cv2.cvtColor(outimg, cv2.COLOR_RGB2BGR))
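The calibration inputs and the driver loop for this function are not shown in the example; a minimal sketch of how it might be invoked, assuming the camera matrix and distortion coefficients were pickled under the keys 'mtx' and 'dist' as in Example #5, could look like this:

import os
import pickle

# load a previously saved camera calibration (keys assumed as in Example #5)
with open("camera_cal/undistort_pickle.p", "rb") as f:
    calibration = pickle.load(f)
mtx, dist = calibration['mtx'], calibration['dist']

# run the single-image pipeline over every test image
for filename in sorted(os.listdir("test_images")):
    if filename.lower().endswith(".jpg"):
        process_single_image(filename, mtx, dist)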
Example #2
import LaneFinder
from moviepy.editor import VideoFileClip


def processVideo(mtx, dist):
    # process each project video with a fresh LaneFinder instance
    laneFinder = LaneFinder.LaneFinder(mtx, dist)
    clip = VideoFileClip("project_video.mp4")
    outclip = clip.fl_image(lambda img: process_video_image(laneFinder, img))
    outclip.write_videofile("output_video/project_video.mp4", audio=False)

    laneFinder = LaneFinder.LaneFinder(mtx, dist)
    clip = VideoFileClip("challenge_video.mp4")
    outclip = clip.fl_image(lambda img: process_video_image(laneFinder, img))
    outclip.write_videofile("output_video/challenge_video.mp4", audio=False)

    laneFinder = LaneFinder.LaneFinder(mtx, dist)
    clip = VideoFileClip("harder_challenge_video.mp4")
    outclip = clip.fl_image(lambda img: process_video_image(laneFinder, img))
    outclip.write_videofile("output_video/harder_challenge_video.mp4",
                            audio=False)
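The process_video_image helper referenced above is not part of this example; a minimal sketch, assuming the LaneFinder instance exposes the same processImage method used in Example #1, might be:

def process_video_image(laneFinder, img):
    # moviepy's fl_image hands in one RGB frame at a time and expects an
    # annotated RGB frame of the same shape back for the output video
    return laneFinder.processImage(img)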
Example #3
import cv2
import numpy as np
import LaneFinder


def warp_image(mtx, dist):
    img = cv2.imread("test_images/straight_lines1.jpg")
    rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

    laneFinder = LaneFinder.LaneFinder(mtx, dist)
    undist = cv2.undistort(rgb, mtx, dist, None, mtx)

    #warp
    warp = laneFinder.perspectiveTransform(undist)

    #draw src lines to undistorted image
    linar = LaneFinder.src.astype(np.int32).reshape((-1, 1, 2))
    cv2.polylines(undist, [linar], True, [255, 0, 0], 3)

    #draw dst lines to warped image
    linar = LaneFinder.dst.astype(np.int32).reshape((-1, 1, 2))
    cv2.polylines(warp, [linar], True, [255, 0, 0], 3)

    cv2.imwrite("output_images/straight_lines1_warpsrc.jpg",
                cv2.cvtColor(undist, cv2.COLOR_RGB2BGR))
    cv2.imwrite("output_images/straight_lines1_warpdst.jpg",
                cv2.cvtColor(warp, cv2.COLOR_RGB2BGR))
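The module-level src and dst point sets and the perspectiveTransform method are defined elsewhere in this project; a minimal sketch of the usual approach, with illustrative coordinates for a 1280x720 road image (the real values are an assumption here), might look like:

import cv2
import numpy as np

# hypothetical source trapezoid on the road and its rectangular destination
src = np.float32([[190, 720], [580, 460], [700, 460], [1090, 720]])
dst = np.float32([[300, 720], [300, 0], [980, 0], [980, 720]])

M = cv2.getPerspectiveTransform(src, dst)       # forward warp to a bird's-eye view
Minv = cv2.getPerspectiveTransform(dst, src)    # inverse warp back to camera view

def perspective_transform(img):
    h, w = img.shape[:2]
    return cv2.warpPerspective(img, M, (w, h), flags=cv2.INTER_LINEAR)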
Example #4
    cap.release()
    outVideo.release()
    print("Execute end")


if __name__ == '__main__':

    import gc
    import json
    import numpy as np
    import LaneFinder
    path = './configure.json'
    with open(path, "rb") as config_file:
        fileJson = json.load(config_file)
    cam_matrix = fileJson[0]["cam_matrix"]
    dist_coeffs = fileJson[0]["dist_coeffs"]
    perspective_transform = fileJson[0]["perspective_transform"]
    pixels_per_meter = fileJson[0]["pixels_per_meter"]
    WARPED_SIZE = fileJson[0]["WARPED_SIZE"]
    ORIGINAL_SIZE = fileJson[0]["ORIGINAL_SIZE"]

    cam_matrix = np.array(cam_matrix)
    dist_coeffs = np.array(dist_coeffs)
    perspective_transform = np.array(perspective_transform)
    pixels_per_meter = tuple(pixels_per_meter)
    WARPED_SIZE = tuple(WARPED_SIZE)
    ORIGINAL_SIZE = tuple(ORIGINAL_SIZE)

    lf = LaneFinder.LaneFinder(ORIGINAL_SIZE, WARPED_SIZE, cam_matrix,
                               dist_coeffs, perspective_transform,
                               pixels_per_meter)
    main()
    gc.collect()
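The configure.json file loaded above is not included with the example; a short sketch of a script that writes a file with the expected layout (a one-element list whose first object carries the calibration and warp parameters; all values below are placeholders, not the project's real numbers) could be:

import json
import numpy as np

config = [{
    "cam_matrix": np.eye(3).tolist(),             # placeholder camera matrix
    "dist_coeffs": np.zeros((1, 5)).tolist(),     # placeholder distortion coefficients
    "perspective_transform": np.eye(3).tolist(),  # placeholder warp matrix
    "pixels_per_meter": [46.6, 33.1],             # hypothetical x/y scale
    "WARPED_SIZE": [600, 500],                    # hypothetical warped image size
    "ORIGINAL_SIZE": [1280, 720],                 # hypothetical camera frame size
}]

with open("./configure.json", "w") as f:
    json.dump(config, f, indent=2)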
Example #5
import pickle
import LaneFinder as lf
import imageio
from moviepy.editor import VideoFileClip

with open("camera_cal/undistort_pickle.p", "rb") as pickle_file:
    undistort_coeff = pickle.load(pickle_file)
mtx = undistort_coeff['mtx']
dist = undistort_coeff['dist']
camera_cal = (mtx, dist)
test = lf.Finder(camera_cal)

results = 'results2.mp4'
clip1 = VideoFileClip("challenge_video.mp4")
white_clip = clip1.fl_image(test.discover)
white_clip.write_videofile(results, audio=False)
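The Finder class and its discover method come from the project's LaneFinder module and are not reproduced here; a minimal sketch of the per-frame interface that fl_image requires (an RGB frame in, a same-sized RGB frame out), assuming only the undistortion step from Example #1, might be:

import cv2

class Finder:
    # hypothetical stand-in for LaneFinder.Finder used above
    def __init__(self, camera_cal):
        self.mtx, self.dist = camera_cal

    def discover(self, frame):
        # a real implementation would also threshold, warp, fit the lane
        # lines, and draw them back onto the frame before returning it
        return cv2.undistort(frame, self.mtx, self.dist, None, self.mtx)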

while True:
    left_lane_slope = 0
    right_lane_slope = 0

    # capture the game screen and crop it to the region of interest
    capture = np.array(ScreenGrab.grab_screen(region=(0, 40, 800, 640)))
    cropped_img = CropFunctions.crop(capture)

    # RL begins: pick an action from the Q-table with decaying random exploration
    a = np.argmax(Q[s, :] + np.random.rand(1, len(action_space)) *
                  (1.0 / (i_rand + 1)))
    preprocessed_img, count = Preprocessing.preprocess_image(
        cropped_img, action_space[a])
    filtered_lines, slopes, line_count = LaneFinder.filter_lines(
        preprocessed_img)
    s_new, reward = step(a, s, count, line_count)
    Q[s, a] = Q[s, a] + learning_rate * (reward + gamma * np.max(Q[s_new, :]) -
                                         Q[s, a])
    s = s_new
    if i_rand > randomness:
        i_rand = 0

    LaneFinder.draw_lines(preprocessed_img, filtered_lines)
    cv2.imshow('houghlines', preprocessed_img)

    #cv2.imshow('Captured',preprocessed_img)
    #lines = Preprocessing.hough_lines(preprocessed_img)
    '''
    #finding lanes
    left_lines, right_lines, left_slopes, right_slopes = LaneFinder.left_right_lines(lines)