Example #1
def visualize_perspective_transform(img):
    pipeline = LaneDetection()

    img = pipeline.distortion_correction(img)
    thresh = pipeline.combined_threshold(img)
    warped, M = pipeline.warp_perspective(thresh)

    # convert from BGR to RGB
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

    # add src lines to image
    src_pts = np.array(pipeline.src, np.int32)
    src_pts = src_pts.reshape((-1, 1, 2))
    img = cv2.polylines(img, [src_pts], True, (255, 0, 0), 5)

    # stack the binary image into 3 channels so the dst lines can be drawn in red
    thresh = np.array(np.dstack((thresh, thresh, thresh)) * 255, np.uint8)

    dst_pts = np.array(pipeline.dst, np.int32)
    dst_pts = dst_pts.reshape((-1, 1, 2))
    thresh = cv2.polylines(thresh, [dst_pts], True, (255, 0, 0), 5)

    warped = np.array(np.dstack((warped, warped, warped)) * 255, np.uint8)

    dst_pts = np.array(pipeline.dst, np.int32)
    dst_pts = dst_pts.reshape((-1, 1, 2))
    warped = cv2.polylines(warped, [dst_pts], True, (255, 0, 0), 5)

    # Visualize perspective transformation
    side_by_side(
        (img, thresh, warped),
        ('Original Image', 'Combined Threshold', 'Warped Perspective'),
        cols=3)
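
Example #1 calls a side_by_side helper that is not shown anywhere on this page. A minimal sketch, assuming it only tiles the images in one row with matplotlib (the original helper may differ):

import matplotlib.pyplot as plt

def side_by_side(images, titles, cols=3):
    # hypothetical helper: tile the images in a single row with their titles
    fig, axes = plt.subplots(1, cols, figsize=(6 * cols, 4))
    for ax, image, title in zip(axes, images, titles):
        ax.imshow(image, cmap='gray' if image.ndim == 2 else None)
        ax.set_title(title)
        ax.axis('off')
    plt.tight_layout()
    plt.show()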
Example #2
    def grab(self):
        global window_search
        window_search = True

        # if TRUE, set mouse callback function
        if self.mouse_callback:
            cv2.namedWindow("screen_grab")
            cv2.setMouseCallback("screen_grab", mouse_coords)

        # create lane detection class instance
        find_lane = LaneDetection()

        with mss.mss() as sct:
            while True:  # screen capturing
                last_time = time.time()
                # Get raw pixels from the screen, save it to a Numpy array
                img = np.array(sct.grab(self.monitor))

                try:
                    processed_img = find_lane.get_image(img)
                    cv2.imshow('screen_grab', processed_img)
                except Exception:
                    # lane fit not ready yet; keep capturing
                    print("Finding Lanes...")

                if self.show_fps:
                    print('FPS: {0:.0f}'.format(1 / (time.time() - last_time)))

                # Press "q" to quit
                if cv2.waitKey(25) & 0xFF == ord('q'):
                    cv2.destroyAllWindows()
                    break
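
grab() registers a mouse_coords callback that is not shown. A minimal sketch, assuming it only reports clicked pixels so the perspective-transform points can be read off:

import cv2

def mouse_coords(event, x, y, flags, param):
    # hypothetical callback: print the clicked pixel in the capture window
    if event == cv2.EVENT_LBUTTONDOWN:
        print('clicked at x={}, y={}'.format(x, y))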
Example #3
def calculate_score_for_leaderboard():
    """
    Evaluate the performance of the network. This is the function to be used for
    the final ranking on the course-wide leader-board, only with a different set
    of seeds. Better not change it.
    """
    # action variables
    a = np.array([0.0, 0.0, 0.0])

    # init environment
    env = CarRacing()
    env.render()
    env.reset()

    seeds = [
        22597174, 68545857, 75568192, 91140053, 86018367, 49636746, 66759182,
        91294619, 84274995, 31531469
    ]

    total_reward = 0

    for episode in range(10):
        env.seed(seeds[episode])
        observation = env.reset()

        # init modules of the pipeline
        LD_module = LaneDetection(gradient_threshold=25, spline_smoothness=20)
        LatC_module = LateralController(gain_constant=1.8,
                                        damping_constant=0.05)
        LongC_module = LongitudinalController(KD=0.001)

        reward_per_episode = 0
        for t in range(600):
            # perform step
            s, r, done, speed, info = env.step(a)

            # lane detection
            lane1, lane2 = LD_module.lane_detection(s)

            # waypoint and target_speed prediction
            waypoints = waypoint_prediction(lane1, lane2)
            target_speed = target_speed_prediction(waypoints,
                                                   max_speed=60,
                                                   exp_constant=6)

            # control
            a[0] = LatC_module.stanley(waypoints, speed)
            a[1], a[2] = LongC_module.control(speed, target_speed)

            # reward
            reward_per_episode += r

            env.render()

        print('episode %d \t reward %f' % (episode, reward_per_episode))
        total_reward += np.clip(reward_per_episode, 0, np.inf)

    print('---------------------------')
    print(' total score: %f' % (total_reward / 10))
    print('---------------------------')
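
The control step above calls LatC_module.stanley(waypoints, speed). A minimal sketch of the classic Stanley steering law, assuming waypoints is a 2xN array in image coordinates with the car centred near x = 48; the course's actual LateralController may differ:

import numpy as np

class StanleyController:
    # hypothetical minimal Stanley lateral controller
    def __init__(self, gain_constant=1.8, damping_constant=0.05):
        self.k = gain_constant
        self.damping = damping_constant
        self.previous_steer = 0.0

    def stanley(self, waypoints, speed):
        # heading error: direction of the first waypoint segment
        dx = waypoints[0, 1] - waypoints[0, 0]
        dy = waypoints[1, 1] - waypoints[1, 0]
        heading_error = np.arctan2(dx, dy)
        # cross-track error: lateral offset of the nearest waypoint (x = 48 assumed)
        cross_track_error = waypoints[0, 0] - 48.0
        steer = heading_error + np.arctan2(self.k * cross_track_error, speed + 1e-6)
        # damp steering changes between frames
        steer -= self.damping * (steer - self.previous_steer)
        self.previous_steer = steer
        return float(np.clip(steer, -0.4, 0.4))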
Example #4
def evaluate():
    """
    """

    # action variables
    a = np.array([0.0, 0.0, 0.0])

    # init environment
    env = CarRacing()
    env.render()
    env.reset()

    for episode in range(5):
        observation = env.reset()
        # init modules of the pipeline
        LD_module = LaneDetection()
        LatC_module = LateralController()
        LongC_module = LongitudinalController()
        reward_per_episode = 0
        for t in range(500):
            # perform step
            s, r, done, speed, info = env.step(a)

            # lane detection
            lane1, lane2 = LD_module.lane_detection(s)

            # waypoint and target_speed prediction
            waypoints = waypoint_prediction(lane1, lane2)
            target_speed = target_speed_prediction(waypoints,
                                                   max_speed=60,
                                                   exp_constant=4.5)

            # control
            a[0] = LatC_module.stanley(waypoints, speed)
            a[1], a[2] = LongC_module.control(speed, target_speed)

            # reward
            reward_per_episode += r
            env.render()

        print('episode %d \t reward %f' % (episode, reward_per_episode))
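
LongC_module.control(speed, target_speed) returns a (gas, brake) pair in both examples above. A minimal PD sketch of such a controller, reusing the KD name from Example #3; the actual LongitudinalController may differ:

import numpy as np

class SimpleLongitudinalController:
    # hypothetical PD longitudinal controller producing (gas, brake)
    def __init__(self, KP=0.02, KD=0.001):
        self.KP = KP
        self.KD = KD
        self.last_error = 0.0

    def control(self, speed, target_speed):
        error = target_speed - speed
        u = self.KP * error + self.KD * (error - self.last_error)
        self.last_error = error
        if u >= 0:
            return float(np.clip(u, 0.0, 0.8)), 0.0  # accelerate, no brake
        return 0.0, float(np.clip(-u, 0.0, 0.8))     # brake, no gas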
Example #5
def __main__():
    # get video stream
    video_cap = imageio.get_reader(config["project_video"])
    # polynomial lane fit
    lanes_fit = []

    # history of heatmaps to reject false positives
    history = deque(maxlen=config["history_limit"])

    # classifier and scaler
    classifier = Classifier.get_trained_classifier(use_pre_trained=True)

    # load calibration parameters:
    camera_matrix, dist_coef = PreProcessing.load_calibration_params()
    for index, img in enumerate(video_cap):
        if index % config["skip_frames"] == 0:
            # get lanes
            lanes_fit, img = LaneDetection.pipeline(img, lanes_fit,
                                                    camera_matrix, dist_coef)
            # resize image to improve speed of vehicle detection using classifier

            # jpg to png
            if config["is_training_png"]:
                img = Helper.scale_to_png(img)

            # 3 channel without alpha
            img = img[:, :, :config["channels"]]

            bounding_boxes = []
            # get bounding boxes for left side
            x_start_stop_left, y_start_stop_left = config["xy_start_stop_left"]
            bounding_boxes += WindowSearch.get_bounding_boxes(
                img, classifier, x_start_stop_left, y_start_stop_left)
            # get bounding boxes for top side
            x_start_stop_top, y_start_stop_top = config["xy_start_stop_top"]
            bounding_boxes += WindowSearch.get_bounding_boxes(
                img, classifier, x_start_stop_top, y_start_stop_top)
            # get bounding boxes for right side
            x_start_stop_right, y_start_stop_right = config[
                "xy_start_stop_right"]
            bounding_boxes += WindowSearch.get_bounding_boxes(
                img, classifier, x_start_stop_right, y_start_stop_right)

            # remove false positives and duplicates from detection
            detected_cars = Helper.remove_false_positives(
                img, bounding_boxes, history)
            # visualization
            plt.imshow(detected_cars)
            plt.pause(0.0001)
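
Helper.remove_false_positives is not shown; the deque of heatmaps suggests the standard approach of thresholding a heatmap accumulated over recent frames. A sketch under that assumption (the bounding-box format ((x1, y1), (x2, y2)) is also assumed):

import cv2
import numpy as np
from scipy.ndimage import label

def remove_false_positives(img, bounding_boxes, history, threshold=15):
    # accumulate a heatmap over recent frames, threshold it,
    # then draw one box per connected hot region
    heatmap = np.zeros(img.shape[:2], dtype=np.float32)
    for (x1, y1), (x2, y2) in bounding_boxes:
        heatmap[y1:y2, x1:x2] += 1.0
    history.append(heatmap)
    combined = np.sum(history, axis=0)
    combined[combined < threshold] = 0
    labels, n_cars = label(combined)
    out = np.copy(img)
    for car in range(1, n_cars + 1):
        ys, xs = np.nonzero(labels == car)
        cv2.rectangle(out, (int(xs.min()), int(ys.min())),
                      (int(xs.max()), int(ys.max())), (0, 0, 255), 4)
    return out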
Example #6
def main():
    NX = 9
    NY = 6
    h = 720
    w = 1280
    nwindows = 9
    margin = 100
    tol = 50

    CALIBRATION_IMAGES = glob.glob('../camera_cal/*')

    lane_detector = LaneDetection(h, w, CALIBRATION_IMAGES, NX, NY, nwindows,
                                  margin, tol)

    video_output = '../output_videos/project_video.mp4'
    clip1 = VideoFileClip("../project_video.mp4")

    white_clip = clip1.fl_image(lane_detector.process_frame)
    white_clip.write_videofile(video_output, audio=False)
Example #7
def main():
    '''
    Video streaming.
    Press 'q' to exit and 'w' to pause the video.
    '''
    if len(sys.argv) > 1:
        cap = cv2.VideoCapture(sys.argv[1])
    else:
        cap = cv2.VideoCapture('videos/road01.mp4')
    fps = cap.get(cv2.CAP_PROP_FPS)
    while cap.isOpened():
        time_start = time.time()
        ret, frame = cap.read()
        if not ret:
            break
        frame = LaneDetection(frame).lane_detection()
        cv2.imshow("Lane Detection", frame)
        key = cv2.waitKey(1)
        if key == ord('q'):
            break
        if key == ord('w'):
            cv2.waitKey(-1)
        sync_fps(time_start, fps)
    cap.release()
    cv2.destroyAllWindows()
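
sync_fps is not shown; a minimal sketch, assuming it just sleeps off the remainder of the frame budget so playback matches the source frame rate:

import time

def sync_fps(time_start, fps):
    # hypothetical helper: sleep until the frame has used its full time budget
    if fps and fps > 0:
        budget = 1.0 / fps
        elapsed = time.time() - time_start
        if elapsed < budget:
            time.sleep(budget - elapsed)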
Example #8
    if k == key.DOWN: a[2] = 0

# init environment
env = CarRacing()
env.render()
env.viewer.window.on_key_press = key_press
env.viewer.window.on_key_release = key_release
env.reset()

# define variables
total_reward = 0.0
steps = 0
restart = False

# init modules of the pipeline
LD_module = LaneDetection()

# init extra plot
fig = plt.figure()
plt.ion()
plt.show()

while True:
    # perform step
    s, r, done, speed, info = env.step(a)
    
    # lane detection
    lane1, lane2 = LD_module.lane_detection(s)

    # waypoint and target_speed prediction
    waypoints = waypoint_prediction(lane1, lane2)
Example #9
#['GTKAgg', 'template', 'pgf', 'pdf', 'nbAgg', 'GTK3Cairo', 'GTKCairo', 'ps',
#'Qt5Agg', 'TkAgg', 'WX', 'gdk', 'GTK', 'GTK3Agg', 'svg', 'WebAgg', 'emf']
#matplotlib.use('agg')
import matplotlib.pyplot as plt
import matplotlib.image as mpimg

#%matplotlib qt
#from IPython import get_ipython
#get_ipython().run_line_magic('matplotlib', 'qt')
from lane_detection import LaneDetection
'''
Compute the camera calibration matrix and distortion coefficients
given a set of chessboard images.
'''
## Instantiate ride
ride = LaneDetection()

# Get images path
images = glob.glob('../data/test_images/test*.jpg')
i = 0
for fname in images:
    original_img = mpimg.imread(fname)  #in RGB
    undistorted_img = ride.camera.undistort(original_img)
    '''
    Get combined binary image
    '''
    combined_binary = ride.thresholding_pipeline(undistorted_img)

    fig = plt.figure(i * 4)
    # Plotting thresholded images
    fig_handle = fig.add_subplot(1, 2, 1)
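
The snippet is cut off mid-plot. A self-contained sketch of the likely continuation, assuming the second panel shows the thresholded output next to the undistorted input:

import matplotlib.pyplot as plt

def show_pair(undistorted_img, combined_binary, index):
    # hypothetical completion of the truncated plotting loop above
    fig = plt.figure(index * 4)
    ax = fig.add_subplot(1, 2, 1)
    ax.imshow(undistorted_img)
    ax.set_title('Undistorted')
    ax = fig.add_subplot(1, 2, 2)
    ax.imshow(combined_binary, cmap='gray')
    ax.set_title('Combined binary')
    plt.show()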
Example #10
import numpy as np
import cv2

from lane_detection import LaneDetection

# Initiate The Lane Detection Pipeline
pipeline = LaneDetection()

name = 'project_'
# name = 'challenge_'
# name = 'harder_challenge_'

cap = cv2.VideoCapture(name + 'video.mp4')

# Check if camera opened successfully
if not cap.isOpened():
    print("Error opening video stream or file")

# The default frame resolution is system dependent; read it and convert
# from float to int.
frame_width = int(cap.get(3))
frame_height = int(cap.get(4))

# Define the codec and create a VideoWriter object; the output is stored in '<name>out.mp4'.
out = cv2.VideoWriter(name + 'out.mp4', cv2.VideoWriter_fourcc(*'H264'), 24,
                      (frame_width, frame_height))

while cap.isOpened():
    ret, frame = cap.read()

    if ret:
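
The loop is truncated right after the ret check. A complete read/process/write loop might look like the following, assuming get_image is the pipeline's per-frame entry point (as in Example #2); the actual method name is not shown:

while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    result = pipeline.get_image(frame)  # assumed pipeline entry point
    out.write(result)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
out.release()
cv2.destroyAllWindows()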
Example #11
    def camera_callback(self, data):

        # Process every third frame (~10 fps from a 30 fps stream)
        if self.counter % 3 != 0:
            self.counter += 1
            return
        else:
            self.counter = 1

        try:
            # direct conversion to an OpenCV image
            np_arr = np.frombuffer(data.data, np.uint8)
            cv_image = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)
        except Exception:
            print("Error converting to CV2")
            return

        # Undistort the image (currently disabled)
        # undist = cv2.undistort(cv_image, self.mtx, self.dist, None, self.mtx)

        # Create lane detection object
        lane_detection_object = LaneDetection()

        # Lane lines detection and process cross track error
        cte, angle, final_img = lane_detection_object.processImage(cv_image)

        cmd_vel = Twist()
        if cte is not None and angle is not None:
            # PD control on the cross-track error
            angular_z = self.Kp * cte + self.Kd * (cte - self.last_cte)
            self.last_cte = cte
            linear_x = self.max_desire_linear_vel
        else:
            # no detection: steer from the last known error
            angular_z = self.Kp * self.last_cte * 1.9
            linear_x = self.max_desire_linear_vel

        # clamp the turn rate to [-2.0, 2.0] rad/s
        angular_z = max(-2.0, min(2.0, angular_z))

        cmd_vel.linear.x = linear_x
        cmd_vel.angular.z = angular_z
        self.cmd_vel_pub.publish(cmd_vel)

        if self.debug:
            w_l = ((2 * linear_x) -
                   (angular_z * self.wheel_base)) / (2 * self.wheel_radius)
            w_r = ((2 * linear_x) +
                   (angular_z * self.wheel_base)) / (2 * self.wheel_radius)

            # clamp the PWM magnitudes to [0, 255]
            pwm_l = min(abs(int((255 * w_l) / self.max_omega)), 255)
            pwm_r = min(abs(int((255 * w_r) / self.max_omega)), 255)

            print("w_l : " + str(w_l))
            print("pwm_l : " + str(pwm_l))
            print("w_r : " + str(w_r))
            print("pwm_r : " + str(pwm_r))
            print("----------")

        if lane_detection_object.draw_img:
            cv2.imshow("Original Image", final_img)
            cv2.waitKey(1)
Example #12
import cv2
from lane_detection import LaneDetection

image_file = "D:/AutoDrive/0928/start.jpg"
video_file = "D:/AutoDrive/0928/raw.mp4"
output_file = "D:/AutoDrive/0928/out.mp4"

# create the lane-detection object
lane_det = LaneDetection(video_file, output_file)
img = cv2.imread(image_file)

# lane_det.gray_scale(image_file)  # output the grayscale image
# lane_det.binarization(image_file)  # output the binarized image
# blur_gray = lane_det.gaussian_blur(image_file, show=False)  # output the Gaussian-blurred image
# edges = lane_det.edge_detect(blur_gray, show=False)  # output the edge map (needs the blurred image)
# roi_edges = lane_det.roi(edges, show=True)  # output the edges cropped to the region of interest (needs the edge map)
# lane_det.hough_transform(roi_edges, show=True)  # output the Hough-transform result (needs the ROI edges)

# cv2.imwrite("D:/AutoDrive/0928/start_edges.jpg", edges)  # write the image to a file

# res_img = lane_det.process_an_image(img, show=True)  # process a single image
lane_det.process_a_video()  # process a video
# lane_det.generate_video()  # write the output video


Example #13
# action variables
a = np.array([0.0, 0.0, 0.0])

# init environment
env = CarRacing()
env.render()
env.reset()

# define variables
total_reward = 0.0
steps = 0
restart = False

# init modules of the pipeline
LD_module = LaneDetection()
LatC_module = LateralController()
LongC_module = LongitudinalController()

# init extra plot
fig = plt.figure()
plt.ion()
plt.show()


while True:
    # perform step
    s, r, done, speed, info = env.step(a)

    # lane detection
    lane1, lane2 = LD_module.lane_detection(s)
Example #14

# init environment
env = CarRacing()
env.render()
env.viewer.window.on_key_press = key_press
env.viewer.window.on_key_release = key_release
env.reset()

# define variables
total_reward = 0.0
steps = 0
restart = False

# init modules of the pipeline
LD_module = LaneDetection()

# init extra plot
fig = plt.figure()
plt.ion()
plt.show()

while True:
    # perform step
    s, r, done, speed, info = env.step(a)

    # lane detection
    splines = LD_module.lane_detection(s)

    # reward
    total_reward += r
Example #15
# Import everything needed to edit/save/watch video clips
from moviepy.editor import VideoFileClip

#%matplotlib qt
from IPython import get_ipython
#get_ipython().run_line_magic('matplotlib', 'qt')

from lane_detection import LaneDetection


'''
Compute the camera calibration matrix and distortion coefficients
given a set of chessboard images.
'''
## Instantiate ride
ride = LaneDetection()

''' 
Video rubric
'''
# Video  file path
video_name = '../data/test_videos/project_video.mp4'
video_name_save = '../data/test_videos/project_video_48cfu.mp4'
#video = imageio.get_reader(video_name)

video = VideoFileClip(video_name)
new_clip = video.fl_image(ride.process_image)
#get_ipython().run_line_magic('matplotlib', 'time')
new_clip.write_videofile(video_name_save, audio=False)

input("Press Enter to continue...")
Example #16
def main():
    global original, image
    tracker = {'apply mask': 0,
               'canny high threshold': 0,
               'canny low threshold': 0,
               'hough threshold': 1,
               'hough max line gap': 1,
               'hough min line length': 1}

    img = image
    img_g = image.copy()

    font = cv2.FONT_HERSHEY_SIMPLEX

    ld = LaneDetection()

    while True:
        cv2.imshow('img', img)
        k = cv2.waitKey(1) & 0xFF
        if k == 27:
            break

        # Get current positions of the trackers
        apply_mask = cv2.getTrackbarPos('apply mask', 'img')
        canny_high_threshold = cv2.getTrackbarPos('canny high threshold', 'img')
        canny_low_threshold = cv2.getTrackbarPos('canny low threshold', 'img')
        hough_threshold = cv2.getTrackbarPos('hough threshold', 'img')
        hough_max_line_gap = cv2.getTrackbarPos('hough max line gap', 'img')
        hough_min_line_length = cv2.getTrackbarPos('hough min line length', 'img')

        if canny_high_threshold != tracker['canny high threshold'] or canny_low_threshold != tracker['canny low threshold']:
            img = ld.findCannyEdges(image.copy(), canny_low_threshold, canny_high_threshold)
            img_g = img
            tracker['canny high threshold'] = canny_high_threshold
            tracker['canny low threshold'] = canny_low_threshold
            # print('Canny_low: ', canny_low_threshold)
            # print('Canny_high: ', canny_high_threshold)

        if apply_mask == 0 and apply_mask != tracker['apply mask']:
            img = image.copy()
        elif apply_mask == 1:
            img = ld.getRoi(img)

        if hough_threshold != tracker['hough threshold'] or hough_max_line_gap != tracker['hough max line gap'] or hough_min_line_length != tracker['hough min line length']:
            lines = ld.findHoughLines(img_g, hough_threshold, hough_min_line_length, hough_max_line_gap)
            canvas = np.zeros(original.shape, np.uint8)
            canvas = ld.drawLines(canvas, lines, (0, 200, 0), 2)
            img_c = cv2.cvtColor(img_g.copy(), cv2.COLOR_GRAY2BGR)
            img = cv2.bitwise_and(canvas, img_c)
            tracker['hough threshold'] = hough_threshold
            tracker['hough max line gap'] = hough_max_line_gap
            tracker['hough min line length'] = hough_min_line_length
            # print('hough_threshold', hough_threshold)
            # print('hough_max_line_gap', hough_max_line_gap)
            # print('hough_min_line_length', hough_min_line_length)

        # Overlay the current tracker parameters on the image
        cv2.putText(img, 'Canny high thresh: ' + str(canny_high_threshold), (10, 440), font, 0.5, (255, 255, 255), 1, cv2.LINE_AA)
        cv2.putText(img, 'Canny low thresh: ' + str(canny_low_threshold), (10, 460), font, 0.5, (255, 255, 255), 1, cv2.LINE_AA)
        cv2.putText(img, 'hough thresh: ' + str(hough_threshold), (10, 480), font, 0.5, (255, 255, 255), 1, cv2.LINE_AA)
        cv2.putText(img, 'hough max line gap: ' + str(hough_max_line_gap), (10, 500), font, 0.5, (255, 255, 255), 1, cv2.LINE_AA)
        cv2.putText(img, 'hough min line length: ' + str(hough_min_line_length), (10, 520), font, 0.5, (255, 255, 255), 1, cv2.LINE_AA)

    cv2.destroyAllWindows()
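
main() reads six trackbars that the snippet never creates. A sketch of the matching setup (the value ranges are assumptions, since the original setup code is not shown):

import cv2

def nothing(_):
    pass

cv2.namedWindow('img')
cv2.createTrackbar('apply mask', 'img', 0, 1, nothing)
cv2.createTrackbar('canny low threshold', 'img', 0, 255, nothing)
cv2.createTrackbar('canny high threshold', 'img', 0, 255, nothing)
cv2.createTrackbar('hough threshold', 'img', 1, 200, nothing)
cv2.createTrackbar('hough max line gap', 'img', 1, 200, nothing)
cv2.createTrackbar('hough min line length', 'img', 1, 200, nothing)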
Example #17
leftLine = [0] * 4
count = 0
smooth = 5

mtx, dist = camera_calibration()

leftLines = []
rightLines = []
lastLeftLine = Line()
lastRightLine = Line()

cap = cv2.VideoCapture('./project_video.mp4')
# cap = cv2.VideoCapture('./challenge_video.mp4')
# cap = cv2.VideoCapture('./harder_challenge_video.mp4')

detection = LaneDetection(nwindows=20)

fourcc = cv2.VideoWriter_fourcc('X', '2', '6', '4')
out = cv2.VideoWriter('output_video.mp4', fourcc, cap.get(cv2.CAP_PROP_FPS),
                      (1280, 720))

while True:
    # Capture frame-by-frame
    ret, frame = cap.read()

    if ret:

        detection.setImage(frame)

        left_fit, right_fit, left_curvature, right_curvature, left_fitx, right_fitx, dif_meters, left_base, right_base = detection.process_detection(
            mtx, dist)
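
The snippet ends right after process_detection. The smooth, leftLines, and rightLines variables suggest a moving average over recent fits; a minimal helper sketch under that assumption:

import numpy as np

def smooth_fit(history, new_fit, window=5):
    # hypothetical smoothing: average the last `window` polynomial fits
    history.append(new_fit)
    if len(history) > window:
        history.pop(0)
    return np.mean(history, axis=0)

# e.g. left_fit = smooth_fit(leftLines, left_fit, smooth)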