from moviepy.editor import VideoFileClip
from lane_finder import LaneFinder
from object_detect_yolo import YoloDetector

def remove_mp4_extension(file_name):
    """Return *file_name* with its final extension removed.

    Fix/generalization: the original did ``file_name.replace(".mp4", "")``,
    which leaves non-.mp4 inputs untouched (the commented-out
    ``back_home.mov`` below would have produced ``back_home.mov_output.mp4``)
    and strips ``.mp4`` anywhere in the name, not just at the end.  Using
    ``os.path.splitext`` handles any extension and only the trailing one.
    """
    import os.path
    root, _ext = os.path.splitext(file_name)
    return root


if __name__ == "__main__":
    yolo = YoloDetector()
    # Lane finder delegates per-frame object detection to the YOLO model.
    lane_finder = LaneFinder(save_original_images=True,
                             object_detection_func=yolo.process_image_array)
    video_file = 'project_video.mp4'
    # video_file = 'challenge_video.mp4'
    # video_file = 'back_home.mov'
    # video_file = 'file01_2017322191247.mp4'
    clip = VideoFileClip(video_file, audio=False)
    t_start = 0
    t_end = 0
    # Only pass t_end when a positive end time was requested; otherwise the
    # clip runs through to its natural end.
    if t_end > 0.0:
        clip = clip.subclip(t_start=t_start, t_end=t_end)
    else:
        clip = clip.subclip(t_start=t_start)

    # Map the lane-finding pipeline over every frame and write the result.
    clip = clip.fl_image(lane_finder.process_image)
    clip.write_videofile("{}_output.mp4".format(
        remove_mp4_extension(video_file)),
                         audio=False)
    yolo.shutdown()
from moviepy.video.io.VideoFileClip import VideoFileClip
from lane_finder import LaneFinder

# Candidate input videos, in increasing order of pipeline difficulty.
easy = "./videos/project_video.mp4"
medium = "./videos/challenge_video.mp4"
hard = "./videos/harder_challenge_video.mp4"

# Video processed when this module runs as a script (see the guard below).
input_video = hard
debug = False  # output a debugging version of the video

if __name__ == '__main__':
    # Run the lane-detection pipeline over every frame of `input_video`
    # and write the annotated result alongside it as *_processed.mp4.
    finder = LaneFinder()

    source_clip = VideoFileClip(input_video)

    # Pick the per-frame callback up front, then map it over the clip.
    frame_func = finder.debug_frame if debug else finder.process_frame
    rendered_clip = source_clip.fl_image(frame_func)

    # save video (strip the 4-character ".mp4" suffix for the output name)
    rendered_clip.write_videofile(input_video[:-4] + '_processed.mp4',
                                  audio=False)

    print('Done')
    # NOTE(review): orphan fragment — video_files and output_path are never
    # used in the surrounding code; presumably pasted from another script.
    video_files = ['test_video.mp4', 'project_video.mp4']
    output_path = "output_videos"

    def process_image(img,
                      car_finder,
                      lane_finder,
                      cam_matrix,
                      dist_coeffs,
                      reset=False):
        """Undistort one frame, run car and lane detection on it, and
        return the frame with both overlays drawn.

        `reset` is forwarded to both detectors so their per-frame state
        can be cleared (e.g. at the start of a new clip).
        """
        undistorted = cv2.undistort(img, cam_matrix, dist_coeffs)
        car_finder.find_cars(undistorted, reset=reset)
        lane_finder.find_lane(undistorted, distorted=False, reset=reset)
        with_cars = car_finder.draw_cars(undistorted)
        return lane_finder.draw_lane_weighted(with_cars)

    # NOTE(review): ORIGINAL_SIZE, UNWARPED_SIZE, cam_matrix, dist_coeffs,
    # perspective_transform, pixels_per_meter, CarFinder, cls, scaler,
    # window_size and window_roi are not defined in this fragment —
    # presumably set up in the missing part of this script.
    lf = LaneFinder(ORIGINAL_SIZE, UNWARPED_SIZE, cam_matrix, dist_coeffs,
                    perspective_transform, pixels_per_meter, "warning.png")
    # Car detector configured with HOG/colour-histogram feature parameters
    # (per the keyword names) plus a pre-trained classifier and scaler.
    cf = CarFinder(64,
                   hist_bins=128,
                   small_size=20,
                   orientations=12,
                   pix_per_cell=8,
                   cell_per_block=1,
                   classifier=cls,
                   scaler=scaler,
                   window_sizes=window_size,
                   window_rois=window_roi,
                   transform_matrix=perspective_transform,
                   warped_size=UNWARPED_SIZE,
                   pix_per_meter=pixels_per_meter)

    # NOTE(review): this for-loop has no indented body — the lines below sit
    # at the same indent level, so this fragment is a syntax error as pasted
    # (likely a concatenation artifact); left unrepaired rather than guessed.
    for img_path in os.listdir(test_images_dir):
    mtx, dist = cal_cam()
    print("matrix: {} ; distortion: {}".format(mtx, dist))
    # Read test image and convert to RGB as pipeline expects!
    img = cv2.imread(r'C:\Code\AdvLaneFind\test_images\straight_lines1.jpg')
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    # Use calculated Matrix to undistort.
    undist_img = undist_image(img, mtx, dist)
    # Use the Sobel operator to calculate gradients and use a color threshold and combine
    # NOTE: for speed, get rid of colored binary output that shows which part of binary image is produced by what.
    binary, color_binary = color_threshold(undist_img)
    # Transform to Bird's eye perspective.
    # top_down, perspective_M = corners_unwarp(img, nx, ny, mtx, dist)
    birdseye, M, Minv = warp_to_birdseye(binary, calibrate=True)

    # Local import so this demo fragment is self-contained.
    from lane_finder import LaneFinder
    lf = LaneFinder()
    # Feed lanefinder previously found fit values
    # lf.left_fit = np.array([0, 0, 1.60436725e+02])
    # lf.right_fit = np.array([0, 0, 1.13855949e+03])
    lane_fit_img = lf.find_lane(birdseye, visualization=True)
    birdseye_with_rad, left_curverad, right_curverad = lf.measure_lane_geometry(
        birdseye)
    print('Curvature in m: {}'.format((left_curverad, right_curverad)))

    # Plot Undistorted Test Image

    test_image = cv2.imread('camera_cal/calibration1.jpg')
    undist_test_image = undist_image(test_image, mtx, dist)
    f, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))
    f.tight_layout()
    # NOTE(review): imshow gets the BGR image straight from cv2.imread, so
    # matplotlib will display swapped colours — confirm whether intentional.
    ax1.imshow(test_image)
from moviepy.editor import VideoFileClip
import numpy as np
import cv2
from camera_calibration import cal_cam, undist_image, color_threshold, warp_to_birdseye, warp
from lane_finder import LaneFinder

# Calibrate the camera once at import time; the matrix and distortion
# coefficients are reused for every frame by lane_find_on_image below.
mtx, dist = cal_cam()
# Module-level LaneFinder instance shared across frames.
lf = LaneFinder()


def lane_find_on_image(image):
    """Detect lane lines on one colour frame and return the frame with the
    lane overlay alpha-blended on top.
    """
    # Work on a copy so the caller's frame is never mutated.
    frame = np.copy(image)
    # NOTE(review): the docstring says the input is RGB, yet the frame is
    # converted BGR->RGB here (i.e. channels are swapped) — presumably the
    # downstream pipeline expects this; confirm against color_threshold.
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    undistorted = undist_image(frame, mtx, dist)
    binary_mask, _color_binary = color_threshold(undistorted)
    birdseye_view, _M, inverse_transform = warp_to_birdseye(binary_mask)
    # Fit the lane in bird's-eye space, then warp it back to camera view.
    overlay = lf.find_lane(birdseye_view)
    overlay = warp(overlay, inverse_transform)
    overlay, _left_rad, _right_rad = lf.measure_lane_geometry(overlay)
    return cv2.addWeighted(image, 1.0, overlay, 0.3, 0.0)


if __name__ == "__main__":
    # NOTE(review): this entry point is truncated — the clip is opened but
    # the frame-processing and write_videofile steps are missing below.
    write_output = 'output_images/challenge_video.mp4'
    clip1 = VideoFileClip("challenge_video.mp4")
# Exemple #6
# 0
from moviepy.editor import VideoFileClip
import pickle
from lane_finder import LaneFinder
from line import Line
import numpy as np

# Load the pickled camera-calibration data (image shape plus object/image
# point correspondences) saved by a previous calibration run.
# Fix: use a context manager so the file handle is closed — the original
# `pickle.load(open('camera.p', 'rb'))` never closed it.
with open('camera.p', 'rb') as camera_file:
    gray_shape, objpoints, imgpoints = pickle.load(camera_file)
lane_finder = LaneFinder(gray_shape, objpoints, imgpoints)
left_line = Line(gray_shape[1])
right_line = Line(gray_shape[1])
is_previous_lane_detected = False
# Consecutive frames for which detection has been skipped (updated by
# process_image below).  The original's module-level `global` statement
# was a no-op and has been removed.
n_skipped_frames = 0


def sanity_check(lane_finder):
    """Return True when the most recent lane fit looks plausible.

    Two heuristics are applied to the finder's window searcher:
    the left/right real-world curvatures may not differ by more than
    4000, and the mean pixel gap between the right and left fitted
    x-positions must lie in [200, 1000].  The offending value is
    printed whenever a check fails.
    """
    searcher = lane_finder.window_searcher

    curvatures = searcher.measure_curvature_real()
    if abs(curvatures[0] - curvatures[1]) > 4000:
        print(curvatures)
        return False

    mean_gap = (searcher.right_fitx - searcher.left_fitx).mean()
    if not 200 <= mean_gap <= 1000:
        print(mean_gap)
        return False

    return True


def process_image(image):
    """Per-frame video pipeline entry point (truncated).

    NOTE(review): the body is cut off after the skipped-frame check — the
    remainder of this function is missing from this fragment, so the
    trailing ``if`` has no body and cannot be repaired without guessing.
    """
    global n_skipped_frames
    if n_skipped_frames <= 5:
# Exemple #7
# 0
import cv2
from lane_finder import LaneFinder

if __name__ == "__main__":
    # Run the lane finder on a single still image and display the result.
    lane_finder = LaneFinder()
    filename = "/home/operador/Documentos/_row_image_0.jpeg"
    # Fix: `print filename` was Python 2 syntax — a SyntaxError in Python 3.
    print(filename)
    image = cv2.imread(filename)
    lane_finder.process_image(image)
    lane_finder.find_simil_points_lanes()
    hull = lane_finder.hull_set
    # Draw each matched point pair as a short green line segment.
    for point in lane_finder.points_pair_set:
        cv2.line(image, (point[0], point[1]), (point[2], point[3]),
                 (0, 175, 0), 2)
    # Colours are loop-invariant, so hoist them out of the loop.  In BGR
    # order (0, 0, 255) is red, not blue as the original comment claimed.
    color_contours = (0, 255, 0)  # green - color for contours (used only by
    # the commented-out drawContours call below)
    color = (0, 0, 255)  # red - color for convex hull
    for i in range(len(hull)):
        # draw ith contour
        #cv2.drawContours(drawing, contours, i, color_contours, 1, 8, hierarchy)
        # draw ith convex hull object
        cv2.drawContours(image, hull, i, color, 1, 8)
    cv2.imshow('original', image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
# Exemple #8
# 0
def main(args):
    """Drive a CARLA vehicle between passenger pickup/drop-off endpoints.

    Connects to a CARLA server, switches the world to synchronous mode
    (fixed 0.05 s steps), spawns a vehicle, and then loops forever:
    generating passengers, planning a route to the next endpoint, and
    steering with a lane-keeping controller on straight segments or a
    pure-pursuit controller on turns.  Actors are destroyed in the
    ``finally`` block when the loop is interrupted.

    NOTE(review): relies on names not defined in this fragment (carla, np,
    PassengerGenerator, EndpointSelector, Planner, PurePursuitController,
    LaneKeepingController, LaneFinder, RoadOption and several constants) —
    presumably imported/defined elsewhere in the original script.
    """
    client = carla.Client(args.host, args.port)
    client.set_timeout(10.0)

    # Seeded RNG so blueprint choice / passenger arrivals are reproducible.
    rng = np.random.default_rng(args.seed)

    vehicle = None
    lanefinder = None
    try:
        world = client.get_world()
        spectator = world.get_spectator()
        print('got world')

        # Setup world
        settings = world.get_settings()
        # world.set_weather(carla.WeatherParameters())
        settings.synchronous_mode = True
        settings.fixed_delta_seconds = 0.05
        world.apply_settings(settings)
        world.tick()  # check if necessary
        print('set world settings')

        # Select and spawn vehicle.
        vehicle_blueprint = rng.choice(world.get_blueprint_library().filter(
            args.filter))

        world_map = world.get_map()
        # NOTE(review): hard-coded spawn index — assumes the map has at
        # least 13 spawn points.
        spawn_point = world_map.get_spawn_points()[12]
        vehicle = world.try_spawn_actor(vehicle_blueprint, spawn_point)
        print('created vehicle')

        # Move spectator to newly created vehicle.
        world.tick()
        world.tick()
        spectator.set_transform(vehicle.get_transform())
        print("Initial vehicle position: ", vehicle.get_transform().location)

        all_waypoints = world_map.generate_waypoints(
            10)  # meters between pickup/dropoff positions

        passenger_gen = PassengerGenerator(PASSENGER_ARRIVAL_RATE, rng)

        # Seed the endpoint queue with one passenger so the planner has a
        # goal before the loop starts.
        endpoint_selector = EndpointSelector()
        endpoint_selector.add_passenger_point(
            passenger_gen.generate_new_passenger(all_waypoints))
        current_endpoint = endpoint_selector.next_endpoint()
        assert (current_endpoint is not None)
        print(current_endpoint.transform.location)

        planner = Planner(world_map, MAP_SAMPLING_RESOLUTION,
                          WAYPOINT_ERROR_THRESHOLD)
        planner.set_endpoint(vehicle, current_endpoint)

        turn_controller = PurePursuitController(vehicle, target_speed=10)
        lane_keeping_controller = LaneKeepingController(vehicle,
                                                        target_speed=10)
        print('Created controller')

        lanefinder = LaneFinder(world, vehicle)
        print('Created Lane Finder')

        # Main simulation loop: one control command per world tick.
        world.tick()
        i = 0
        while True:
            world.tick()
            world_snapshot = world.get_snapshot()
            time = world_snapshot.timestamp.elapsed_seconds

            if passenger_gen.new_passenger_arrived(time):
                endpoint_selector.add_passenger_point(
                    passenger_gen.generate_new_passenger(all_waypoints))

            # No waypoint left on the current route: advance to the next
            # endpoint, or fall back to the vehicle's current map waypoint
            # when the endpoint queue is empty.
            target_waypoint = planner.get_target_waypoint(vehicle)
            while target_waypoint is None:
                current_endpoint = endpoint_selector.next_endpoint()
                if current_endpoint is None:
                    target_waypoint = world_map.get_waypoint(
                        vehicle.get_transform().location)
                    break
                planner.set_endpoint(vehicle, current_endpoint)
                target_waypoint = planner.get_target_waypoint(vehicle)

            # NOTE(review): `i % 1 == 0` is always true, so the else branch
            # (draining the image queue) is dead code — presumably a
            # leftover frame-skip knob; confirm intended modulus.
            if i % 1 == 0:
                lanefinder.run_step()
            else:
                lanefinder.image_queue.get()

            # Lane-keeping on straight / lane-follow segments; pure-pursuit
            # everywhere else (turns, lane changes).
            road_option = planner.get_target_road_option(vehicle)
            if road_option == RoadOption.LANEFOLLOW or road_option == RoadOption.STRAIGHT:
                control = lane_keeping_controller.run_step(
                    vehicle, target_waypoint, lanefinder.get_left_lane_x(),
                    lanefinder.get_right_lane_x())
            else:
                control = turn_controller.run_step(vehicle, target_waypoint)
            vehicle.apply_control(control)

            i += 1
    finally:
        # Destroy vehicle.
        print("Destroy Vehicle")
        # client.apply_batch([carla.command.DestroyActor(vehicle),
        #     carla.command.DestroyActor(lanefinder.sensor)])
        if vehicle is not None:
            vehicle.destroy()
        if lanefinder is not None and lanefinder.sensor is not None:
            lanefinder.sensor.destroy()
    # NOTE(review): orphan tail of a different script's display function —
    # curve_disp_1/2, right_curve_rad, car_off, font, final_result and image
    # are not defined above.  Syntactically these lines dedent into main(),
    # but they are unrelated to it (concatenation artifact).
    curve_disp_3 = 'Right Radius = ' + str(np.round(right_curve_rad, 2)) + 'm'
    off_disp_txt = 'Car off track by ' + str(np.round(car_off, 2)) + 'm'

    # Stack the four annotation lines top-to-bottom, 50 px apart, in white.
    cv2.putText(final_result, curve_disp_1, (30, 50), font, 1.5,
                (255, 255, 255), 2)
    cv2.putText(final_result, curve_disp_2, (30, 100), font, 1.5,
                (255, 255, 255), 2)
    cv2.putText(final_result, curve_disp_3, (30, 150), font, 1.5,
                (255, 255, 255), 2)
    cv2.putText(final_result, off_disp_txt, (30, 200), font, 1.5,
                (255, 255, 255), 2)

    return image


# NOTE(review): still-image demo driver — display_on_frame, final_result,
# left_curve_rad, right_curve_rad, car_off and plt are not defined in this
# fragment; presumably produced earlier in the original script.
final_result = display_on_frame(final_result,
                                left_curve_rad=left_curve_rad,
                                right_curve_rad=right_curve_rad,
                                car_off=car_off)
plt.imshow(final_result)
plt.show()
plt.imsave('output_images/final_result.png', final_result)

from moviepy.editor import VideoFileClip

# Run the lane finder over the project video and write the annotated copy.
frame_line_finder = LaneFinder()
vid_output = 'project_solved.mp4'
clip_source = VideoFileClip("project_video.mp4")
# fl_image maps Lane_find over every frame of the clip.
vid_clip = clip_source.fl_image(frame_line_finder.Lane_find)
vid_clip.write_videofile(vid_output, audio=False)