class PerspectiveTransformFinder(object):
    """docstring for PerspectiveTransformFinder"""
    def __init__(self, debug_dir=_warping_debug_dir):
        self.title = 'Click to select 4 source points for warping.'
        self.title += ' Press r to reset'
        self.debug_dir = debug_dir
        self.perspective_transform_file = \
            join(debug_dir, 'perspective_transform.p')
        self.warper = ImageWarper()
        self.calibrator = CameraCalibrator()

    def initialize_image(self, image):
        cv2.namedWindow(self.title)
        self.clicker = OpencvClicker(self.title)
        self.calibrator.calibrate(CALIBRATION_DIR)
        self.image = self.calibrator.undistort(image)
        self.clone = None
        self.preview = None

        h, w, c = image.shape
        margin = w / 4.0
        self.keypoints = []
        self.dest_keypoints = [(margin, 0), (w - margin, 0), (margin, h),
                               (w - margin, h)]
        self.M = None

    def load(self):
        self.M = \
            pickle.load(open(self.perspective_transform_file, 'rb'))
        self.warper.reset(M=self.M)
        print('[i] Loading perspective transform from',
              self.perspective_transform_file)

    def save(self):
        create_dirs([self.debug_dir])
        self.warper.reset(M=self.M)
        pickle.dump(self.M, open(self.perspective_transform_file, 'wb'))
        print('[i] Successfully saved perspective transform params to',
              self.perspective_transform_file)

    def find(self, image):
        try:
            self.load()
            return self.warper
        except (OSError, pickle.UnpicklingError):
            # No saved transform (or it is unreadable): fall back to the
            # interactive point selection below.
            pass

        self.initialize_image(image)
        self.run()
        self.save()

        return self.warper

    def warp_image(self):
        self.warper.find_perspective_transform(self.keypoints,
                                               self.dest_keypoints)
        self.preview = self.warper.warp(self.image)
        self.M = self.warper.M

    def update_images(self):
        self.clone = self.image.copy()
        for p in self.keypoints:
            cv2.circle(self.clone, p, 1, (0, 0, 255), thickness=2)

        if len(self.keypoints) == 4:
            self.warp_image()
        else:
            self.preview = np.zeros_like(self.image)

        self.img = np.hstack([self.clone, self.preview])

    def show(self):
        self.update_images()
        cv2.imshow(self.title, self.img)
        key = chr(cv2.waitKey(100) & 0xFF)
        return key

    def run(self):
        key = None
        while True:
            key = self.show()

            if self.clicker.has_update():
                print('User clicked')
                self.keypoints.append(self.clicker.retrieve())

            if key == 'q':
                raise Exception('User quit perspective transform finder.')
            elif key == 's':
                # Accept the selection once all four source points are placed.
                if len(self.keypoints) == 4:
                    return
            elif key == 'r':
                # Reset the selection.
                self.keypoints = []
            elif key == 'b':
                # Undo the last point.
                if self.keypoints:
                    self.keypoints.pop()
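
# A minimal standalone sketch (illustration only, not part of the class above):
# PerspectiveTransformFinder collects four source points and hands them to
# ImageWarper, which presumably wraps an OpenCV perspective warp of this kind.
# The source points and image size below are made up; dst mirrors the
# dest_keypoints layout used in initialize_image (margin = w / 4).
import cv2
import numpy as np

h, w = 720, 1280
src = np.float32([[595, 450], [685, 450], [280, 680], [1000, 680]])
dst = np.float32([[w / 4, 0], [3 * w / 4, 0], [w / 4, h], [3 * w / 4, h]])
M = cv2.getPerspectiveTransform(src, dst)
frame = np.zeros((h, w, 3), dtype=np.uint8)   # stand-in for an undistorted frame
top_down = cv2.warpPerspective(frame, M, (w, h))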
        # Fit the right lane with x converted to metres, then evaluate the
        # radius of curvature R = (1 + (2*A*y + B)**2)**1.5 / |2*A| for each fit.
        mfit_r = np.polyfit(ym_points, xfit_r_points * self.xm_per_pix, 2)
        # Calculate the new radii of curvature
        curverad_l = ((1 + (2 * mfit_l[0] * np.max(ym_points) + mfit_l[1]) ** 2) ** 1.5
                      / np.abs(2 * mfit_l[0]))
        curverad_r = ((1 + (2 * mfit_r[0] * np.max(ym_points) + mfit_r[1]) ** 2) ** 1.5
                      / np.abs(2 * mfit_r[0]))
        return (curverad_l, curverad_r), cendev
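
# A standalone sketch (illustration only): the curverad_* lines above use the
# standard radius-of-curvature formula for a quadratic fit x = A*y**2 + B*y + C,
# R(y) = (1 + (2*A*y + B)**2)**1.5 / |2*A|. Sampling points from a circle of
# known radius and re-fitting them should recover roughly that radius
# (the 500 m value is arbitrary).
import numpy as np

true_radius = 500.0
theta = np.linspace(-0.2, 0.2, 50)
y = true_radius * np.sin(theta)
x = true_radius * (1 - np.cos(theta))            # circular arc opening in +x
A, B, C = np.polyfit(y, x, 2)
R = (1 + (2 * A * np.max(y) + B) ** 2) ** 1.5 / np.abs(2 * A)
print('recovered radius of curvature ~', R)      # close to 500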

if __name__ == '__main__':
    import matplotlib.pyplot as plt
    from calibrate import CameraCalibrator
    from linefilter import LaneLineFilter
    from warp import WarpPerspective

    img = plt.imread('../test_images/test5.jpg')

    # Undistortion
    cc = CameraCalibrator()
    cc.load_matrix(fname='../camera_mtx_dist.pk')
    undist_img = cc.undistort(img)

    # Binary filter
    llf = LaneLineFilter()
    binary = llf.apply(undist_img)

    # Perspective Warp
    wp = WarpPerspective()
    binary_warped = wp.warp(binary)

    plt.figure(figsize=(18, 7.5), dpi=100)
    plt.subplot(2, 3, 1)
    plt.imshow(undist_img)
    plt.title('Undistorted Image')
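
# A standalone sketch (illustration only): LaneLineFilter above (and
# ImageThresholder in the LaneDetector example below) produce the binary lane
# mask that gets warped. A typical filter of this kind combines a Sobel-x
# gradient threshold with an HLS S-channel threshold; the thresholds here are
# illustrative, not taken from the original code.
import cv2
import numpy as np

def simple_lane_binary(rgb, sx_thresh=(20, 100), s_thresh=(170, 255)):
    gray = cv2.cvtColor(rgb, cv2.COLOR_RGB2GRAY)
    sobelx = np.abs(cv2.Sobel(gray, cv2.CV_64F, 1, 0))
    scaled = np.uint8(255 * sobelx / max(sobelx.max(), 1e-6))
    s_channel = cv2.cvtColor(rgb, cv2.COLOR_RGB2HLS)[:, :, 2]
    binary = np.zeros_like(gray, dtype=np.uint8)
    binary[((scaled >= sx_thresh[0]) & (scaled <= sx_thresh[1])) |
           ((s_channel >= s_thresh[0]) & (s_channel <= s_thresh[1]))] = 1
    return binary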
class LaneDetector(object):
    """docstring for LaneDetector"""
    def __init__(self, debug_dir=_lane_detection_debug_dir):
        self.debug_dir = debug_dir
        self.calibrator = CameraCalibrator()
        self.calibrator.calibrate(CALIBRATION_DIR)
        self.thresholder = ImageThresholder()
        self.lane_line_finder = LaneLineFinder()
        self.output_frames = []
        self.find_warping_params()
        create_dirs([self.debug_dir])

    def find_warping_params(self):
        straight_lane_img = cv2.imread(args.straight_lane_img_file)
        finder = PerspectiveTransformFinder()
        self.warper = finder.find(straight_lane_img)

    def process_frame(self, frame_i, rgb_frame):
        frame = cv2.cvtColor(rgb_frame, cv2.COLOR_RGB2BGR)
        frame = self.calibrator.undistort(frame)
        binary_image = self.thresholder.process(frame)
        binary_gray = cv2.cvtColor(binary_image * 255, cv2.COLOR_GRAY2BGR)
        gray_warped = self.warper.warp(binary_gray)
        binary_warped = self.warper.warp(binary_image)
        try:
            self.lanes_img = self.lane_line_finder.find(
                binary_warped, self.warper.M, rgb_frame)
        except Exception:
            # Save the offending frame so the failure can be reproduced offline.
            save_path = join(self.debug_dir, 'frame_{}.jpg'.format(frame_i))
            cv2.imwrite(save_path, frame)
            logger.error('Lane finding failed on frame {}.'.format(frame_i))
            return

        # canvas = np.vstack([
        #     np.hstack([frame, self.lanes_img]),
        #     np.hstack([gray_warped, self.lanes_img]),
        # ])
        canvas = self.lanes_img
        cv2.imshow(basename(self.video_path), canvas)
        self.output_frames.append(canvas[..., ::-1])

        key = cv2.waitKey(1 - self.paused)
        button = chr(key & 0xff)
        if button == 'q':
            exit(0)
        elif button == 'p':
            self.paused = 1 - self.paused
        elif button == 's':
            save_path = join(self.debug_dir, 'frame_{}.jpg'.format(frame_i))
            cv2.imwrite(save_path, frame)
            logger.info('[i] Successfully saved frame to {}'.format(save_path))

    def process_video(self, video_path):
        self.paused = 0
        self.video_path = video_path
        logger.info(video_path)
        clip = VideoFileClip(video_path)
        for i, rgb_frame in enumerate(clip.iter_frames()):
            self.process_frame(i, rgb_frame)

    def detect(self, video_path):
        try:
            self.output_frames = np.load('output_frames.npy')
            self.output_frames = [
                self.output_frames[i]
                for i in range(self.output_frames.shape[0])
            ]
        except (OSError, ValueError):
            # No cached frames on disk: process the video and cache the result.
            self.process_video(video_path)
            np.save('output_frames.npy', self.output_frames)
        video = ImageSequenceClip(self.output_frames, fps=20)
        video.write_videofile(join(OUTPUT_DIR, 'output.avi'),
                              fps=20,
                              codec='png'
                              # bitrate='64k',
                              )
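
# A standalone sketch (illustration only): LaneDetector.detect() caches the
# processed frames by saving the whole list to a single .npy file and unpacking
# it back into a list on the next run. This only works when every frame has the
# same shape; the file name below is illustrative.
import numpy as np

frames = [np.zeros((720, 1280, 3), dtype=np.uint8) for _ in range(3)]
np.save('cached_frames.npy', frames)             # stored as one (3, 720, 1280, 3) array
loaded = np.load('cached_frames.npy')
frames_again = [loaded[i] for i in range(loaded.shape[0])]
assert all((a == b).all() for a, b in zip(frames, frames_again))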
Example #6
            self.good_fit = False
        # Track the recent good-fit history; if 30% or fewer of the recent
        # frames produced a good fit, stop searching within a margin of the
        # previous fit.
        self.n_good_fit.append(self.good_fit)
        if np.mean(self.n_good_fit) <= 0.3:
            self.use_fit_margin = False

        # Smooth the lane-line polynomials by averaging the recent fits.
        self.ave_fit_l = np.mean(np.array(self.n_fits_l), axis=0)
        self.ave_fit_r = np.mean(np.array(self.n_fits_r), axis=0)
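
# A standalone sketch (illustration only): the n_good_fit / n_fits_l / n_fits_r
# containers above appear to be bounded histories of recent results; recent fits
# are averaged, and the margin search is abandoned when the recent good-fit rate
# drops to 30% or less. A deque-based version of the same idea (names and
# numbers here are illustrative):
import numpy as np
from collections import deque

n_fits = deque(maxlen=5)                 # last few quadratic fits [A, B, C]
n_good_fit = deque(maxlen=5)             # recent good/bad fit flags

for fit, good in [([3e-4, -0.2, 350], True),
                  ([2e-4, -0.1, 352], True),
                  ([9e-4, -0.9, 500], False)]:   # e.g. an implausible fit
    if good:
        n_fits.append(fit)
    n_good_fit.append(good)

ave_fit = np.mean(np.array(n_fits), axis=0)      # smoothed coefficients
use_fit_margin = np.mean(n_good_fit) > 0.3       # otherwise fall back to a full search
print(ave_fit, use_fit_margin)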


if __name__ == '__main__':
    from calibrate import CameraCalibrator
    from linefilter import LaneLineFilter
    from warp import WarpPerspective
    from fittool import LaneLineFitTool

    # Build the pipeline components: undistortion, binary lane filter,
    # perspective warp, and polynomial fit tool.
    cc = CameraCalibrator()
    cc.load_matrix('../camera_mtx_dist.pk')
    undistort = cc.undistort
    lf = LaneLineFilter()
    wp = WarpPerspective()
    ft = LaneLineFitTool()
    linefinder = LaneLineFinder4Video(undist=undistort,
                                      linefilter=lf,
                                      warper=wp,
                                      fittool=ft)

    video_out = '../project_video_out.mp4'
    clip = VideoFileClip('../project_video.mp4')
    white_clip = clip.fl_image(linefinder.apply)
    white_clip.write_videofile(video_out, audio=False)