Example no. 1
        cv2.polylines(processed_src, [src], True, (0, 0, 255), 2)
        cv2.polylines(processed_dst, [dst], True, (0, 0, 255), 2)

        cv2.imwrite(out_file_prefix + '_persp_src.jpg', processed_src)
        cv2.imwrite(out_file_prefix + '_persp_dst.jpg', processed_dst)

        lanes_centroids, polyfit, curvature, deviation, fail_code = lane_tracker.detect_lanes(
            processed_img)
        processed_img = lane_tracker.draw_windows(processed_img,
                                                  lanes_centroids,
                                                  polyfit,
                                                  blend=True)

        cv2.imwrite(out_file_prefix + '_windows.jpg', processed_img)

        lanes_img = lane_tracker.draw_lanes(input_img, polyfit)
        lanes_img = img_processor.unwarp_image(lanes_img)

        lanes_img = cv2.addWeighted(undistorted_img, 1.0, lanes_img, 1.0, 0)
        font = cv2.FONT_HERSHEY_DUPLEX
        font_color = (0, 255, 0)
        cv2.putText(
            lanes_img,
            'Left Curvature: {:.1f}, Right Curvature: {:.1f}'.format(
                curvature[0], curvature[1]), (30, 60), font, 1, font_color, 2)
        cv2.putText(lanes_img, 'Center Offset: {:.2f} m'.format(deviation),
                    (30, 90), font, 1, font_color, 2)

        if fail_code > 0:
            failed_text = 'Detection Failed: {}'.format(
                LaneDetector.FAIL_CODES[fail_code])
            cv2.putText(lanes_img, failed_text, (30, 120), font, 1,
                        (0, 0, 255), 2)
class VideoProcessor:
    """
    Processes a video file and produces an annotated version with the detected lanes overlaid.
    """

    def __init__(self, calibration_data_file, smooth_frames = 5, debug = False, failed_frames_dir = 'failed_frames'):
        self.img_processor = ImageProcessor(calibration_data_file)
        self.lane_tracker = LaneDetector(smooth_frames = smooth_frames)
        self.frame_count = 0
        self.fail_count = 0
        self.debug = debug
        self.failed_frames_dir = failed_frames_dir
        self.last_frame = None
        self.processed_frames = None

    def process_frame(self, img):
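        # Process a single RGB frame from moviepy: convert to BGR, undistort,
        # threshold and warp it, fit the lane polynomials, overlay the lane
        # polygon and the curvature/offset text, then return the frame in RGB.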

        img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
       
        undistorted_img, thresholded_img, warped_img = self.img_processor.process_image(img)
        
        _, polyfit, curvature, deviation, fail_code = self.lane_tracker.detect_lanes(warped_img)
        
        fill_color = (0, 255, 0) if fail_code == 0 else (0, 255, 255)

        lane_img = self.lane_tracker.draw_lanes(undistorted_img, polyfit, fill_color = fill_color)
        lane_img = self.img_processor.unwarp_image(lane_img)

        out_image = cv2.addWeighted(undistorted_img, 1.0, lane_img, 1.0, 0)

        font = cv2.FONT_HERSHEY_DUPLEX
        font_color = (0, 255, 0)

        curvature_text = 'Left Curvature: {:.1f}, Right Curvature: {:.1f}'.format(curvature[0], curvature[1])
        offset_text = 'Center Offset: {:.2f} m'.format(deviation)

        cv2.putText(out_image, curvature_text, (30, 60), font, 1, font_color, 2)
        cv2.putText(out_image, offset_text, (30, 90), font, 1, font_color, 2)

        if fail_code > 0:
            self.fail_count += 1
            failed_text = 'Detection Failed: {}'.format(LaneDetector.FAIL_CODES[fail_code])
            cv2.putText(out_image, failed_text, (30, 120), font, 1, (0, 0, 255), 2)
            if self.debug:
                print(failed_text)
                cv2.imwrite(os.path.join(self.failed_frames_dir, 'frame' + str(self.frame_count) + '_failed_' + str(fail_code) + '.png'), img)
                cv2.imwrite(os.path.join(self.failed_frames_dir, 'frame' + str(self.frame_count) + '_failed_' + str(fail_code) + '_lanes.png'), out_image)
                if self.last_frame is not None: # Saves also the previous frame for comparison
                    cv2.imwrite(os.path.join(self.failed_frames_dir, 'frame' + str(self.frame_count) + '_failed_' + str(fail_code) + '_prev.png'), self.last_frame)

        self.frame_count += 1
        self.last_frame = img

        out_image = cv2.cvtColor(out_image, cv2.COLOR_BGR2RGB)

        return out_image

    def process_frame_split(self, img):
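        # Same pipeline as process_frame, but also builds every intermediate
        # stage (undistorted, color/gradient/combined thresholds, warp source
        # overlay, warped image, sliding windows, lane overlay, final frame)
        # and returns them as a tuple so each stage can be written to its own
        # debug video.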

        img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
       
        undistorted_img, thresholded_img, processed_img = self.img_processor.process_image(img)

        lanes_centroids, polyfit, curvature, deviation, fail_code = self.lane_tracker.detect_lanes(processed_img)
        
        fill_color = (0, 255, 0) if fail_code == 0 else (0, 255, 255)

        lane_img = self.lane_tracker.draw_lanes(undistorted_img, polyfit, fill_color = fill_color)
        lane_img = self.img_processor.unwarp_image(lane_img)

        out_image = cv2.addWeighted(undistorted_img, 1.0, lane_img, 1.0, 0)

        color_img = self.img_processor.color_thresh(undistorted_img) * 255
        gradient_img = self.img_processor.gradient_thresh(undistorted_img) * 255
      
        processed_src = np.array(cv2.merge((thresholded_img, thresholded_img, thresholded_img)), np.uint8)
        src, _ = self.img_processor._warp_coordinates(img)

        src = np.array(src, np.int32)
        
        cv2.polylines(processed_src, [src], True, (0,0,255), 2)

        window_img = np.copy(processed_img)

        window_img = self.lane_tracker.draw_windows(window_img, lanes_centroids, polyfit, blend = True)

        lanes_img = np.copy(out_image)

        font = cv2.FONT_HERSHEY_DUPLEX
        font_color = (0, 255, 0)

        curvature_text = 'Left Curvature: {:.1f}, Right Curvature: {:.1f}'.format(curvature[0], curvature[1])
        offset_text = 'Center Offset: {:.2f} m'.format(deviation)

        cv2.putText(out_image, curvature_text, (30, 60), font, 1, font_color, 2)
        cv2.putText(out_image, offset_text, (30, 90), font, 1, font_color, 2)

        if fail_code > 0:
            self.fail_count += 1
            failed_text = 'Detection Failed: {}'.format(LaneDetector.FAIL_CODES[fail_code])
            cv2.putText(out_image, failed_text, (30, 120), font, 1, (0, 0, 255), 2)
            if self.debug:
                print(failed_text)
                cv2.imwrite(os.path.join(self.failed_frames_dir, 'frame' + str(self.frame_count) + '_failed_' + str(fail_code) + '.png'), img)
                cv2.imwrite(os.path.join(self.failed_frames_dir, 'frame' + str(self.frame_count) + '_failed_' + str(fail_code) + '_lanes.png'), out_image)
                if self.last_frame is not None: # Saves also the previous frame for comparison
                    cv2.imwrite(os.path.join(self.failed_frames_dir, 'frame' + str(self.frame_count) + '_failed_' + str(fail_code) + '_prev.png'), self.last_frame)

        self.frame_count += 1

        undistorted_img = cv2.cvtColor(undistorted_img, cv2.COLOR_BGR2RGB)
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        gradient_img = cv2.cvtColor(gradient_img, cv2.COLOR_GRAY2RGB)
        thresholded_img = cv2.cvtColor(thresholded_img, cv2.COLOR_GRAY2RGB)
        processed_src = cv2.cvtColor(processed_src, cv2.COLOR_BGR2RGB)
        processed_img = cv2.cvtColor(processed_img, cv2.COLOR_GRAY2RGB)
        window_img = cv2.cvtColor(window_img, cv2.COLOR_BGR2RGB)
        lanes_img = cv2.cvtColor(lanes_img, cv2.COLOR_BGR2RGB)
        out_image = cv2.cvtColor(out_image, cv2.COLOR_BGR2RGB)

        return (undistorted_img, color_img, gradient_img, thresholded_img, processed_src, processed_img, window_img, lanes_img, out_image)

    def process_video(self, file_path, output, t_start = None, t_end = None):

        input_clip = VideoFileClip(file_path)
        
        if t_start is not None:
            input_clip = input_clip.subclip(t_start = t_start, t_end = t_end)
        
        output_clip = input_clip.fl_image(self.process_frame)
        output_clip.write_videofile(output, audio = False)

    def process_frame_stage(self, img, idx):
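        # Stage 0 runs the full split pipeline once per frame and caches the
        # tuple of stage images; for idx > 0 the cached results are replayed
        # so the detection is not recomputed for every output video.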
        
        if self.processed_frames is None:
            self.processed_frames = []

        if idx == 0:
            result = self.process_frame_split(img)
            self.processed_frames.append(result)
        else:
            self.frame_count += 1

        return self.processed_frames[self.frame_count - 1][idx]

    def process_video_split(self, file_path, output, t_start = None, t_end = None):
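        # Writes one video per pipeline stage: the first write_videofile call
        # runs the detection and fills the frame cache, the later calls only
        # replay the cached stage images (the lambda below captures idx by
        # reference, so each write renders a different stage).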

        input_clip = VideoFileClip(file_path)
        
        if t_start is not None:
            input_clip = input_clip.subclip(t_start = t_start, t_end = t_end)

        out_file_prefix = os.path.split(output)[1][:-4]
        
        idx = 0
        output_clip = input_clip.fl_image(lambda img: self.process_frame_stage(img, idx))
        output_clip.write_videofile(out_file_prefix + '_undistorted.mp4', audio = False)
        idx += 1
        self.frame_count = 0
        output_clip.write_videofile(out_file_prefix + '_color.mp4', audio = False)
        idx += 1
        self.frame_count = 0
        output_clip.write_videofile(out_file_prefix + '_gradient.mp4', audio = False)
        idx += 1
        self.frame_count = 0
        output_clip.write_videofile(out_file_prefix + '_thresholded.mp4', audio = False)
        idx += 1
        self.frame_count = 0
        output_clip.write_videofile(out_file_prefix + '_processed_src.mp4', audio = False)
        idx += 1
        self.frame_count = 0
        output_clip.write_videofile(out_file_prefix + '_processed_dst.mp4', audio = False)
        idx += 1
        self.frame_count = 0
        output_clip.write_videofile(out_file_prefix + '_window.mp4', audio = False)
        idx += 1
        self.frame_count = 0
        output_clip.write_videofile(out_file_prefix + '_lanes.mp4', audio = False)
        idx += 1
        self.frame_count = 0
        output_clip.write_videofile(out_file_prefix + '_final.mp4', audio = False)

        print('Number of failed detections: {}'.format(self.fail_count))
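

# A minimal usage sketch (file names are hypothetical, not taken from the
# example): instantiate the processor with a camera calibration file and
# annotate a clip end to end.
if __name__ == '__main__':
    processor = VideoProcessor('calibration.p', smooth_frames=5, debug=False)
    processor.process_video('project_video.mp4', 'project_video_out.mp4')
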
Example no. 3
class VideoProcessor:
    def __init__(self,
                 model_file,
                 calibration_file,
                 min_confidence,
                 heat_threshold,
                 smooth_frames,
                 detect_lanes=False,
                 debug=False):
        self.vehicle_detector = VehicleDetector(model_file, min_confidence,
                                                heat_threshold, smooth_frames)
        self.lane_detector = LaneDetector(smooth_frames=5)
        self.image_processor = ImageProcessor(calibration_file)
        self.detect_lanes = detect_lanes
        self.debug = debug
        self.frame_count = 0
        self.processed_frames = None

    def process_video(self,
                      video_file,
                      file_out,
                      t_start=None,
                      t_end=None,
                      process_pool=None):

        input_clip = VideoFileClip(video_file)

        if t_start is not None:
            input_clip = input_clip.subclip(t_start=t_start, t_end=t_end)

        if self.debug:
            self.processed_frames = []

            stage_idx = 0

            output_clip = input_clip.fl_image(
                lambda frame: self.process_frame_stage(frame, stage_idx,
                                                       process_pool))
            output_clip.write_videofile(file_out, audio=False)

            if len(self.processed_frames) > 0:
                out_file_path = os.path.split(file_out)
                out_file_name = out_file_path[1].split('.')
                for _ in range(len(self.processed_frames[0]) - 1):
                    self.frame_count = 0
                    stage_idx += 1
                    stage_file = '{}.{}'.format(
                        os.path.join(out_file_path[0], out_file_name[0]) +
                        '_' + str(stage_idx), out_file_name[1])
                    output_clip.write_videofile(stage_file, audio=False)
        else:
            output_clip = input_clip.fl_image(
                lambda frame: self.process_frame(frame, process_pool))
            output_clip.write_videofile(file_out, audio=False)

    def process_frame(self, frame, process_pool):
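        # Process a single RGB frame: run vehicle detection (and lane detection
        # when enabled), annotate the result and return it in RGB. In debug
        # mode a list with the heatmaps and per-scale window images is returned
        # instead of a single frame.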

        img = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)

        if self.detect_lanes:
            # Uses the undistorted image for vehicle detection
            img, _, warped_img = self.image_processor.process_image(img)

        bboxes, heatmap, windows = self.vehicle_detector.detect_vehicles(
            img, process_pool=process_pool)

        frame_out = np.copy(img) if self.debug else img
        # Labelled image
        frame_out = draw_bboxes(frame_out,
                                bboxes, (250, 150, 55),
                                1,
                                fill=True)

        frame_out_text = 'Frame Smoothing: {}, Min Confidence: {}, Threshold: {}'.format(
            self.vehicle_detector.smooth_frames,
            self.vehicle_detector.min_confidence,
            self.vehicle_detector.heat_threshold)

        self.write_text(frame_out, frame_out_text)
        self.write_text(frame_out,
                        'Detected Cars: {}'.format(len(bboxes)),
                        pos=(30, frame_out.shape[0] - 30),
                        font_color=(0, 250, 150))

        if self.detect_lanes:
            _, polyfit, curvature, deviation, fail_code = self.lane_detector.detect_lanes(
                warped_img)

            fill_color = (0, 255, 0) if fail_code == 0 else (0, 255, 255)

            lane_img = self.lane_detector.draw_lanes(frame_out,
                                                     polyfit,
                                                     fill_color=fill_color)
            lane_img = self.image_processor.unwarp_image(lane_img)

            frame_out = cv2.addWeighted(frame_out, 1.0, lane_img, 1.0, 0)

            curvature_text = 'Left Curvature: {:.1f}, Right Curvature: {:.1f}'.format(
                curvature[0], curvature[1])
            offset_text = 'Center Offset: {:.2f} m'.format(deviation)

            self.write_text(frame_out, curvature_text, pos=(30, 60))
            self.write_text(frame_out, offset_text, pos=(30, 90))

        frame_out = cv2.cvtColor(frame_out, cv2.COLOR_BGR2RGB)

        if self.debug:

            result = []

            self.write_frame_count(frame_out)
            result.append(frame_out)

            # Unthresholded heatmap image
            heatmap_o = self.vehicle_detector._heatmap(img, windows, 0)
            heatmap_o = self.normalize_heatmap(heatmap_o)
            heatmap_o = np.dstack((heatmap_o, np.zeros_like(heatmap_o),
                                   np.zeros_like(heatmap_o)))
            self.write_frame_count(heatmap_o)

            result.append(heatmap_o)

            # Heatmap image
            heatmap = self.normalize_heatmap(heatmap)
            heatmap = np.dstack(
                (np.zeros_like(heatmap), np.zeros_like(heatmap), heatmap))
            self.write_frame_count(heatmap)

            result.append(cv2.cvtColor(heatmap, cv2.COLOR_BGR2RGB))

            heatmap_img = cv2.addWeighted(img, 1, heatmap, 0.8, 0)

            result.append(cv2.cvtColor(heatmap_img, cv2.COLOR_BGR2RGB))

            all_windows = []

            # Windows search image
            for scale, cells_per_step, layer_windows in windows:

                all_windows.extend(layer_windows)

                layer_img = draw_windows(
                    np.copy(img),
                    layer_windows,
                    min_confidence=self.vehicle_detector.min_confidence)

                w_tot = len(layer_windows)
                w_pos = len([
                    bbox for bbox in layer_windows
                    if bbox[1] >= self.vehicle_detector.min_confidence
                ])
                w_rej = len([
                    bbox for bbox in layer_windows
                    if 0 < bbox[1] < self.vehicle_detector.min_confidence
                ])

                self.write_text(
                    layer_img,
                    'Scale: {}, Cells per Step: {}, Min Confidence: {}'.
                    format(scale, cells_per_step,
                           self.vehicle_detector.min_confidence))
                layer_text = 'Windows (Total/Positive/Rejected): {}/{}/{}'.format(
                    w_tot, w_pos, w_rej)
                self.write_text(layer_img,
                                layer_text,
                                pos=(30, layer_img.shape[0] - 30))
                self.write_frame_count(layer_img)

                result.append(cv2.cvtColor(layer_img, cv2.COLOR_BGR2RGB))

            # Combined scales image
            box_img = draw_windows(
                np.copy(img),
                all_windows,
                min_confidence=self.vehicle_detector.min_confidence)

            w_tot = len(all_windows)
            w_pos = len([
                bbox for bbox in all_windows
                if bbox[1] >= self.vehicle_detector.min_confidence
            ])
            w_rej = len([
                bbox for bbox in all_windows
                if 0 < bbox[1] < self.vehicle_detector.min_confidence
            ])

            self.write_text(
                box_img, 'Min Confidence: {}'.format(
                    self.vehicle_detector.min_confidence))
            box_text = 'Windows (Total/Positive/Rejected): {}/{}/{}'.format(
                w_tot, w_pos, w_rej)
            self.write_text(box_img,
                            box_text,
                            pos=(30, box_img.shape[0] - 30))
            self.write_frame_count(box_img)

            result.append(cv2.cvtColor(box_img, cv2.COLOR_BGR2RGB))

        else:
            result = frame_out

        return result

    def process_frame_stage(self, frame, stage_idx, process_pool):
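        # Stage 0 runs the full debug pipeline and caches the list of stage
        # images; later stages replay the cache to produce the extra videos.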

        if stage_idx == 0:
            result = self.process_frame(frame, process_pool)
            self.processed_frames.append(result)

        frame_out = self.processed_frames[self.frame_count][stage_idx]

        self.frame_count += 1

        return frame_out

    def normalize_heatmap(self, heatmap, a=0, b=255):
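        # Min-max scale the heatmap into the [a, b] range and cast to uint8.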

        min_v = np.min(heatmap)
        max_v = np.max(heatmap)

        if max_v == min_v:
            # Uniform (e.g. empty) heatmap: avoid division by zero
            return np.full_like(heatmap, a, dtype=np.uint8)

        heatmap = a + ((heatmap - min_v) * (b - a)) / (max_v - min_v)

        return heatmap.astype(np.uint8)

    def write_frame_count(self, img):
        self.write_text(img,
                        '{}'.format(self.frame_count),
                        pos=(img.shape[1] - 75, 30))

    def write_text(self,
                   img,
                   text,
                   pos=(30, 30),
                   font=cv2.FONT_HERSHEY_DUPLEX,
                   font_color=(255, 255, 255),
                   font_size=0.8):
        cv2.putText(img, text, pos, font, font_size, font_color, 1,
                    cv2.LINE_AA)
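

# A minimal usage sketch (file names and threshold values are hypothetical,
# not taken from the example): detects vehicles and, optionally, lanes on a
# clip and writes the annotated video.
if __name__ == '__main__':
    processor = VideoProcessor('model.p',
                               'calibration.p',
                               min_confidence=0.5,
                               heat_threshold=2,
                               smooth_frames=5,
                               detect_lanes=True)
    processor.process_video('project_video.mp4', 'project_video_out.mp4')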