def simulate(env):
    filename = os.path.join(os.path.dirname(os.path.abspath(__file__)), "data", "data.csv")
    log_file = open(filename, 'w', encoding='utf-8', newline='')
    log_writer = csv.writer(log_file)
    log_writer.writerow(["t", "angle", "steer"])
    detector = LaneDetector()
    for episode in range(NUM_EPISODES):
        obv = env.reset()  # TODO: reset delay
        obv = cv2.cvtColor(obv, cv2.COLOR_RGB2BGR)
        steer = 0
        base_time = time.time()

        for t in range(MAX_TIME_STEPS):
            is_okay, angle_error = detector.detect_lane(obv)
            angle_error = -angle_error
            if not detector.left:
                print(str(time.time() - base_time) + " no left")
            elif not detector.right:
                print(str(time.time() - base_time) + " no right")

            steer = steer_controller(angle_error)
            reduction = speed_controller(steer)
            speed = base_speed - np.abs(reduction)
            action = (steer, speed)
            obv, reward, done, _ = env.step(action)
            
            log_writer.writerow([time.time() - base_time, -angle_error, steer])

            obv = cv2.cvtColor(obv, cv2.COLOR_RGB2BGR)
            cv2.imshow('input', detector.original_image_array)
            if done:
                break
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
    log_file.close()
    cv2.destroyAllWindows()
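simulate() relies on several module-level names (NUM_EPISODES, MAX_TIME_STEPS, base_speed, steer_controller, speed_controller) that are not shown. A minimal sketch of what they might look like, assuming simple proportional control and numpy imported as np; every constant and gain here is illustrative, not from the original source:

# Hypothetical constants and controllers assumed by simulate(); the values
# are illustrative, not from the original source.
NUM_EPISODES = 5
MAX_TIME_STEPS = 1000
base_speed = 0.4

def steer_controller(angle_error):
    # Steer proportionally to the lane angle error, clipped to [-1, 1].
    return float(np.clip(0.5 * angle_error, -1.0, 1.0))

def speed_controller(steer):
    # Slow down proportionally to how hard we are steering.
    return 0.3 * steer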
class ControllerProcess(multiprocessing.Process):
    def __init__(self, log_q, queue, event_handler, sim_event_handler):
        super().__init__()
        self.log = create_logger(log_q, controller=True)
        self.queue = queue
        self.eventH = event_handler
        self.eventH_sim = sim_event_handler
        self.log.info("Initialized the controller")

        self.controller = LaneDetector(self.log)

    def run(self):
        old_controls = None
        self.log.info("Starting the controller process...")
        initial_controls = self.controller.init()
        self.queue.put(initial_controls)
        self.log.debug("Put initial controls to the queue")
        while True:
            self.log.debug("Setting the simulation event. Requesting sensor data and/or sending data.")
            self.eventH_sim.set()
            self.log.debug("Waiting for an event from the simulator")
            while not self.eventH.is_set():
                self.eventH.wait()
            self.log.debug("Received an event from the simulator")
            self.eventH.clear()
            sensors = self.queue.get()
            controls = self.controller.execute(sensors, old_controls)
            old_controls = controls
            self.queue.put(controls)
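run() above implements a strict ping-pong over one shared queue and two events: publish controls, signal the simulator, wait, then read sensors. A sketch of the simulator-side counterpart under that reading; the class and its sensor payload are hypothetical, not part of the original source:

class SimulatorProcess(multiprocessing.Process):
    # Hypothetical peer illustrating the handshake; not from the original source.
    def __init__(self, queue, event_handler, sim_event_handler):
        super().__init__()
        self.queue = queue
        self.eventH = event_handler          # set by us, awaited by the controller
        self.eventH_sim = sim_event_handler  # set by the controller, awaited by us

    def run(self):
        while True:
            self.eventH_sim.wait()       # controller asked for sensor data
            self.eventH_sim.clear()
            controls = self.queue.get()  # consume the controls it just published
            sensors = {"frame": None}    # placeholder sensor payload
            self.queue.put(sensors)
            self.eventH.set()            # hand the turn back to the controller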
Example 5
def sliding_window_conv_detect_test(test_img_path):
    cam_intrinsic, cam_distortion = load_cached_camera_parameters(
        "./camera_params.p")
    img = mpimg.imread(test_img_path)
    warped_binary = rgb_to_warped_binary(img, cam_intrinsic, cam_distortion)
    result = LaneDetector.sliding_window_conv_detect(
        warped_binary,
        sliding_conv_config["width"],
        sliding_conv_config["height"],
        sliding_conv_config["margin"],
        return_debug_img=True)

    left_fit = result.l_fit
    right_fit = result.r_fit
    # Generate x and y values for plotting
    ploty = np.linspace(0, warped_binary.shape[0] - 1, warped_binary.shape[0])
    left_fitx = left_fit[0] * ploty**2 + left_fit[1] * ploty + left_fit[2]
    right_fitx = right_fit[0] * ploty**2 + right_fit[1] * ploty + right_fit[2]

    f, (axes1, axes2) = plt.subplots(1, 2, figsize=(20, 10))
    f.suptitle(test_img_path.split("/")[-1], fontsize=30)
    axes1.imshow(img)
    axes1.set_title('Original', fontsize=15)

    axes2.imshow(result.debug_image, cmap="gray")
    axes2.set_title('Detected', fontsize=15)
    axes2.plot(left_fitx, ploty, color='yellow')
    axes2.plot(right_fitx, ploty, color='yellow')
    axes2.set_xlim(0, img.shape[1])
    axes2.set_ylim(img.shape[0], 0)
    plt.show()
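The test above assumes module-level helpers (load_cached_camera_parameters, rgb_to_warped_binary) and a sliding_conv_config dict. A plausible shape for that config, where the keys match the call above but the numeric values are assumptions rather than project settings:

# Illustrative convolution-search configuration; values are assumptions.
sliding_conv_config = {
    "width": 50,    # convolution window width in pixels
    "height": 80,   # height of each vertical search slice
    "margin": 100,  # horizontal search margin around the previous centroid
}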
Example 7
 def __init__(self,
              model_file,
              calibration_file,
              min_confidence,
              heat_threshold,
              smooth_frames,
              detect_lanes=False,
              debug=False):
     self.vehicle_detector = VehicleDetector(model_file, min_confidence,
                                             heat_threshold, smooth_frames)
     self.lane_detector = LaneDetector(smooth_frames=5)
     self.image_processor = ImageProcessor(calibration_file)
     self.detect_lanes = detect_lanes
     self.debug = debug
     self.frame_count = 0
     self.processed_frames = None
Example 8
 def __init__(self, resolution=(640, 480), framerate=30, nb_frames=5):
     super(Camera, self).__init__()
     self._lock = threading.Lock()
     self._camera = PiCamera(framerate=framerate, resolution=resolution)
     self._stream = PiRGBArray(self._camera)
     self._lane_detector = LaneDetector()
     self._frames = deque(maxlen=nb_frames)
     self._lanes = deque(maxlen=nb_frames)
     self.start()
def pipe_test(test_img_path):
    detector = LaneDetector()
    img = mpimg.imread(test_img_path)
    result_img = detector.pipe_line(img)

    # combined = cv2.addWeighted(img, 1, result_img, 0.4, 0)

    f, (axes1, axes2) = plt.subplots(1, 2, figsize=(20, 10))
    f.suptitle(test_img_path.split("/")[-1], fontsize=30)
    axes1.imshow(img)
    axes1.set_xlim(0, img.shape[1])
    axes1.set_ylim(img.shape[0], 0)
    axes1.set_title('original frame', fontsize=15)

    axes2.imshow(result_img, cmap="gray")
    axes2.set_title('result frame with detected road', fontsize=15)
    axes2.set_xlim(0, img.shape[1])
    axes2.set_ylim(img.shape[0], 0)
    plt.show()
Example 11
def show_test_images_pipeline(camera, test_images_dir):
    """
    Shows detection pipeline results step by step.

    :param Camera camera         : Camera object.
    :param str    test_images_dir: Path to directory containing test images.
    """
    paths_to_images = glob.glob(test_images_dir + "*.jpg")
    for image_path in paths_to_images:
        image = cv2.imread(image_path)
        lane_detector = LaneDetector(camera)
        undistorted_image = lane_detector.correct_distortions(image)
        thresholded_image = lane_detector.threshold(undistorted_image)
        birdseye_image = lane_detector.transform_perspective(thresholded_image)
        lane_detector.update_lane_lines_positions(birdseye_image)
        lines_detected = lane_detector.draw_lanes_on_image(undistorted_image, birdseye_image)
        plt.imshow(cv2.cvtColor(lines_detected, cv2.COLOR_BGR2RGB))
        plt.show()
Example 12
class DrivingAssistant:
    # Constructor
    def __init__(self,
                 classifier_codename,
                 dataset_codename,
                 classifier_threshold,
                 object_detection=True,
                 object_visualization=True,
                 lane_detection=True,
                 lane_visualization=True,
                 diagnostic_mode=True,
                 monitor_id=1,
                 window_top_offset=0,
                 window_left_offset=0,
                 window_width=None,
                 window_height=None,
                 window_scale=1.0):

        # Boolean flag for feature-customization
        self.object_detection = object_detection
        self.object_visualization = object_visualization
        self.diagnostic_mode = diagnostic_mode

        self.lane_detection = lane_detection
        self.lane_visualization = lane_visualization

        # Instance of the MSS-API for capturing screenshots
        self.window_manager = mss.mss()

        # Note:
        #   - monitor_id = 0 | grab all monitors together
        #   - monitor_id = n | grab a given monitor (n) : where n > 0
        self.target_window = self.window_manager.monitors[monitor_id]

        # Update position of the window that will be captured
        if window_left_offset:
            self.target_window['left'] += window_left_offset
            self.target_window['width'] -= window_left_offset
        if window_top_offset:
            self.target_window['top'] += window_top_offset
            self.target_window['height'] -= window_top_offset
        if window_width:
            self.target_window['width'] = window_width
        if window_height:
            self.target_window['height'] = window_height
        if window_scale:
            self.target_window['scale'] = window_scale

        print("Activating DeepEye Advanced Co-pilot Mode")

        self.object_detector = ObjectClassifier(
            classifier_codename=classifier_codename,
            dataset_codename=dataset_codename,
            classifier_threshold=classifier_threshold,
            visualization=object_visualization,
            diagnostic_mode=diagnostic_mode,
            frame_height=self.target_window['height'],
            frame_width=self.target_window['width'])

        self.lane_detector = LaneDetector(visualization=lane_visualization)

        self.threats = {
            "COLLISION": False,
            "PEDESTRIAN": False,
            "STOP_SIGN": False,
            "TRAFFIC_LIGHT": False,
            "VEHICLES": False,
            "BIKES": False,
            "FAR_LEFT": False,
            "FAR_RIGHT": False,
            "RIGHT": False,
            "LEFT": False,
            "CENTER": False,
            "UNKNOWN": True
        }

        self.frame_id = 0

        self.columns = [
            'FRAME_ID', 'PEDESTRIAN', 'VEHICLES', 'BIKES', 'STOP_SIGN',
            'TRAFFIC_LIGHT', 'OFF_LANE', 'COLLISION'
        ]

        self.data_frame = pd.DataFrame(columns=self.columns)

    def run(self):
        """
        Capture a frame, run the object and lane detectors, and visualize the output.
        """
        # Get raw pixels from the screen, save it to a Numpy array
        original_frame = np.asarray(
            self.window_manager.grab(self.target_window))

        # set frame's height & width
        frame_height, frame_width = original_frame.shape[:2]

        # convert pixels from BGRA to BGR values
        original_frame = cv2.cvtColor(original_frame, cv2.COLOR_BGRA2BGR)

        if self.diagnostic_mode:
            pre = original_frame.copy()
            # draw a box around the area scanned for PEDESTRIAN/VEHICLES detection
            visualization_utils.draw_bounding_box_on_image_array(
                pre,
                self.object_detector.roi["t"] / frame_height,
                self.object_detector.roi["l"] / frame_width,
                self.object_detector.roi["b"] / frame_height,
                self.object_detector.roi["r"] / frame_width,
                color=(255, 255, 0),  # BGR VALUE 
                display_str_list=(' ROI ', ))

            # draw a box around the area scanned for collision warnings
            visualization_utils.draw_bounding_box_on_image_array(
                pre,
                self.object_detector.roi["ct"] / frame_height,
                self.object_detector.roi["cl"] / frame_width,
                self.object_detector.roi["b"] / frame_height,
                self.object_detector.roi["cr"] / frame_width,
                color=(255, 0, 255),  # BGR VALUE 
                display_str_list=(' COLLISION ROI ', ))

            # save a screenshot of the current frame before it gets processed
            if self.frame_id % 10 == 0:
                cv2.imwrite("test/pre/" + str(self.frame_id // 10) + ".jpg",
                            pre)

        # only detect objects in the given frame
        if self.object_detection and not self.lane_detection:
            (frame, self.threats) = self.object_detector.scan_road(
                original_frame, self.threats)

            if self.object_visualization:
                # Display frame with detected objects.
                cv2.imshow('DeepEye Dashboard', cv2.resize(frame, (640, 480)))

        # only detect lane in the given frame
        elif self.lane_detection and not self.object_detection:
            (frame, self.threats) = self.lane_detector.detect_lane(
                original_frame, self.threats)

            if self.lane_visualization:
                # Display frame with detected lane.
                cv2.imshow('DeepEye Dashboard', cv2.resize(frame, (640, 480)))

        # detect both objects and lane
        elif self.object_detection and self.lane_detection:

            # Visualize object detection ONLY
            if self.object_visualization and not self.lane_visualization:
                (frame, self.threats) = self.object_detector.scan_road(
                    original_frame, self.threats)

                # Display frame with detected lane.
                cv2.imshow('DeepEye Dashboard', cv2.resize(frame, (640, 480)))

                (_, self.threats) = self.lane_detector.detect_lane(
                    original_frame, self.threats)

            # Visualize lane detection ONLY
            elif self.lane_visualization and not self.object_visualization:
                (frame, self.threats) = self.lane_detector.detect_lane(
                    original_frame, self.threats)

                # Display frame with detected lane.
                cv2.imshow('DeepEye Dashboard', cv2.resize(frame, (640, 480)))

                (_, self.threats) = self.object_detector.scan_road(
                    original_frame, self.threats)

            # Visualize both object & lane detection
            elif self.object_visualization and self.lane_visualization:
                (frame, self.threats) = self.object_detector.scan_road(
                    original_frame, self.threats)
                (frame, self.threats) = self.lane_detector.detect_lane(
                    frame, self.threats)

                # Display frame with detected lane.
                cv2.imshow('DeepEye Dashboard', cv2.resize(frame, (640, 480)))

            # skip visualization
            else:
                (_, self.threats) = self.object_detector.scan_road(
                    original_frame, self.threats)
                (_, self.threats) = self.lane_detector.detect_lane(
                    original_frame, self.threats)

        # skip detection
        else:
            frame = original_frame

        if (self.frame_id % 10 == 0) and (self.diagnostic_mode):
            # draw a box around the area scanned for PEDESTRIAN/VEHICLES detection
            visualization_utils.draw_bounding_box_on_image_array(
                frame,
                self.object_detector.roi["t"] / frame_height,
                self.object_detector.roi["l"] / frame_width,
                self.object_detector.roi["b"] / frame_height,
                self.object_detector.roi["r"] / frame_width,
                color=(255, 255, 0),  # BGR VALUE 
                display_str_list=(' ROI ', ))

            # draw a box around the area scanned for collision warnings
            visualization_utils.draw_bounding_box_on_image_array(
                frame,
                self.object_detector.roi["ct"] / frame_height,
                self.object_detector.roi["cl"] / frame_width,
                self.object_detector.roi["b"] / frame_height,
                self.object_detector.roi["cr"] / frame_width,
                color=(255, 0, 255),  # BGR VALUE 
                display_str_list=(' COLLISION ROI ', ))

            # save a screenshot of the current frame after it gets processed
            cv2.imwrite("test/post/" + str(self.frame_id // 10) + ".jpg", frame)

            if self.threats["FAR_LEFT"] or self.threats["FAR_RIGHT"]:
                OFF_LANE = 1
            else:
                OFF_LANE = 0

            # append a new row to dataframe
            self.data_frame = self.data_frame.append(
                {
                    'FRAME_ID': int(self.frame_id / 10),
                    'PEDESTRIAN': int(self.threats['PEDESTRIAN']),
                    'VEHICLES': int(self.threats['VEHICLES']),
                    'BIKES': int(self.threats['BIKES']),
                    'STOP_SIGN': int(self.threats['STOP_SIGN']),
                    'TRAFFIC_LIGHT': int(self.threats['TRAFFIC_LIGHT']),
                    'OFF_LANE': int(OFF_LANE),
                    'COLLISION': int(self.threats['COLLISION']),
                },
                ignore_index=True)

        self.frame_id += 1
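A minimal driver loop for the class above, as a sketch; the classifier/dataset codenames and threshold below are placeholder values, not from the original source:

# Hypothetical usage sketch; codenames and threshold are placeholders.
assistant = DrivingAssistant(
    classifier_codename="ssd_mobilenet",  # placeholder
    dataset_codename="coco",              # placeholder
    classifier_threshold=0.5)

while True:
    assistant.run()                        # grab and process one frame
    if cv2.waitKey(1) & 0xFF == ord('q'):  # quit on 'q'
        break
cv2.destroyAllWindows()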
class VideoProcessor:
    """
    Processes a video file and produces an annotated version with the detected lanes.
    """

    def __init__(self, calibration_data_file, smooth_frames = 5, debug = False, failed_frames_dir = 'failed_frames'):
        self.img_processor = ImageProcessor(calibration_data_file)
        self.lane_tracker = LaneDetector(smooth_frames = smooth_frames)
        self.frame_count = 0
        self.fail_count = 0
        self.debug = debug
        self.failed_frames_dir = failed_frames_dir
        self.last_frame = None
        self.processed_frames = None

    def process_frame(self, img):

        img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
       
        undistorted_img, thresholded_img, warped_img = self.img_processor.process_image(img)
        
        _, polyfit, curvature, deviation, fail_code = self.lane_tracker.detect_lanes(warped_img)
        
        fill_color = (0, 255, 0) if fail_code == 0 else (0, 255, 255)

        lane_img = self.lane_tracker.draw_lanes(undistorted_img, polyfit, fill_color = fill_color)
        lane_img = self.img_processor.unwarp_image(lane_img)

        out_image = cv2.addWeighted(undistorted_img, 1.0, lane_img, 1.0, 0)

        font = cv2.FONT_HERSHEY_DUPLEX
        font_color = (0, 255, 0)

        curvature_text = 'Left Curvature: {:.1f}, Right Curvature: {:.1f}'.format(curvature[0], curvature[1])
        offset_text = 'Center Offset: {:.2f} m'.format(deviation)

        cv2.putText(out_image, curvature_text, (30, 60), font, 1, font_color, 2)
        cv2.putText(out_image, offset_text, (30, 90), font, 1, font_color, 2)

        if fail_code > 0:
            self.fail_count += 1
            failed_text = 'Detection Failed: {}'.format(LaneDetector.FAIL_CODES[fail_code])
            cv2.putText(out_image, failed_text, (30, 120), font, 1, (0, 0, 255), 2)
            if self.debug:
                print(failed_text)
                cv2.imwrite(os.path.join(self.failed_frames_dir, 'frame' + str(self.frame_count) + '_failed_' + str(fail_code) + '.png'), img)
                cv2.imwrite(os.path.join(self.failed_frames_dir, 'frame' + str(self.frame_count) + '_failed_' + str(fail_code) + '_lanes.png'), out_image)
                if self.last_frame is not None: # Also save the previous frame for comparison
                    cv2.imwrite(os.path.join(self.failed_frames_dir, 'frame' + str(self.frame_count) + '_failed_' + str(fail_code) + '_prev.png'), self.last_frame)

        self.frame_count += 1
        self.last_frame = img

        out_image = cv2.cvtColor(out_image, cv2.COLOR_BGR2RGB)

        return out_image

    def process_frame_split(self, img):

        img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
       
        undistorted_img, thresholded_img, processed_img = self.img_processor.process_image(img)

        lanes_centroids, polyfit, curvature, deviation, fail_code = self.lane_tracker.detect_lanes(processed_img)
        
        fill_color = (0, 255, 0) if fail_code == 0 else (0, 255, 255)

        lane_img = self.lane_tracker.draw_lanes(undistorted_img, polyfit, fill_color = fill_color)
        lane_img = self.img_processor.unwarp_image(lane_img)

        out_image = cv2.addWeighted(undistorted_img, 1.0, lane_img, 1.0, 0)

        color_img = self.img_processor.color_thresh(undistorted_img) * 255
        gradient_img = self.img_processor.gradient_thresh(undistorted_img) * 255
      
        processed_src = np.array(cv2.merge((thresholded_img, thresholded_img, thresholded_img)), np.uint8)
        src, _ = self.img_processor._warp_coordinates(img)

        src = np.array(src, np.int32)
        
        cv2.polylines(processed_src, [src], True, (0,0,255), 2)

        window_img = np.copy(processed_img)

        window_img = self.lane_tracker.draw_windows(window_img, lanes_centroids, polyfit, blend = True)

        lanes_img = np.copy(out_image)

        font = cv2.FONT_HERSHEY_DUPLEX
        font_color = (0, 255, 0)

        curvature_text = 'Left Curvature: {:.1f}, Right Curvature: {:.1f}'.format(curvature[0], curvature[1])
        offset_text = 'Center Offset: {:.2f} m'.format(deviation)

        cv2.putText(out_image, curvature_text, (30, 60), font, 1, font_color, 2)
        cv2.putText(out_image, offset_text, (30, 90), font, 1, font_color, 2)

        if fail_code > 0:
            self.fail_count += 1
            failed_text = 'Detection Failed: {}'.format(LaneDetector.FAIL_CODES[fail_code])
            cv2.putText(out_image, failed_text, (30, 120), font, 1, (0, 0, 255), 2)
            if self.debug:
                print(failed_text)
                cv2.imwrite(os.path.join(self.failed_frames_dir, 'frame' + str(self.frame_count) + '_failed_' + str(fail_code) + '.png'), img)
                cv2.imwrite(os.path.join(self.failed_frames_dir, 'frame' + str(self.frame_count) + '_failed_' + str(fail_code) + '_lanes.png'), out_image)
                if self.last_frame is not None: # Also save the previous frame for comparison
                    cv2.imwrite(os.path.join(self.failed_frames_dir, 'frame' + str(self.frame_count) + '_failed_' + str(fail_code) + '_prev.png'), self.last_frame)

        self.frame_count += 1

        undistorted_img = cv2.cvtColor(undistorted_img, cv2.COLOR_BGR2RGB)
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        gradient_img = cv2.cvtColor(gradient_img, cv2.COLOR_GRAY2RGB)
        thresholded_img = cv2.cvtColor(thresholded_img, cv2.COLOR_GRAY2RGB)
        processed_src = cv2.cvtColor(processed_src, cv2.COLOR_BGR2RGB)
        processed_img = cv2.cvtColor(processed_img, cv2.COLOR_GRAY2RGB)
        window_img = cv2.cvtColor(window_img, cv2.COLOR_BGR2RGB)
        lanes_img = cv2.cvtColor(lanes_img, cv2.COLOR_BGR2RGB)
        out_image = cv2.cvtColor(out_image, cv2.COLOR_BGR2RGB)

        return (undistorted_img, color_img, gradient_img, thresholded_img, processed_src, processed_img, window_img, lanes_img, out_image)

    def process_video(self, file_path, output, t_start = None, t_end = None):

        input_clip = VideoFileClip(file_path)
        
        if t_start is not None:
            input_clip = input_clip.subclip(t_start = t_start, t_end = t_end)
        
        output_clip = input_clip.fl_image(self.process_frame)
        output_clip.write_videofile(output, audio = False)

    def process_frame_stage(self, img, idx):
        
        if self.processed_frames is None:
            self.processed_frames = []

        if idx == 0:
            result = self.process_frame_split(img)
            self.processed_frames.append(result)
        else:
            self.frame_count += 1

        return self.processed_frames[self.frame_count - 1][idx]

    def process_video_split(self, file_path, output, t_start = None, t_end = None):

        input_clip = VideoFileClip(file_path)
        
        if t_start is not None:
            input_clip = input_clip.subclip(t_start = t_start, t_end = t_end)

        out_file_prefix = os.path.split(output)[1][:-4]
        
        idx = 0
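        # Note: the lambda below captures idx by reference, so incrementing idx
        # between write_videofile calls selects which pipeline stage is rendered.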
        output_clip = input_clip.fl_image(lambda img: self.process_frame_stage(img, idx))
        output_clip.write_videofile(out_file_prefix + '_undistorted.mp4', audio = False)
        idx += 1
        self.frame_count = 0
        output_clip.write_videofile(out_file_prefix + '_color.mp4', audio = False)
        idx += 1
        self.frame_count = 0
        output_clip.write_videofile(out_file_prefix + '_gradient.mp4', audio = False)
        idx += 1
        self.frame_count = 0
        output_clip.write_videofile(out_file_prefix + '_thresholded.mp4', audio = False)
        idx += 1
        self.frame_count = 0
        output_clip.write_videofile(out_file_prefix + '_processed_src.mp4', audio = False)
        idx += 1
        self.frame_count = 0
        output_clip.write_videofile(out_file_prefix + '_processed_dst.mp4', audio = False)
        idx += 1
        self.frame_count = 0
        output_clip.write_videofile(out_file_prefix + '_window.mp4', audio = False)
        idx += 1
        self.frame_count = 0
        output_clip.write_videofile(out_file_prefix + '_lanes.mp4', audio = False)
        idx += 1
        self.frame_count = 0
        output_clip.write_videofile(out_file_prefix + '_final.mp4', audio = False)

        print('Number of failed detections: {}'.format(self.fail_count))
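Typical use of VideoProcessor, as a sketch; the calibration pickle and video file names below are illustrative:

# Hypothetical usage sketch; file names are illustrative.
processor = VideoProcessor('calibration.p', smooth_frames=5, debug=False)
processor.process_video('project_video.mp4', 'project_video_output.mp4')
print('Failed detections: {}'.format(processor.fail_count))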
Example 14
def main():
    log.basicConfig(format="[ %(levelname)s ] %(message)s",
                    level=log.INFO,
                    stream=sys.stdout)
    args = build_argparser().parse_args()
    log.info("Manual driving mode is ON - use w, a ,d ,s and x to stop"
             ) if args.manual_driving else None

    frames_path = os.path.join(os.path.dirname(__file__), 'src', 'frames')
    road_model_xml = os.path.join(
        os.getcwd(), "src/data/road-segmentation/road-segmentation-adas-0001.xml")
    road_model_bin = os.path.splitext(road_model_xml)[0] + ".bin"

    tl_model_xml = os.path.join(
        os.getcwd(), "src/data/traffic-light/traffic-light-0001.xml")
    tl_model_bin = os.path.splitext(tl_model_xml)[0] + ".bin"

    device = "MYRIAD"
    fps = ""
    frame_num = 0
    camera_id = 0

    cap = cv2.VideoCapture(camera_id)
    log.info("Loading Camera id {}".format(camera_id))

    cap.set(cv2.CAP_PROP_FPS, 30)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, CAMERA_WIDTH)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, CAMERA_HEIGHT)
    log.info("Camera size {}x{}".format(CAMERA_WIDTH, CAMERA_HEIGHT))

    log.info("Setting device: {}".format(device))
    plugin = IEPlugin(device=device)

    # Read IR
    tl_net = IENetwork(model=tl_model_xml, weights=tl_model_bin)
    log.info("Traffic-Light network has been loaded:\n\t{}\n\t{}".format(
        tl_model_xml, tl_model_bin))

    # Open video capture for recognizing the road
    assert cap.isOpened(), "Couldn't open Camera"
    success, frame = cap.read()
    assert success, "Can't snap image"
    """
    Road-segmentation verification starts here
    """
    if args.verify_road:
        road_net = IENetwork(model=road_model_xml, weights=road_model_bin)
        log.info(
            "Road-Segmentation network has been loaded:\n\t{}\n\t{}".format(
                road_model_xml, road_model_bin))
        assert len(road_net.inputs.keys()
                   ) == 1, "Sample supports only single input topologies"
        assert len(road_net.outputs
                   ) == 1, "Sample supports only single output topologies"
        log.info("Preparing input blobs")
        input_blob = next(iter(road_net.inputs))
        out_blob = next(iter(road_net.outputs))
        road_net.batch_size = 1
        log.info("Batch size is {}".format(road_net.batch_size))

        # Read and pre-process input images
        n, c, h, w = road_net.inputs[input_blob].shape
        images = np.ndarray(shape=(n, c, h, w))

        if frame.shape[:-1] != (h, w):
            log.warning("Image {} is resized from {} to {}".format(
                "CAM", frame.shape[:-1], (h, w)))
            image = cv2.resize(frame, (w, h))
        else:
            image = frame

        image = image.transpose(
            (2, 0, 1))  # Change data layout from HWC to CHW
        images[frame_num] = image
        log.info("Snapping frame: {}".format(frame_num))
        frame_num += 1

        # Loading model to the plugin
        log.info("Loading road-segmentation model to the plugin")
        road_exec_net = plugin.load(network=road_net)

        # Start sync inference
        log.info("Starting inference ({} iterations)".format(1))
        infer_time = []
        for i in range(1):
            t0 = time()
            res = road_exec_net.infer(inputs={input_blob: images})
            infer_time.append((time() - t0) * 1000)
        log.info("Average running time of one iteration: {} ms".format(
            np.average(np.asarray(infer_time))))

        # Processing output blob
        log.info("Processing output blob")
        res = res[out_blob]
        _, _, out_h, out_w = res.shape
        t0 = time()
        for batch, data in enumerate(res):
            classes_map = np.zeros(shape=(out_h, out_w, 3), dtype=np.uint8)
            for i in range(out_h):
                for j in range(out_w):
                    if len(data[:, i, j]) == 1:
                        pixel_class = int(data[:, i, j])
                    else:
                        pixel_class = np.argmax(data[:, i, j])
                    classes_map[i,
                                j, :] = classes_color_map[min(pixel_class, 20)]

        # Check red color (road) percentage - for verifying road
        RED_MIN = np.array([0, 0, 128])
        RED_MAX = np.array([250, 250, 255])
        classes_map = select_region(classes_map)
        out_img = os.path.join(frames_path, "processed_image.bmp")
        cv2.imwrite(out_img, classes_map)
        log.info("Result image was saved to {}".format(out_img))

        size = classes_map.size
        dstr = cv2.inRange(classes_map, RED_MIN, RED_MAX)
        no_red = cv2.countNonZero(dstr)
        frac_red = np.divide(float(no_red), int(size))
        percent_red = int(np.multiply(frac_red, 100)) + 50  # 50 = black region

        log.info("Road-segmentation processing time is: {} sec.".format(
            (time() - t0) * 1000))
        log.info("Road detected {}% of the frame: ".format(str(percent_red)))

        if percent_red < 50:
            raise Exception(
                "Can't detect any road! Please put the car on a road")
    """
    Main logic starts here - detecting the road and traffic lights
    """
    tl_input_blob = next(iter(tl_net.inputs))
    tl_out_blob = next(iter(tl_net.outputs))

    # Loading model to the plugin
    log.info("Loading traffic-light model to the plugin")
    tl_exec_net = plugin.load(network=tl_net)

    def releaseAll():
        """
        Release the camera, reset the car and close all open windows.
        Called when the program stops or when something goes wrong.
        """
        cap.release()
        car.reset()
        cv2.destroyAllWindows()

    # Start running car and video
    try:
        initial_w = cap.get(3)
        initial_h = cap.get(4)
        del_label = 'go'
        frame_count = 0
        stop_on_u_turn_count = 0

        # initialize car
        car = DriveBrickPi3()
        log.info("Car name is {}".format(car.name))
        # initialize road detection - start with first frame
        detector = LaneDetector(frame)

        # Start capturing...
        while cap.isOpened():
            t1 = time()

            success, orig_frame = cap.read()
            assert success, "Can't snap image"

            if args.mirror:
                orig_frame = cv2.flip(orig_frame, 1)
                log.info("Using camera mirror")

            # Update image
            detector.image = orig_frame

            # Set configurations for traffic light detection
            prepimg = cv2.resize(orig_frame, (300, 300))
            prepimg = prepimg[np.newaxis, :, :, :]
            prepimg = prepimg.transpose((0, 3, 1, 2))
            tl_outputs = tl_exec_net.infer(inputs={tl_input_blob: prepimg})
            res = tl_exec_net.requests[0].outputs[tl_out_blob]

            detecting_traffic_light = False

            # Search for all detected objects (for traffic light)
            for obj in res[0][0]:
                # Only draw objects whose probability exceeds the threshold
                confidence = obj[2] * 100
                if 85 < confidence < 100:
                    detecting_traffic_light = True
                    best_proposal = obj

                    xmin = int(best_proposal[3] * initial_w)
                    ymin = int(best_proposal[4] * initial_h)
                    xmax = int(best_proposal[5] * initial_w)
                    ymax = int(best_proposal[6] * initial_h)
                    class_id = int(best_proposal[1])

                    # Make sure only the 3 known classes are handled
                    if class_id <= 2:
                        # Draw box and label\class_id
                        color = (255, 0, 0) if class_id == 1 else (50, 205, 50)
                        cv2.rectangle(orig_frame, (xmin, ymin), (xmax, ymax),
                                      color, 2)

                        det_label = classes_traffic_light_map[
                            class_id -
                            1] if classes_traffic_light_map else str(class_id)
                        label_and_prob = det_label + ", " + str(
                            confidence) + "%"
                        cv2.putText(orig_frame, label_and_prob,
                                    (xmin, ymin - 7), cv2.FONT_HERSHEY_COMPLEX,
                                    0.6, color, 1)
                        if str(det_label) == 'go':
                            car.status = CAR_DIRECTION.FWD
                        elif str(det_label) == 'stop':
                            car.status = CAR_DIRECTION.STOP
                        else:
                            car.status = CAR_DIRECTION.STOP

            # Image processing - start looking for the middle line of the road.
            # Note: the following function looks for the yellow line in the middle.
            mid_lines = detector.process()

            if not args.manual_driving:
                # When the car status is forward, we either saw no traffic light
                # or the light is green, and the road was recognized by its
                # yellow middle line.
                if mid_lines is not None and car.status is CAR_DIRECTION.FWD:
                    x1, y1, x2, y2 = mid_lines[0][0]
                    stop_on_u_turn_count = 0

                    cv2.line(orig_frame, (x1, y1), (x2, y2), (0, 180, 0), 5)

                    slope = (y1 - y2) / (x1 - x2) if x1 - x2 != 0 else 50
                    log.debug("slope: {}".format(str(slope)))
                    log.debug("x1 {}, x2 {}, y1 {}, y2 {}".format(
                        str(x1), str(x2), str(y1), str(y2)))
                    if slope < 0:
                        # go left
                        log.debug("detecting left -> moving left")
                        car.moveCar(CAR_DIRECTION.LEFT)
                        sleep(0.1)
                        car.moveCar(CAR_DIRECTION.FWD)
                        sleep(0.1)

                    if 0 <= slope <= 7:
                        # go right
                        log.debug("detecting right -> moving right")
                        car.moveCar(CAR_DIRECTION.RIGHT)
                        sleep(0.1)
                        car.moveCar(CAR_DIRECTION.FWD)
                        sleep(0.1)

                    if slope > 7:  # vertical lines fall back to slope 50 above
                        # go forward
                        log.debug("Moving forward")

                        # Shift x2 by +100px because the camera lens is not centered.
                        # If your camera is centered, remove this offset.
                        x2 += 100

                        # keeping car on the middle (30 = gap of the middle line)
                        if x2 > (CAMERA_WIDTH / 2) + 30:
                            log.debug("not in the middle -> move right")
                            car.moveCar(CAR_DIRECTION.RIGHT)
                            sleep(0.1)

                        if x2 <= (CAMERA_WIDTH / 2) - 30:
                            log.debug("not in the middle -> move left")
                            car.moveCar(CAR_DIRECTION.LEFT)
                            sleep(0.1)

                        car.moveCar(CAR_DIRECTION.FWD)
                else:
                    # if reaching here, there are 2 options:
                    # 1- car stopped on the red light (traffic light)
                    # 2- car stopped because it reached the end of road -> do U-Turn
                    car.moveCar(CAR_DIRECTION.STOP)

                    # wait 20 frames to make sure that the car reached the end of the road
                    stop_on_u_turn_count += 1

                    if stop_on_u_turn_count == 20 and detecting_traffic_light is False and car.status == CAR_DIRECTION.FWD:
                        log.debug("Detecting U-Turn")
                        car.moveCar(CAR_DIRECTION.FWD)
                        sleep(2.5)
                        car.moveCar(CAR_DIRECTION.RIGHT)
                        sleep(6)
                        car.moveCar(CAR_DIRECTION.REVERSE)
                        sleep(1)
                        car.moveCar(CAR_DIRECTION.RIGHT)
                        sleep(1)
                        stop_on_u_turn_count = 0

            if args.manual_driving:
                inp = str(input())  # Take input from the terminal

                if inp == 'w':
                    car.moveCar(CAR_DIRECTION.FWD)
                elif inp == 'a':
                    car.moveCar(CAR_DIRECTION.LEFT)
                elif inp == 'd':
                    car.moveCar(CAR_DIRECTION.RIGHT)
                elif inp == 's':
                    car.moveCar(CAR_DIRECTION.REVERSE)
                elif inp == 'x':
                    car.moveCar(CAR_DIRECTION.STOP)

            # count the frames
            frame_count += 1

            if args.show_fps:
                elapsedTime = time() - t1
                fps = "(Playback) {:.1f} FPS".format(1 / elapsedTime)
                cv2.putText(orig_frame, fps, (0, 30), cv2.FONT_HERSHEY_SIMPLEX,
                            0.5, (0, 200, 0), 1, cv2.LINE_AA)

            if args.show_frame:
                cv2.imshow("Driving Pi", orig_frame)

            if cv2.waitKey(1) & 0xFF == 27:
                break  # ESC to quit

        # Release everything on finish
        releaseAll()

    except KeyboardInterrupt:
        releaseAll()
Example 15
"""
Entry point for an application
"""

import gallery

from camera import Camera
from lane_detector import LaneDetector

calibration_images_dir = '../camera_cal/'
test_images_dir = '../test_images/'

project_video = '../project_video.mp4'
project_video_output = '../project_video_output.mp4'

if __name__ == "__main__":
    camera = Camera.calibrate(calibration_images_dir, 9, 6)
    lane_detector = LaneDetector(camera)
    gallery.show_test_images_pipeline(camera, test_images_dir)
    lane_detector.detect_lane_lines_in_video(project_video,
                                             project_video_output)
Example 17
class LaneModel:
    '''
        Class representing the road model.
    '''
    def __init__(self, lane_width, segment_count, y_offset_first_segment):
        '''
            Constructor for instantiating a road model.

            Parameters
            ----------
            lane_width : Integer
                Width of the road
            segment_count : Integer
                Number of segments to compute
            y_offset_first_segment : Integer
                y value of the lowest segment line

        '''
        self.lane_width = lane_width
        self.segment_count = segment_count
        self.y_offset_first_segment = y_offset_first_segment
        self.segments = self._instantiate_segments()
        self.lane_detector = LaneDetector(self.lane_width)

    def update_segments(self, image):
        '''
            Updates all segment lines on the image.

            Parameters
            ----------
            image : image
        '''
        for seg in self.segments:
            seg.update_non_zero_points(image)
            seg.left_point, seg.right_point = self.lane_detector.find_lane_points(
                seg)
            seg.update_point_distance()
            seg.update_point_center()

    def draw_segments(self, image):
        '''
            Draws all segment lines onto an image.

            Parameters
            ----------
            image : image
        '''
        if self.segments:
            for seg in self.segments:
                seg.draw(image)

    def state_point_x(self):
        '''
            Computes the x value of the ideal line on the road.
        '''
        # TODO: take multiple segments into account
        if self.segments:
            return self.segments[0].point_center
        return None

    def _instantiate_segments(self):
        seg_list = []
        for i in range(self.segment_count):
            y_offset_act_segment = self.y_offset_first_segment - (SEGMENT_GAB *
                                                                  i)
            seg_list.append(SegmentModel(y_offset_act_segment,
                                         self.lane_width))
        return seg_list
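A per-frame usage sketch for LaneModel, assuming a preprocessed binary bird's-eye frame; the array sizes and constructor values are illustrative, and SEGMENT_GAB plus SegmentModel come from the surrounding module:

import numpy as np

# Hypothetical per-frame usage; all values are illustrative.
binary_birdseye = np.zeros((300, 400), dtype=np.uint8)  # stand-in preprocessed frame
debug_image = np.dstack([binary_birdseye] * 3)          # 3-channel copy for drawing

lane_model = LaneModel(lane_width=200, segment_count=1, y_offset_first_segment=280)
lane_model.update_segments(binary_birdseye)  # locate lane points per segment line
lane_model.draw_segments(debug_image)        # overlay the segment lines
target_x = lane_model.state_point_x()        # x of the ideal line, or None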
def sliding_window_detect_test(test_img_path):
    cam_intrinsic, cam_distortion = load_cached_camera_parameters(
        "./camera_params.p")
    img = mpimg.imread(test_img_path)
    warped_binary = rgb_to_warped_binary(img, cam_intrinsic, cam_distortion)
    result = LaneDetector.sliding_window_detect(
        warped_binary,
        sliding_window_config["height"],
        sliding_window_config["margin"],
        sliding_window_config["min_num_pixels"],
        return_debug_img=True)
    left_fit = result.l_fit
    right_fit = result.r_fit
    # Generate x and y values for plotting
    ploty = np.linspace(0, warped_binary.shape[0] - 1, warped_binary.shape[0])
    left_fitx = left_fit[0] * ploty**2 + left_fit[1] * ploty + left_fit[2]
    right_fitx = right_fit[0] * ploty**2 + right_fit[1] * ploty + right_fit[2]

    # Create an image to draw the detected region
    warp_zero = np.zeros_like(warped_binary).astype(np.uint8)
    color_warp = np.dstack((warp_zero, warp_zero, warp_zero))
    # Recast the x and y points into usable format for cv2.fillPoly()
    pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
    pts_right = np.array(
        [np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])
    pts = np.hstack((pts_left, pts_right))
    # Draw the lane onto the warped blank image
    cv2.fillPoly(color_warp, np.int_([pts]), (0, 255, 0))
    # unwarp the result
    unwarp_matrix_ = cv2.getPerspectiveTransform(warp_config["dst_rect"],
                                                 warp_config["src_rect"])
    newwarp = warp_image(color_warp, unwarp_matrix_)
    # Combine the detected result with the original image
    result_img = cv2.addWeighted(img, 1, newwarp, 0.3, 0)

    f, (axes1, axes2) = plt.subplots(1, 2, figsize=(20, 10))
    f.suptitle(test_img_path.split("/")[-1], fontsize=30)
    axes1.imshow(result_img)
    axes1.set_title('Original with detected result', fontsize=15)
    left_curvature = result.l_curvature
    right_curvature = result.r_curvature
    center_offset = result.center_offset
    axes1.text(0.5,
               0.95,
               'left curvature : %.2fm' % (left_curvature, ),
               color="yellow",
               horizontalalignment='center',
               transform=axes1.transAxes)
    axes1.text(0.5,
               0.90,
               'right curvature: %.2fm' % (right_curvature, ),
               color="yellow",
               horizontalalignment='center',
               transform=axes1.transAxes)
    axes1.text(0.5,
               0.85,
               'center offset  : %.2fm' % (center_offset, ),
               color="yellow",
               horizontalalignment='center',
               transform=axes1.transAxes)

    axes2.imshow(result.debug_image)
    axes2.set_title('Detected', fontsize=15)
    axes2.plot(left_fitx, ploty, color='yellow')
    axes2.plot(right_fitx, ploty, color='yellow')
    axes2.set_xlim(0, img.shape[1])
    axes2.set_ylim(img.shape[0], 0)
    plt.show()
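sliding_window_detect_test additionally assumes sliding_window_config and warp_config globals. A plausible sketch, where every number is illustrative and the src/dst rectangles must match the project's actual perspective calibration:

import numpy as np

# Illustrative configuration; values are assumptions, not project settings.
sliding_window_config = {
    "height": 80,          # height of each search window in pixels
    "margin": 100,         # horizontal search margin around the window center
    "min_num_pixels": 50,  # pixels needed to recenter the next window
}
warp_config = {
    "src_rect": np.float32([[580, 460], [700, 460], [1040, 680], [260, 680]]),
    "dst_rect": np.float32([[260, 0], [1040, 0], [1040, 720], [260, 720]]),
}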
Example 19
class Detector:
    def __init__(self):
        rospy.init_node("lane_detector")
        self.detector = LaneDetector()
        self.bridge = CvBridge()
        
        # subscribe to images
        rospy.Subscriber("left/image_rect_color", Image, self.on_left_image)

        self.path_pub = rospy.Publisher('/navigation/waypoints', Path, queue_size=1, latch=True)

        self.debug_lane_pub = rospy.Publisher("debug/lane_image", Image, queue_size=1)

        self.left_image = None
        self.publish()

    def on_left_image(self, data):
        try:
            self.left_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
        except CvBridgeError as e:
            rospy.logerr(e)
        

    def publish(self):
        rate = rospy.Rate(10)
        while not rospy.is_shutdown():
            if self.left_image is not None:
                try:
                    # start = rospy.get_time()
                    target_line, debug_image = self.detector.process_images(self.left_image)
                    self.generate_path(target_line)
                    self.debug_publish(debug_image)
                    # rospy.logwarn("time elapsed doing work: %s", rospy.get_time() - start)
                except CvBridgeError as e:
                    rospy.logerr(e)
                except Exception as e:
                    rospy.logerr(e)
        
            rate.sleep()

    def debug_publish(self, image):
        self.debug_lane_pub.publish(self.bridge.cv2_to_imgmsg(image, "rgb8"))

    def generate_path(self, target_line):
        path = Path()
        path.header.frame_id = "/zed_initial_frame"
        # rospy.logwarn(target_line[150])
        for index, point in enumerate(target_line[150:]):
            pose = self.generate_pose(index/1000, point/1000, 0, 0, 0, 0, 0)  # ~ measuring a pixel per mm
            path.poses.append(pose)

        self.path_pub.publish(path)

    def generate_pose(self, px, py, pz, ox, oy, oz, ow):
        pose = PoseStamped()
        # pose.header.frame_id = "/zed_initial_frame"
        pose.pose.position.x = px
        pose.pose.position.y = py
        pose.pose.position.z = pz
        pose.pose.orientation.x = ox
        pose.pose.orientation.y = oy
        pose.pose.orientation.z = oz
        pose.pose.orientation.w = ow
        return pose
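Because Detector.__init__ ends by calling publish(), which loops until shutdown, starting the node amounts to constructing the class; a typical ROS entry-point sketch:

if __name__ == '__main__':
    try:
        Detector()  # __init__ blocks inside publish() until rospy shuts down
    except rospy.ROSInterruptException:
        pass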
Example 20
def main():
    args = build_argparser().parse_args()

    log_level = log.DEBUG if args.debug else log.INFO
    log.basicConfig(format="[ %(levelname)s ] %(message)s",
                    level=log_level,
                    stream=sys.stdout)
    log.info("Manual driving mode is ON - use w, a ,d ,s and x to stop"
             ) if args.manual_driving else None

    tl_model_xml = os.path.join(
        os.getcwd(), "src/data/traffic-light/traffic-light-0001.xml")
    tl_model_bin = os.path.splitext(tl_model_xml)[0] + ".bin"

    device = "MYRIAD"
    fps = ""

    # start camera module
    cam = VideoCamera(args.camera_type)

    # Open video capture for recognizing the road
    frame = None
    try:
        if args.camera_type in ("usb", "camerapi"):
            frame = cam.frame()
    except Exception:
        raise Exception("Can't get a frame from camera!!")

    # Verify road-segmentation start here
    if args.verify_road:
        verify_street(frame=frame)
    """
    Main logic starts here - detecting the road and traffic lights
    """
    log.info("Setting device: {}".format(device))
    plugin = IEPlugin(device=device)

    # Read IR
    tl_net = IENetwork(model=tl_model_xml, weights=tl_model_bin)
    log.info("Traffic-Light network has been loaded:\n\t{}\n\t{}".format(
        tl_model_xml, tl_model_bin))

    tl_input_blob = next(iter(tl_net.inputs))
    tl_out_blob = next(iter(tl_net.outputs))

    # Loading model to the plugin
    log.info("Loading traffic-light model to the plugin")
    tl_exec_net = plugin.load(network=tl_net)

    def release_all():
        """
        Reset the car and close all open windows.
        Called when the program stops or when something goes wrong.
        """
        car.reset()
        cv2.destroyAllWindows()

    # Start running car and video
    try:
        del_label = 'go'  # default label is 'GO' start moving the car forward
        frame_count = 0
        stop_on_u_turn_count = 0

        # initialize car
        car = DriveBrickPi3()
        log.info("Car name is {}".format(car.name))

        # initialize road detection - start with first frame
        detector = LaneDetector(frame)

        # Start capturing...
        log.info("Starting Game...")
        while True:
            t1 = time()

            orig_frame = cam.frame()

            if args.mirror:
                orig_frame = cv2.flip(orig_frame, 1)

            # Set updated frame
            detector.image = orig_frame

            # Set configurations for traffic light detection
            prepimg = cv2.resize(orig_frame, (300, 300))
            prepimg = prepimg[np.newaxis, :, :, :]
            prepimg = prepimg.transpose((0, 3, 1, 2))
            tl_outputs = tl_exec_net.infer(inputs={tl_input_blob: prepimg})
            res = tl_exec_net.requests[0].outputs[tl_out_blob]
            detecting_traffic_light = False

            # Search for all detected objects (for traffic light)
            for obj in res[0][0]:
                # Only draw objects whose probability exceeds the threshold
                confidence = obj[2] * 100
                if 50 < confidence < 100:
                    detecting_traffic_light = True
                    best_proposal = obj

                    xmin = int(best_proposal[3] * res_width)
                    ymin = int(best_proposal[4] * res_height)
                    xmax = int(best_proposal[5] * res_width)
                    ymax = int(best_proposal[6] * res_height)
                    class_id = int(best_proposal[1])

                    # Make sure the class id is within the known classes
                    if class_id <= len(classes_traffic_light_map):
                        # Draw box and label\class_id
                        color = (255, 0, 0) if class_id == 1 else (50, 205, 50)
                        cv2.rectangle(orig_frame, (xmin, ymin), (xmax, ymax),
                                      color, 2)

                        det_label = classes_traffic_light_map[
                            class_id -
                            1] if classes_traffic_light_map else str(class_id)
                        label_and_prob = det_label + ", " + str(
                            confidence) + "%"
                        cv2.putText(orig_frame, label_and_prob,
                                    (xmin, ymin - 7), cv2.FONT_HERSHEY_COMPLEX,
                                    0.6, color, 1)

                        if str(det_label) == 'go':
                            car.status = CAR_DIRECTION.FWD
                        elif str(det_label) == 'stop':
                            car.status = CAR_DIRECTION.STOP
                        else:
                            car.status = CAR_DIRECTION.STOP

            # Image processing - start looking for the middle line of the road.
            # Note: the following function looks for the yellow line in the middle.
            mid_lines = detector.get_lane()

            if not args.manual_driving:
                # When the car status is forward, we either saw no traffic light
                # or the light is green, and the road was recognized by its
                # yellow middle line.
                if mid_lines is not None and car.status is CAR_DIRECTION.FWD:
                    x1, y1, x2, y2 = mid_lines[0][0]
                    stop_on_u_turn_count = 0

                    cv2.line(orig_frame, (x1, y1), (x2, y2), (0, 180, 0), 5)

                    slope = (y1 - y2) / (x1 - x2) if x1 - x2 != 0 else 50
                    log.debug("slope: {}".format(str(slope)))
                    log.debug("x1 {}, x2 {}, y1 {}, y2 {}".format(
                        str(x1), str(x2), str(y1), str(y2)))
                    if slope < 0:
                        # go left
                        log.debug("detecting left -> moving left")
                        car.move_car(CAR_DIRECTION.LEFT)
                        sleep(0.1)
                        car.move_car(CAR_DIRECTION.FWD)
                        sleep(0.1)

                    if 0 <= slope <= 7:
                        # go right
                        log.debug("detecting right -> moving right")
                        car.move_car(CAR_DIRECTION.RIGHT)
                        sleep(0.1)
                        car.move_car(CAR_DIRECTION.FWD)
                        sleep(0.1)

                    if slope > 7:
                        # Vertical lines were mapped to slope 50 above, so the
                        # near-vertical/forward case is covered by this branch
                        log.debug("Moving forward")
                        # Shift x2 by 100 px because the camera lens is not
                        # centered; remove this offset if your camera is
                        # mounted in the middle.
                        x2 += 100

                        # keeping car on the middle (30 = gap of the middle line)
                        if x2 > (res_width / 2) + 30:
                            log.debug("not in the middle -> move right")
                            car.move_car(CAR_DIRECTION.RIGHT)
                            sleep(0.1)

                        if x2 <= (res_width / 2) - 30:
                            log.debug("not in the middle -> move left")
                            car.move_car(CAR_DIRECTION.LEFT)
                            sleep(0.1)

                        car.move_car(CAR_DIRECTION.FWD)
                else:
                    # If we reach here, there are two options:
                    # 1 - the car stopped at a red traffic light
                    # 2 - the car reached the end of the road -> do a U-turn
                    car.move_car(CAR_DIRECTION.STOP)

                    # wait 20 frames to make sure the car reached the end of the road
                    stop_on_u_turn_count += 1

                    if stop_on_u_turn_count == 20 and detecting_traffic_light is False and car.status == CAR_DIRECTION.FWD:
                        log.debug("Detecting U-Turn")
                        car.move_car(CAR_DIRECTION.FWD)
                        sleep(2.5)
                        car.move_car(CAR_DIRECTION.RIGHT)
                        sleep(6)
                        car.move_car(CAR_DIRECTION.REVERSE)
                        sleep(1)
                        car.move_car(CAR_DIRECTION.RIGHT)
                        sleep(1)
                        stop_on_u_turn_count = 0

            if args.manual_driving:
                inp = str(input())  # Take input from the terminal

                if inp == 'w':
                    car.move_car(CAR_DIRECTION.FWD)
                elif inp == 'a':
                    car.move_car(CAR_DIRECTION.LEFT)
                elif inp == 'd':
                    car.move_car(CAR_DIRECTION.RIGHT)
                elif inp == 's':
                    car.move_car(CAR_DIRECTION.REVERSE)
                elif inp == 'x':
                    car.move_car(CAR_DIRECTION.STOP)

            # count the frames
            frame_count += 1

            if args.show_fps:
                elapsed_time = time() - t1
                fps = "(Playback) {:.1f} FPS".format(1 / elapsed_time)
                cv2.putText(orig_frame, fps, (0, 30), cv2.FONT_HERSHEY_SIMPLEX,
                            0.5, (0, 200, 0), 1, cv2.LINE_AA)

            if args.show_frame:
                cv2.imshow("Driving Pi", orig_frame)

            key = cv2.waitKey(1) & 0xFF
            if key == ord('q') or key == 27:
                break  # 'q' or ESC to quit

            cam.clean_video()

        # Release everything on finish
        release_all()

    except KeyboardInterrupt:
        release_all()
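
# Hedged sketch: the slope-based steering policy above, distilled into a pure
# function for readability. The 0/7 thresholds and the sentinel slope of 50
# for vertical lines mirror the loop above; the helper name and return values
# are illustrative assumptions, not part of the original program.
def steering_from_slope(slope):
    """Map the detected mid-line slope to a coarse steering decision."""
    if slope < 0:
        return 'LEFT'   # negative slope -> steer left, then resume forward
    if slope <= 7:
        return 'RIGHT'  # shallow positive slope -> steer right, then forward
    return 'FWD'        # near-vertical line (includes the sentinel 50)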
Example n. 21
    parser.add_argument('--w_margin',
                        type=int,
                        default=35,
                        help='Sliding window margin from center')

    args = parser.parse_args()

    img_files = []

    for ext in ('*.png', '*.jpg'):
        img_files.extend(glob(os.path.join(args.dir, ext)))

    img_processor = ImageProcessor(args.calibration_data_file)
    lane_tracker = LaneDetector(window_width=args.w_width,
                                window_height=args.w_height,
                                margin=args.w_margin,
                                smooth_frames=0)

    for img_file in tqdm(img_files, unit=' images', desc='Image processing'):

        tqdm.write(img_file)  # avoids breaking the tqdm progress bar
        input_img = cv2.imread(img_file)

        undistorted_img, thresholded_img, processed_img = img_processor.process_image(
            input_img)

        out_file_prefix = os.path.join(args.output,
                                       os.path.split(img_file)[1][:-4])
        cv2.imwrite(out_file_prefix + '_processed.jpg', processed_img)

        cv2.imwrite(out_file_prefix + '_undistorted.jpg', undistorted_img)
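        # The thresholded image is computed above but never saved in the
        # original example; persisting it too is an optional addition:
        cv2.imwrite(out_file_prefix + '_thresholded.jpg', thresholded_img)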
Example n. 22
    
    rospy.init_node('lane_detection')
    rospy.loginfo("Starting land_detection.py")

    if len(sys.argv) < 2:
        rospy.logerr("Error in lane_detection")
        rospy.logerr("usage: lane_detection.py 'homography_file' 'filter_file'")
        exit(1)

    homography_file = sys.argv[1]
    #filter_file = "/home/nesvera/catkin_ws/src/travis/travis_image_processing/src/lane_detector/data/default.travis"
    filter_file = "/home/taura/catkin_ws/src/travis/travis_image_processing/src/lane_detector/data/lane.travis"
    debug = 1
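
    # Hedged sketch: `image_callback` is registered below but not defined in
    # this snippet. A minimal version might decode the CompressedImage and
    # hand the frame to the detector; the `process()` method name is an
    # assumption, and publishing LaneInfo is omitted since its fields are
    # not shown here.
    def image_callback(msg):
        frame = bridge.compressed_imgmsg_to_cv2(msg, desired_encoding='bgr8')
        lane_detector.process(frame)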

    global lane_detector
    lane_detector = LaneDetector(homography_file, filter_file, debug)

    bridge = CvBridge()

    # Publisher
    lane_status_pub = rospy.Publisher("/travis/lane_info", LaneInfo, queue_size=1)

    # Subscriber
    rospy.Subscriber("/camera/image_raw/compressed", CompressedImage, image_callback)

    if debug == 1:
        lane_detector.debug()

    else:
        rospy.spin()
import glob
import cv2
import os
import pickle

from lane_detector import LaneDetector
from frame_composer import FrameComposer
from undistorter import Undistorter

with open('./undistorter.pkl', 'rb') as f:
    undistorter = pickle.load(f)


ld = LaneDetector(undistorter)

files = glob.glob('./data/test_images/*.jpg')
# files = ['./data/test_images/test1.jpg']

for file in files:

    image = cv2.imread(file)
    frame_with_lane_filled, frame_with_lines_n_windows_2d, thresholded_frame, radius_of_curvature = ld.run(image)

    base = os.path.basename(file)

    fc = FrameComposer(image)
    fc.add_mask_over_base(frame_with_lane_filled)
    fc.add_upper_bar((frame_with_lines_n_windows_2d, thresholded_frame))
    fc.add_text('Radius of curvature: {}'.format(radius_of_curvature))

    cv2.imwrite('./data/test_images_output/{}_{}.jpg'.format(os.path.splitext(base)[0], 'lane_detected'), fc.get_frame())
Example n. 24
"""Apply lane detection frame by frame to a video in real time"""

import sys  # Python standard library import
sys.path.append('../')
import cv2  # OpenCV library import
from lane_detector import LaneDetector
from object_detect.utils import FPS
from object_detect.utils import fps_label

PATH_TO_VIDEO = '../test_videos/dashcam3.mp4'

video_capture = cv2.VideoCapture(PATH_TO_VIDEO)

fps = FPS().start()
ld = LaneDetector()

while video_capture.isOpened():
    ret, frame = video_capture.read()
    if ret:

        height, width, layers = frame.shape
        half_height = int(height / 2)
        half_width = int(width / 2)

        frame = cv2.resize(frame, (half_width, half_height))

        detected_lane = ld.detect_lanes(frame)

        fps.update()
        fps.stop()
        fps_label(("FPS: {:.2f}".format(fps.fps())), detected_lane)
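        # The tail of this example is not shown; a plausible completion that
        # displays the annotated frame and exits on 'q' (an assumption):
        cv2.imshow('Lane detection', detected_lane)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    else:
        break  # end of stream

video_capture.release()
cv2.destroyAllWindows()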
Example n. 25
import argparse
import cv2
from moviepy.editor import VideoFileClip
from pathlib import Path

import util
from lane_detector import LaneDetector

mtx, dist = util.read_mtx_dist()
detector = LaneDetector()

def process(img, save=False):
    undistorted = cv2.undistort(img, mtx, dist, None, mtx)
    if save:
        util.save_rgb_img('writeup_images/undistorted.jpg', undistorted)
    return detector.process(undistorted, save=save)

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--input', required=True)
    parser.add_argument('--output', required=True)
    # Save intermediate images. This mode is only available when processing a single image
    parser.add_argument('--save', action='store_true') 
    args = parser.parse_args()

    ext = Path(args.input).suffix

    if ext in ['.mp4']:
        clip = VideoFileClip(args.input)
        project_clip = clip.fl_image(process)
        project_clip.write_videofile(args.output, audio=False)
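    else:
        # Hedged sketch: single-image input is implied by the --save comment
        # above but not shown in this snippet; reading and writing with
        # OpenCV (and the RGB round-trip to match moviepy frames) is an
        # assumption.
        img = cv2.cvtColor(cv2.imread(args.input), cv2.COLOR_BGR2RGB)
        result = process(img, save=args.save)
        cv2.imwrite(args.output, cv2.cvtColor(result, cv2.COLOR_RGB2BGR))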
Example n. 26
for result in results:
    file_index = result["filename"][-5]
    cv2.imwrite("OUTPUT/calibration/1-corners%s.jpg" % file_index,
                result["img"])
    cv2.imwrite("OUTPUT/calibration/2-corners-undistorted-%s.jpg" % file_index,
                camera.undistort(result["img"]))

# HSV color mask
color_mask = (
    ((20, 50, 75), (110, 255, 255)),  # yellow lines
    ((0, 0, 220), (255, 255, 255))  # white lines
)

# Process images
for filename in glob.glob("raw_images/*.jpg"):
    detector = LaneDetector(camera, perspective, color_mask)
    detector.run(cv2.imread(filename), "OUTPUT/test%s" % filename[-5])

# Process video
cap = cv2.VideoCapture("road.mp4")
out = cv2.VideoWriter("road_processed.avi", cv2.VideoWriter_fourcc(*'XVID'),
                      25, (1280, 720))
detector = LaneDetector(camera, perspective, color_mask)
i = 0

while cap.isOpened():
    ret, frame = cap.read()

    if not ret:
        break
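    # Per-frame processing is not shown in this snippet; `detector`, `out`
    # and `i` would be used here, e.g. annotating each frame and writing it
    # to `out`.

cap.release()
out.release()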
Example n. 27
    parser = argparse.ArgumentParser(description='Detect cars on video')
    parser.add_argument('video_file_path')

    args = parser.parse_args()

    original_video = VideoFileClip(args.video_file_path)

    svc = joblib.load('svc_model.pkl')
    scaler = joblib.load('scaler.pkl')

    with open('./undistorter.pkl', 'rb') as f:
        undistorter = pickle.load(f)

    vd = HistoryKeepingVehicleDetector(svc, scaler)
    ld = LaneDetector(undistorter)

    def process_frame(frame):
        final_boxes, components, heatmap, all_boxes = vd.run(frame)
        frame_with_lane_filled, frame_with_lines_n_windows_2d, thresholded_frame, radius_of_curvature = ld.run(
            frame)

        fc = FrameComposer(final_boxes)
        fc.add_mask_over_base(frame_with_lane_filled)
        fc.add_upper_bar(
            (thresholded_frame, frame_with_lines_n_windows_2d, all_boxes))
        fc.add_text('Radius of curvature: {}'.format(radius_of_curvature))

        return fc.get_frame()

    output_video = original_video.fl_image(lambda frame: process_frame(frame))
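    # The snippet ends before the result is written out; a plausible final
    # step (the output path is an assumption):
    output_video.write_videofile('output_video.mp4', audio=False)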
Example n. 28
class VideoProcessor:
    def __init__(self,
                 model_file,
                 calibration_file,
                 min_confidence,
                 heat_threshold,
                 smooth_frames,
                 detect_lanes=False,
                 debug=False):
        self.vehicle_detector = VehicleDetector(model_file, min_confidence,
                                                heat_threshold, smooth_frames)
        self.lane_detector = LaneDetector(smooth_frames=5)
        self.image_processor = ImageProcessor(calibration_file)
        self.detect_lanes = detect_lanes
        self.debug = debug
        self.frame_count = 0
        self.processed_frames = None

    def process_video(self,
                      video_file,
                      file_out,
                      t_start=None,
                      t_end=None,
                      process_pool=None):

        input_clip = VideoFileClip(video_file)

        if t_start is not None:
            input_clip = input_clip.subclip(t_start=t_start, t_end=t_end)

        if self.debug:
            self.processed_frames = []

            stage_idx = 0
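            # Two-pass trick: the first write_videofile call below runs the
            # full pipeline and caches every debug stage per frame; each
            # subsequent call replays the cached frames for one stage.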

            output_clip = input_clip.fl_image(
                lambda frame: self.process_frame_stage(frame, stage_idx,
                                                       process_pool))
            output_clip.write_videofile(file_out, audio=False)

            if len(self.processed_frames) > 0:
                out_file_path = os.path.split(file_out)
                out_file_name = out_file_path[1].split('.')
                for _ in range(len(self.processed_frames[0]) - 1):
                    self.frame_count = 0
                    stage_idx += 1
                    stage_file = '{}.{}'.format(
                        os.path.join(out_file_path[0], out_file_name[0]) +
                        '_' + str(stage_idx), out_file_name[1])
                    output_clip.write_videofile(stage_file, audio=False)
        else:
            output_clip = input_clip.fl_image(
                lambda frame: self.process_frame(frame, process_pool))
            output_clip.write_videofile(file_out, audio=False)

    def process_frame(self, frame, process_pool):

        img = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)

        if self.detect_lanes:
            # Uses the undistorted image
            img, _, warped_img = self.image_processor.process_image(img)

        bboxes, heatmap, windows = self.vehicle_detector.detect_vehicles(
            img, process_pool=process_pool)

        frame_out = np.copy(img) if self.debug else img
        # Labelled image
        frame_out = draw_bboxes(frame_out,
                                bboxes, (250, 150, 55),
                                1,
                                fill=True)

        frame_out_text = 'Frame Smoothing: {}, Min Confidence: {}, Threshold: {}'.format(
            self.vehicle_detector.smooth_frames,
            self.vehicle_detector.min_confidence,
            self.vehicle_detector.heat_threshold)

        self.write_text(frame_out, frame_out_text)
        self.write_text(frame_out,
                        'Detected Cars: {}'.format(len(bboxes)),
                        pos=(30, frame_out.shape[0] - 30),
                        font_color=(0, 250, 150))

        if self.detect_lanes:
            _, polyfit, curvature, deviation, fail_code = self.lane_detector.detect_lanes(
                warped_img)

            fill_color = (0, 255, 0) if fail_code == 0 else (0, 255, 255)

            lane_img = self.lane_detector.draw_lanes(frame_out,
                                                     polyfit,
                                                     fill_color=fill_color)
            lane_img = self.image_processor.unwarp_image(lane_img)

            frame_out = cv2.addWeighted(frame_out, 1.0, lane_img, 1.0, 0)

            curvature_text = 'Left Curvature: {:.1f}, Right Curvature: {:.1f}'.format(
                curvature[0], curvature[1])
            offset_text = 'Center Offset: {:.2f} m'.format(deviation)

            self.write_text(frame_out, curvature_text, pos=(30, 60))
            self.write_text(frame_out, offset_text, pos=(30, 90))

        frame_out = cv2.cvtColor(frame_out, cv2.COLOR_BGR2RGB)

        if self.debug:

            result = []

            self.write_frame_count(frame_out)
            result.append(frame_out)

            # Unthresholded heatmap image
            heatmap_o = self.vehicle_detector._heatmap(img, windows, 0)
            heatmap_o = self.normalize_heatmap(heatmap_o)
            heatmap_o = np.dstack((heatmap_o, np.zeros_like(heatmap_o),
                                   np.zeros_like(heatmap_o)))
            self.write_frame_count(heatmap_o)

            result.append(heatmap_o)

            # Heatmap image
            heatmap = self.normalize_heatmap(heatmap)
            heatmap = np.dstack(
                (np.zeros_like(heatmap), np.zeros_like(heatmap), heatmap))
            self.write_frame_count(heatmap)

            result.append(cv2.cvtColor(heatmap, cv2.COLOR_BGR2RGB))

            heatmap_img = cv2.addWeighted(img, 1, heatmap, 0.8, 0)

            result.append(cv2.cvtColor(heatmap_img, cv2.COLOR_BGR2RGB))

            all_windows = []

            # Windows search image
            for scale, cells_per_step, layer_windows in windows:

                all_windows.extend(layer_windows)

                layer_img = draw_windows(
                    np.copy(img),
                    layer_windows,
                    min_confidence=self.vehicle_detector.min_confidence)

                w_tot = len(layer_windows)
                w_pos = sum(
                    1 for bbox in layer_windows
                    if bbox[1] >= self.vehicle_detector.min_confidence)
                w_rej = sum(
                    1 for bbox in layer_windows
                    if 0 < bbox[1] < self.vehicle_detector.min_confidence)

                self.write_text(
                    layer_img,
                    'Scale: {}, Cells per Steps: {}, Min Confidence: {}'.
                    format(scale, cells_per_step,
                           self.vehicle_detector.min_confidence))
                layer_text = 'Windows (Total/Positive/Rejected): {}/{}/{}'.format(
                    w_tot, w_pos, w_rej)
                self.write_text(layer_img,
                                layer_text,
                                pos=(30, layer_img.shape[0] - 30))
                self.write_frame_count(layer_img)

                result.append(cv2.cvtColor(layer_img, cv2.COLOR_BGR2RGB))

            # Combined scales image
            box_img = draw_windows(
                np.copy(img),
                all_windows,
                min_confidence=self.vehicle_detector.min_confidence)

            w_tot = len(all_windows)
            w_pos = sum(1 for bbox in all_windows
                        if bbox[1] >= self.vehicle_detector.min_confidence)
            w_rej = sum(1 for bbox in all_windows
                        if 0 < bbox[1] < self.vehicle_detector.min_confidence)

            self.write_text(
                box_img, 'Min Confidence: {}'.format(
                    self.vehicle_detector.min_confidence))
            box_text = 'Windows (Total/Positive/Rejected): {}/{}/{}'.format(
                w_tot, w_pos, w_rej)
            self.write_text(box_img,
                            box_text,
                            pos=(30, box_img.shape[0] - 30))
            self.write_frame_count(box_img)

            result.append(cv2.cvtColor(box_img, cv2.COLOR_BGR2RGB))

        else:
            result = frame_out

        return result

    def process_frame_stage(self, frame, stage_idx, process_pool):
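        # Stage 0 runs the full pipeline and caches all debug stages for the
        # frame; later stages are served from the cache without recomputing.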

        if stage_idx == 0:
            result = self.process_frame(frame, process_pool)
            self.processed_frames.append(result)

        frame_out = self.processed_frames[self.frame_count][stage_idx]

        self.frame_count += 1

        return frame_out

    def normalize_heatmap(self, heatmap, a=0, b=255):

        min_v = np.min(heatmap)
        max_v = np.max(heatmap)

        # Guard against a flat heatmap (max == min) to avoid division by zero
        if max_v == min_v:
            return np.full_like(heatmap, a, dtype=np.uint8)

        heatmap = a + ((heatmap - min_v) * (b - a)) / (max_v - min_v)

        return heatmap.astype(np.uint8)

    def write_frame_count(self, img):
        self.write_text(img,
                        '{}'.format(self.frame_count),
                        pos=(img.shape[1] - 75, 30))

    def write_text(self,
                   img,
                   text,
                   pos=(30, 30),
                   font=cv2.FONT_HERSHEY_DUPLEX,
                   font_color=(255, 255, 255),
                   font_size=0.8):
        cv2.putText(img, text, pos, font, font_size, font_color, 1,
                    cv2.LINE_AA)