def simulate(env):
    filename = os.path.join(os.path.dirname(os.path.abspath(__file__)), "data", "data.csv")
    log_file = open(filename, 'w', encoding='utf-8', newline='')
    log_writer = csv.writer(log_file)
    log_writer.writerow(["t", "angle", "steer"])
    detector = LaneDetector()
    for episode in range(NUM_EPISODES):
        obv = env.reset()  # TODO: reset delay
        obv = cv2.cvtColor(obv, cv2.COLOR_RGB2BGR)
        steer = 0
        base_time = time.time()

        for t in range(MAX_TIME_STEPS):
            is_okay, angle_error = detector.detect_lane(obv)
            angle_error = -angle_error
            if not detector.left:
                print(str(time.time() - base_time) + " no left")
            elif not detector.right:
                print(str(time.time() - base_time) + " no right")

            steer = steer_controller(angle_error)
            reduction = speed_controller(steer)
            speed = base_speed - np.abs(reduction)
            action = (steer, speed)
            obv, reward, done, _ = env.step(action)
            
            log_writer.writerow([time.time() - base_time, -angle_error, steer])

            obv = cv2.cvtColor(obv, cv2.COLOR_RGB2BGR)
            cv2.imshow('input', detector.original_image_array)
            if done:
                break
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
    cv2.destroyAllWindows()
    log_file.close()
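
steer_controller, speed_controller and base_speed are used above but not defined in this snippet; a minimal proportional sketch of what they might look like (the gains and limits are assumptions):

import numpy as np

STEER_GAIN = 0.5  # proportional gain on the lane-angle error (assumed)
SPEED_GAIN = 0.3  # how strongly steering reduces speed (assumed)
base_speed = 0.6  # nominal throttle (assumed)

def steer_controller(angle_error):
    # Steer proportionally to the angle error, clipped to the actuator range.
    return float(np.clip(STEER_GAIN * angle_error, -1.0, 1.0))

def speed_controller(steer):
    # Slow down in proportion to how hard we are steering.
    return SPEED_GAIN * steer
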
Example #2
    def __init__(self, calibration_data_file, smooth_frames=5, debug=False, failed_frames_dir='failed_frames'):
        self.img_processor = ImageProcessor(calibration_data_file)
        self.lane_tracker = LaneDetector(smooth_frames=smooth_frames)
        self.frame_count = 0
        self.fail_count = 0
        self.debug = debug
        self.failed_frames_dir = failed_frames_dir
        self.last_frame = None
        self.processed_frames = None
Example #3
File: camera.py Project: ypicard/raspcar
    def __init__(self, resolution=(640, 480), framerate=30, nb_frames=5):
        super(Camera, self).__init__()
        self._lock = threading.Lock()
        self._camera = PiCamera(framerate=framerate, resolution=resolution)
        self._stream = PiRGBArray(self._camera)
        self._lane_detector = LaneDetector()
        self._frames = deque(maxlen=nb_frames)
        self._lanes = deque(maxlen=nb_frames)
        self.start()
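
    def run(self):
        # Not shown in the original example; self.start() implies Camera is a
        # threading.Thread, so a hypothetical capture loop might look like:
        for raw in self._camera.capture_continuous(self._stream, format='bgr',
                                                   use_video_port=True):
            frame = raw.array
            lanes = self._lane_detector.detect(frame)  # detect() is an assumption
            with self._lock:
                self._frames.append(frame)
                self._lanes.append(lanes)
            self._stream.truncate(0)  # reset the buffer for the next frame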
Example #4
    def __init__(self, log_q, queue, event_handler, sim_event_handler):
        super().__init__()
        self.log = create_logger(log_q, controller=True)
        self.queue = queue
        self.eventH = event_handler
        self.eventH_sim = sim_event_handler
        self.log.info("Initialized the controller")

        self.controller = LaneDetector(self.log)
Example #5
    def __init__(self):
        rospy.init_node("lane_detector")
        self.detector = LaneDetector()
        self.bridge = CvBridge()
        
        # subscribe to images
        rospy.Subscriber("left/image_rect_color", Image, self.on_left_image)

        self.path_pub = rospy.Publisher('/navigation/waypoints', Path, queue_size=1, latch=True)

        self.debug_lane_pub = rospy.Publisher("debug/lane_image", Image, queue_size=1)

        self.left_image = None
        self.publish()
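
    def on_left_image(self, msg):
        # Not shown in the original example; a minimal sketch of the callback,
        # assuming a straightforward CvBridge conversion.
        self.left_image = self.bridge.imgmsg_to_cv2(msg, desired_encoding='bgr8')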
Example #6
    def __init__(self,
                 model_file,
                 calibration_file,
                 min_confidence,
                 heat_threshold,
                 smooth_frames,
                 detect_lanes=False,
                 debug=False):
        self.vehicle_detector = VehicleDetector(model_file, min_confidence,
                                                heat_threshold, smooth_frames)
        self.lane_detector = LaneDetector(smooth_frames=5)
        self.image_processor = ImageProcessor(calibration_file)
        self.detect_lanes = detect_lanes
        self.debug = debug
        self.frame_count = 0
        self.processed_frames = None
Example #7
def show_test_images_pipeline(camera, test_images_dir):
    """
    Shows detection pipeline results step by step.

    :param Camera camera         : Camera object.
    :param str    test_images_dir: Path to directory containing test images.
    """
    paths_to_images = glob.glob(test_images_dir + "*.jpg")
    for image_path in paths_to_images:
        image = cv2.imread(image_path)
        lane_detector = LaneDetector(camera)
        undistorted_image = lane_detector.correct_distortions(image)
        thresholded_image = lane_detector.threshold(undistorted_image)
        birdseye_image = lane_detector.transform_perspective(thresholded_image)
        lane_detector.update_lane_lines_positions(birdseye_image)
        lines_detected = lane_detector.draw_lanes_on_image(undistorted_image, birdseye_image)
        plt.imshow(cv2.cvtColor(lines_detected, cv2.COLOR_BGR2RGB))
        plt.show()
Example #8
    def __init__(self, lane_width, segment_count, y_offset_first_segment):
        '''
            Constructor for instantiating a road model.

            Parameters
            ----------
            lane_width : Integer
                Road width
            segment_count : Integer
                Number of segments to compute
            y_offset_first_segment : Integer
                y value of the lowest segment line

        '''
        self.lane_width = lane_width
        self.segment_count = segment_count
        self.y_offset_first_segment = y_offset_first_segment
        self.segments = self._instantiate_segments()
        self.lane_detector = LaneDetector(self.lane_width)
Example #9
def pipe_test(test_img_path):
    detector = LaneDetector()
    img = mpimg.imread(test_img_path)
    result_img = detector.pipe_line(img)

    # combined = cv2.addWeighted(img, 1, result_img, 0.4, 0)

    f, (axes1, axes2) = plt.subplots(1, 2, figsize=(20, 10))
    f.suptitle(test_img_path.split("/")[-1], fontsize=30)
    axes1.imshow(img)
    axes1.set_xlim(0, img.shape[1])
    axes1.set_ylim(img.shape[0], 0)
    axes1.set_title('original frame', fontsize=15)

    axes2.imshow(result_img, cmap="gray")
    axes2.set_title('result frame with detected road', fontsize=15)
    axes2.set_xlim(0, img.shape[1])
    axes2.set_ylim(img.shape[0], 0)
    plt.show()
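
A hypothetical invocation (the image path is an assumption):

pipe_test('test_images/straight_lines1.jpg')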
Example #10
    parser = argparse.ArgumentParser(description='Detect cars on video')
    parser.add_argument('video_file_path')

    args = parser.parse_args()

    original_video = VideoFileClip(args.video_file_path)

    svc = joblib.load('svc_model.pkl')
    scaler = joblib.load('scaler.pkl')

    with open('./undistorter.pkl', 'rb') as f:
        undistorter = pickle.load(f)

    vd = HistoryKeepingVehicleDetector(svc, scaler)
    ld = LaneDetector(undistorter)

    def process_frame(frame):
        final_boxes, components, heatmap, all_boxes = vd.run(frame)
        frame_with_lane_filled, frame_with_lines_n_windows_2d, thresholded_frame, radius_of_curvature = ld.run(
            frame)

        fc = FrameComposer(final_boxes)
        fc.add_mask_over_base(frame_with_lane_filled)
        fc.add_upper_bar(
            (thresholded_frame, frame_with_lines_n_windows_2d, all_boxes))
        fc.add_text('Radius of curvature: {}'.format(radius_of_curvature))

        return fc.get_frame()

    output_video = original_video.fl_image(process_frame)
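    # Assumed ending: the snippet stops before the video is written; it
    # presumably concludes with something like (the output name is a guess):
    output_video.write_videofile('processed_' + args.video_file_path, audio=False)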
Example #11
def main():
    args = build_argparser().parse_args()

    log_level = log.DEBUG if args.debug else log.INFO
    log.basicConfig(format="[ %(levelname)s ] %(message)s",
                    level=log_level,
                    stream=sys.stdout)
    log.info("Manual driving mode is ON - use w, a ,d ,s and x to stop"
             ) if args.manual_driving else None

    tl_model_xml = os.getcwd() + "/src/data/traffic-light/traffic-light-0001.xml"
    tl_model_bin = os.path.splitext(tl_model_xml)[0] + ".bin"

    device = "MYRIAD"
    fps = ""

    # start camera module
    cam = VideoCamera(args.camera_type)

    # Open video capture for recognizing the road
    try:
        frame = cam.frame()  # both "usb" and "camerapi" cameras expose frame()
    except Exception:
        raise Exception("Can't get a frame from camera!!")

    # Road-segmentation verification starts here
    if args.verify_road:
        verify_street(frame=frame)
    """
    Main flow starts here - detecting road and traffic light
    """
    log.info("Setting device: {}".format(device))
    plugin = IEPlugin(device=device)

    # Read IR
    tl_net = IENetwork(model=tl_model_xml, weights=tl_model_bin)
    log.info("Traffic-Light network has been loaded:\n\t{}\n\t{}".format(
        tl_model_xml, tl_model_bin))

    tl_input_blob = next(iter(tl_net.inputs))
    tl_out_blob = next(iter(tl_net.outputs))

    # Loading model to the plugin
    log.info("Loading traffic-light model to the plugin")
    tl_exec_net = plugin.load(network=tl_net)

    def release_all():
        """
        Reset camera video, car and close all opened windows.
        This could cause when stop the program or when something went wrong.
        """
        car.reset()
        cv2.destroyAllWindows()

    # Start running car and video
    try:
        frame_count = 0
        stop_on_u_turn_count = 0

        # initialize car
        car = DriveBrickPi3()
        log.info("Car name is {}".format(car.name))

        # initialize road detection - start with first frame
        detector = LaneDetector(frame)

        # Start capturing...
        log.info("Starting Game...")
        while True:
            t1 = time()

            orig_frame = cam.frame()

            if args.mirror:
                orig_frame = cv2.flip(orig_frame, 1)

            # Set updated frame
            detector.image = orig_frame

            # Set configurations for traffic light detection
            prepimg = cv2.resize(orig_frame, (300, 300))
            prepimg = prepimg[np.newaxis, :, :, :]
            prepimg = prepimg.transpose((0, 3, 1, 2))
            tl_outputs = tl_exec_net.infer(inputs={tl_input_blob: prepimg})
            res = tl_exec_net.requests[0].outputs[tl_out_blob]
            detecting_traffic_light = False

            # Search for all detected objects (for traffic light)
            for obj in res[0][0]:
                # Draw only objects when probability more than specified threshold
                confidence = obj[2] * 100
                if 50 < confidence < 100:
                    detecting_traffic_light = True
                    best_proposal = obj

                    xmin = int(best_proposal[3] * res_width)
                    ymin = int(best_proposal[4] * res_height)
                    xmax = int(best_proposal[5] * res_width)
                    ymax = int(best_proposal[6] * res_height)
                    class_id = int(best_proposal[1])

                    # Only handle class ids within the known class map
                    if class_id <= len(classes_traffic_light_map):
                        # Draw box and label\class_id
                        color = (255, 0, 0) if class_id == 1 else (50, 205, 50)
                        cv2.rectangle(orig_frame, (xmin, ymin), (xmax, ymax),
                                      color, 2)

                        det_label = classes_traffic_light_map[
                            class_id -
                            1] if classes_traffic_light_map else str(class_id)
                        label_and_prob = det_label + ", " + str(
                            confidence) + "%"
                        cv2.putText(orig_frame, label_and_prob,
                                    (xmin, ymin - 7), cv2.FONT_HERSHEY_COMPLEX,
                                    0.6, color, 1)

                        if str(det_label) == 'go':
                            car.status = CAR_DIRECTION.FWD
                        else:  # 'stop' or anything unrecognized halts the car
                            car.status = CAR_DIRECTION.STOP

            # Image processing - look for the mid line of the road.
            # Note that the following function looks for the yellow line in the middle.
            mid_lines = detector.get_lane()

            if not args.manual_driving:
                # Car status is forward when no traffic light was seen (or the
                # light is green) and the road's yellow middle line was recognized.
                if mid_lines is not None and car.status is CAR_DIRECTION.FWD:
                    x1, y1, x2, y2 = mid_lines[0][0]
                    stop_on_u_turn_count = 0

                    cv2.line(orig_frame, (x1, y1), (x2, y2), (0, 180, 0), 5)

                    slope = (y1 - y2) / (x1 - x2) if x1 - x2 != 0 else 50
                    log.debug("slope: {}".format(str(slope)))
                    log.debug("x1 {}, x2 {}, y1 {}, y2 {}".format(
                        str(x1), str(x2), str(y1), str(y2)))
                    if slope < 0:
                        # go left
                        log.debug("detecting left -> moving left")
                        car.move_car(CAR_DIRECTION.LEFT)
                        sleep(0.1)
                        car.move_car(CAR_DIRECTION.FWD)
                        sleep(0.1)

                    if 0 <= slope <= 7:
                        # go right
                        log.debug("detecting right -> moving right")
                        car.move_car(CAR_DIRECTION.RIGHT)
                        sleep(0.1)
                        car.move_car(CAR_DIRECTION.FWD)
                        sleep(0.1)

                    if slope > 7:  # near-vertical lines were mapped to slope 50 above
                        log.debug("Moving forward")
                        # Shift x2 by 100 px because the camera lens is not centered;
                        # remove this offset if your camera is mounted in the middle.
                        x2 += 100

                        # keeping car on the middle (30 = gap of the middle line)
                        if x2 > (res_width / 2) + 30:
                            log.debug("not in the middle -> move right")
                            car.move_car(CAR_DIRECTION.RIGHT)
                            sleep(0.1)

                        if x2 <= (res_width / 2) - 30:
                            log.debug("not in the middle -> move left")
                            car.move_car(CAR_DIRECTION.LEFT)
                            sleep(0.1)

                        car.move_car(CAR_DIRECTION.FWD)
                else:
                    # if reaching here, there are 2 options:
                    # 1- car stopped on the red light (traffic light)
                    # 2- car stopped because it reached the end of road -> do U-Turn
                    car.move_car(CAR_DIRECTION.STOP)

                    # wait 20 frames to make sure the car reached the end of the road
                    stop_on_u_turn_count += 1

                    if stop_on_u_turn_count == 20 and detecting_traffic_light is False and car.status == CAR_DIRECTION.FWD:
                        log.debug("Detecting U-Turn")
                        car.move_car(CAR_DIRECTION.FWD)
                        sleep(2.5)
                        car.move_car(CAR_DIRECTION.RIGHT)
                        sleep(6)
                        car.move_car(CAR_DIRECTION.REVERSE)
                        sleep(1)
                        car.move_car(CAR_DIRECTION.RIGHT)
                        sleep(1)
                        stop_on_u_turn_count = 0

            if args.manual_driving:
                inp = str(input())  # Take input from the terminal

                if inp == 'w':
                    car.move_car(CAR_DIRECTION.FWD)
                elif inp == 'a':
                    car.move_car(CAR_DIRECTION.LEFT)
                elif inp == 'd':
                    car.move_car(CAR_DIRECTION.RIGHT)
                elif inp == 's':
                    car.move_car(CAR_DIRECTION.REVERSE)
                elif inp == 'x':
                    car.move_car(CAR_DIRECTION.STOP)

            # count the frames
            frame_count += 1

            if args.show_fps:
                elapsed_time = time() - t1
                fps = "(Playback) {:.1f} FPS".format(1 / elapsed_time)
                cv2.putText(orig_frame, fps, (0, 30), cv2.FONT_HERSHEY_SIMPLEX,
                            0.5, (0, 200, 0), 1, cv2.LINE_AA)

            if args.show_frame:
                cv2.imshow("Driving Pi", orig_frame)

            key = cv2.waitKey(1) & 0xFF
            if key & 0xFF == ord('q') or key == 27:
                break  # ESC to quit

            cam.clean_video()

        # Release everything on finish
        release_all()

    except KeyboardInterrupt:
        release_all()
Example #12
import argparse
import cv2
from moviepy.editor import VideoFileClip
from pathlib import Path

import util
from lane_detector import LaneDetector

mtx, dist = util.read_mtx_dist()
detector = LaneDetector()

def process(img, save=False):
    undistorted = cv2.undistort(img, mtx, dist, None, mtx)
    if save:
        util.save_rgb_img('writeup_images/undistorted.jpg', undistorted)
    return detector.process(undistorted, save=save)  # run detection on the undistorted frame

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--input', required=True)
    parser.add_argument('--output', required=True)
    # Save intermediate images. This mode is only available when processing a single image.
    parser.add_argument('--save', action='store_true') 
    args = parser.parse_args()

    ext = Path(args.input).suffix

    if ext in ['.mp4']:
        clip = VideoFileClip(args.input)
        project_clip = clip.fl_image(process)
        project_clip.write_videofile(args.output, audio=False)
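    # Assumed image branch: only .mp4 is handled above even though --save
    # implies single-image processing; a sketch (extensions and color
    # conversion are guesses):
    elif ext in ['.jpg', '.png']:
        # moviepy hands process() RGB frames, so convert OpenCV's BGR first
        img = cv2.cvtColor(cv2.imread(args.input), cv2.COLOR_BGR2RGB)
        result = process(img, save=args.save)
        cv2.imwrite(args.output, cv2.cvtColor(result, cv2.COLOR_RGB2BGR))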
Example #13
for result in results:
    file_index = result["filename"][-5]
    cv2.imwrite("OUTPUT/calibration/1-corners%s.jpg" % file_index,
                result["img"])
    cv2.imwrite("OUTPUT/calibration/2-corners-undistorted-%s.jpg" % file_index,
                camera.undistort(result["img"]))

# HSV color mask
color_mask = (
    ((20, 50, 75), (110, 255, 255)),  # yellow lines
    ((0, 0, 220), (255, 255, 255))  # white lines
)

# Process images
for filename in glob.glob("raw_images/*.jpg"):
    detector = LaneDetector(camera, perspective, color_mask)
    detector.run(cv2.imread(filename), "OUTPUT/test%s" % filename[-5])

# Process video
cap = cv2.VideoCapture("road.mp4")
out = cv2.VideoWriter("road_processed.avi", cv2.VideoWriter_fourcc(*'XVID'),
                      25, (1280, 720))
detector = LaneDetector(camera, perspective, color_mask)
i = 0

while cap.isOpened():
    ret, frame = cap.read()

    if not ret:
        break
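
# Assumed cleanup once the loop exits; the snippet is truncated before the
# per-frame processing, but the capture and writer must be released regardless.
cap.release()
out.release()
cv2.destroyAllWindows()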
Example #14
def main():
    log.basicConfig(format="[ %(levelname)s ] %(message)s",
                    level=log.INFO,
                    stream=sys.stdout)
    args = build_argparser().parse_args()
    log.info("Manual driving mode is ON - use w, a ,d ,s and x to stop"
             ) if args.manual_driving else None

    frames_path = os.path.join(os.path.dirname(__file__), 'src', 'frames')
    road_model_xml = os.getcwd() + "/src/data/road-segmentation/road-segmentation-adas-0001.xml"
    road_model_bin = os.path.splitext(road_model_xml)[0] + ".bin"

    tl_model_xml = os.getcwd() + "/src/data/traffic-light/traffic-light-0001.xml"
    tl_model_bin = os.path.splitext(tl_model_xml)[0] + ".bin"

    device = "MYRIAD"
    fps = ""
    frame_num = 0
    camera_id = 0

    cap = cv2.VideoCapture(camera_id)
    log.info("Loading Camera id {}".format(camera_id))

    cap.set(cv2.CAP_PROP_FPS, 30)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, CAMERA_WIDTH)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, CAMERA_HEIGHT)
    log.info("Camera size {}x{}".format(CAMERA_WIDTH, CAMERA_HEIGHT))

    log.info("Setting device: {}".format(device))
    plugin = IEPlugin(device=device)

    # Read IR
    tl_net = IENetwork(model=tl_model_xml, weights=tl_model_bin)
    log.info("Traffic-Light network has been loaded:\n\t{}\n\t{}".format(
        tl_model_xml, tl_model_bin))

    # Open video capture for recognizing the road
    assert cap.isOpened(), "Couldn't open Camera"
    success, frame = cap.read()
    assert success, "Can't snap image"
    """
    Road-segmentation verification starts here
    """
    if args.verify_road:
        road_net = IENetwork(model=road_model_xml, weights=road_model_bin)
        log.info(
            "Road-Segmentation network has been loaded:\n\t{}\n\t{}".format(
                road_model_xml, road_model_bin))
        assert len(road_net.inputs.keys()
                   ) == 1, "Sample supports only single input topologies"
        assert len(road_net.outputs
                   ) == 1, "Sample supports only single output topologies"
        log.info("Preparing input blobs")
        input_blob = next(iter(road_net.inputs))
        out_blob = next(iter(road_net.outputs))
        road_net.batch_size = 1
        log.info("Batch size is {}".format(road_net.batch_size))

        # Read and pre-process input images
        n, c, h, w = road_net.inputs[input_blob].shape
        images = np.ndarray(shape=(n, c, h, w))

        if frame.shape[:-1] != (h, w):
            log.warning("Image {} is resized from {} to {}".format(
                "CAM", frame.shape[:-1], (h, w)))
            image = cv2.resize(frame, (w, h))
        else:
            image = frame

        image = image.transpose(
            (2, 0, 1))  # Change data layout from HWC to CHW
        images[frame_num] = image
        log.info("Snapping frame: {}".format(frame_num))
        frame_num += 1

        # Loading model to the plugin
        log.info("Loading road-segmentation model to the plugin")
        road_exec_net = plugin.load(network=road_net)

        # Start sync inference
        log.info("Starting inference ({} iterations)".format(1))
        infer_time = []
        for i in range(1):
            t0 = time()
            res = road_exec_net.infer(inputs={input_blob: images})
            infer_time.append((time() - t0) * 1000)
        log.info("Average running time of one iteration: {} ms".format(
            np.average(np.asarray(infer_time))))

        # Processing output blob
        log.info("Processing output blob")
        res = res[out_blob]
        _, _, out_h, out_w = res.shape
        t0 = time()
        for batch, data in enumerate(res):
            classes_map = np.zeros(shape=(out_h, out_w, 3), dtype=np.uint8)  # uint8 so cv2.inRange/imwrite accept it
            for i in range(out_h):
                for j in range(out_w):
                    if len(data[:, i, j]) == 1:
                        pixel_class = int(data[:, i, j])
                    else:
                        pixel_class = np.argmax(data[:, i, j])
                    classes_map[i,
                                j, :] = classes_color_map[min(pixel_class, 20)]
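            # The nested per-pixel loops above are slow in pure Python; an
            # equivalent vectorized form (assuming data has shape (C, H, W) and
            # classes_color_map can be turned into an ndarray) would be:
            #   color_lut = np.asarray(classes_color_map)
            #   pixel_classes = data.argmax(axis=0) if data.shape[0] > 1 else data[0].astype(int)
            #   classes_map = color_lut[np.minimum(pixel_classes, 20)]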

        # Check red color (road) percentage - for verifying road
        RED_MIN = np.array([0, 0, 128])
        RED_MAX = np.array([250, 250, 255])
        classes_map = select_region(classes_map)
        out_img = os.path.join(frames_path, "processed_image.bmp")
        cv2.imwrite(out_img, classes_map)
        log.info("Result image was saved to {}".format(out_img))

        size = classes_map.size
        dstr = cv2.inRange(classes_map, RED_MIN, RED_MAX)
        no_red = cv2.countNonZero(dstr)
        frac_red = np.divide(float(no_red), int(size))
        percent_red = int(np.multiply(frac_red, 100)) + 50  # 50 = black region

        log.info("Road-segmentation processing time is: {} sec.".format(
            (time() - t0) * 1000))
        log.info("Road detected {}% of the frame: ".format(str(percent_red)))

        if percent_red < 50:
            raise Exception(
                "Can't detect any road!! please put the car on a road")
    """
    Main flow starts here - detecting road and traffic light
    """
    tl_input_blob = next(iter(tl_net.inputs))
    tl_out_blob = next(iter(tl_net.outputs))

    # Loading model to the plugin
    log.info("Loading traffic-light model to the plugin")
    tl_exec_net = plugin.load(network=tl_net)

    def releaseAll():
        """
        Reset camera video, car and close all opened windows.
        This could cause when stop the program or when something went wrong.
        """
        cap.release()
        car.reset()
        cv2.destroyAllWindows()

    # Start running car and video
    try:
        initial_w = cap.get(3)
        initial_h = cap.get(4)
        frame_count = 0
        stop_on_u_turn_count = 0

        # initialize car
        car = DriveBrickPi3()
        log.info("Car name is {}".format(car.name))
        # initialize road detection - start with first frame
        detector = LaneDetector(frame)

        # Start capturing...
        while cap.isOpened():
            t1 = time()

            success, orig_frame = cap.read()
            assert success, "Can't snap image"

            if args.mirror:
                orig_frame = cv2.flip(orig_frame, 1)
                log.info("Using camera mirror")

            # Update image
            detector.image = orig_frame

            # Set configurations for traffic light detection
            prepimg = cv2.resize(orig_frame, (300, 300))
            prepimg = prepimg[np.newaxis, :, :, :]
            prepimg = prepimg.transpose((0, 3, 1, 2))
            tl_outputs = tl_exec_net.infer(inputs={tl_input_blob: prepimg})
            res = tl_exec_net.requests[0].outputs[tl_out_blob]

            detecting_traffic_light = False

            # Search for all detected objects (for traffic light)
            for obj in res[0][0]:
                # Draw only objects when probability more than specified threshold
                confidence = obj[2] * 100
                if 85 < confidence < 100:
                    detecting_traffic_light = True
                    best_proposal = obj

                    xmin = int(best_proposal[3] * initial_w)
                    ymin = int(best_proposal[4] * initial_h)
                    xmax = int(best_proposal[5] * initial_w)
                    ymax = int(best_proposal[6] * initial_h)
                    class_id = int(best_proposal[1])

                    # Only handle class ids within the known class map
                    if class_id <= 2:
                        # Draw box and label\class_id
                        color = (255, 0, 0) if class_id == 1 else (50, 205, 50)
                        cv2.rectangle(orig_frame, (xmin, ymin), (xmax, ymax),
                                      color, 2)

                        det_label = classes_traffic_light_map[
                            class_id -
                            1] if classes_traffic_light_map else str(class_id)
                        label_and_prob = det_label + ", " + str(
                            confidence) + "%"
                        cv2.putText(orig_frame, label_and_prob,
                                    (xmin, ymin - 7), cv2.FONT_HERSHEY_COMPLEX,
                                    0.6, color, 1)
                        if str(det_label) == 'go':
                            car.status = CAR_DIRECTION.FWD
                        else:  # 'stop' or anything unrecognized halts the car
                            car.status = CAR_DIRECTION.STOP

            # Image processing - look for the mid line of the road.
            # Note that the following function looks for the yellow line in the middle.
            mid_lines = detector.process()

            if not args.manual_driving:
                # Car status is forward when no traffic light was seen (or the
                # light is green) and the road's yellow middle line was recognized.
                if mid_lines is not None and car.status is CAR_DIRECTION.FWD:
                    x1, y1, x2, y2 = mid_lines[0][0]
                    stop_on_u_turn_count = 0

                    cv2.line(orig_frame, (x1, y1), (x2, y2), (0, 180, 0), 5)

                    slope = (y1 - y2) / (x1 - x2) if x1 - x2 != 0 else 50
                    log.debug("slope: {}".format(str(slope)))
                    log.debug("x1 {}, x2 {}, y1 {}, y2 {}".format(
                        str(x1), str(x2), str(y1), str(y2)))
                    if slope < 0:
                        # go left
                        log.debug("detecting left -> moving left")
                        car.moveCar(CAR_DIRECTION.LEFT)
                        sleep(0.1)
                        car.moveCar(CAR_DIRECTION.FWD)
                        sleep(0.1)

                    if 0 <= slope <= 7:
                        # go right
                        log.debug("detecting right -> moving right")
                        car.moveCar(CAR_DIRECTION.RIGHT)
                        sleep(0.1)
                        car.moveCar(CAR_DIRECTION.FWD)
                        sleep(0.1)

                    if slope > 7:  # near-vertical lines were mapped to slope 50 above
                        # go forward
                        log.debug("Moving forward")

                        # Shift x2 by 100 px because the camera lens is not centered;
                        # remove this offset if your camera is mounted in the middle.
                        x2 += 100

                        # keeping car on the middle (30 = gap of the middle line)
                        if x2 > (CAMERA_WIDTH / 2) + 30:
                            log.debug("not in the middle -> move right")
                            car.moveCar(CAR_DIRECTION.RIGHT)
                            sleep(0.1)

                        if x2 <= (CAMERA_WIDTH / 2) - 30:
                            log.debug("not in the middle -> move left")
                            car.moveCar(CAR_DIRECTION.LEFT)
                            sleep(0.1)

                        car.moveCar(CAR_DIRECTION.FWD)
                else:
                    # if reaching here, there are 2 options:
                    # 1- car stopped on the red light (traffic light)
                    # 2- car stopped because it reached the end of road -> do U-Turn
                    car.moveCar(CAR_DIRECTION.STOP)

                    # wait 20 frames to make sure the car reached the end of the road
                    stop_on_u_turn_count += 1

                    if stop_on_u_turn_count == 20 and detecting_traffic_light is False and car.status == CAR_DIRECTION.FWD:
                        log.debug("Detecting U-Turn")
                        car.moveCar(CAR_DIRECTION.FWD)
                        sleep(2.5)
                        car.moveCar(CAR_DIRECTION.RIGHT)
                        sleep(6)
                        car.moveCar(CAR_DIRECTION.REVERSE)
                        sleep(1)
                        car.moveCar(CAR_DIRECTION.RIGHT)
                        sleep(1)
                        stop_on_u_turn_count = 0

            if args.manual_driving:
                inp = str(input())  # Take input from the terminal

                if inp == 'w':
                    car.moveCar(CAR_DIRECTION.FWD)
                elif inp == 'a':
                    car.moveCar(CAR_DIRECTION.LEFT)
                elif inp == 'd':
                    car.moveCar(CAR_DIRECTION.RIGHT)
                elif inp == 's':
                    car.moveCar(CAR_DIRECTION.REVERSE)
                elif inp == 'x':
                    car.moveCar(CAR_DIRECTION.STOP)

            # count the frames
            frame_count += 1

            if args.show_fps:
                elapsed_time = time() - t1
                fps = "(Playback) {:.1f} FPS".format(1 / elapsed_time)
                cv2.putText(orig_frame, fps, (0, 30), cv2.FONT_HERSHEY_SIMPLEX,
                            0.5, (0, 200, 0), 1, cv2.LINE_AA)

            if args.show_frame:
                cv2.imshow("Driving Pi", orig_frame)

            key = cv2.waitKey(1) & 0xFF
            if key == ord('q') or key == 27:
                break  # 'q' or ESC quits

        # Release everything on finish
        releaseAll()

    except KeyboardInterrupt:
        releaseAll()
"""
Entry point for an application
"""

import gallery

from camera import Camera
from lane_detector import LaneDetector

calibration_images_dir = '../camera_cal/'
test_images_dir = '../test_images/'

project_video = '../project_video.mp4'
project_video_output = '../project_video_output.mp4'

if __name__ == "__main__":
    camera = Camera.calibrate(calibration_images_dir, 9, 6)
    lane_detector = LaneDetector(camera)
    gallery.show_test_images_pipeline(camera, test_images_dir)
    lane_detector.detect_lane_lines_in_video(project_video,
                                             project_video_output)
Example #16
    def __init__(self,
                 classifier_codename,
                 dataset_codename,
                 classifier_threshold,
                 object_detection=True,
                 object_visualization=True,
                 lane_detection=True,
                 lane_visualization=True,
                 diagnostic_mode=True,
                 monitor_id=1,
                 window_top_offset=0,
                 window_left_offset=0,
                 window_width=None,
                 window_height=None,
                 window_scale=1.0):

        # Boolean flag for feature-customization
        self.object_detection = object_detection
        self.object_visualization = object_visualization
        self.diagnostic_mode = diagnostic_mode

        self.lane_detection = lane_detection
        self.lane_visualization = lane_visualization

        # Instance of the MSS-API for capturing screenshots
        self.window_manager = mss.mss()

        # Note:
        #   - monitor_id = 0 | grab all monitors together
        #   - monitor_id = n | grab a given monitor (n) : where n > 0
        self.target_window = self.window_manager.monitors[monitor_id]

        # Update position of the window that will be captured
        if window_left_offset:
            self.target_window['left'] += window_left_offset
            self.target_window['width'] -= window_left_offset
        if window_top_offset:
            self.target_window['top'] += window_top_offset
            self.target_window['height'] -= window_top_offset
        if window_width:
            self.target_window['width'] = window_width
        if window_height:
            self.target_window['height'] = window_height
        if window_scale:
            self.target_window['scale'] = window_scale

        print("Activating DeepEye Advanced Co-pilot Mode")

        self.object_detector = ObjectClassifier(
            classifier_codename=classifier_codename,
            dataset_codename=dataset_codename,
            classifier_threshold=classifier_threshold,
            visualization=object_visualization,
            diagnostic_mode=diagnostic_mode,
            frame_height=self.target_window['height'],
            frame_width=self.target_window['width'])

        self.lane_detector = LaneDetector(visualization=lane_visualization)

        self.threats = {
            "COLLISION": False,
            "PEDESTRIAN": False,
            "STOP_SIGN": False,
            "TRAFFIC_LIGHT": False,
            "VEHICLES": False,
            "BIKES": False,
            "FAR_LEFT": False,
            "FAR_RIGHT": False,
            "RIGHT": False,
            "LEFT": False,
            "CENTER": False,
            "UNKNOWN": True
        }

        self.frame_id = 0

        self.columns = [
            'FRAME_ID', 'PEDESTRIAN', 'VEHICLES', 'BIKES', 'STOP_SIGN',
            'TRAFFIC_LIGHT', 'OFF_LANE', 'COLLISION'
        ]

        self.data_frame = pd.DataFrame(columns=self.columns)
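
The class name is not shown in this fragment; a hypothetical instantiation (the class name is guessed from the startup message, and the codenames and threshold are assumptions):

copilot = DeepEye(
    classifier_codename='ssd_mobilenet_v1_coco',
    dataset_codename='mscoco',
    classifier_threshold=0.75,
    monitor_id=1)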
Example #17
"""Apply lane detection frame by frame to a video in real time"""

import sys  # Python standard library import
sys.path.append('../')
import cv2  # OpenCV library import
from lane_detector import LaneDetector
from object_detect.utils import FPS
from object_detect.utils import fps_label

PATH_TO_VIDEO = '../test_videos/dashcam3.mp4'

video_capture = cv2.VideoCapture(PATH_TO_VIDEO)

fps = FPS().start()
ld = LaneDetector()

while video_capture.isOpened():
    ret, frame = video_capture.read()
    if ret:

        height, width, layers = frame.shape
        half_height = int(height / 2)
        half_width = int(width / 2)

        frame = cv2.resize(frame, (half_width, half_height))

        detected_lane = ld.detect_lanes(frame)

        fps.update()
        fps.stop()
        fps_label(("FPS: {:.2f}".format(fps.fps())), detected_lane)
Example #18
    parser.add_argument('--w_margin',
                        type=int,
                        default=35,
                        help='Sliding window margin from center')

    args = parser.parse_args()

    img_files = []

    for ext in ('*.png', '*.jpg'):
        img_files.extend(glob(os.path.join(args.dir, ext)))

    img_processor = ImageProcessor(args.calibration_data_file)
    lane_tracker = LaneDetector(window_width=args.w_width,
                                window_height=args.w_height,
                                margin=args.w_margin,
                                smooth_frames=0)

    for img_file in tqdm(img_files, unit=' images', desc='Image processing'):

        tqdm.write(img_file)  # print via tqdm so the progress bar is not broken
        input_img = cv2.imread(img_file)

        undistorted_img, thresholded_img, processed_img = img_processor.process_image(
            input_img)

        out_file_prefix = os.path.join(args.output,
                                       os.path.split(img_file)[1][:-4])
        cv2.imwrite(out_file_prefix + '_processed.jpg', processed_img)

        cv2.imwrite(out_file_prefix + '_undistorted.jpg', undistorted_img)
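        # Assumed: thresholded_img is unpacked above but never written; the
        # original presumably saved it alongside the others, roughly:
        cv2.imwrite(out_file_prefix + '_thresholded.jpg', thresholded_img)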
Example #19
    
    rospy.init_node('lane_detection')
    rospy.loginfo("Starting land_detection.py")

    if len(sys.argv) < 2:
        rospy.loginfo("Error in lane_detection")
        rospy.loginfo("args 'homography_file' 'filter_file' ")
        exit(1)

    homography_file = sys.argv[1]
    #filter_file = "/home/nesvera/catkin_ws/src/travis/travis_image_processing/src/lane_detector/data/default.travis"
    filter_file = "/home/taura/catkin_ws/src/travis/travis_image_processing/src/lane_detector/data/lane.travis"
    debug = 1

    global lane_detector
    lane_detector = LaneDetector(homography_file, filter_file, debug)

    bridge = CvBridge()

    # Publisher
    lane_status_pub = rospy.Publisher("/travis/lane_info", LaneInfo, queue_size=1)

    # Subscriber
    rospy.Subscriber("/camera/image_raw/compressed", CompressedImage, image_callback)

    if debug == 1:
        lane_detector.debug()
    else:
        rospy.spin()
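
image_callback is registered above but not shown; a minimal sketch (it assumes bridge, lane_detector and lane_status_pub are reachable as globals, and the detector call is an assumption):

def image_callback(msg):
    # Decode the compressed ROS image and run lane detection (sketch).
    img = bridge.compressed_imgmsg_to_cv2(msg, desired_encoding='bgr8')
    lane_info = lane_detector.process(img)  # process() is an assumption
    if lane_info is not None:
        lane_status_pub.publish(lane_info)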