Code example #1
def get_horizon_points(gpg: GroundProjectionGeometry, shift: int):
    x = +1000
    y = x * 3  # enough for field of view

    p_left = gpg.ground2pixel(GroundPoint(Point(x, y, 0)))

    p_right = gpg.ground2pixel(GroundPoint(Point(x, -y, 0)))

    S = 2 ** shift
    # XXX
    return (int(p_left.x * S), int(p_left.y * S)), (int(p_right.x * S), int(p_right.y * S))
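
The endpoints returned above are already scaled by 2**shift, i.e. they are in OpenCV's fixed-point subpixel convention. A minimal sketch of how they could be drawn, assuming a rectified BGR image and a GroundProjectionGeometry instance; the helper name draw_horizon and the colour are made up here, and shift=8 mirrors the value used in example #13.

import cv2
import numpy as np

def draw_horizon(image: np.ndarray, gpg: GroundProjectionGeometry, shift: int = 8) -> np.ndarray:
    out = image.copy()
    p_left, p_right = get_horizon_points(gpg, shift)
    # Both endpoints are scaled by 2**shift, so pass the same shift to cv2.line.
    cv2.line(out, p_left, p_right, color=(255, 140, 80), thickness=2,
             lineType=cv2.LINE_AA, shift=shift)
    return out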
Code example #2
    def cb_camera_info(self, msg: CameraInfo):
        """
        Initializes a :py:class:`image_processing.GroundProjectionGeometry` object and a
        :py:class:`image_processing.Rectify` object for image rectification

        Args:
            msg (:obj:`sensor_msgs.msg.CameraInfo`): Intrinsic properties of the camera.

        """
        if not self.camera_info_received:
            self.rectifier = Rectify(msg)
            self.ground_projector = GroundProjectionGeometry(
                im_width=msg.width, im_height=msg.height, homography=np.array(self.homography).reshape((3, 3))
            )
        self.camera_info_received = True
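
The reshape((3, 3)) above suggests that self.homography, as returned by load_extrinsics() in the later examples, is a flat sequence of nine numbers read from the extrinsic calibration YAML. A minimal sketch with placeholder values:

import numpy as np

# Placeholder values; a real calibration file stores the actual homography row by row.
homography_list = [1.0, 0.0, 0.0,
                   0.0, 1.0, 0.0,
                   0.0, 0.0, 1.0]
H = np.array(homography_list).reshape((3, 3))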
Code example #3
def find_ground_coordinates(
    gpg: GroundProjectionGeometry, sl: SegmentList, skip_not_on_ground: bool = True
) -> SegmentList:
    """
        Creates a new segment list with the ground coordinates set.

    """
    cutoff = 0.01
    sl2 = SegmentList()
    sl2.header = sl.header

    # Project each segment's endpoints onto the ground plane
    for s1 in sl.segments:

        g0 = gpg.vector2ground(s1.pixels_normalized[0])
        g1 = gpg.vector2ground(s1.pixels_normalized[1])
        if skip_not_on_ground:
            if g0.x < cutoff or g1.x < cutoff:
                continue

        points = [g0, g1]
        pixels_normalized = [s1.pixels_normalized[0], s1.pixels_normalized[1]]
        color = s1.color
        s2 = Segment(points=points, pixels_normalized=pixels_normalized, color=color)
        # TODO what about normal and points
        sl2.segments.append(s2)
    return sl2
Code example #4
def predict_segments(sm: SegmentsMap, gpg: GroundProjectionGeometry) -> SegmentList:
    """
    Predicts what segments the robot would see.

    Assumes map is in FRAME_AXLE.
    """
    x_frustum = +0.1
    fov = np.deg2rad(150)
    res = []
    for segment in sm.segments:
        p1 = segment.points[0]
        p2 = segment.points[1]

        # Both endpoints must be expressed in FRAME_AXLE
        if (sm.points[p1].id_frame != FRAME_AXLE) or (sm.points[p2].id_frame != FRAME_AXLE):
            msg = "Cannot deal with points not in frame FRAME_AXLE"
            raise NotImplementedError(msg)

        w1 = sm.points[p1].coords
        w2 = sm.points[p2].coords

        coords_inside = clip_to_view([w1, w2], x_frustum, fov)

        if not coords_inside:
            continue
        w1 = coords_inside[0]
        w2 = coords_inside[1]
        point1 = Point(w1[0], w1[1], w1[2])
        point2 = Point(w2[0], w2[1], w2[2])

        pixel1 = gpg.ground2pixel(GroundPoint(point1))
        pixel2 = gpg.ground2pixel(GroundPoint(point2))
        normalized1 = gpg.pixel2vector(pixel1)
        normalized2 = gpg.pixel2vector(pixel2)

        pixels_normalized = [normalized1, normalized2]
        normal = Vector2D(0, 0)  # XXX: compute normal
        points = [point1, point2]

        #         color = segment_color_constant_from_string(segment.color)
        assert segment.color in [Segment.RED, Segment.YELLOW, Segment.WHITE]
        s = Segment(pixels_normalized=pixels_normalized, normal=normal, points=points, color=segment.color)
        res.append(s)

    return SegmentList(segments=res)
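
clip_to_view() is not shown in these examples. A simplified sketch of the near-plane half of such clipping (keeping only the part of a segment with x >= x_frustum) is given below; this is an illustration under that assumption, not the dt-core implementation, and it ignores the field-of-view test.

import numpy as np

def clip_to_near_plane(w1, w2, x_frustum):
    w1, w2 = np.asarray(w1, dtype=float), np.asarray(w2, dtype=float)
    if w1[0] < x_frustum and w2[0] < x_frustum:
        return []  # segment entirely behind the near plane
    if w1[0] >= x_frustum and w2[0] >= x_frustum:
        return [w1, w2]  # segment entirely in front
    # One endpoint is behind: cut the segment at the plane x == x_frustum.
    t = (x_frustum - w1[0]) / (w2[0] - w1[0])
    cut = w1 + t * (w2 - w1)
    return [cut, w2] if w2[0] >= x_frustum else [w1, cut]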
Code example #5
File: segment.py  Project: light5551/dt-core
def rectify_segment(rectify: Rectify, gpg: GroundProjectionGeometry,
                    s1: Segment) -> Segment:
    pixels_normalized = []

    for i in (0, 1):
        # normalized coordinates
        nc = s1.pixels_normalized[i]
        # get pixel coordinates
        pixels = gpg.vector2pixel(nc)
        # rectify
        pr = rectify.rectify_point(pixels)
        # recompute normalized coordinates
        # t = Pixel(pr[0], pr[1])
        v = gpg.pixel2vector(ImageSpaceResdepPoint(pr))
        pixels_normalized.append(v)

    s2 = Segment(color=s1.color, pixels_normalized=pixels_normalized)
    return s2
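
A minimal sketch combining rectify_segment() above with find_ground_coordinates() from example #3, in the same order used by run_pipeline() in example #12 (rectify first, then project to ground); the helper name is made up.

def rectify_and_project(rectify: Rectify, gpg: GroundProjectionGeometry,
                        sl: SegmentList) -> SegmentList:
    rectified = SegmentList()
    rectified.header = sl.header
    rectified.segments = [rectify_segment(rectify, gpg, s) for s in sl.segments]
    return find_ground_coordinates(gpg, rectified)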
Code example #6
    def __init__(self, node_name):

        # Initialize the DTROS parent class
        super(ObjectDetectionNode,
              self).__init__(node_name=node_name,
                             node_type=NodeType.PERCEPTION)

        # Construct publishers
        self.pub_obj_dets = rospy.Publisher("~duckie_detected",
                                            BoolStamped,
                                            queue_size=1,
                                            dt_topic_type=TopicType.PERCEPTION)

        # Construct subscribers
        self.sub_image = rospy.Subscriber("~image/compressed",
                                          CompressedImage,
                                          self.image_cb_det,
                                          buff_size=10000000,
                                          queue_size=1)

        self.sub_thresholds = rospy.Subscriber("~thresholds",
                                               AntiInstagramThresholds,
                                               self.thresholds_cb,
                                               queue_size=1)

        # self.pub_seglist_filtered = rospy.Publisher("~seglist_filtered",
        #                                             SegmentList,
        #                                             queue_size=1,
        #                                             dt_topic_type=TopicType.DEBUG)

        self.pub_segmented_img = rospy.Publisher(
            "~debug/segmented_image/compressed",
            CompressedImage,
            queue_size=1,
            dt_topic_type=TopicType.DEBUG)

        self.ai_thresholds_received = False
        self.anti_instagram_thresholds = dict()
        self.ai = AntiInstagram()
        self.bridge = CvBridge()

        model_file = rospy.get_param('~model_file', '.')
        rospack = rospkg.RosPack()
        model_file_absolute = rospack.get_path('object_detection') + model_file
        self.model_wrapper = Wrapper(model_file_absolute)
        self.homography = self.load_extrinsics()
        homography = np.array(self.homography).reshape((3, 3))
        self.bridge = CvBridge()
        self.gpg = GroundProjectionGeometry(160, 120, homography)
        # self.gpg = GroundProjectionGeometry(320, 240, homography)
        self.initialized = True
        self.log("Initialized!")
Code example #7
File: image_simulation.py  Project: light5551/dt-core
def simulate_image(sm_orig: SegmentsMap, pose: SE2value,
                   gpg: GroundProjectionGeometry, rectifier: Rectify,
                   blur_sigma: float) -> SimulationData:
    H, W = gpg.get_shape()
    #     H_pad = int(0.3*H)
    #     W_pad = int(0.3*W)
    #     H_padded = H + H_pad
    #     W_padded = W + W_pad
    blank = np.zeros(dtype="uint8", shape=(H, W, 3))
    blank.fill(128)

    tinfo = TransformationsInfo()

    frames = list(set(_.id_frame for _ in list(sm_orig.points.values())))
    id_frame = frames[0]
    dtu.logger.debug(f"frames: {frames} choose {id_frame}")

    tinfo.add_transformation(frame1=id_frame, frame2=FRAME_AXLE, g=pose)

    sm_axle = tinfo.transform_map_to_frame(sm_orig, FRAME_AXLE)

    rectified_synthetic = plot_map(blank, sm_axle, gpg, do_segments=False)
    rectified_segments = plot_map(blank,
                                  sm_axle,
                                  gpg,
                                  do_segments=True,
                                  do_ground=True,
                                  do_faces=False,
                                  do_horizon=False)

    distorted_synthetic = rectifier.distort(rectified_synthetic)

    distorted_synthetic = add_noise(distorted_synthetic)
    #     distorted_synthetic = cv2.medianBlur(distorted_synthetic, 3)
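    # ksize=(0, 0) makes OpenCV derive the Gaussian kernel size from blur_sigma.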
    distorted_synthetic = cv2.GaussianBlur(distorted_synthetic, (0, 0),
                                           blur_sigma)

    return SimulationData(
        distorted_synthetic_bgr=distorted_synthetic,
        rectified_synthetic_bgr=rectified_synthetic,
        rectified_segments_bgr=rectified_segments,
    )
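
A possible call, assuming a SegmentsMap sm plus a Rectify and a GroundProjectionGeometry built as in the other examples. SE2_from_translation_angle from the geometry package is an assumption here for building the pose that relates the map frame to FRAME_AXLE.

import geometry as geo

pose = geo.SE2_from_translation_angle([0.0, 0.0], 0.0)  # robot at the map origin
data = simulate_image(sm, pose, gpg, rectifier, blur_sigma=1.0)
distorted_bgr = data.distorted_synthetic_bgr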
Code example #8
    def go(self):
        robot_name = dtu.get_current_robot_name()

        output = self.options.output
        if output is None:
            output = 'out-calibrate-extrinsics'  #  + dtu.get_md5(self.options.image)[:6]
            self.info('No --output given, using %s' % output)

        if self.options.input is None:

            print("{}\nCalibrating using the ROS image stream...\n".format(
                "*" * 20))
            import rospy
            from sensor_msgs.msg import CompressedImage

            topic_name = os.path.join('/', robot_name,
                                      'camera_node/image/compressed')
            print('Topic to listen to is: %s' % topic_name)

            print('Let\'s wait for an image. Say cheese!')

            # Dummy for getting a ROS message
            rospy.init_node('calibrate_extrinsics')
            img_msg = None
            try:
                img_msg = rospy.wait_for_message(topic_name,
                                                 CompressedImage,
                                                 timeout=10)
                print('Image captured!')
            except rospy.ROSException as e:
                print(
                    '\n\n\n'
                    'Didn\'t get any message!: %s\n '
                    'MAKE SURE YOU USE DT SHELL COMMANDS OF VERSION 4.1.9 OR HIGHER!'
                    '\n\n\n' % (e, ))

            bgr = dtu.bgr_from_rgb(dtu.rgb_from_ros(img_msg))
            print('Picture taken: %s ' % str(bgr.shape))

        else:
            print('Loading input image %s' % self.options.input)
            bgr = dtu.bgr_from_jpg_fn(self.options.input)

        if bgr.shape[1] != 640:
            interpolation = cv2.INTER_CUBIC
            bgr = dtu.d8_image_resize_fit(bgr, 640, interpolation)
            print('Resized to: %s ' % str(bgr.shape))
        # Disable the old calibration file
        print("Disableing old homography")
        disable_old_homography(robot_name)
        print("Obtaining camera info")
        try:
            camera_info = get_camera_info_for_robot(robot_name)
        except Exception as E:
            print("Error on obtaining camera info!")
            print(E)
        print("Get default homography")
        try:
            homography_dummy = get_homography_default()
        except Exception as E:
            print("Error on getting homography")
            print(E)
        print("Rectify image")
        try:
            rect = Rectify(camera_info)
        except Exception as E:
            print("Error rectifying image!")
            print(E)
        print("Calculate GPG")
        try:
            gpg = GroundProjectionGeometry(camera_info.width,
                                           camera_info.height,
                                           homography_dummy.reshape((3, 3)))
        except Exception as E:
            print("Error calculating GPG!")
            print(E)
        print("Ordered Dict")
        res = OrderedDict()
        try:
            bgr_rectified = rect.rectify(bgr, interpolation=cv2.INTER_CUBIC)

            res['bgr'] = bgr
            res['bgr_rectified'] = bgr_rectified

            _new_matrix, res['rectified_full_ratio_auto'] = rect.rectify_full(
                bgr, ratio=1.65)

            (result_gpg, status) = gpg.estimate_homography(bgr_rectified)

            if status is not None:
                raise Exception(status)

            save_homography(result_gpg.H, robot_name)
            msg = '''

To check that this worked well, place the robot on the road, and run:

    rosrun complete_image_pipeline single_image

Look at the produced jpgs.

'''
            print(msg)
        except Exception as E:
            print(E)
        finally:
            dtu.write_bgr_images_as_jpgs(res, output)
Code example #9
class ObjectDetectionNode(DTROS):
    def __init__(self, node_name):

        # Initialize the DTROS parent class
        super(ObjectDetectionNode,
              self).__init__(node_name=node_name,
                             node_type=NodeType.PERCEPTION)

        # Construct publishers
        self.pub_obj_dets = rospy.Publisher("~duckie_detected",
                                            BoolStamped,
                                            queue_size=1,
                                            dt_topic_type=TopicType.PERCEPTION)

        # Construct subscribers
        self.sub_image = rospy.Subscriber("~image/compressed",
                                          CompressedImage,
                                          self.image_cb,
                                          buff_size=10000000,
                                          queue_size=1)

        self.sub_thresholds = rospy.Subscriber("~thresholds",
                                               AntiInstagramThresholds,
                                               self.thresholds_cb,
                                               queue_size=1)

        self.sub_camera_info = rospy.Subscriber(
            f"/{os.environ['VEHICLE_NAME']}/camera_node/camera_info",
            CameraInfo,
            self.cb_camera_info,
            queue_size=1)

        self.sub_lane_reading = rospy.Subscriber(
            f"/{os.environ['VEHICLE_NAME']}/lane_filter_node/lane_pose",
            LanePose,
            self.cbLanePoses,
            queue_size=1)

        self.initialized = False
        self.ai_thresholds_received = False
        self.anti_instagram_thresholds = dict()
        self.ai = AntiInstagram()
        self.bridge = CvBridge()

        self.ground_projector = None
        self.rectifier = None
        self.homography = self.load_extrinsics()
        self.camera_info_received = False
        self.log(str(self.homography))
        self.lane_width = rospy.get_param('~lanewidth', None)
        self.safe_distance = rospy.get_param('~safe_distance', None)

        model_file = rospy.get_param('~model_file', '.')
        rospack = rospkg.RosPack()
        model_file_absolute = rospack.get_path('object_detection') + model_file
        self.model_wrapper = Wrapper(model_file_absolute)
        self.initialized = True
        self.image_count = 0
        self.obstacle_left_lane = False
        self.obstacle_right_lane = False
        self.log("Initialized!")

    def thresholds_cb(self, thresh_msg):
        self.anti_instagram_thresholds["lower"] = thresh_msg.low
        self.anti_instagram_thresholds["higher"] = thresh_msg.high
        self.ai_thresholds_received = True

    def cb_camera_info(self, msg):
        """
        Initializes a :py:class:`image_processing.GroundProjectionGeometry` object and a
        :py:class:`image_processing.Rectify` object for image rectification

        Args:
            msg (:obj:`sensor_msgs.msg.CameraInfo`): Intrinsic properties of the camera.

        """
        if not self.camera_info_received:
            self.rectifier = Rectify(msg)
            self.ground_projector = GroundProjectionGeometry(
                im_width=msg.width,
                im_height=msg.height,
                homography=np.array(self.homography).reshape((3, 3)))
            self.im_width = msg.width
            self.im_height = msg.height

        self.camera_info_received = True

    def cbLanePoses(self, input_pose_msg):
        """Callback receiving pose messages
        Computes v and omega using PPController
        Args:
            input_pose_msg (:obj:`LanePose`): Message containing information about the current lane pose.
        """
        self.pose_msg = input_pose_msg

    def image_cb(self, image_msg):
        if not self.initialized:
            return

        # TODO to get a better frame rate, you might want to call your wrapper's predict function only once every 4-5 images.
        # This way, you're not calling the model again for two practically identical images. Experiment to find a good number
        # of skipped images.

        # Decode from compressed image with OpenCV
        try:
            image = self.bridge.compressed_imgmsg_to_cv2(image_msg)
        except ValueError as e:
            self.logerr('Could not decode image: %s' % e)
            return

        # Perform color correction
        if self.ai_thresholds_received:
            image = self.ai.apply_color_balance(
                self.anti_instagram_thresholds["lower"],
                self.anti_instagram_thresholds["higher"], image)

        image = cv2.resize(image, (224, 224))
        if self.image_count != 0:
            self.image_count = np.mod(self.image_count + 1, 3)
        else:
            bboxes, classes, scores = self.model_wrapper.predict(image)
            im_boxed = self.plotWithBoundingBoxes(image, bboxes[0], classes[0],
                                                  scores[0])
            cv2.imshow('detected objects', im_boxed)
            cv2.waitKey(1)
            self.det2bool(
                bboxes[0], classes[0]
            )  # [0] because our batch size given to the wrapper is 1

        msg = BoolStamped()
        msg.header = image_msg.header
        if self.obstacle_right_lane:
            msg.data = True
            if self.obstacle_left_lane:
                pass
            else:
                ## OVERTAKING
                # msg.data = overtake
                pass

        self.pub_obj_dets.publish(msg)

    def det2bool(self, bboxes, classes):
        # TODO remove these debugging prints
        # print(bboxes)
        # print(classes)

        # This is a dummy solution, remove this next line
        # return len(bboxes) > 1

        # TODO filter the predictions: the environment here is a bit different versus the data collection environment, and your model might output a bit
        # of noise. For example, you might see a bunch of predictions with x1=223.4 and x2=224, which makes
        # no sense. You should remove these.

        # TODO also filter detections which are outside of the road, or too far away from the bot. Only return True when there's a pedestrian (aka a duckie)
        # in front of the bot, which you know the bot will have to avoid. A good heuristic would be "if centroid of bounding box is in the center of the image,
        # assume duckie is in the road" and "if bouding box's area is more than X pixels, assume duckie is close to us"

        self.obstacle_right_lane = False
        self.obstacle_left_lane = False
        obj_det_list = []
        for i in range(len(bboxes)):
            x1, y1, x2, y2 = bboxes[i]
            label = classes[i]
            if label == 1:
                if (x2 - x1 >= 2) and (y2 - y1 >= 2):
                    low_center = Point((x1 + x2) / 2, y2)
                    rect_pixel = self.rectifier.rectify_point(low_center)
                    ground_point = self.ground_projector.pixel2ground(
                        rect_pixel)
                    duckie_lane_pose = (np.cos(self.pose_msg.phi) * (ground_point.y + self.pose_msg.d)
                                        + np.sin(self.pose_msg.phi) * ground_point.x)
                    dist = np.sqrt(ground_point.x ** 2 + ground_point.y ** 2)
                    if np.abs(duckie_lane_pose) <= self.lane_width / 2:  # in our lane
                        if dist <= self.safe_distance:
                            self.obstacle_right_lane = True
                    elif np.abs(duckie_lane_pose) > self.lane_width / 2:  # in the left lane
                        if dist <= self.safe_distance * 1.5:
                            self.obstacle_left_lane = True
            # TODO if label isn't a duckie, skip
            # TODO if detection is a pedestrian in front of us:
            #   return True

    def plotWithBoundingBoxes(self, seg_im, boxes, labels, scores):
        for i in range(len(labels)):
            cv2.rectangle(seg_im, (boxes[i][0], boxes[i][1]),
                          (boxes[i][2], boxes[i][3]), (255, 255, 255), 1)
            cv2.rectangle(seg_im, (boxes[i][0], boxes[i][1]),
                          (boxes[i][0] + 20, boxes[i][1] - 6), (255, 255, 255),
                          cv2.FILLED)
            cv2.putText(seg_im, f"{labels[i]} : {scores[i]}",
                        (boxes[i][0], boxes[i][1]), cv2.FONT_HERSHEY_COMPLEX,
                        0.5, (0, 0, 0), 1)
        return seg_im

    def load_extrinsics(self):
        """
        Loads the homography matrix from the extrinsic calibration file.

        Returns:
            :obj:`numpy array`: the loaded homography matrix

        """
        # load intrinsic calibration
        cali_file_folder = '/data/config/calibrations/camera_extrinsic/'
        cali_file = cali_file_folder + rospy.get_namespace().strip(
            "/") + ".yaml"

        # Locate calibration yaml file or use the default otherwise
        if not os.path.isfile(cali_file):
            self.log(
                "Can't find calibration file: %s.\n Using default calibration instead."
                % cali_file, 'warn')
            cali_file = (cali_file_folder + "default.yaml")

        # Shut down if no calibration file is found
        if not os.path.isfile(cali_file):
            msg = 'Found no calibration file ... aborting'
            self.log(msg, 'err')
            rospy.signal_shutdown(msg)

        try:
            with open(cali_file, 'r') as stream:
                calib_data = yaml.load(stream, Loader=yaml.Loader)
        except yaml.YAMLError:
            msg = 'Error in parsing calibration file %s ... aborting' % cali_file
            self.log(msg, 'err')
            rospy.signal_shutdown(msg)

        return calib_data['homography']
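
The TODOs in det2bool() above ask for the detections to be filtered before ground projection. A hypothetical prefilter along those lines (not part of the original node; all thresholds are placeholder values):

def filter_boxes(bboxes, classes, scores, min_side=2, min_score=0.5, im_size=224):
    """Drop tiny, low-confidence, or border-glued boxes (illustrative only)."""
    keep = []
    for (x1, y1, x2, y2), label, score in zip(bboxes, classes, scores):
        if score < min_score:
            continue
        if (x2 - x1) < min_side or (y2 - y1) < min_side:
            continue
        # Slivers glued to the right/bottom image border (e.g. x1=223.4, x2=224)
        # are the kind of noise mentioned in the TODO above.
        if x1 >= im_size - min_side or y1 >= im_size - min_side:
            continue
        keep.append(((x1, y1, x2, y2), label, score))
    return keep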
Code example #10
    def go(self):
        robot_name = dtu.get_current_robot_name()

        # noinspection PyUnresolvedReferences
        output = self.options.output
        # noinspection PyUnresolvedReferences
        the_input = self.options.input
        if output is None:
            output = "out/calibrate-extrinsics"  # + dtu.get_md5(self.options.image)[:6]
            self.info(f"No --output given, using {output}")

        if the_input is None:

            self.info("{}\nCalibrating using the ROS image stream...\n".format("*" * 20))
            import rospy
            from sensor_msgs.msg import CompressedImage

            topic_name = os.path.join("/", robot_name,
                                      "camera_node/image/compressed")
            logger.info(f"Topic to listen to is: {topic_name}")

            self.info("Let's wait for an image. Say cheese!")

            # Dummy for getting a ROS message
            rospy.init_node("calibrate_extrinsics")

            try:
                img_msg = rospy.wait_for_message(topic_name,
                                                 CompressedImage,
                                                 timeout=10)
                self.info("Image captured")
            except rospy.ROSException as e:
                print((
                    "\n\n\n"
                    f"Didn't get any message: {e}\n "
                    "MAKE SURE YOU USE DT SHELL COMMANDS OF VERSION 4.1.9 OR HIGHER."
                ))
                raise
            img_msg = cast(CompressedImage, img_msg)
            bgr = dtu.bgr_from_rgb(dru.rgb_from_ros(img_msg))
            self.info(f"Picture taken: {str(bgr.shape)} ")

        else:
            self.info(f"Loading input image {the_input}")
            bgr = dtu.bgr_from_jpg_fn(the_input)

        if bgr.shape[1] != 640:
            interpolation = cv2.INTER_CUBIC
            bgr = dtu.d8_image_resize_fit(bgr, 640, interpolation)
            self.info(f"Resized to: {str(bgr.shape)} ")
        # Disable the old calibration file
        self.info("Disableing old homography")
        disable_old_homography(robot_name)
        self.info("Obtaining camera info")
        try:
            camera_info = get_camera_info_for_robot(robot_name)
        except Exception as e:
            msg = "Error on obtaining camera info."
            raise Exception(msg) from e

        self.info("Get default homography")
        try:
            homography_dummy = get_homography_default()
        except Exception as e:
            msg = "Error on getting homography."
            raise Exception(msg) from e

        self.info("Rectify image")
        try:
            rect = Rectify(camera_info)
        except Exception as e:
            msg = "Error rectifying image."
            raise Exception(msg) from e

        self.info("Calculate GPG")
        try:
            gpg = GroundProjectionGeometry(camera_info.width,
                                           camera_info.height,
                                           homography_dummy.reshape((3, 3)))
        except Exception as e:
            msg = "Error calculating GroundProjectionGeometry."
            raise Exception(msg) from e
        self.info("Ordered Dict")
        res = {}
        try:
            bgr_rectified = rect.rectify(bgr, interpolation=cv2.INTER_CUBIC)

            res["bgr"] = bgr
            res["bgr_rectified"] = bgr_rectified

            _new_matrix, res["rectified_full_ratio_auto"] = rect.rectify_full(
                bgr, ratio=1.65)

            (result_gpg, status) = gpg.estimate_homography(bgr_rectified)

            if status is not None:
                raise Exception(status)

            save_homography(result_gpg.H, robot_name)
            msg = """

To check that this worked well, place the robot on the road, and run:

    rosrun complete_image_pipeline single_image

Look at the produced jpgs.

"""
            self.info(msg)
        except Exception as E:
            self.error(E)
        finally:
            dtu.write_bgr_images_as_jpgs(res, output)
Code example #11
class GroundProjectionNode(DTROS):
    """
    This node projects the line segments detected in the image to the ground plane and in the robot's
    reference frame. In this way it enables lane localization in the 2D ground plane. This projection is
    performed using the homography matrix obtained from the extrinsic calibration procedure.

    Args:
        node_name (:obj:`str`): a unique, descriptive name for the node that ROS will use

    Subscribers:
        ~camera_info (:obj:`sensor_msgs.msg.CameraInfo`): Intrinsic properties of the camera. Needed for
        rectifying the segments.
        ~lineseglist_in (:obj:`duckietown_msgs.msg.SegmentList`): Line segments in pixel space from
        unrectified images

    Publishers:
        ~lineseglist_out (:obj:`duckietown_msgs.msg.SegmentList`): Line segments in the ground plane
        relative to the robot origin
        ~debug/ground_projection_image/compressed (:obj:`sensor_msgs.msg.CompressedImage`): Debug image
        that shows the robot relative to the projected segments. Useful to check if the extrinsic
        calibration is accurate.
    """

    bridge: CvBridge
    ground_projector: Optional[GroundProjectionGeometry]
    rectifier: Optional[Rectify]

    def __init__(self, node_name: str):
        # Initialize the DTROS parent class
        super(GroundProjectionNode,
              self).__init__(node_name=node_name,
                             node_type=NodeType.PERCEPTION)

        self.bridge = CvBridge()
        self.ground_projector = None
        self.rectifier = None
        self.homography = self.load_extrinsics()
        self.first_processing_done = False
        self.camera_info_received = False

        # subscribers
        self.sub_camera_info = rospy.Subscriber("~camera_info",
                                                CameraInfo,
                                                self.cb_camera_info,
                                                queue_size=1)
        self.sub_lineseglist_ = rospy.Subscriber("~lineseglist_in",
                                                 SegmentList,
                                                 self.lineseglist_cb,
                                                 queue_size=1)

        # publishers
        self.pub_lineseglist = rospy.Publisher(
            "~lineseglist_out",
            SegmentList,
            queue_size=1,
            dt_topic_type=TopicType.PERCEPTION)
        self.pub_debug_img = rospy.Publisher(
            "~debug/ground_projection_image/compressed",
            CompressedImage,
            queue_size=1,
            dt_topic_type=TopicType.DEBUG,
        )

        self.bridge = CvBridge()

        self.debug_img_bg = None

        # Seems to be never used:
        # self.service_homog_ = rospy.Service("~estimate_homography", EstimateHomography,
        # self.estimate_homography_cb)
        # self.service_gnd_coord_ = rospy.Service("~get_ground_coordinate", GetGroundCoord,
        # self.get_ground_coordinate_cb)
        # self.service_img_coord_ = rospy.Service("~get_image_coordinate", GetImageCoord,
        # self.get_image_coordinate_cb)

    def cb_camera_info(self, msg: CameraInfo):
        """
        Initializes a :py:class:`image_processing.GroundProjectionGeometry` object and a
        :py:class:`image_processing.Rectify` object for image rectification

        Args:
            msg (:obj:`sensor_msgs.msg.CameraInfo`): Intrinsic properties of the camera.

        """
        if not self.camera_info_received:
            self.rectifier = Rectify(msg)
            self.ground_projector = GroundProjectionGeometry(
                im_width=msg.width,
                im_height=msg.height,
                homography=np.array(self.homography).reshape((3, 3)))
        self.camera_info_received = True

    def pixel_msg_to_ground_msg(self, point_msg) -> PointMsg:
        """
        Creates a :py:class:`ground_projection.Point` object from a normalized point message from an
        unrectified image. It converts it to pixel coordinates and rectifies it, then projects it to the
        ground plane and converts it to a ROS Point message.

        Args:
            point_msg (:obj:`geometry_msgs.msg.Point`): Normalized point coordinates from an unrectified
            image.

        Returns:
            :obj:`geometry_msgs.msg.Point`: Point coordinates in the ground reference frame.

        """
        # normalized coordinates to pixel:
        norm_pt = Point.from_message(point_msg)
        pixel = self.ground_projector.vector2pixel(norm_pt)
        # rectify
        rect = self.rectifier.rectify_point(pixel)
        # convert to Point
        rect_pt = Point.from_message(rect)
        # project on ground
        ground_pt = self.ground_projector.pixel2ground(rect_pt)
        # point to message
        ground_pt_msg = PointMsg()
        ground_pt_msg.x = ground_pt.x
        ground_pt_msg.y = ground_pt.y
        ground_pt_msg.z = ground_pt.z

        return ground_pt_msg

    def lineseglist_cb(self, seglist_msg):
        """
        Projects a list of line segments on the ground reference frame point by point by
        calling :py:meth:`pixel_msg_to_ground_msg`. Then publishes the projected list of segments.

        Args:
            seglist_msg (:obj:`duckietown_msgs.msg.SegmentList`): Line segments in pixel space from
            unrectified images

        """
        if self.camera_info_received:
            seglist_out = SegmentList()
            seglist_out.header = seglist_msg.header
            for received_segment in seglist_msg.segments:
                new_segment = Segment()
                new_segment.points[0] = self.pixel_msg_to_ground_msg(
                    received_segment.pixels_normalized[0])
                new_segment.points[1] = self.pixel_msg_to_ground_msg(
                    received_segment.pixels_normalized[1])
                new_segment.color = received_segment.color
                # TODO what about normal and points
                seglist_out.segments.append(new_segment)
            self.pub_lineseglist.publish(seglist_out)

            if not self.first_processing_done:
                self.log("First projected segments published.")
                self.first_processing_done = True

            if self.pub_debug_img.get_num_connections() > 0:
                debug_image_msg = self.bridge.cv2_to_compressed_imgmsg(
                    self.debug_image(seglist_out))
                debug_image_msg.header = seglist_out.header
                self.pub_debug_img.publish(debug_image_msg)
        else:
            self.log("Waiting for a CameraInfo message", "warn")

    # def get_ground_coordinate_cb(self, req):
    #     return GetGroundCoordResponse(self.pixel_msg_to_ground_msg(req.uv))
    #
    # def get_image_coordinate_cb(self, req):
    #     return GetImageCoordResponse(self.gpg.ground2pixel(req.gp))
    #
    # def estimate_homography_cb(self, req):
    #     rospy.loginfo("Estimating homography")
    #     rospy.loginfo("Waiting for raw image")
    #     img_msg = rospy.wait_for_message("/" + self.robot_name + "/camera_node/image/raw", Image)
    #     rospy.loginfo("Got raw image")
    #     try:
    #         cv_image = self.bridge.imgmsg_to_cv2(img_msg, desired_encoding="bgr8")
    #     except CvBridgeError as e:
    #         rospy.logerr(e)
    #     self.gp.estimate_homography(cv_image)
    #     rospy.loginfo("wrote homography")
    #     return EstimateHomographyResponse()

    def load_extrinsics(self):
        """
        Loads the homography matrix from the extrinsic calibration file.

        Returns:
            :obj:`numpy array`: the loaded homography matrix

        """
        # load intrinsic calibration
        cali_file_folder = "/data/config/calibrations/camera_extrinsic/"
        cali_file = cali_file_folder + rospy.get_namespace().strip(
            "/") + ".yaml"

        # Locate calibration yaml file or use the default otherwise
        if not os.path.isfile(cali_file):
            self.log(
                f"Can't find calibration file: {cali_file}.\n Using default calibration instead.",
                "warn")
            cali_file = os.path.join(cali_file_folder, "default.yaml")

        # Shut down if no calibration file is found
        if not os.path.isfile(cali_file):
            msg = "Found no calibration file ... aborting"
            self.logerr(msg)
            rospy.signal_shutdown(msg)

        try:
            with open(cali_file, "r") as stream:
                calib_data = yaml.load(stream, Loader=yaml.Loader)
        except yaml.YAMLError:
            msg = f"Error in parsing calibration file {cali_file} ... aborting"
            self.logerr(msg)
            rospy.signal_shutdown(msg)

        return calib_data["homography"]

    def debug_image(self, seg_list):
        """
        Generates a debug image with all the projected segments plotted with respect to the robot's origin.

        Args:
            seg_list (:obj:`duckietown_msgs.msg.SegmentList`): Line segments in the ground plane relative
            to the robot origin

        Returns:
            :obj:`numpy array`: an OpenCV image

        """
        # dimensions of the image are 1m x 1m, so 1px = 2.5mm
        # the origin is at x=200 and y=300

        # if that's the first call, generate the background
        if self.debug_img_bg is None:

            # initialize gray image
            self.debug_img_bg = np.ones((400, 400, 3), np.uint8) * 128

            # draw vertical lines of the grid
            for vline in np.arange(40, 361, 40):
                cv2.line(self.debug_img_bg,
                         pt1=(vline, 20),
                         pt2=(vline, 300),
                         color=(255, 255, 0),
                         thickness=1)

            # draw the coordinates
            cv2.putText(
                self.debug_img_bg,
                "-20cm",
                (120 - 25, 300 + 15),
                cv2.FONT_HERSHEY_PLAIN,
                0.8,
                (255, 255, 0),
                1,
            )
            cv2.putText(
                self.debug_img_bg,
                "  0cm",
                (200 - 25, 300 + 15),
                cv2.FONT_HERSHEY_PLAIN,
                0.8,
                (255, 255, 0),
                1,
            )
            cv2.putText(
                self.debug_img_bg,
                "+20cm",
                (280 - 25, 300 + 15),
                cv2.FONT_HERSHEY_PLAIN,
                0.8,
                (255, 255, 0),
                1,
            )

            # draw horizontal lines of the grid
            for hline in np.arange(20, 301, 40):
                cv2.line(self.debug_img_bg,
                         pt1=(40, hline),
                         pt2=(360, hline),
                         color=(255, 255, 0),
                         thickness=1)

            # draw the coordinates
            cv2.putText(self.debug_img_bg, "20cm", (2, 220 + 3),
                        cv2.FONT_HERSHEY_PLAIN, 0.8, (255, 255, 0), 1)
            cv2.putText(self.debug_img_bg, " 0cm", (2, 300 + 3),
                        cv2.FONT_HERSHEY_PLAIN, 0.8, (255, 255, 0), 1)

            # draw robot marker at the center
            cv2.line(
                self.debug_img_bg,
                pt1=(200 + 0, 300 - 20),
                pt2=(200 + 0, 300 + 0),
                color=(255, 0, 0),
                thickness=1,
            )

            cv2.line(
                self.debug_img_bg,
                pt1=(200 + 20, 300 - 20),
                pt2=(200 + 0, 300 + 0),
                color=(255, 0, 0),
                thickness=1,
            )

            cv2.line(
                self.debug_img_bg,
                pt1=(200 - 20, 300 - 20),
                pt2=(200 + 0, 300 + 0),
                color=(255, 0, 0),
                thickness=1,
            )

        # map segment color variables to BGR colors
        color_map = {
            Segment.WHITE: (255, 255, 255),
            Segment.RED: (0, 0, 255),
            Segment.YELLOW: (0, 255, 255)
        }

        image = self.debug_img_bg.copy()

        # plot every segment if both ends are in the scope of the image (within 50cm from the origin)
        for segment in seg_list.segments:
            if not np.any(
                    np.abs([
                        segment.points[0].x, segment.points[0].y,
                        segment.points[1].x, segment.points[1].y
                    ]) > 0.50):
                cv2.line(
                    image,
                    pt1=(int(segment.points[0].y * -400) + 200,
                         int(segment.points[0].x * -400) + 300),
                    pt2=(int(segment.points[1].y * -400) + 200,
                         int(segment.points[1].x * -400) + 300),
                    color=color_map.get(segment.color, (0, 0, 0)),
                    thickness=1,
                )

        return image
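
A small helper mirroring the pixel mapping used in debug_image() above: the 400x400 canvas has the robot origin at pixel (200, 300) and 1 px = 2.5 mm. The function name is made up.

def ground_to_debug_px(x_m: float, y_m: float) -> tuple:
    # x (forward, in metres) grows upwards in the image, y (left) grows to the left.
    u = int(y_m * -400) + 200
    v = int(x_m * -400) + 300
    return u, v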
Code example #12
File: pipeline.py  Project: marschla/dt-core
def run_pipeline(image, all_details=False, ground_truth=None, actual_map=None):
    """
        Image: numpy (H,W,3) == BGR
        Returns a dictionary, res with the following fields:

            res['input_image']

        ground_truth = pose
    """

    print('backend: %s' % matplotlib.get_backend())
    print('fname: %s' % matplotlib.matplotlib_fname())

    quick = False

    dtu.check_isinstance(image, np.ndarray)

    res = OrderedDict()
    stats = OrderedDict()

    res['Raw input image'] = image
    gpg = GroundProjectionGeometry()
    rect = Rectify()
    line_detector = LineDetector()
    lane_filter = LaneFilterHistogram()
    ai = AntiInstagram()

    pts = ProcessingTimingStats()
    pts.reset()
    pts.received_message()
    pts.decided_to_process()

    with pts.phase('calculate AI transform'):
        [lower, upper] = ai.calculate_color_balance_thresholds(image)

    with pts.phase('apply AI transform'):
        transformed = ai.apply_color_balance(lower, upper, image)

    with pts.phase('edge detection'):
        # note: do not apply transform twice!
        segment_list2 = image_prep.process(pts,
                                           image,
                                           line_detector,
                                           transform=ai.apply_color_balance)

        if all_details:

            res['resized and corrected'] = image_prep.image_corrected

    dtu.logger.debug('segment_list2: %s' % len(segment_list2.segments))

    if all_details:
        res['segments_on_image_input_transformed'] = \
            vs_fancy_display(image_prep.image_cv, segment_list2)

    if all_details:
        res['segments_on_image_input_transformed_resized'] = \
            vs_fancy_display(image_prep.image_resized, segment_list2)

    if all_details:
        grid = get_grid(image.shape[:2])
        res['grid'] = grid
        res['grid_remapped'] = gpg.rectify(grid)

#     res['difference between the two'] = res['image_input']*0.5 + res['image_input_rect']*0.5

    with pts.phase('rectify_segments'):
        segment_list2_rect = rectify_segments(gpg, segment_list2)

    # Project to ground
    with pts.phase('find_ground_coordinates'):
        sg = find_ground_coordinates(gpg, segment_list2_rect)

    lane_filter.initialize()
    if all_details:
        res['prior'] = lane_filter.get_plot_phi_d()

    with pts.phase('lane filter update'):
        print(type(lane_filter).__name__)
        if type(lane_filter).__name__ == 'LaneFilterHistogram':
            # XXX merging pain
            _likelihood = lane_filter.update(sg.segments)
        else:
            _likelihood = lane_filter.update(sg)

    if not quick:
        with pts.phase('lane filter plot'):
            res['likelihood'] = lane_filter.get_plot_phi_d(
                ground_truth=ground_truth)
    easy_algo_db = get_easy_algo_db()

    if isinstance(lane_filter, LaneFilterMoreGeneric):
        template_name = lane_filter.localization_template
    else:
        template_name = 'DT17_template_straight_straight'
        dtu.logger.debug('Using default template %r for visualization' %
                         template_name)

    localization_template = \
        easy_algo_db.create_instance(FAMILY_LOC_TEMPLATES, template_name)

    with pts.phase('lane filter get_estimate()'):
        est = lane_filter.get_estimate()

    # Coordinates in TILE frame
    g = localization_template.pose_from_coords(est)
    tinfo = TransformationsInfo()
    tinfo.add_transformation(frame1=FRAME_GLOBAL, frame2=FRAME_AXLE, g=g)

    if all_details:
        if actual_map is not None:
            #         sm_axle = tinfo.transform_map_to_frame(actual_map, FRAME_AXLE)
            res['real'] = plot_map_and_segments(actual_map,
                                                tinfo,
                                                sg.segments,
                                                dpi=120,
                                                ground_truth=ground_truth)

    with pts.phase('rectify'):
        rectified0 = gpg.rectify(image)

    rectified = ai.apply_color_balance(rectified0)

    if all_details:
        res['image_input_rect'] = rectified

    res['segments rectified on image rectified'] = \
        vs_fancy_display(rectified, segment_list2_rect)

    assumed = localization_template.get_map()

    if not quick:
        with pts.phase('plot_map_and_segments'):
            res['model assumed for localization'] = plot_map_and_segments(
                assumed,
                tinfo,
                sg.segments,
                dpi=120,
                ground_truth=ground_truth)

    assumed_axle = tinfo.transform_map_to_frame(assumed, FRAME_AXLE)

    with pts.phase('plot_map reprojected'):
        res['map reprojected on image'] = plot_map(rectified,
                                                   assumed_axle,
                                                   gpg,
                                                   do_ground=False,
                                                   do_horizon=True,
                                                   do_faces=False,
                                                   do_faces_outline=True,
                                                   do_segments=False)

    with pts.phase('quality computation'):
        predicted_segment_list_rectified = predict_segments(sm=assumed_axle,
                                                            gpg=gpg)
        quality_res, quality_stats = judge_quality(
            image, segment_list2_rect, predicted_segment_list_rectified)
        res.update(quality_res)


#     res['blurred']= cv2.medianBlur(image, 11)
    stats['estimate'] = est
    stats.update(quality_stats)

    dtu.logger.info(pts.get_stats())
    return res, stats
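
A possible way to drive run_pipeline() on a single image and dump its intermediate visualisations, reusing dtu.write_bgr_images_as_jpgs() from examples #8 and #10; the file and output names are placeholders.

import cv2

bgr = cv2.imread("input.jpg")  # BGR, as run_pipeline expects
res, stats = run_pipeline(bgr, all_details=True)
dtu.write_bgr_images_as_jpgs(res, "out-single-image")
print(stats['estimate'])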
Code example #13
def plot_map(
    base0: dtu.NPImageBGR,
    sm: SegmentsMap,
    gpg: GroundProjectionGeometry,
    do_ground: bool = True,
    do_faces: bool = True,
    do_faces_outline: bool = False,
    do_segments: bool = True,
    do_horizon: bool = True,
) -> dtu.NPImageBGR:
    """
    base: already rectified image

    sm= SegmentsMap in frame FRAME_AXLE
    """
    image = base0.copy()
    x_frustum = +0.1
    fov = np.deg2rad(150)

    if do_ground:
        color_ground = (30, 10, 22)

        color_sky = (244, 134, 66)
        plot_ground_sky(image, gpg, color_ground, color_sky)

    if do_horizon:
        color_horizon = (255, 140, 80)
        plot_horizon(image, gpg, color_horizon)

    if do_faces or do_faces_outline:
        for face in sm.faces:
            # list of arrays with cut stuff
            coords = [sm.points[_].coords for _ in face.points]

            color = dtu.bgr_color_from_string(face.color)
            # don't do outline for black
            if face.color == "black":
                do_outline = False
            else:
                do_outline = do_faces_outline
            paint_polygon_world(
                image, coords, gpg, color, x_frustum, fov, do_faces_outline=do_outline, do_faces_fill=do_faces
            )

    if do_segments:
        for segment in sm.segments:
            p1 = segment.points[0]
            p2 = segment.points[1]

            # Both endpoints must be expressed in FRAME_AXLE
            if (sm.points[p1].id_frame != FRAME_AXLE) or (sm.points[p2].id_frame != FRAME_AXLE):
                msg = "Cannot deal with points not in frame FRAME_AXLE"
                raise NotImplementedError(msg)

            w1 = sm.points[p1].coords
            w2 = sm.points[p2].coords

            coords_inside = clip_to_view([w1, w2], x_frustum, fov)

            if not coords_inside:
                continue

            #             print('coords_inside: %s' % coords_inside)
            w1 = coords_inside[0]
            w2 = coords_inside[1]
            # XXX: more generated
            uv1 = gpg.ground2pixel(GroundPoint(Point(w1[0], w1[1], w1[2])))
            uv2 = gpg.ground2pixel(GroundPoint(Point(w2[0], w2[1], w2[2])))
            uv1 = gpg.vector2pixel(uv1)  # FIXME
            uv2 = gpg.vector2pixel(uv2)  # FIXME
            shift = 8
            S = 2 ** shift
            width = 2
            paint = (255, 120, 120)
            #         paint = BGR_WHITE
            ti = lambda a, b: (int(np.round(a * S)), int(np.round(b * S)))

            p1 = ti(uv1.x, uv1.y)  # XXX uv?
            p2 = ti(uv2.x, uv2.y)  # XXX uv?
            cv2.line(image, p1, p2, paint, width, shift=shift, lineType=AA)

    return image
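
Usage as in example #7: render the map twice on a blank rectified canvas, once as the synthetic view and once as the segment overlay. sm_axle must already be expressed in FRAME_AXLE, and gpg is the usual GroundProjectionGeometry.

import numpy as np

H, W = gpg.get_shape()
blank = np.full((H, W, 3), 128, dtype="uint8")
rectified_synthetic = plot_map(blank, sm_axle, gpg, do_segments=False)
rectified_segments = plot_map(blank, sm_axle, gpg, do_segments=True,
                              do_ground=True, do_faces=False, do_horizon=False)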