Example 1
    def __init__(self, node_name):

        # Initialize the DTROS parent class
        super(ObjectDetectionNode,
              self).__init__(node_name=node_name,
                             node_type=NodeType.PERCEPTION)

        # Construct publishers
        self.pub_obj_dets = rospy.Publisher("~duckie_detected",
                                            BoolStamped,
                                            queue_size=1,
                                            dt_topic_type=TopicType.PERCEPTION)

        # Construct subscribers
        self.sub_image = rospy.Subscriber("~image/compressed",
                                          CompressedImage,
                                          self.image_cb,
                                          buff_size=10000000,
                                          queue_size=1)

        self.sub_thresholds = rospy.Subscriber("~thresholds",
                                               AntiInstagramThresholds,
                                               self.thresholds_cb,
                                               queue_size=1)

        self.sub_camera_info = rospy.Subscriber(
            f"/{os.environ['VEHICLE_NAME']}/camera_node/camera_info",
            CameraInfo,
            self.cb_camera_info,
            queue_size=1)

        self.sub_lane_reading = rospy.Subscriber(
            f"/{os.environ['VEHICLE_NAME']}/lane_filter_node/lane_pose",
            LanePose,
            self.cbLanePoses,
            queue_size=1)

        self.initialized = False
        self.ai_thresholds_received = False
        self.anti_instagram_thresholds = dict()
        self.ai = AntiInstagram()
        self.bridge = CvBridge()

        self.ground_projector = None
        self.rectifier = None
        self.homography = self.load_extrinsics()
        self.camera_info_received = False
        self.log(str(self.homography))
        self.lane_width = rospy.get_param('~lanewidth', None)
        self.safe_distance = rospy.get_param('~safe_distance', None)

        model_file = rospy.get_param('~model_file', '.')
        rospack = rospkg.RosPack()
        model_file_absolute = rospack.get_path('object_detection') + model_file
        self.model_wrapper = Wrapper(model_file_absolute)
        self.initialized = True
        self.image_count = 0
        self.obstacle_left_lane = False
        self.obstacle_right_lane = False
        self.log("Initialized!")
Example 2
    def __init__(self, node_name):

        super(AntiInstagramNode, self).__init__(node_name=node_name, node_type=NodeType.PERCEPTION)

        # Read parameters

        self._interval = rospy.get_param("~interval")
        self._color_balance_percentage = rospy.get_param("~color_balance_scale")
        self._output_scale = rospy.get_param("~output_scale")

        # Construct publisher
        self.pub = rospy.Publisher(
            "~thresholds", AntiInstagramThresholds, queue_size=1, dt_topic_type=TopicType.PERCEPTION
        )

        # Construct subscriber
        self.uncorrected_image_subscriber = rospy.Subscriber(
            "~uncorrected_image/compressed",
            CompressedImage,
            self.store_image_msg,
            buff_size=10000000,
            queue_size=1,
        )

        # Initialize Timer
        rospy.Timer(rospy.Duration(self._interval), self.calculate_new_parameters)

        # Initialize objects and data
        self.ai = AntiInstagram()
        self.bridge = CvBridge()
        self.image_msg = None
        self.mutex = Lock()

        # ---
        self.log("Initialized.")
Example 3
    def __init__(self, node_name):

        # Initialize the DTROS parent class
        super(ObjectDetectionNode, self).__init__(
            node_name=node_name,
            node_type=NodeType.PERCEPTION
        )
        self.initialized = False
        self.log("Initializing!")


        # Construct publishers
        self.pub_obj_dets = rospy.Publisher(
            "~duckie_detected",
            BoolStamped,
            queue_size=1,
            dt_topic_type=TopicType.PERCEPTION
        )

        self.pub_detections_image = rospy.Publisher(
            "~object_detections_img", Image, queue_size=1, dt_topic_type=TopicType.DEBUG
        )

        # Construct subscribers
        self.sub_image = rospy.Subscriber(
            "~image/compressed",
            CompressedImage,
            self.image_cb,
            buff_size=10000000,
            queue_size=1
        )
        
        self.sub_thresholds = rospy.Subscriber(
            "~thresholds",
            AntiInstagramThresholds,
            self.thresholds_cb,
            queue_size=1
        )
        
        self.ai_thresholds_received = False
        self.anti_instagram_thresholds = dict()
        self.ai = AntiInstagram()
        self.bridge = CvBridge()

        model_file = rospy.get_param('~model_file', '.')
        self.veh = rospy.get_namespace().strip("/")
        aido_eval = rospy.get_param("~AIDO_eval", False)
        self.log(f"AIDO EVAL VAR: {aido_eval}")
        self.log("Starting model loading!")
        self._debug = rospy.get_param("~debug", False)
        self.model_wrapper = Wrapper(aido_eval)
        self.log("Finished model loading!")
        self.frame_id = 0
        self.initialized = True
        self.log("Initialized!")
Example 4
    def __init__(self, node_name):

        # Initialize the DTROS parent class
        super(ObjectDetectionNode,
              self).__init__(node_name=node_name,
                             node_type=NodeType.PERCEPTION)

        # Construct publishers
        self.pub_obj_dets = rospy.Publisher("~duckie_detected",
                                            BoolStamped,
                                            queue_size=1,
                                            dt_topic_type=TopicType.PERCEPTION)

        # Construct subscribers
        self.sub_image = rospy.Subscriber("~image/compressed",
                                          CompressedImage,
                                          self.image_cb_det,
                                          buff_size=10000000,
                                          queue_size=1)

        self.sub_thresholds = rospy.Subscriber("~thresholds",
                                               AntiInstagramThresholds,
                                               self.thresholds_cb,
                                               queue_size=1)

        # self.pub_seglist_filtered = rospy.Publisher("~seglist_filtered",
        #                                             SegmentList,
        #                                             queue_size=1,
        #                                             dt_topic_type=TopicType.DEBUG)

        self.pub_segmented_img = rospy.Publisher(
            "~debug/segmented_image/compressed",
            CompressedImage,
            queue_size=1,
            dt_topic_type=TopicType.DEBUG)

        self.ai_thresholds_received = False
        self.anti_instagram_thresholds = dict()
        self.ai = AntiInstagram()
        self.bridge = CvBridge()

        model_file = rospy.get_param('~model_file', '.')
        rospack = rospkg.RosPack()
        model_file_absolute = rospack.get_path('object_detection') + model_file
        self.model_wrapper = Wrapper(model_file_absolute)
        self.homography = self.load_extrinsics()
        homography = np.array(self.homography).reshape((3, 3))
        self.bridge = CvBridge()
        self.gpg = GroundProjectionGeometry(160, 120, homography)
        # self.gpg = GroundProjectionGeometry(320, 240, homography)
        self.initialized = True
        self.log("Initialized!")
Example 5
    def __init__(self, node_name):

        # Initialize the DTROS parent class
        super(ObjectDetectionNode,
              self).__init__(node_name=node_name,
                             node_type=NodeType.PERCEPTION)
        self.initialized = False

        # Construct publishers
        self.pub_obj_dets = rospy.Publisher("~duckie_detected",
                                            BoolStamped,
                                            queue_size=1,
                                            dt_topic_type=TopicType.PERCEPTION)

        # Construct subscribers
        self.sub_image = rospy.Subscriber("~image/compressed",
                                          CompressedImage,
                                          self.image_cb,
                                          buff_size=10000000,
                                          queue_size=1)

        self.sub_thresholds = rospy.Subscriber("~thresholds",
                                               AntiInstagramThresholds,
                                               self.thresholds_cb,
                                               queue_size=1)

        self.ai_thresholds_received = False
        self.anti_instagram_thresholds = dict()
        self.ai = AntiInstagram()
        self.bridge = CvBridge()

        model_file = rospy.get_param('~model_file', '.')
        rospack = rospkg.RosPack()
        model_file_absolute = rospack.get_path('object_detection') + model_file
        self.model_wrapper = Wrapper(model_file_absolute)
        self.frame_id = 0
        self.initialized = True
        self.log("Initialized!")
Example 6
class ObjectDetectionNode(DTROS):
    def __init__(self, node_name):

        # Initialize the DTROS parent class
        super(ObjectDetectionNode,
              self).__init__(node_name=node_name,
                             node_type=NodeType.PERCEPTION)

        # Construct publishers
        self.pub_obj_dets = rospy.Publisher("~duckie_detected",
                                            BoolStamped,
                                            queue_size=1,
                                            dt_topic_type=TopicType.PERCEPTION)

        # Construct subscribers
        self.sub_image = rospy.Subscriber("~image/compressed",
                                          CompressedImage,
                                          self.image_cb,
                                          buff_size=10000000,
                                          queue_size=1)

        self.sub_thresholds = rospy.Subscriber("~thresholds",
                                               AntiInstagramThresholds,
                                               self.thresholds_cb,
                                               queue_size=1)

        self.ai_thresholds_received = False
        self.anti_instagram_thresholds = dict()
        self.ai = AntiInstagram()
        self.bridge = CvBridge()

        model_file = rospy.get_param('~model_file', '.')
        rospack = rospkg.RosPack()
        model_file_absolute = rospack.get_path('object_detection') + model_file
        self.model_wrapper = Wrapper(model_file_absolute)
        self.initialized = True
        self.log("Initialized!")

    def thresholds_cb(self, thresh_msg):
        self.anti_instagram_thresholds["lower"] = thresh_msg.low
        self.anti_instagram_thresholds["higher"] = thresh_msg.high
        self.ai_thresholds_received = True

    def image_cb(self, image_msg):
        if not self.initialized:
            return

        # TODO to get better hz, you might want to call your wrapper's predict function only once every 4-5 images.
        # This way, you're not calling the model again for two practically identical images. Experiment to find a
        # good number of skipped images.
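        # A minimal frame-skipping sketch (hypothetical: the counter attribute
        # and the 1-in-5 rate are assumptions, not part of this scaffold):
        #
        #     self._skip_counter = getattr(self, "_skip_counter", -1)
        #     self._skip_counter = (self._skip_counter + 1) % 5
        #     if self._skip_counter != 0:
        #         return  # only one in five frames reaches the model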

        # Decode from compressed image with OpenCV
        try:
            image = self.bridge.compressed_imgmsg_to_cv2(image_msg)
        except ValueError as e:
            self.logerr('Could not decode image: %s' % e)
            return

        # Perform color correction
        if self.ai_thresholds_received:
            image = self.ai.apply_color_balance(
                self.anti_instagram_thresholds["lower"],
                self.anti_instagram_thresholds["higher"], image)

        image = cv2.resize(image, (224, 224))
        bboxes, classes, scores = self.model_wrapper.predict(image)

        msg = BoolStamped()
        msg.header = image_msg.header
        msg.data = self.det2bool(
            bboxes[0],
            classes[0])  # [0] because our batch size given to the wrapper is 1

        self.pub_obj_dets.publish(msg)

    def det2bool(self, bboxes, classes):
        # TODO remove these debugging prints
        print(bboxes)
        print(classes)

        # This is a dummy solution, remove this next line
        return len(bboxes) > 1

        # TODO filter the predictions: the environment here is a bit different from the data collection environment,
        # and your model might output a bit of noise. For example, you might see a bunch of predictions with x1=223.4
        # and x2=224, which makes no sense. You should remove these.

        # TODO also filter detections which are outside of the road, or too far away from the bot. Only return True
        # when there's a pedestrian (aka a duckie) in front of the bot, which you know the bot will have to avoid.
        # A good heuristic would be "if the centroid of the bounding box is in the center of the image, assume the
        # duckie is in the road" and "if the bounding box's area is more than X pixels, assume the duckie is close to us"

        obj_det_list = []
        for i in range(len(bboxes)):
            x1, y1, x2, y2 = bboxes[i]
            label = classes[i]
            # Hedged completion of this truncated scaffold: duckies are assumed
            # to be class 1 (as in the later examples) and the centroid/area
            # thresholds are illustrative, not the original solution.
            if label != 1:
                continue
            centroid_x = 0.5 * (x1 + x2)
            area = abs((x2 - x1) * (y2 - y1))
            if 0.25 * 224 <= centroid_x <= 0.75 * 224 and area >= 700:
                return True
        return False
Example 7
class ObjectDetectionNode(DTROS):
    def __init__(self, node_name):

        # Initialize the DTROS parent class
        super(ObjectDetectionNode,
              self).__init__(node_name=node_name,
                             node_type=NodeType.PERCEPTION)

        # Construct publishers
        self.pub_obj_dets = rospy.Publisher("~duckie_detected",
                                            BoolStamped,
                                            queue_size=1,
                                            dt_topic_type=TopicType.PERCEPTION)

        # Construct subscribers
        self.sub_image = rospy.Subscriber("~image/compressed",
                                          CompressedImage,
                                          self.image_cb,
                                          buff_size=10000000,
                                          queue_size=1)

        self.sub_thresholds = rospy.Subscriber("~thresholds",
                                               AntiInstagramThresholds,
                                               self.thresholds_cb,
                                               queue_size=1)

        self.sub_camera_info = rospy.Subscriber(
            f"/{os.environ['VEHICLE_NAME']}/camera_node/camera_info",
            CameraInfo,
            self.cb_camera_info,
            queue_size=1)

        self.sub_lane_reading = rospy.Subscriber(
            f"/{os.environ['VEHICLE_NAME']}/lane_filter_node/lane_pose",
            LanePose,
            self.cbLanePoses,
            queue_size=1)

        self.initialized = False
        self.ai_thresholds_received = False
        self.anti_instagram_thresholds = dict()
        self.ai = AntiInstagram()
        self.bridge = CvBridge()

        self.ground_projector = None
        self.rectifier = None
        self.homography = self.load_extrinsics()
        self.camera_info_received = False
        self.log(str(self.homography))
        self.lane_width = rospy.get_param('~lanewidth', None)
        self.safe_distance = rospy.get_param('~safe_distance', None)

        model_file = rospy.get_param('~model_file', '.')
        rospack = rospkg.RosPack()
        model_file_absolute = rospack.get_path('object_detection') + model_file
        self.model_wrapper = Wrapper(model_file_absolute)
        self.initialized = True
        self.image_count = 0
        self.obstacle_left_lane = False
        self.obstacle_right_lane = False
        self.log("Initialized!")

    def thresholds_cb(self, thresh_msg):
        self.anti_instagram_thresholds["lower"] = thresh_msg.low
        self.anti_instagram_thresholds["higher"] = thresh_msg.high
        self.ai_thresholds_received = True

    def cb_camera_info(self, msg):
        """
        Initializes a :py:class:`image_processing.GroundProjectionGeometry` object and a
        :py:class:`image_processing.Rectify` object for image rectification

        Args:
            msg (:obj:`sensor_msgs.msg.CameraInfo`): Intrinsic properties of the camera.

        """
        if not self.camera_info_received:
            self.rectifier = Rectify(msg)
            self.ground_projector = GroundProjectionGeometry(
                im_width=msg.width,
                im_height=msg.height,
                homography=np.array(self.homography).reshape((3, 3)))
            self.im_width = msg.width
            self.im_height = msg.height

        self.camera_info_received = True

    def cbLanePoses(self, input_pose_msg):
        """Callback receiving pose messages
        Computes v and omega using PPController
        Args:
            input_pose_msg (:obj:`LanePose`): Message containing information about the current lane pose.
        """
        self.pose_msg = input_pose_msg

    def image_cb(self, image_msg):
        if not self.initialized:
            return

        # TODO to get better hz, you might want to call your wrapper's predict function only once every 4-5 images.
        # This way, you're not calling the model again for two practically identical images. Experiment to find a
        # good number of skipped images.

        # Decode from compressed image with OpenCV
        try:
            image = self.bridge.compressed_imgmsg_to_cv2(image_msg)
        except ValueError as e:
            self.logerr('Could not decode image: %s' % e)
            return

        # Perform color correction
        if self.ai_thresholds_received:
            image = self.ai.apply_color_balance(
                self.anti_instagram_thresholds["lower"],
                self.anti_instagram_thresholds["higher"], image)

        image = cv2.resize(image, (224, 224))
        # run the model on one of every three frames; the counter must advance
        # on every frame, otherwise it would stay at 0 forever
        run_model = self.image_count == 0
        self.image_count = np.mod(self.image_count + 1, 3)
        if run_model:
            bboxes, classes, scores = self.model_wrapper.predict(image)
            im_boxed = self.plotWithBoundingBoxes(image, bboxes[0], classes[0],
                                                  scores[0])
            cv2.imshow('detected objects', im_boxed)
            cv2.waitKey(1)
            self.det2bool(
                bboxes[0], classes[0]
            )  # [0] because our batch size given to the wrapper is 1

        msg = BoolStamped()
        msg.header = image_msg.header
        if self.obstacle_right_lane:
            msg.data = True
            if self.obstacle_left_lane:
                pass
            else:
                ## OVERTAKING
                # msg.data = overtake
                pass

        self.pub_obj_dets.publish(msg)

    def det2bool(self, bboxes, classes):
        # TODO remove these debugging prints
        # print(bboxes)
        # print(classes)

        # This is a dummy solution, remove this next line
        # return len(bboxes) > 1

        # TODO filter the predictions: the environment here is a bit different from the data collection environment,
        # and your model might output a bit of noise. For example, you might see a bunch of predictions with x1=223.4
        # and x2=224, which makes no sense. You should remove these.

        # TODO also filter detections which are outside of the road, or too far away from the bot. Only return True
        # when there's a pedestrian (aka a duckie) in front of the bot, which you know the bot will have to avoid.
        # A good heuristic would be "if the centroid of the bounding box is in the center of the image, assume the
        # duckie is in the road" and "if the bounding box's area is more than X pixels, assume the duckie is close to us"

        self.obstacle_right_lane = False
        self.obstacle_left_lane = False
        obj_det_list = []
        for i in range(len(bboxes)):
            x1, y1, x2, y2 = bboxes[i]
            label = classes[i]
            if label == 1:
                if (x2 - x1 >= 2) and (y2 - y1 >= 2):
                    low_center = Point((x1 + x2) / 2, y2)
                    rect_pixel = self.rectifier.rectify_point(low_center)
                    ground_point = self.ground_projector.pixel2ground(
                        rect_pixel)
                    duckie_lane_pose = (
                        np.cos(self.pose_msg.phi) * (ground_point.y + self.pose_msg.d)
                        + np.sin(self.pose_msg.phi) * ground_point.x)
                    dist = np.sqrt(ground_point.x**2 + ground_point.y**2)
                    if np.abs(duckie_lane_pose) <= self.lane_width / 2:  # in our lane
                        if dist <= self.safe_distance:
                            self.obstacle_right_lane = True
                    elif np.abs(duckie_lane_pose) > self.lane_width / 2:  # in left lane
                        if dist <= self.safe_distance * 1.5:
                            self.obstacle_left_lane = True
            # TODO if label isn't a duckie, skip
            # TODO if detection is a pedestrian in front of us:
            #   return True

    def plotWithBoundingBoxes(self, seg_im, boxes, labels, scores):
        for i in range(len(labels)):
            cv2.rectangle(seg_im, (boxes[i][0], boxes[i][1]),
                          (boxes[i][2], boxes[i][3]), (255, 255, 255), 1)
            cv2.rectangle(seg_im, (boxes[i][0], boxes[i][1]),
                          (boxes[i][0] + 20, boxes[i][1] - 6), (255, 255, 255),
                          cv2.FILLED)
            cv2.putText(seg_im, f"{labels[i]} : {scores[i]}",
                        (boxes[i][0], boxes[i][1]), cv2.FONT_HERSHEY_COMPLEX,
                        0.5, (0, 0, 0), 1)
        return seg_im

    def load_extrinsics(self):
        """
        Loads the homography matrix from the extrinsic calibration file.

        Returns:
            :obj:`numpy array`: the loaded homography matrix

        """
        # load intrinsic calibration
        cali_file_folder = '/data/config/calibrations/camera_extrinsic/'
        cali_file = cali_file_folder + rospy.get_namespace().strip(
            "/") + ".yaml"

        # Locate calibration yaml file or use the default otherwise
        if not os.path.isfile(cali_file):
            self.log(
                "Can't find calibration file: %s.\n Using default calibration instead."
                % cali_file, 'warn')
            cali_file = (cali_file_folder + "default.yaml")

        # Shut down if no calibration file is found
        if not os.path.isfile(cali_file):
            msg = 'Found no calibration file ... aborting'
            self.log(msg, 'err')
            rospy.signal_shutdown(msg)

        try:
            with open(cali_file, 'r') as stream:
                calib_data = yaml.safe_load(stream)
        except yaml.YAMLError:
            msg = 'Error in parsing calibration file %s ... aborting' % cali_file
            self.log(msg, 'err')
            rospy.signal_shutdown(msg)

        return calib_data['homography']
Example 8
class ObjectDetectionNode(DTROS):

    def __init__(self, node_name):

        # Initialize the DTROS parent class
        super(ObjectDetectionNode, self).__init__(
            node_name=node_name,
            node_type=NodeType.PERCEPTION
        )
        self.initialized = False
        self.log("Initializing!")


        # Construct publishers
        self.pub_obj_dets = rospy.Publisher(
            "~duckie_detected",
            BoolStamped,
            queue_size=1,
            dt_topic_type=TopicType.PERCEPTION
        )

        self.pub_detections_image = rospy.Publisher(
            "~object_detections_img", Image, queue_size=1, dt_topic_type=TopicType.DEBUG
        )

        # Construct subscribers
        self.sub_image = rospy.Subscriber(
            "~image/compressed",
            CompressedImage,
            self.image_cb,
            buff_size=10000000,
            queue_size=1
        )
        
        self.sub_thresholds = rospy.Subscriber(
            "~thresholds",
            AntiInstagramThresholds,
            self.thresholds_cb,
            queue_size=1
        )
        
        self.ai_thresholds_received = False
        self.anti_instagram_thresholds = dict()
        self.ai = AntiInstagram()
        self.bridge = CvBridge()

        model_file = rospy.get_param('~model_file', '.')
        self.veh = rospy.get_namespace().strip("/")
        aido_eval = rospy.get_param("~AIDO_eval", False)
        self.log(f"AIDO EVAL VAR: {aido_eval}")
        self.log("Starting model loading!")
        self._debug = rospy.get_param("~debug", False)
        self.model_wrapper = Wrapper(aido_eval)
        self.log("Finished model loading!")
        self.frame_id = 0
        self.initialized = True
        self.log("Initialized!")
    
    def thresholds_cb(self, thresh_msg):
        self.anti_instagram_thresholds["lower"] = thresh_msg.low
        self.anti_instagram_thresholds["higher"] = thresh_msg.high
        self.ai_thresholds_received = True

    def image_cb(self, image_msg):
        if not self.initialized:
            return

        # process only one of every (1 + NUMBER_FRAMES_SKIPPED()) frames:
        # advance the counter on every frame, then skip unless it wrapped to 0
        self.frame_id += 1
        self.frame_id = self.frame_id % (1 + NUMBER_FRAMES_SKIPPED())
        if self.frame_id != 0:
            return

        # Decode from compressed image with OpenCV
        try:
            image = self.bridge.compressed_imgmsg_to_cv2(image_msg)
        except ValueError as e:
            self.logerr('Could not decode image: %s' % e)
            return
        
        # Perform color correction
        if self.ai_thresholds_received:
            image = self.ai.apply_color_balance(
                self.anti_instagram_thresholds["lower"],
                self.anti_instagram_thresholds["higher"],
                image
            )
        
        image = cv2.resize(image, (416, 416))
        bboxes, classes, scores = self.model_wrapper.predict(image)
        
        msg = BoolStamped()
        msg.header = image_msg.header
        msg.data = self.det2bool(bboxes, classes, scores)
        
        self.pub_obj_dets.publish(msg)

        if self._debug:
            colors = {0: (0, 255, 255), 1: (0, 165, 255), 2: (0, 250, 0), 3: (0, 0, 255)}
            names = {0: "duckie", 1: "cone", 2: "truck", 3: "bus"}
            font = cv2.FONT_HERSHEY_SIMPLEX
            for clas, box in zip(classes, bboxes):
                pt1 = np.array([int(box[0]), int(box[1])])
                pt2 = np.array([int(box[2]), int(box[3])])
                pt1 = tuple(pt1)
                pt2 = tuple(pt2)
                color = colors[clas.item()]
                name = names[clas.item()]
                image = cv2.rectangle(image, pt1, pt2, color, 2)
                text_location = (pt1[0], min(416, pt1[1]+20))
                image = cv2.putText(image, name, text_location, font, 1, color, thickness=3)
            obj_det_img = self.bridge.cv2_to_imgmsg(image, encoding="bgr8")
            self.pub_detections_image.publish(obj_det_img)


    def det2bool(self, bboxes, classes, scores):
        box_ids = np.array(list(map(filter_by_bboxes, bboxes))).nonzero()[0]
        cla_ids = np.array(list(map(filter_by_classes, classes))).nonzero()[0]
        sco_ids = np.array(list(map(filter_by_scores, scores))).nonzero()[0]

        # a detection counts only if its box, class and score all pass the filters
        box_cla_ids = set(box_ids).intersection(set(cla_ids))
        box_cla_sco_ids = set(sco_ids).intersection(set(box_cla_ids))

        return len(box_cla_sco_ids) > 0
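The filter_by_* helpers used by det2bool above are not shown in this example.
A minimal sketch of what they might look like, assuming duckies are class 0
(as in this example's debug color map) and using illustrative thresholds:

def filter_by_classes(clas):
    # keep duckie detections only (class 0 in the names map above)
    return clas == 0

def filter_by_scores(score):
    # keep reasonably confident detections; 0.5 is an assumed cutoff
    return score > 0.5

def filter_by_bboxes(bbox):
    # discard implausibly small boxes; the minimum area is an assumption
    x1, y1, x2, y2 = bbox
    return abs((x2 - x1) * (y2 - y1)) > 300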
Example 9
def run_pipeline(image, all_details=False, ground_truth=None, actual_map=None):
    """
        Image: numpy (H,W,3) == BGR
        Returns a dictionary, res with the following fields:

            res['input_image']

        ground_truth = pose
    """

    print('backend: %s' % matplotlib.get_backend())
    print('fname: %s' % matplotlib.matplotlib_fname())

    quick = False

    dtu.check_isinstance(image, np.ndarray)

    res = OrderedDict()
    stats = OrderedDict()
    vehicle_name = dtu.get_current_robot_name()

    res['Raw input image'] = image
    gp = get_ground_projection(vehicle_name)
    gpg = gp.get_ground_projection_geometry()
    line_detector = LineDetector()
    lane_filter = LaneFilterHistogram(None)
    print("pass lane_filter")
    ai = AntiInstagram()
    pts = ProcessingTimingStats()
    pts.reset()
    pts.received_message()
    pts.decided_to_process()

    with pts.phase('calculate AI transform'):
        [lower, upper] = ai.calculate_color_balance_thresholds(image)

    with pts.phase('apply AI transform'):
        transformed = ai.apply_color_balance(lower, upper, image)

    with pts.phase('edge detection'):
        # note: do not apply transform twice!
        segment_list2 = image_prep.process(pts,
                                           image,
                                           line_detector,
                                           transform=ai.apply_color_balance)

        if all_details:

            res['resized and corrected'] = image_prep.image_corrected

    dtu.logger.debug('segment_list2: %s' % len(segment_list2.segments))

    if all_details:
        res['segments_on_image_input_transformed'] = \
            vs_fancy_display(image_prep.image_cv, segment_list2)

    if all_details:
        res['segments_on_image_input_transformed_resized'] = \
            vs_fancy_display(image_prep.image_resized, segment_list2)

    if all_details:
        grid = get_grid(image.shape[:2])
        res['grid'] = grid
        res['grid_remapped'] = gpg.rectify(grid)

#     res['difference between the two'] = res['image_input']*0.5 + res['image_input_rect']*0.5

    with pts.phase('rectify_segments'):
        segment_list2_rect = rectify_segments(gpg, segment_list2)

    # Project to ground
    with pts.phase('find_ground_coordinates'):
        sg = find_ground_coordinates(gpg, segment_list2_rect)

    lane_filter.initialize()
    if all_details:
        res['prior'] = lane_filter.get_plot_phi_d()

    with pts.phase('lane filter update'):
        print(type(lane_filter).__name__)
        if type(lane_filter).__name__ == 'LaneFilterHistogram':
            # XXX merging pain
            _likelihood = lane_filter.update(sg.segments)
        else:
            _likelihood = lane_filter.update(sg)

    if not quick:
        with pts.phase('lane filter plot'):
            res['likelihood'] = lane_filter.get_plot_phi_d(
                ground_truth=ground_truth)
    easy_algo_db = get_easy_algo_db()

    if isinstance(lane_filter, LaneFilterMoreGeneric):
        template_name = lane_filter.localization_template
    else:
        template_name = 'DT17_template_straight_straight'
        dtu.logger.debug('Using default template %r for visualization' %
                         template_name)

    localization_template = \
        easy_algo_db.create_instance(FAMILY_LOC_TEMPLATES, template_name)

    with pts.phase('lane filter get_estimate()'):
        est = lane_filter.get_estimate()

    # Coordinates in TILE frame
    g = localization_template.pose_from_coords(est)
    tinfo = TransformationsInfo()
    tinfo.add_transformation(frame1=FRAME_GLOBAL, frame2=FRAME_AXLE, g=g)

    if all_details:
        if actual_map is not None:
            #         sm_axle = tinfo.transform_map_to_frame(actual_map, FRAME_AXLE)
            res['real'] = plot_map_and_segments(actual_map,
                                                tinfo,
                                                sg.segments,
                                                dpi=120,
                                                ground_truth=ground_truth)

    with pts.phase('rectify'):
        rectified0 = gpg.rectify(image)

    rectified = ai.apply_color_balance(lower, upper, rectified0)

    if all_details:
        res['image_input_rect'] = rectified

    res['segments rectified on image rectified'] = \
        vs_fancy_display(rectified, segment_list2_rect)

    assumed = localization_template.get_map()

    if not quick:
        with pts.phase('plot_map_and_segments'):
            res['model assumed for localization'] = plot_map_and_segments(
                assumed,
                tinfo,
                sg.segments,
                dpi=120,
                ground_truth=ground_truth)

    assumed_axle = tinfo.transform_map_to_frame(assumed, FRAME_AXLE)

    with pts.phase('plot_map reprojected'):
        res['map reprojected on image'] = plot_map(rectified,
                                                   assumed_axle,
                                                   gpg,
                                                   do_ground=False,
                                                   do_horizon=True,
                                                   do_faces=False,
                                                   do_faces_outline=True,
                                                   do_segments=False)

    with pts.phase('quality computation'):
        predicted_segment_list_rectified = predict_segments(sm=assumed_axle,
                                                            gpg=gpg)
        quality_res, quality_stats = judge_quality(
            image, segment_list2_rect, predicted_segment_list_rectified)
        res.update(quality_res)


#     res['blurred']= cv2.medianBlur(image, 11)
    stats['estimate'] = est
    stats.update(quality_stats)

    dtu.logger.info(pts.get_stats())
    return res, stats
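A minimal usage sketch for run_pipeline (the file name is hypothetical, and it
assumes the Duckietown dependencies imported above are available):

import cv2

frame = cv2.imread("frame.png")  # numpy (H, W, 3), BGR
res, stats = run_pipeline(frame, all_details=False)
print(stats['estimate'])  # pose estimate computed by the lane filter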
Example 10
class ObjectDetectionNode(DTROS):
    def __init__(self, node_name):

        # Initialize the DTROS parent class
        super(ObjectDetectionNode,
              self).__init__(node_name=node_name,
                             node_type=NodeType.PERCEPTION)

        # Construct publishers
        self.pub_obj_dets = rospy.Publisher("~duckie_detected",
                                            BoolStamped,
                                            queue_size=1,
                                            dt_topic_type=TopicType.PERCEPTION)

        # Construct subscribers
        self.sub_image = rospy.Subscriber("~image/compressed",
                                          CompressedImage,
                                          self.image_cb_det,
                                          buff_size=10000000,
                                          queue_size=1)

        self.sub_thresholds = rospy.Subscriber("~thresholds",
                                               AntiInstagramThresholds,
                                               self.thresholds_cb,
                                               queue_size=1)

        # self.pub_seglist_filtered = rospy.Publisher("~seglist_filtered",
        #                                             SegmentList,
        #                                             queue_size=1,
        #                                             dt_topic_type=TopicType.DEBUG)

        self.pub_segmented_img = rospy.Publisher(
            "~debug/segmented_image/compressed",
            CompressedImage,
            queue_size=1,
            dt_topic_type=TopicType.DEBUG)

        self.ai_thresholds_received = False
        self.anti_instagram_thresholds = dict()
        self.ai = AntiInstagram()
        self.bridge = CvBridge()

        model_file = rospy.get_param('~model_file', '.')
        rospack = rospkg.RosPack()
        model_file_absolute = rospack.get_path('object_detection') + model_file
        self.model_wrapper = Wrapper(model_file_absolute)
        self.homography = self.load_extrinsics()
        homography = np.array(self.homography).reshape((3, 3))
        self.bridge = CvBridge()
        self.gpg = GroundProjectionGeometry(160, 120, homography)
        # self.gpg = GroundProjectionGeometry(320, 240, homography)
        self.initialized = True
        self.log("Initialized!")

    def thresholds_cb(self, thresh_msg):
        self.anti_instagram_thresholds["lower"] = thresh_msg.low
        self.anti_instagram_thresholds["higher"] = thresh_msg.high
        self.ai_thresholds_received = True

    def image_cb_det(self, image_msg):
        if not self.initialized:
            return

        # Decode from compressed image with OpenCV
        try:
            image = self.bridge.compressed_imgmsg_to_cv2(image_msg)
        except ValueError as e:
            self.logerr('Could not decode image: %s' % e)
            return

        # Perform color correction
        if self.ai_thresholds_received:
            image = self.ai.apply_color_balance(
                self.anti_instagram_thresholds["lower"],
                self.anti_instagram_thresholds["higher"], image)

        img_reg = cv2.resize(image, (224, 224))
        img_rgb = cv2.cvtColor(img_reg, cv2.COLOR_BGR2RGB)
        boxes, classes, scores = self.model_wrapper.predict(img_rgb)
        boxes, classes, scores = boxes[0], classes[0], scores[0]
        if boxes is not None:
            img_w_boxes = add_boxes(img_reg, boxes, classes, scores)
        else:
            img_w_boxes = img_reg
        detection_img = self.bridge.cv2_to_compressed_imgmsg(img_w_boxes)
        detection_img.header.stamp = image_msg.header.stamp
        self.pub_segmented_img.publish(detection_img)

        msg = BoolStamped()
        msg.header = image_msg.header
        msg.data = self.det2bool(boxes, classes)  # batch dimension already stripped above

        self.pub_obj_dets.publish(msg)

        #
        # msg = BoolStamped()
        # msg.header = image_msg.header
        # if len(duckie_segments) == 0:
        #     # No duckie detection at all!
        #     msg.data = False
        # else:
        #     msg.data = self.det2bool(duckie_segments, min_num_seg=3, x_lim=0.2, y_lim=0.05)
        #     if msg.data:
        #         print("A duckie is facing the bot, let's stop and wait for it to cross")
        # self.pub_obj_dets.publish(msg)

    def det2bool(self, boxes, classes):
        if boxes is not None:
            for i in range(len(boxes)):
                if classes[i] != 1:  # everything except duckies is unimportant for now
                    continue
                x1, y1, x2, y2 = boxes[i]
                centroid_x = 0.5 * (x1 + x2)
                centroid_y = 0.5 * (y1 + y2)
                if 224 >= centroid_x >= 0.5 * 224:  # in the bottom 50% of the image
                    if 0.75 * 224 >= centroid_y >= 0.25 * 224:  # in the middle band of the image
                        if abs((x2 - x1) * (y2 - y1)) >= 700:  # large enough to be close
                            print("duckie detected")
                            return True

        return False

    def load_extrinsics(self):
        """
        Loads the homography matrix from the extrinsic calibration file.
        Returns:
            :obj:`numpy array`: the loaded homography matrix
        """
        # load intrinsic calibration
        cali_file_folder = '/data/config/calibrations/camera_extrinsic/'
        cali_file = cali_file_folder + rospy.get_namespace().strip(
            "/") + ".yaml"

        # Locate calibration yaml file or use the default otherwise
        if not os.path.isfile(cali_file):
            self.log(
                "Can't find calibration file: %s.\n Using default calibration instead."
                % cali_file, 'warn')
            cali_file = (cali_file_folder + "default.yaml")

        # Shut down if no calibration file is found
        if not os.path.isfile(cali_file):
            msg = 'Found no calibration file ... aborting'
            self.log(msg, 'err')
            rospy.signal_shutdown(msg)

        try:
            with open(cali_file, 'r') as stream:
                calib_data = yaml.safe_load(stream)
        except yaml.YAMLError:
            msg = 'Error in parsing calibration file %s ... aborting' % cali_file
            self.log(msg, 'err')
            rospy.signal_shutdown(msg)

        return calib_data['homography']
Example 11
class AntiInstagramNode(DTROS):

    """

    Subscriber:
        ~uncorrected_image/compressed: The uncorrected image coming from the camera

    Publisher:
        ~thresholds: The computed AntiInstagram color balance thresholds

    """

    def __init__(self, node_name):

        super(AntiInstagramNode, self).__init__(node_name=node_name, node_type=NodeType.PERCEPTION)

        # Read parameters

        self._interval = rospy.get_param("~interval")
        self._color_balance_percentage = rospy.get_param("~color_balance_scale")
        self._output_scale = rospy.get_param("~output_scale")

        # Construct publisher
        self.pub = rospy.Publisher(
            "~thresholds", AntiInstagramThresholds, queue_size=1, dt_topic_type=TopicType.PERCEPTION
        )

        # Construct subscriber
        self.uncorrected_image_subscriber = rospy.Subscriber(
            "~uncorrected_image/compressed",
            CompressedImage,
            self.store_image_msg,
            buff_size=10000000,
            queue_size=1,
        )

        # Initialize Timer
        rospy.Timer(rospy.Duration(self._interval), self.calculate_new_parameters)

        # Initialize objects and data
        self.ai = AntiInstagram()
        self.bridge = CvBridge()
        self.image_msg = None
        self.mutex = Lock()

        # ---
        self.log("Initialized.")

    def store_image_msg(self, image_msg):
        with self.mutex:
            self.image_msg = image_msg

    def decode_image_msg(self):
        with self.mutex:
            image = None  # stays None if decoding fails
            try:
                image = self.bridge.compressed_imgmsg_to_cv2(self.image_msg, "bgr8")
            except ValueError as e:
                self.log(f"Anti_instagram cannot decode image: {e}")
        return image

    def calculate_new_parameters(self, event):
        if self.image_msg is None:
            self.log("Waiting for first image!")
            return
        image = self.decode_image_msg()
        if image is None:
            return
        (lower_thresholds, higher_thresholds) = self.ai.calculate_color_balance_thresholds(
            image, self._output_scale, self._color_balance_percentage
        )

        # Publish parameters
        msg = AntiInstagramThresholds()
        msg.low = lower_thresholds
        msg.high = higher_thresholds
        self.pub.publish(msg)
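A minimal entry-point sketch for running this node (the node name below is an
assumption; in practice a Duckietown launch file starts the node):

if __name__ == "__main__":
    # Create the node and hand control over to ROS
    anti_instagram_node = AntiInstagramNode(node_name="anti_instagram_node")
    rospy.spin()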
Example 12
    def __init__(self, node_name):
        # Initialize the DTROS parent class
        super(LineDetectorNode, self).__init__(node_name=node_name,
                                               node_type=NodeType.PERCEPTION)

        # Define parameters
        self._line_detector_parameters = rospy.get_param(
            "~line_detector_parameters", None)
        self._colors = rospy.get_param("~colors", None)
        self._img_size = rospy.get_param("~img_size", None)
        self._top_cutoff = rospy.get_param("~top_cutoff", None)

        self.bridge = CvBridge()

        # The thresholds to be used for AntiInstagram color correction
        self.ai_thresholds_received = False
        self.anti_instagram_thresholds = dict()
        self.ai = AntiInstagram()

        # This holds the colormaps for the debug/ranges images after they are computed once
        self.colormaps = dict()

        # Create a new LineDetector object with the parameters from the Parameter Server / config file
        self.detector = LineDetector(**self._line_detector_parameters)
        # Update the color ranges objects
        self.color_ranges = {
            color: ColorRange.fromDict(d)
            for color, d in list(self._colors.items())
        }

        # Publishers
        self.pub_lines = rospy.Publisher("~segment_list",
                                         SegmentList,
                                         queue_size=1,
                                         dt_topic_type=TopicType.PERCEPTION)
        self.pub_d_segments = rospy.Publisher("~debug/segments/compressed",
                                              CompressedImage,
                                              queue_size=1,
                                              dt_topic_type=TopicType.DEBUG)
        self.pub_d_edges = rospy.Publisher("~debug/edges/compressed",
                                           CompressedImage,
                                           queue_size=1,
                                           dt_topic_type=TopicType.DEBUG)
        self.pub_d_maps = rospy.Publisher("~debug/maps/compressed",
                                          CompressedImage,
                                          queue_size=1,
                                          dt_topic_type=TopicType.DEBUG)
        # these are not compressed because compression adds undesired blur
        self.pub_d_ranges_HS = rospy.Publisher("~debug/ranges_HS",
                                               Image,
                                               queue_size=1,
                                               dt_topic_type=TopicType.DEBUG)
        self.pub_d_ranges_SV = rospy.Publisher("~debug/ranges_SV",
                                               Image,
                                               queue_size=1,
                                               dt_topic_type=TopicType.DEBUG)
        self.pub_d_ranges_HV = rospy.Publisher("~debug/ranges_HV",
                                               Image,
                                               queue_size=1,
                                               dt_topic_type=TopicType.DEBUG)

        # Subscribers
        self.sub_image = rospy.Subscriber("~image/compressed",
                                          CompressedImage,
                                          self.image_cb,
                                          buff_size=10000000,
                                          queue_size=1)

        self.sub_thresholds = rospy.Subscriber("~thresholds",
                                               AntiInstagramThresholds,
                                               self.thresholds_cb,
                                               queue_size=1)
Example 13
class LineDetectorNode(DTROS):
    """
    The ``LineDetectorNode`` is responsible for detecting the white, yellow and red line segments in an image and
    is used for lane localization.

    Upon receiving an image, this node reduces its resolution, cuts off the top part so that only the
    road-containing part of the image is left, extracts the white, red, and yellow segments and publishes them.
    The main functionality of this node is implemented in the :py:class:`line_detector.LineDetector` class.

    The performance of this node can be very sensitive to its configuration parameters. Therefore, it also provides a
    number of debug topics which can be used for fine-tuning these parameters. These configuration parameters can be
    changed dynamically while the node is running via ``rosparam set`` commands (see the sketch after this docstring).

    Args:
        node_name (:obj:`str`): a unique, descriptive name for the node that ROS will use

    Configuration:
        ~line_detector_parameters (:obj:`dict`): A dictionary with the parameters for the detector. The full list can be found in :py:class:`line_detector.LineDetector`.
        ~colors (:obj:`dict`): A dictionary of colors and color ranges to be detected in the image. The keys (color names) should match the ones in the Segment message definition, otherwise an exception will be thrown! See the ``config`` directory in the node code for the default ranges.
        ~img_size (:obj:`list` of ``int``): The desired downsized resolution of the image. Lower resolution would result in faster detection but lower performance, default is ``[120,160]``
        ~top_cutoff (:obj:`int`): The number of rows to be removed from the top of the image _after_ resizing, default is 40

    Subscriber:
        ~camera_node/image/compressed (:obj:`sensor_msgs.msg.CompressedImage`): The camera images
        ~anti_instagram_node/thresholds (:obj:`duckietown_msgs.msg.AntiInstagramThresholds`): The thresholds used for color correction

    Publishers:
        ~segment_list (:obj:`duckietown_msgs.msg.SegmentList`): A list of the detected segments. Each segment is an :obj:`duckietown_msgs.msg.Segment` message
        ~debug/segments/compressed (:obj:`sensor_msgs.msg.CompressedImage`): Debug topic with the segments drawn on the input image
        ~debug/edges/compressed (:obj:`sensor_msgs.msg.CompressedImage`): Debug topic with the Canny edges drawn on the input image
        ~debug/maps/compressed (:obj:`sensor_msgs.msg.CompressedImage`): Debug topic with the regions falling in each color range drawn on the input image
        ~debug/ranges_HS (:obj:`sensor_msgs.msg.Image`): Debug topic with a histogram of the colors in the input image and the color ranges, Hue-Saturation projection
        ~debug/ranges_SV (:obj:`sensor_msgs.msg.Image`): Debug topic with a histogram of the colors in the input image and the color ranges, Saturation-Value projection
        ~debug/ranges_HV (:obj:`sensor_msgs.msg.Image`): Debug topic with a histogram of the colors in the input image and the color ranges, Hue-Value projection

    """
    def __init__(self, node_name):
        # Initialize the DTROS parent class
        super(LineDetectorNode, self).__init__(node_name=node_name,
                                               node_type=NodeType.PERCEPTION)

        # Define parameters
        self._line_detector_parameters = rospy.get_param(
            "~line_detector_parameters", None)
        self._colors = rospy.get_param("~colors", None)
        self._img_size = rospy.get_param("~img_size", None)
        self._top_cutoff = rospy.get_param("~top_cutoff", None)

        self.bridge = CvBridge()

        # The thresholds to be used for AntiInstagram color correction
        self.ai_thresholds_received = False
        self.anti_instagram_thresholds = dict()
        self.ai = AntiInstagram()

        # This holds the colormaps for the debug/ranges images after they are computed once
        self.colormaps = dict()

        # Create a new LineDetector object with the parameters from the Parameter Server / config file
        self.detector = LineDetector(**self._line_detector_parameters)
        # Update the color ranges objects
        self.color_ranges = {
            color: ColorRange.fromDict(d)
            for color, d in list(self._colors.items())
        }

        # Publishers
        self.pub_lines = rospy.Publisher("~segment_list",
                                         SegmentList,
                                         queue_size=1,
                                         dt_topic_type=TopicType.PERCEPTION)
        self.pub_d_segments = rospy.Publisher("~debug/segments/compressed",
                                              CompressedImage,
                                              queue_size=1,
                                              dt_topic_type=TopicType.DEBUG)
        self.pub_d_edges = rospy.Publisher("~debug/edges/compressed",
                                           CompressedImage,
                                           queue_size=1,
                                           dt_topic_type=TopicType.DEBUG)
        self.pub_d_maps = rospy.Publisher("~debug/maps/compressed",
                                          CompressedImage,
                                          queue_size=1,
                                          dt_topic_type=TopicType.DEBUG)
        # these are not compressed because compression adds undesired blur
        self.pub_d_ranges_HS = rospy.Publisher("~debug/ranges_HS",
                                               Image,
                                               queue_size=1,
                                               dt_topic_type=TopicType.DEBUG)
        self.pub_d_ranges_SV = rospy.Publisher("~debug/ranges_SV",
                                               Image,
                                               queue_size=1,
                                               dt_topic_type=TopicType.DEBUG)
        self.pub_d_ranges_HV = rospy.Publisher("~debug/ranges_HV",
                                               Image,
                                               queue_size=1,
                                               dt_topic_type=TopicType.DEBUG)

        # Subscribers
        self.sub_image = rospy.Subscriber("~image/compressed",
                                          CompressedImage,
                                          self.image_cb,
                                          buff_size=10000000,
                                          queue_size=1)

        self.sub_thresholds = rospy.Subscriber("~thresholds",
                                               AntiInstagramThresholds,
                                               self.thresholds_cb,
                                               queue_size=1)

    def thresholds_cb(self, thresh_msg):
        self.anti_instagram_thresholds["lower"] = thresh_msg.low
        self.anti_instagram_thresholds["higher"] = thresh_msg.high
        self.ai_thresholds_received = True

    def image_cb(self, image_msg):
        """
        Processes the incoming image messages.

        Performs the following steps for each incoming image:

        #. Performs color correction
        #. Resizes the image to the ``~img_size`` resolution
        #. Removes the top ``~top_cutoff`` rows in order to remove the part of the image that doesn't include the road
        #. Extracts the line segments in the image using :py:class:`line_detector.LineDetector`
        #. Converts the coordinates of detected segments to normalized ones
        #. Creates and publishes the resultant :obj:`duckietown_msgs.msg.SegmentList` message
        #. Creates and publishes debug images if there is a subscriber to the respective topics

        Args:
            image_msg (:obj:`sensor_msgs.msg.CompressedImage`): The received image message

        """

        # Decode from compressed image with OpenCV
        try:
            image = self.bridge.compressed_imgmsg_to_cv2(image_msg)
        except ValueError as e:
            self.logerr(f"Could not decode image: {e}")
            return

        # Perform color correction
        if self.ai_thresholds_received:
            image = self.ai.apply_color_balance(
                self.anti_instagram_thresholds["lower"],
                self.anti_instagram_thresholds["higher"], image)

        # Resize the image to the desired dimensions
        height_original, width_original = image.shape[0:2]
        img_size = (self._img_size[1], self._img_size[0])
        if img_size[0] != width_original or img_size[1] != height_original:
            image = cv2.resize(image,
                               img_size,
                               interpolation=cv2.INTER_NEAREST)
        image = image[self._top_cutoff:, :, :]

        # Extract the line segments for every color
        self.detector.setImage(image)
        detections = {
            color: self.detector.detectLines(ranges)
            for color, ranges in list(self.color_ranges.items())
        }

        # Construct a SegmentList
        segment_list = SegmentList()
        segment_list.header.stamp = image_msg.header.stamp

        # Remove the offset in coordinates introduced by cutting off the top part, and normalize by the image size
        arr_cutoff = np.array([0, self._top_cutoff, 0, self._top_cutoff])
        arr_ratio = np.array([
            1.0 / self._img_size[1],
            1.0 / self._img_size[0],
            1.0 / self._img_size[1],
            1.0 / self._img_size[0],
        ])

        # Fill in the segment_list with all the detected segments
        for color, det in list(detections.items()):
            # Get the ID for the color from the Segment msg definition;
            # throw an exception otherwise
            if len(det.lines) > 0 and len(det.normals) > 0:
                try:
                    color_id = getattr(Segment, color)
                    lines_normalized = (det.lines + arr_cutoff) * arr_ratio
                    segment_list.segments.extend(
                        self._to_segment_msg(lines_normalized, det.normals,
                                             color_id))
                except AttributeError:
                    self.logerr(
                        f"Color name {color} is not defined in the Segment message"
                    )

        # Publish the message
        self.pub_lines.publish(segment_list)

        # If there are any subscribers to the debug topics, generate a debug image and publish it
        if self.pub_d_segments.get_num_connections() > 0:
            colorrange_detections = {
                self.color_ranges[c]: det
                for c, det in list(detections.items())
            }
            debug_img = plotSegments(image, colorrange_detections)
            debug_image_msg = self.bridge.cv2_to_compressed_imgmsg(debug_img)
            debug_image_msg.header = image_msg.header
            self.pub_d_segments.publish(debug_image_msg)

        if self.pub_d_edges.get_num_connections() > 0:
            debug_image_msg = self.bridge.cv2_to_compressed_imgmsg(
                self.detector.canny_edges)
            debug_image_msg.header = image_msg.header
            self.pub_d_edges.publish(debug_image_msg)

        if self.pub_d_maps.get_num_connections() > 0:
            colorrange_detections = {
                self.color_ranges[c]: det
                for c, det in list(detections.items())
            }
            debug_img = plotMaps(image, colorrange_detections)
            debug_image_msg = self.bridge.cv2_to_compressed_imgmsg(debug_img)
            debug_image_msg.header = image_msg.header
            self.pub_d_maps.publish(debug_image_msg)

        for channels in ["HS", "SV", "HV"]:
            publisher = getattr(self, f"pub_d_ranges_{channels}")
            if publisher.get_num_connections() > 0:
                debug_img = self._plot_ranges_histogram(channels)
                debug_image_msg = self.bridge.cv2_to_imgmsg(debug_img,
                                                            encoding="bgr8")
                debug_image_msg.header = image_msg.header
                publisher.publish(debug_image_msg)

    @staticmethod
    def _to_segment_msg(lines, normals, color):
        """
        Converts line detections to a list of Segment messages.

        Converts the resultant line segments and normals from the line detection to a list of Segment messages.

        Args:
            lines (:obj:`numpy array`): An ``Nx4`` array where each row represents a line.
            normals (:obj:`numpy array`): An ``Nx2`` array where each row represents the normal of a line.
            color (:obj:`str`): Color name string, should be one of the pre-defined in the Segment message definition.

        Returns:
            :obj:`list` of :obj:`duckietown_msgs.msg.Segment`: List of Segment messages

        """
        segment_msg_list = []
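        # Each stacked row packs one segment: [x1, y1, x2, y2, norm_x, norm_y]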
        for x1, y1, x2, y2, norm_x, norm_y in np.hstack((lines, normals)):
            segment = Segment()
            segment.color = color
            segment.pixels_normalized[0].x = x1
            segment.pixels_normalized[0].y = y1
            segment.pixels_normalized[1].x = x2
            segment.pixels_normalized[1].y = y2
            segment.normal.x = norm_x
            segment.normal.y = norm_y
            segment_msg_list.append(segment)
        return segment_msg_list

    def _plot_ranges_histogram(self, channels):
        """Utility method for plotting color histograms and color ranges.

        Args:
            channels (:obj:`str`): The desired two channels, should be one of ``['HS','SV','HV']``

        Returns:
            :obj:`numpy array`: The resultant plot image

        """
        channel_to_axis = {"H": 0, "S": 1, "V": 2}
        axis_to_range = {0: 180, 1: 256, 2: 256}
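        # OpenCV 8-bit HSV convention: hue lives in [0, 180), saturation and value in [0, 256)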

        # Get which is the third channel that will not be shown in this plot
        missing_channel = "HSV".replace(channels[0],
                                        "").replace(channels[1], "")

        hsv_im = self.detector.hsv
        # Get the pixels as a list (flatten the horizontal and vertical dimensions)
        hsv_im = hsv_im.reshape((-1, 3))

        channel_idx = [
            channel_to_axis[channels[0]], channel_to_axis[channels[1]]
        ]

        # Get only the relevant channels
        x_bins = np.arange(0, axis_to_range[channel_idx[1]] + 1, 2)
        y_bins = np.arange(0, axis_to_range[channel_idx[0]] + 1, 2)
        h, _, _ = np.histogram2d(x=hsv_im[:, channel_idx[0]],
                                 y=hsv_im[:, channel_idx[1]],
                                 bins=[y_bins, x_bins])
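        # Rows of ``h`` follow channels[0], columns follow channels[1]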
        # Log-normalized histogram
        np.log(h, out=h, where=(h != 0))
        h = (255 * h / np.max(h)).astype(np.uint8)
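        # The log compresses the dynamic range so sparsely populated bins remain
        # visible next to the dominant peaks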

        # Make a color map, for the missing channel, just take the middle of the range
        if channels not in self.colormaps:
            colormap_1, colormap_0 = np.meshgrid(x_bins[:-1], y_bins[:-1])
            colormap_2 = np.ones_like(colormap_0) * (
                axis_to_range[channel_to_axis[missing_channel]] / 2)

            channel_to_map = {
                channels[0]: colormap_0,
                channels[1]: colormap_1,
                missing_channel: colormap_2
            }

            self.colormaps[channels] = np.stack([
                channel_to_map["H"], channel_to_map["S"], channel_to_map["V"]
            ],
                                                axis=-1).astype(np.uint8)
            self.colormaps[channels] = cv2.cvtColor(self.colormaps[channels],
                                                    cv2.COLOR_HSV2BGR)
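        # The colormap depends only on the channel pair, so it is cached and
        # reused across calls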

        # Resulting histogram image as a blend of the histogram and the colormap
        im = cv2.cvtColor(h[:, :, None], cv2.COLOR_GRAY2BGR)
        im = cv2.addWeighted(im, 0.5, self.colormaps[channels], 0.5, 0.0)

        # now plot the color ranges on top
        for _, color_range in list(self.color_ranges.items()):
            # convert HSV color to BGR
            c = color_range.representative
            c = np.uint8([[[c[0], c[1], c[2]]]])
            color = cv2.cvtColor(c, cv2.COLOR_HSV2BGR).squeeze().astype(int)
            for i in range(len(color_range.low)):
                cv2.rectangle(
                    im,
                    pt1=(
                        (color_range.high[i, channel_idx[1]] / 2).astype(
                            np.uint8),
                        (color_range.high[i, channel_idx[0]] / 2).astype(
                            np.uint8),
                    ),
                    pt2=(
                        (color_range.low[i, channel_idx[1]] / 2).astype(
                            np.uint8),
                        (color_range.low[i, channel_idx[0]] / 2).astype(
                            np.uint8),
                    ),
                    color=color,
                    lineType=cv2.LINE_4,
                )
        # ---
        return im
Example #14
class ObjectDetectionNode(DTROS):
    def __init__(self, node_name):

        # Initialize the DTROS parent class
        super(ObjectDetectionNode,
              self).__init__(node_name=node_name,
                             node_type=NodeType.PERCEPTION)
        self.initialized = False

        # Construct publishers
        self.pub_obj_dets = rospy.Publisher("~duckie_detected",
                                            BoolStamped,
                                            queue_size=1,
                                            dt_topic_type=TopicType.PERCEPTION)

        # Construct subscribers
        self.sub_image = rospy.Subscriber("~image/compressed",
                                          CompressedImage,
                                          self.image_cb,
                                          buff_size=10000000,
                                          queue_size=1)

        self.sub_thresholds = rospy.Subscriber("~thresholds",
                                               AntiInstagramThresholds,
                                               self.thresholds_cb,
                                               queue_size=1)

        self.ai_thresholds_received = False
        self.anti_instagram_thresholds = dict()
        self.ai = AntiInstagram()
        self.bridge = CvBridge()

        model_file = rospy.get_param('~model_file', '.')
        rospack = rospkg.RosPack()
        model_file_absolute = rospack.get_path('object_detection') + model_file
        self.model_wrapper = Wrapper(model_file_absolute)
        self.frame_id = 0
        self.initialized = True
        self.log("Initialized!")

    def thresholds_cb(self, thresh_msg):
        self.anti_instagram_thresholds["lower"] = thresh_msg.low
        self.anti_instagram_thresholds["higher"] = thresh_msg.high
        self.ai_thresholds_received = True

    def image_cb(self, image_msg):
        if not self.initialized:
            return

        # Call the model only on every (NUMBER_FRAMES_SKIPPED() + 1)-th frame:
        # increment the counter first, then skip unless it wrapped back to zero
        from integration import NUMBER_FRAMES_SKIPPED
        self.frame_id += 1
        self.frame_id = self.frame_id % (1 + NUMBER_FRAMES_SKIPPED())
        if self.frame_id != 0:
            return

        # Decode from compressed image with OpenCV
        try:
            image = self.bridge.compressed_imgmsg_to_cv2(image_msg)
        except ValueError as e:
            self.logerr(f"Could not decode image: {e}")
            return

        # Perform color correction
        if self.ai_thresholds_received:
            image = self.ai.apply_color_balance(
                self.anti_instagram_thresholds["lower"],
                self.anti_instagram_thresholds["higher"], image)

        image = cv2.resize(image, (416, 416))
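        # The wrapper returns the bounding boxes, class ids, and confidence
        # scores of the detections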
        bboxes, classes, scores = self.model_wrapper.predict(image)

        msg = BoolStamped()
        msg.header = image_msg.header
        msg.data = self.det2bool(bboxes, classes, scores)

        self.pub_obj_dets.publish(msg)

    def det2bool(self, bboxes, classes, scores):
        print(f"Before filtering: {len(bboxes)} detections")

        from integration import filter_by_classes
        from integration import filter_by_bboxes
        from integration import filter_by_scores

        box_ids = np.array(list(map(filter_by_bboxes, bboxes))).nonzero()[0]
        cla_ids = np.array(list(map(filter_by_classes, classes))).nonzero()[0]
        sco_ids = np.array(list(map(filter_by_scores, scores))).nonzero()[0]

        # A detection counts only if it passes all three filters
        box_cla_ids = set(box_ids).intersection(set(cla_ids))
        box_cla_sco_ids = set(sco_ids).intersection(box_cla_ids)

        print(f"After filtering: {len(box_cla_sco_ids)} detections")

        return len(box_cla_sco_ids) > 0
Example #15
class ObjectDetectionNode(DTROS):
    def __init__(self, node_name):

        # Initialize the DTROS parent class
        super(ObjectDetectionNode,
              self).__init__(node_name=node_name,
                             node_type=NodeType.PERCEPTION)

        # Construct publishers
        self.pub_obj_dets = rospy.Publisher("~duckie_detected",
                                            BoolStamped,
                                            queue_size=1,
                                            dt_topic_type=TopicType.PERCEPTION)

        # Construct subscribers
        self.sub_image = rospy.Subscriber("~image/compressed",
                                          CompressedImage,
                                          self.image_cb,
                                          buff_size=10000000,
                                          queue_size=1)

        self.sub_thresholds = rospy.Subscriber("~thresholds",
                                               AntiInstagramThresholds,
                                               self.thresholds_cb,
                                               queue_size=1)

        self.ai_thresholds_received = False
        self.anti_instagram_thresholds = dict()
        self.ai = AntiInstagram()
        self.bridge = CvBridge()

        model_file = rospy.get_param('~model_file', '.')
        rospack = rospkg.RosPack()
        model_file_absolute = rospack.get_path('object_detection') + model_file
        self.model_wrapper = Wrapper(model_file_absolute)
        self.initialized = True
        self.log("Initialized!")

    def thresholds_cb(self, thresh_msg):
        self.anti_instagram_thresholds["lower"] = thresh_msg.low
        self.anti_instagram_thresholds["higher"] = thresh_msg.high
        self.ai_thresholds_received = True

    def image_cb(self, image_msg):
        if not self.initialized:
            return

        # TODO: to get a better rate (hz), you might want to call your wrapper's predict
        # function only once every 4-5 images. This way, you're not calling the model again
        # for two practically identical images. Experiment to find a good number of skipped
        # images.

        # Decode from compressed image with OpenCV
        try:
            image = self.bridge.compressed_imgmsg_to_cv2(image_msg)
        except ValueError as e:
            self.logerr(f"Could not decode image: {e}")
            return

        # Perform color correction
        if self.ai_thresholds_received:
            image = self.ai.apply_color_balance(
                self.anti_instagram_thresholds["lower"],
                self.anti_instagram_thresholds["higher"], image)

        image = cv2.resize(image, (224, 224))
        bboxes, classes, scores = self.model_wrapper.predict(image)

        msg = BoolStamped()
        msg.header = image_msg.header
        msg.data = self.det2bool(
            bboxes[0],
            classes[0])  # [0] because our batch size given to the wrapper is 1

        self.pub_obj_dets.publish(msg)

    def midpoint(self, p1, p2):
        return Point((p1.x + p2.x) / 2, (p1.y + p2.y) / 2)

    def det2bool(self, bboxes, classes):

        middle_bounds_x = (80, 160)
        middle_bounds_y = (100, 224)
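        # The bounds above are in pixels of the 224x224 resized frame and delimit
        # the region directly ahead of the robot; a duckie (class 1) whose box
        # center falls inside it yields True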
        for i in range(len(bboxes)):
            # Skip degenerate boxes thinner than 2 pixels in either dimension
            if abs(bboxes[i][0] - bboxes[i][2]) < 2 or abs(bboxes[i][1] -
                                                           bboxes[i][3]) < 2:
                print("SKIP")
                continue
            if classes[i] == 1:
                lower = Point(bboxes[i][0], bboxes[i][1])
                upper = Point(bboxes[i][2], bboxes[i][3])
                middle = self.midpoint(lower, upper)
                print("MIDDLE")
                print(f"{middle.x},{middle.y}")
                if middle_bounds_x[0] < middle.x < middle_bounds_x[1] and \
                        middle_bounds_y[0] < middle.y < middle_bounds_y[1]:
                    print("DUCKIE IN FRONT")
                    return True
        return False