Example #1
class DecoderNode(DTROS):
    def __init__(self, node_name):
        super().__init__(node_name, node_type=NodeType.PERCEPTION)

        # parameters
        self.publish_freq = DTParam("~publish_freq", -1)

        # utility objects
        self.bridge = CvBridge()
        self.reminder = DTReminder(frequency=self.publish_freq.value)

        # subscribers
        self.sub_img = rospy.Subscriber("~image_in",
                                        CompressedImage,
                                        self.cb_image,
                                        queue_size=1,
                                        buff_size="10MB")

        # publishers
        self.pub_img = rospy.Publisher(
            "~image_out",
            Image,
            queue_size=1,
            dt_topic_type=TopicType.PERCEPTION,
            dt_healthy_freq=self.publish_freq.value,
            dt_help="Raw image",
        )

    def cb_image(self, msg):
        # make sure this matters to somebody
        if not self.pub_img.anybody_listening():
            return
        # make sure the node is not switched off
        if not self.switch:
            return
        # make sure this is a good time to publish (always keep this as last check)
        if not self.reminder.is_time(frequency=self.publish_freq.value):
            return
        # turn 'compressed image message' into 'raw image'
        with self.profiler("/cb/image/decode"):
            img = self.bridge.compressed_imgmsg_to_cv2(msg)
        # turn 'raw image' into 'raw image message'
        with self.profiler("/cb/image/serialize"):
            out_msg = self.bridge.cv2_to_imgmsg(img, "bgr8")
        # maintain original header
        out_msg.header = msg.header
        # publish image
        self.pub_img.publish(out_msg)
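
The snippet above only defines the node class; a minimal launch stub is sketched below, assuming the conventional ROS entry-point pattern (the node name passed here is purely illustrative).

if __name__ == "__main__":
    # instantiate the node (DTROS is expected to call rospy.init_node internally) ...
    node = DecoderNode(node_name="decoder_node")
    # ... then hand control to ROS so the subscriber callback keeps firing
    rospy.spin()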
Example #2
class Battery:
    def __init__(self, callback, logger: Logger = None):
        self._devices = []
        self._info = None
        self._data = None
        self._device = None
        self._interaction: Optional[BatteryInteraction] = BatteryInteraction(
            name="get_info",
            command=b'??',
            check=lambda d: 'FirmwareVersion' in d,
            callback=lambda d: setattr(self, '_info', self._format_info(d)))
        self._is_shutdown = False
        self._logger = logger
        if not callable(callback):
            raise ValueError('Callback must be a callable object.')
        self._callback = callback
        self._reset_reminder = DTReminder(period=5)
        self._worker = Thread(target=self._work)
        self._history = BatteryHistory()

    def start(self, block: bool = False, quiet: bool = True):
        if block:
            return self._work(quiet=quiet)
        else:
            self._worker.start()

    def join(self):
        if self._worker.is_alive():
            self._worker.join()

    def is_shutdown(self):
        return self._is_shutdown

    def shutdown(self):
        #   This is NOT a battery shutdown; it simply shuts down the driver thread
        self._is_shutdown = True
        self.join()

    def turn_off(self,
                 timeout: int = 20,
                 wait: bool = False,
                 callback: Optional[Callable] = None):
        #   This is a battery shutdown; the power will be cut off after `timeout` seconds
        firmware_version = self.info.get("version")
        # noinspection PyBroadException
        try:
            # multi-firmware support
            if semver.compare(firmware_version, "2.0.0") == 0:
                timeout_str = f'{timeout}'.zfill(2)
                self._interaction = BatteryInteraction(
                    name="turn_off",
                    command=f'Q{timeout_str}'.encode('utf-8'),
                    check=lambda d: d.get('TTL(sec)', None) == timeout,
                    callback=callback,
                )
            elif semver.compare(firmware_version, "2.0.1") >= 0:
                self._interaction = BatteryInteraction(
                    name="turn_off",
                    command="QQ".encode('utf-8'),
                    check=lambda d: d.get('QACK', None) is not None,
                    callback=callback)
            else:
                raise Exception()
        except Exception:
            self._logger.warning(
                f"Unknown/Unsupported battery firmware: {firmware_version}")

        if wait:
            self._interaction.join()

    @property
    def info(self):
        return self._info

    @property
    def data(self):
        return self._data

    @data.setter
    def data(self, data):
        self._data = data
        self._chew_on_data()

    def history(self):
        return self._history.get()

    def _find_device(self):
        vid_pid_match = "VID:PID={}:{}".format(BATTERY_PCB16_READY_VID,
                                               BATTERY_PCB16_READY_PID)
        ports = serial_grep(vid_pid_match)
        self._devices = [p.device for p in ports]  # ['/dev/ttyACM0', ...]

    def _chew_on_data(self):
        if self._data and self._info:
            out = {
                "present": True,
                "charging": self._data['input_voltage'] >= (5.0 / 2),
                **self._data
            }
            self._callback(out)
            self._history.add(self._data)

    def _read_next(self, dev, quiet: bool = True):
        try:
            raw = dev.read_until().decode('utf-8', 'ignore')
            cleaned = re.sub(r"\x00\s*", "", raw).rstrip()
            cleaned = re.sub(r"-\s+", "-", cleaned)
            try:
                # if "}{" is present, keep only the first packet (e.g. to check the shutdown ACK)
                if "}{" in cleaned:
                    cleaned = cleaned.split("}{")[0] + "}"
                parsed = yaml.load(cleaned, yaml.SafeLoader)
                return parsed
            except yaml.YAMLError as e:
                if self._logger:
                    self._logger.error(str(e))
                return None
        except BaseException as e:
            if quiet:
                # do not print, it would swamp the logs
                pass
            else:
                raise e

    def _work(self, quiet: bool = True):
        while True:
            if self._is_shutdown:
                return
            # ---
            # if we don't have a battery device, search again
            if len(self._devices) == 0:
                self._find_device()
            # if we still don't have it, just sleep for 5 seconds
            if len(self._devices) == 0:
                self._logger.warning(
                    'No battery found. Retrying in 5 seconds.')
            else:
                # we have at least one candidate device, try reading
                for device in self._devices:
                    with serial.Serial(device, BATTERY_PCB16_BAUD_RATE) as dev:
                        # once the device is open, try reading from it forever
                        # break only on unknown errors
                        while True:
                            if self._is_shutdown:
                                return
                            # ---
                            if self._data is not None:
                                # we were able to read from the battery at least once,
                                # consume any pending interaction
                                if self._interaction.active:
                                    iname, icmd = self._interaction.name, self._interaction.command
                                    # there is a command to be sent, send it and continue
                                    self._logger.debug(
                                        f"Pending interaction '{iname}' found. "
                                        f"Sending {str(icmd)} to the battery")
                                    dev.write(self._interaction.command)
                                    dev.flush()
                            # ---
                            try:
                                parsed = self._read_next(dev, quiet=quiet)
                                if parsed is None:
                                    continue
                                # first time we read?
                                if self._device is None:
                                    self._device = device
                                    self._logger.info(
                                        'Battery found at {}.'.format(device))
                                # distinguish between 'data' packet and others
                                if 'SOC(%)' in parsed:
                                    # this is a 'data' packet
                                    self.data = self._format_data(parsed)
                                elif self._interaction.active:
                                    iname = self._interaction.name
                                    self._logger.debug(
                                        f"Received (candidate) response to "
                                        f"interaction '{iname}': {str(parsed)}"
                                    )
                                    if self._interaction.check(parsed):
                                        self._logger.debug(
                                            f"Received valid response to "
                                            f"interaction '{iname}': {str(parsed)}"
                                        )
                                        # complete interaction
                                        self._interaction.complete(parsed)
                            except BaseException as e:
                                if quiet:
                                    traceback.print_exc()
                                    break
                                raise e
            if self._logger:
                self._logger.warning(
                    'An error occurred while reading from the battery.')
            # allow 5 seconds for things to reset
            self._reset_reminder.reset()
            while not self._reset_reminder.is_time():
                if self._is_shutdown:
                    return
                time.sleep(0.5)

    @staticmethod
    def _format_data(data):
        return {
            "temperature": round(KELVIN_TO_CELSIUS(data['CellTemp(degK)']), 2),
            "cell_voltage": round(float(data['CellVoltage(mV)']) / 1000, 2),
            "input_voltage": round(float(data['ChargerVoltage(mV)']) / 1000, 2),
            "current": round(float(data['Current(mA)']) / 1000, 2),
            "cycle_count": data['CycleCount'],
            "percentage": data['SOC(%)'],
            "time_to_empty": int(data['TimeToEmpty(min)'] * 60),
            "usb_out_1_voltage": round(float(data['USB OUT-1(mV)']) / 1000, 2),
            "usb_out_2_voltage": round(float(data['USB OUT-2(mV)']) / 1000, 2)
        }

    @staticmethod
    def _format_info(info):
        boot_data = str(info["BootData"])
        firmware_version = str(info["FirmwareVersion"])
        major, minor, patch, *_ = firmware_version + "000"
        yy, mm, dd = boot_data[3:5], boot_data[5:7], boot_data[7:9]
        return {
            "version": f"{major}.{minor}.{patch}",
            "boot": {
                "version": boot_data[0],
                "pcb_version": boot_data[1:3],
                "date": f"{mm}/{dd}/{yy}"
            },
            "serial_number": info["SerialNumber"]
        }
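
The Battery class above is only the driver; the sketch below shows one way it might be wired up (names are illustrative). The callback receives the dictionary assembled in _chew_on_data: the formatted readings plus the 'present' and 'charging' flags.

import time

def on_battery_data(data: dict):
    # e.g. {'present': True, 'charging': False, 'percentage': 87, ...}
    print(f"battery at {data['percentage']}% (charging: {data['charging']})")

battery = Battery(callback=on_battery_data)
battery.start(block=False)    # run the serial read loop in a background thread
time.sleep(60)                # application does its work here
battery.shutdown()            # stops the driver thread; does NOT power off the battery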
Example #3
class AprilTagDetector(DTROS):

    def __init__(self):
        super(AprilTagDetector, self).__init__(
            node_name='apriltag_detector_node',
            node_type=NodeType.PERCEPTION
        )
        # get static parameters
        self.family = rospy.get_param('~family', 'tag36h11')
        self.ndetectors = rospy.get_param('~ndetectors', 1)
        self.nthreads = rospy.get_param('~nthreads', 1)
        self.quad_decimate = rospy.get_param('~quad_decimate', 1.0)
        self.quad_sigma = rospy.get_param('~quad_sigma', 0.0)
        self.refine_edges = rospy.get_param('~refine_edges', 1)
        self.decode_sharpening = rospy.get_param('~decode_sharpening', 0.25)
        self.tag_size = rospy.get_param('~tag_size', 0.065)
        self.rectify_alpha = rospy.get_param('~rectify_alpha', 0.0)
        # dynamic parameter
        self.detection_freq = DTParam(
            '~detection_freq',
            default=-1,
            param_type=ParamType.INT,
            min_value=-1,
            max_value=30
        )
        self._detection_reminder = DTReminder(frequency=self.detection_freq.value)
        # camera info
        self._camera_parameters = None
        self._mapx, self._mapy = None, None
        # create detector object
        self._detectors = [Detector(
            families=self.family,
            nthreads=self.nthreads,
            quad_decimate=self.quad_decimate,
            quad_sigma=self.quad_sigma,
            refine_edges=self.refine_edges,
            decode_sharpening=self.decode_sharpening
        ) for _ in range(self.ndetectors)]
        self._renderer_busy = False
        # create a CV bridge object
        self._jpeg = TurboJPEG()
        # create subscribers
        self._img_sub = rospy.Subscriber(
            '~image',
            CompressedImage,
            self._img_cb,
            queue_size=1,
            buff_size='20MB'
        )
        self._cinfo_sub = rospy.Subscriber(
            '~camera_info',
            CameraInfo,
            self._cinfo_cb,
            queue_size=1
        )
        # create publisher
        self._tag_pub = rospy.Publisher(
            '~detections',
            AprilTagDetectionArray,
            queue_size=1,
            dt_topic_type=TopicType.PERCEPTION,
            dt_help='Tag detections',
        )
        self._img_pub = rospy.Publisher(
            '~detections/image/compressed',
            CompressedImage,
            queue_size=1,
            dt_topic_type=TopicType.VISUALIZATION,
            dt_help='Camera image with tag detections superimposed',
        )
        # create thread pool
        self._workers = ThreadPoolExecutor(self.ndetectors)
        self._tasks = [None] * self.ndetectors
        # create TF broadcaster
        self._tf_bcaster = tf.TransformBroadcaster()

    def on_shutdown(self):
        self.loginfo('Shutting down workers pool')
        self._workers.shutdown()

    def _cinfo_cb(self, msg):
        # create mapx and mapy
        H, W = msg.height, msg.width
        # create new camera info
        self.camera_model = PinholeCameraModel()
        self.camera_model.fromCameraInfo(msg)
        # find optimal rectified pinhole camera
        with self.profiler('/cb/camera_info/get_optimal_new_camera_matrix'):
            rect_K, _ = cv2.getOptimalNewCameraMatrix(
                self.camera_model.K,
                self.camera_model.D,
                (W, H),
                self.rectify_alpha
            )
            # store new camera parameters
            self._camera_parameters = (rect_K[0, 0], rect_K[1, 1], rect_K[0, 2], rect_K[1, 2])
        # create rectification map
        with self.profiler('/cb/camera_info/init_undistort_rectify_map'):
            self._mapx, self._mapy = cv2.initUndistortRectifyMap(
                self.camera_model.K,
                self.camera_model.D,
                None,
                rect_K,
                (W, H),
                cv2.CV_32FC1
            )
        # once we got the camera info, we can stop the subscriber
        self.loginfo('Camera info message received. Unsubscribing from camera_info topic.')
        # noinspection PyBroadException
        try:
            self._cinfo_sub.shutdown()
        except BaseException:
            pass

    def _detect(self, detector_id, msg):
        # turn image message into grayscale image
        with self.profiler('/cb/image/decode'):
            img = self._jpeg.decode(msg.data, pixel_format=TJPF_GRAY)
        # run input image through the rectification map
        with self.profiler('/cb/image/rectify'):
            img = cv2.remap(img, self._mapx, self._mapy, cv2.INTER_NEAREST)
        # detect tags
        with self.profiler('/cb/image/detection'):
            tags = self._detectors[detector_id].detect(
                img, True, self._camera_parameters, self.tag_size)
        # pack detections into a message
        tags_msg = AprilTagDetectionArray()
        tags_msg.header.stamp = msg.header.stamp
        tags_msg.header.frame_id = msg.header.frame_id
        for tag in tags:
            # turn rotation matrix into quaternion
            q = _matrix_to_quaternion(tag.pose_R)
            p = tag.pose_t.T[0]
            # create single tag detection object
            detection = AprilTagDetection(
                transform=Transform(
                    translation=Vector3(
                        x=p[0],
                        y=p[1],
                        z=p[2]
                    ),
                    rotation=Quaternion(
                        x=q[0],
                        y=q[1],
                        z=q[2],
                        w=q[3]
                    )
                ),
                tag_id=tag.tag_id,
                tag_family=str(tag.tag_family),
                hamming=tag.hamming,
                decision_margin=tag.decision_margin,
                homography=tag.homography.flatten().astype(np.float32).tolist(),
                center=tag.center.tolist(),
                corners=tag.corners.flatten().tolist(),
                pose_error=tag.pose_err
            )
            # add detection to array
            tags_msg.detections.append(detection)
            # publish tf
            self._tf_bcaster.sendTransform(
                p.tolist(),
                q.tolist(),
                msg.header.stamp,
                'tag/{:s}'.format(str(tag.tag_id)),
                msg.header.frame_id
            )
        # publish detections
        self._tag_pub.publish(tags_msg)
        # update healthy frequency metadata
        self._tag_pub.set_healthy_freq(self._img_sub.get_frequency())
        self._img_pub.set_healthy_freq(self._img_sub.get_frequency())
        # render visualization (if needed)
        if self._img_pub.anybody_listening() and not self._renderer_busy:
            self._renderer_busy = True
            Thread(target=self._render_detections, args=(msg, img, tags)).start()

    def _img_cb(self, msg):
        # make sure we have received camera info
        if self._camera_parameters is None:
            return
        # make sure we have a rectification map available
        if self._mapx is None or self._mapy is None:
            return
        # make sure somebody wants this
        if (not self._img_pub.anybody_listening()) and (not self._tag_pub.anybody_listening()):
            return
        # make sure this is a good time to detect (always keep this as last check)
        if not self._detection_reminder.is_time(frequency=self.detection_freq.value):
            return
        # make sure we are still running
        if self.is_shutdown:
            return
        # ---
        # find the first available worker (if any)
        for i in range(self.ndetectors):
            if self._tasks[i] is None or self._tasks[i].done():
                # submit this image to the pool
                self._tasks[i] = self._workers.submit(self._detect, i, msg)
                break

    def _render_detections(self, msg, img, detections):
        with self.profiler('/publish_image'):
            # get a color buffer from the BW image
            img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
            # draw each tag
            for detection in detections:
                for idx in range(len(detection.corners)):
                    cv2.line(
                        img,
                        tuple(detection.corners[idx - 1, :].astype(int)),
                        tuple(detection.corners[idx, :].astype(int)),
                        (0, 255, 0)
                    )
                # draw the tag ID
                cv2.putText(
                    img,
                    str(detection.tag_id),
                    org=(
                        detection.corners[0, 0].astype(int) + 10,
                        detection.corners[0, 1].astype(int) + 10
                    ),
                    fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=0.8,
                    color=(0, 0, 255)
                )
            # pack image into a message
            img_msg = CompressedImage()
            img_msg.header.stamp = msg.header.stamp
            img_msg.header.frame_id = msg.header.frame_id
            img_msg.format = 'jpeg'
            img_msg.data = self._jpeg.encode(img)
        # ---
        self._img_pub.publish(img_msg)
        self._renderer_busy = False
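
This example (and Example #7 below) calls a module-level helper _matrix_to_quaternion(tag.pose_R) that is not shown. A plausible sketch is given below, assuming the tf.transformations convention of (x, y, z, w) quaternions; the exact original implementation may differ.

def _matrix_to_quaternion(r):
    # embed the 3x3 rotation matrix into a homogeneous 4x4 transform and let
    # tf.transformations extract the (x, y, z, w) quaternion used above
    T = np.zeros((4, 4), dtype=np.float64)
    T[0:3, 0:3] = r
    T[3, 3] = 1.0
    return tf.transformations.quaternion_from_matrix(T)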
Example #4
class StatisticsWorker(Thread):
    def __init__(self):
        super().__init__()
        events_dir = STATS_CATEGORY_TO_DIR["event"]
        usage_dir = STATS_CATEGORY_TO_DIR["usage"]
        # ---
        self._shutdown = False
        self._providers: List[StatisticsProvider] = []
        self._outbox = StatisticsUploader()
        self._timer = DTReminder(frequency=FREQUENCY.EVERY_MINUTE)
        # register providers
        # - stats/events/ dir
        self._providers.extend(glob_event_providers(events_dir, "*.json"))
        # - stats/usage/ dirs
        self._providers.extend(
            glob_usage_providers(os.path.join(usage_dir, "disk_image"),
                                 "*.json", "disk_image"))
        self._providers.extend(
            glob_usage_providers(os.path.join(usage_dir, "init_sd_card"),
                                 "*.json", "init_sd_card"))
        # - docker/ps
        self._providers.append(DockerPSProvider(FREQUENCY.EVERY_MINUTE))
        # - docker/images
        self._providers.append(DockerImagesProvider(FREQUENCY.EVERY_MINUTE))
        # - uptime
        self._providers.append(UptimeProvider(FREQUENCY.EVERY_30_MINUTES))
        # - network/configuration
        self._providers.append(
            NetworkConfigurationProvider(FREQUENCY.EVERY_1_HOUR))
        # - ros/graph
        self._providers.append(ROSGraphProvider(FREQUENCY.EVERY_30_MINUTES))
        # - health
        self._providers.append(HealthProvider(FREQUENCY.EVERY_30_MINUTES))
        # - network/public_ip
        self._providers.append(PublicIPProvider(FREQUENCY.ONESHOT))
        # - geolocation
        self._providers.append(GeolocationProvider(FREQUENCY.ONESHOT))
        # - battery/history
        self._providers.append(BatteryHistoryProvider(FREQUENCY.EVERY_2_HOURS))
        # - battery/info
        self._providers.append(BatteryInfoProvider(FREQUENCY.ONESHOT))
        # - lsusb
        self._providers.append(LSUSBProvider(FREQUENCY.EVERY_2_HOURS))
        # - wireless/status
        self._providers.append(
            WirelessStatusProvider(FREQUENCY.EVERY_30_MINUTES))
        # - robot/hostname
        self._providers.append(RobotHostnameProvider())
        # - robot/type
        self._providers.append(RobotTypeProvider())
        # - robot/configuration
        self._providers.append(RobotConfigurationProvider())
        # launch outbox
        self._outbox.start()

    def is_shutdown(self) -> bool:
        return self._shutdown

    def shutdown(self):
        self._outbox.shutdown()
        self._shutdown = True

    def run(self):
        app = DTProcess.get_instance()
        # (try to) read the device ID
        try:
            device_id = get_device_id()
        except ValueError:
            # no device ID? nothing to do
            app.logger.warning(
                "Could not find the device's unique ID. Cannot share stats.")
            return
        # this process gets data from a bunch of stats providers and places them in the outbox
        while not app.is_shutdown():
            # don't do this constantly
            if not self._timer.is_time():
                time.sleep(1)
                continue
            # temporary structures
            to_remove = []
            # go through the providers
            for provider in self._providers:
                if provider.istime:
                    # get timestamp and payload
                    stamp, payload = provider.data
                    if stamp is None or payload is None:
                        continue
                    # format payload
                    payload = provider.format(payload)
                    # pack data into a point
                    point = StatisticsPoint(
                        category=StatisticsCategory(provider.category),
                        key=provider.key,
                        device=device_id,
                        stamp=stamp,
                        payload=payload,
                        provider=provider)
                    # add point to outbox
                    self._outbox.add(point)
                    # remove exhausted providers
                    if provider.one_shot:
                        to_remove.append(provider)
            # remove providers
            self._providers = list(
                filter(lambda p: p not in to_remove, self._providers))
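
Like the battery driver, the worker above is a plain thread; a minimal usage sketch follows, assuming it is started from within a DTProcess-based application (run() polls DTProcess.get_instance().is_shutdown()).

worker = StatisticsWorker()
worker.start()        # Thread.start() -> run(): poll providers, fill the outbox
# ... application runs; run() returns once the owning DTProcess shuts down ...
worker.shutdown()     # stop the uploader outbox and mark the worker as shut down
worker.join()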
Example #5
class RectifierNode(DTROS):
    def __init__(self, node_name):
        super().__init__(node_name, node_type=NodeType.PERCEPTION)

        # parameters
        self.publish_freq = DTParam("~publish_freq", -1)
        self.alpha = DTParam("~alpha", 0.0)

        # utility objects
        self.jpeg = TurboJPEG()
        self.reminder = DTReminder(frequency=self.publish_freq.value)
        self.camera_model = None
        self.rect_camera_info = None
        self.mapx, self.mapy = None, None

        # subscribers
        self.sub_img = rospy.Subscriber("~image_in",
                                        CompressedImage,
                                        self.cb_image,
                                        queue_size=1,
                                        buff_size="10MB")
        self.sub_camera_info = rospy.Subscriber("~camera_info_in",
                                                CameraInfo,
                                                self.cb_camera_info,
                                                queue_size=1)

        # publishers
        self.pub_img = rospy.Publisher(
            "~image/compressed",
            CompressedImage,
            queue_size=1,
            dt_topic_type=TopicType.PERCEPTION,
            dt_healthy_freq=self.publish_freq.value,
            dt_help="Rectified image (i.e., image with no distortion effects from the lens).",
        )
        self.pub_camera_info = rospy.Publisher(
            "~camera_info",
            CameraInfo,
            queue_size=1,
            dt_topic_type=TopicType.PERCEPTION,
            dt_healthy_freq=self.publish_freq.value,
            dt_help="Camera parameters for the (virtual) rectified camera.",
        )

    def cb_camera_info(self, msg):
        # unsubscribe from camera_info
        self.loginfo(
            "Camera info message received. Unsubscribing from camera_info topic."
        )
        # noinspection PyBroadException
        try:
            self.sub_camera_info.shutdown()
        except BaseException:
            pass
        # ---
        H, W = msg.height, msg.width
        # create new camera info
        self.camera_model = PinholeCameraModel()
        self.camera_model.fromCameraInfo(msg)
        # find optimal rectified pinhole camera
        with self.profiler("/cb/camera_info/get_optimal_new_camera_matrix"):
            rect_camera_K, _ = cv2.getOptimalNewCameraMatrix(
                self.camera_model.K, self.camera_model.D, (W, H),
                self.alpha.value)
        # create rectification map
        with self.profiler("/cb/camera_info/init_undistort_rectify_map"):
            self.mapx, self.mapy = cv2.initUndistortRectifyMap(
                self.camera_model.K, self.camera_model.D, None, rect_camera_K,
                (W, H), cv2.CV_32FC1)
        # pack rectified camera info into a CameraInfo message
        self.rect_camera_info = CameraInfo(
            width=W,
            height=H,
            K=rect_camera_K.flatten().tolist(),
            R=np.eye(3).flatten().tolist(),
            P=np.zeros((3, 4)).flatten().tolist(),
        )

    def cb_image(self, msg):
        # make sure this matters to somebody
        if not self.pub_img.anybody_listening(
        ) and not self.pub_camera_info.anybody_listening():
            return
        # make sure we have a map to use
        if self.mapx is None or self.mapy is None:
            return
        # make sure the node is not switched off
        if not self.switch:
            return
        # make sure this is a good time to publish (always keep this as last check)
        if not self.reminder.is_time(frequency=self.publish_freq.value):
            return
        # turn 'compressed distorted image message' into 'raw distorted image'
        with self.profiler("/cb/image/decode"):
            dist_img = self.jpeg.decode(msg.data)
        # run input image through the lens map
        with self.profiler("/cb/image/rectify"):
            rect_img = cv2.remap(dist_img, self.mapx, self.mapy,
                                 cv2.INTER_NEAREST)
        # turn 'raw rectified image' into 'compressed rectified image message'
        with self.profiler("/cb/image/encode"):
            # rect_img_msg = self.bridge.cv2_to_compressed_imgmsg(rect_img)
            rect_img_msg = CompressedImage(format="jpeg",
                                           data=self.jpeg.encode(rect_img))
        # maintain original header
        rect_img_msg.header.stamp = msg.header.stamp
        rect_img_msg.header.frame_id = msg.header.frame_id
        self.rect_camera_info.header.stamp = msg.header.stamp
        self.rect_camera_info.header.frame_id = msg.header.frame_id
        # publish image
        self.pub_img.publish(rect_img_msg)
        # publish camera info
        self.pub_camera_info.publish(self.rect_camera_info)
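
Stripped of the ROS plumbing, the rectification in this node is two OpenCV calls at camera-info time plus one cv2.remap per frame. The standalone sketch below uses made-up camera parameters purely for illustration.

import cv2
import numpy as np

# illustrative stand-ins for the values carried by the CameraInfo message
W, H = 640, 480
K = np.array([[305.0, 0.0, 320.0],
              [0.0, 305.0, 240.0],
              [0.0, 0.0, 1.0]])
D = np.array([-0.28, 0.07, 0.0, 0.0, 0.0])  # plumb-bob distortion coefficients
alpha = 0.0

# the two calls from cb_camera_info: optimal rectified intrinsics + lens maps
rect_K, _ = cv2.getOptimalNewCameraMatrix(K, D, (W, H), alpha)
mapx, mapy = cv2.initUndistortRectifyMap(K, D, None, rect_K, (W, H), cv2.CV_32FC1)

# the per-frame call from cb_image: warp the distorted frame onto the rectified grid
dist_img = np.zeros((H, W, 3), dtype=np.uint8)  # placeholder for a decoded camera frame
rect_img = cv2.remap(dist_img, mapx, mapy, cv2.INTER_NEAREST)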
Example #6
class DistributedTFNode(DTROS):

    PUBLISH_TF_STATIC_EVERY_SECS = 2

    def __init__(self):
        super(DistributedTFNode,
              self).__init__(node_name='distributed_tf_node',
                             node_type=NodeType.SWARM)
        # get static parameters
        self.robot_hostname = rospy.get_param('~veh')
        self.map_name = _sanitize_hostname(rospy.get_param('~map'))
        self.tag_id = rospy.get_param('~tag_id')
        self.min_distance_odom = rospy.get_param('~min_distance_odom')
        self.max_time_between_poses = rospy.get_param(
            '~max_time_between_poses')
        # define local reference frames' names
        self._tag_mount = f'{self.robot_hostname}/tag_mount'
        self._footprint_frame = f'{self.robot_hostname}/footprint'
        # create communication group
        self._group = DTCommunicationGroup("/autolab/tf", AutolabTransform)
        # create static tfs holder and access semaphore
        self._static_tfs = {}
        self._static_tfs_sem = threading.Semaphore(1)
        self._tf_buffer = tf2_ros.Buffer()
        self._tf_listener = tf2_ros.TransformListener(self._tf_buffer)
        # create publishers
        self._tf_pub = self._group.Publisher()
        # fetch/publish right away and then set timers
        if self.tag_id != '__NOTSET__':
            threading.Thread(target=self._fetch_tag_tf).start()
        self._pub_timer = rospy.Timer(
            rospy.Duration(self.PUBLISH_TF_STATIC_EVERY_SECS),
            self._publish_static_tfs)
        # setup subscribers for odometry
        self._odo_sub = rospy.Subscriber("~odometry_in",
                                         Odometry,
                                         self._cb_odometry,
                                         queue_size=1)
        self._pose_last = None
        self._reminder = DTReminder(frequency=10)

    def on_shutdown(self):
        if hasattr(self, '_group') and self._group is not None:
            self._group.shutdown()

    def _fetch_tag_tf(self, *_):
        origin = self._tag_mount
        target = self._footprint_frame
        # try to fetch the TF
        while not self.is_shutdown:
            try:
                self.loginfo(f"Looking for TF[{origin}] -> [{target}]...")
                transform = self._tf_buffer.lookup_transform(
                    origin, target, rospy.Time())
                tf = AutolabTransform(
                    origin=AutolabReferenceFrame(
                        time=transform.header.stamp,
                        type=AutolabReferenceFrame.TYPE_DUCKIEBOT_TAG,
                        # NOTE: we look up the TF tag_mount -> footprint but publish it as tag/<ID> -> footprint
                        name=f'tag/{self.tag_id}',
                        robot=self.robot_hostname),
                    target=AutolabReferenceFrame(
                        time=transform.header.stamp,
                        type=AutolabReferenceFrame.TYPE_DUCKIEBOT_FOOTPRINT,
                        name=target,
                        robot=self.robot_hostname),
                    is_fixed=True,
                    is_static=False,
                    transform=transform.transform)
                # add TF to the list of TFs to publish
                self._static_tfs_sem.acquire()
                try:
                    self._static_tfs[(origin, target)] = tf
                finally:
                    self._static_tfs_sem.release()
                self.loginfo(
                    f"Found TF[{origin}] -> [{target}]. Stopping TF listener thread."
                )
                return
            except (tf2_ros.LookupException, tf2_ros.ExtrapolationException):
                self.logwarn(
                    f"Could not find a static TF [{origin}] -> [{target}]")
            except tf2_ros.ConnectivityException:
                pass
            time.sleep(2)

    def _publish_static_tfs(self, *_):
        tfs = []
        self._static_tfs_sem.acquire()
        try:
            for transform in self._static_tfs.values():
                tfs.append(transform)
        finally:
            self._static_tfs_sem.release()
        # publish
        for tf in tfs:
            self._tf_pub.publish(tf, destination=self.map_name)

    def _cb_odometry(self, pose_now):
        if self._pose_last is None:
            self._pose_last = pose_now
            return

        if not self._reminder.is_time():
            return

        # Only add the transform if the new pose is sufficiently different
        t_now_to_world = np.array([
            pose_now.pose.pose.position.x, pose_now.pose.pose.position.y,
            pose_now.pose.pose.position.z
        ])

        t_last_to_world = np.array([
            self._pose_last.pose.pose.position.x,
            self._pose_last.pose.pose.position.y,
            self._pose_last.pose.pose.position.z
        ])

        o = pose_now.pose.pose.orientation
        q_now_to_world = np.array([o.x, o.y, o.z, o.w])

        o = self._pose_last.pose.pose.orientation
        q_last_to_world = np.array([o.x, o.y, o.z, o.w])

        q_world_to_last = q_last_to_world.copy()
        # negating w inverts the rotation (for unit quaternions this is equivalent to the conjugate)
        q_world_to_last[3] *= -1
        q_now_to_last = tr.quaternion_multiply(q_world_to_last, q_now_to_world)
        world_to_last = np.matrix(tr.quaternion_matrix(q_world_to_last))

        # rotate the world-frame displacement into the previous body frame
        R_world_to_last = world_to_last[0:3][:, 0:3]
        t_now_to_last = np.array(
            np.dot(R_world_to_last, t_now_to_world - t_last_to_world))

        t_now_to_last = t_now_to_last.flatten()

        # compute TF between `pose_now` and `pose_last`
        transform = Transform(translation=Vector3(x=t_now_to_last[0],
                                                  y=t_now_to_last[1],
                                                  z=t_now_to_last[2]),
                              rotation=Quaternion(x=q_now_to_last[0],
                                                  y=q_now_to_last[1],
                                                  z=q_now_to_last[2],
                                                  w=q_now_to_last[3]))

        # pack TF into an AutolabTransform message
        tf = AutolabTransform(
            origin=AutolabReferenceFrame(
                time=self._pose_last.header.stamp,
                type=AutolabReferenceFrame.TYPE_DUCKIEBOT_FOOTPRINT,
                name=self._footprint_frame,
                robot=self.robot_hostname),
            target=AutolabReferenceFrame(
                time=pose_now.header.stamp,
                type=AutolabReferenceFrame.TYPE_DUCKIEBOT_FOOTPRINT,
                name=self._footprint_frame,
                robot=self.robot_hostname),
            is_fixed=False,
            is_static=False,
            transform=transform)
        # publish
        self._tf_pub.publish(tf, destination=self.map_name)
        # update last pose
        self._pose_last = pose_now
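
The relative-pose math in _cb_odometry can be checked in isolation. The sketch below uses made-up poses and tf.transformations.quaternion_inverse, which for unit quaternions gives the same rotation as the in-place sign flip of w used above.

import numpy as np
import tf.transformations as tr

# two illustrative world-frame poses: start at the origin facing +x, then move
# 0.1 m along +x while turning 90 degrees to the left
t_last, t_now = np.array([0.0, 0.0, 0.0]), np.array([0.1, 0.0, 0.0])
q_last = tr.quaternion_from_euler(0, 0, 0)
q_now = tr.quaternion_from_euler(0, 0, np.pi / 2)

# same composition as _cb_odometry: q_now_to_last = q_world_to_last * q_now_to_world
q_world_to_last = tr.quaternion_inverse(q_last)
q_now_to_last = tr.quaternion_multiply(q_world_to_last, q_now)
# rotate the world-frame displacement into the previous body frame
R_world_to_last = tr.quaternion_matrix(q_world_to_last)[0:3, 0:3]
t_now_to_last = R_world_to_last.dot(t_now - t_last)  # -> [0.1, 0.0, 0.0]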
Example #7
class AprilTagDetector(DTROS):
    def __init__(self):
        super(AprilTagDetector,
              self).__init__(node_name='apriltag_detector_node',
                             node_type=NodeType.PERCEPTION)
        # get static parameters
        self.family = rospy.get_param('~family', 'tag36h11')
        self.nthreads = rospy.get_param('~nthreads', 1)
        self.quad_decimate = rospy.get_param('~quad_decimate', 1.0)
        self.quad_sigma = rospy.get_param('~quad_sigma', 0.0)
        self.refine_edges = rospy.get_param('~refine_edges', 1)
        self.decode_sharpening = rospy.get_param('~decode_sharpening', 0.25)
        self.tag_size = rospy.get_param('~tag_size', 0.065)
        # dynamic parameter
        self.detection_freq = DTParam('~detection_freq',
                                      default=-1,
                                      param_type=ParamType.INT,
                                      min_value=-1,
                                      max_value=30)
        self._detection_reminder = DTReminder(
            frequency=self.detection_freq.value)
        # camera info
        self._camera_parameters = None
        self._camera_frame = None
        # create detector object
        self._at_detector = Detector(families=self.family,
                                     nthreads=self.nthreads,
                                     quad_decimate=self.quad_decimate,
                                     quad_sigma=self.quad_sigma,
                                     refine_edges=self.refine_edges,
                                     decode_sharpening=self.decode_sharpening)
        # create a CV bridge object
        self._bridge = CvBridge()
        # create subscribers
        self._img_sub = rospy.Subscriber('image_rect', Image, self._img_cb)
        self._cinfo_sub = rospy.Subscriber('camera_info', CameraInfo,
                                           self._cinfo_cb)
        # create publishers
        self._img_pub = rospy.Publisher('tag_detections_image/compressed',
                                        CompressedImage,
                                        queue_size=1,
                                        dt_topic_type=TopicType.VISUALIZATION)
        self._tag_pub = rospy.Publisher('tag_detections',
                                        AprilTagDetectionArray,
                                        queue_size=1,
                                        dt_topic_type=TopicType.PERCEPTION)
        self._img_pub_busy = False
        # create TF broadcaster
        self._tf_bcaster = tf.TransformBroadcaster()
        # spin forever
        rospy.spin()

    def _img_cb(self, data):
        if self._camera_parameters is None:
            return
        if not self._detection_reminder.is_time(
                frequency=self.detection_freq.value):
            return
        # ---
        # turn image message into grayscale image
        img = self._bridge.imgmsg_to_cv2(data, desired_encoding='mono8')
        # detect tags
        tags = self._at_detector.detect(img, True, self._camera_parameters,
                                        self.tag_size)
        # draw the detections on an image (if needed)
        if self._img_pub.anybody_listening() and not self._img_pub_busy:
            self._img_pub_busy = True
            Thread(target=self._publish_detections_image,
                   args=(img, tags)).start()
        # pack detections into a message
        msg = AprilTagDetectionArray()
        detection_time = rospy.Time.now()
        msg.header.stamp = detection_time
        msg.header.frame_id = self._camera_frame
        for tag in tags:
            # turn rotation matrix into quaternion
            q = _matrix_to_quaternion(tag.pose_R)
            p = tag.pose_t.T[0]
            # create single tag detection object
            detection = AprilTagDetection(
                transform=Transform(translation=Vector3(x=p[0], y=p[1],
                                                        z=p[2]),
                                    rotation=Quaternion(x=q[0],
                                                        y=q[1],
                                                        z=q[2],
                                                        w=q[3])),
                tag_id=tag.tag_id,
                tag_family=str(tag.tag_family),
                hamming=tag.hamming,
                decision_margin=tag.decision_margin,
                homography=tag.homography.flatten().astype(
                    np.float32).tolist(),
                center=tag.center.tolist(),
                corners=tag.corners.flatten().tolist(),
                pose_error=tag.pose_err)
            # add detection to array
            msg.detections.append(detection)
            # publish tf
            self._tf_bcaster.sendTransform(p.tolist(), q.tolist(),
                                           detection_time,
                                           '/tag{:s}'.format(str(tag.tag_id)),
                                           self._camera_frame)
        # publish detections
        self._tag_pub.publish(msg)
        # update healthy frequency metadata
        self._tag_pub.set_healthy_freq(self._img_sub.get_frequency())
        self._img_pub.set_healthy_freq(self._img_sub.get_frequency())

    def _publish_detections_image(self, img, tags):
        # get a color buffer from the BW image
        color_img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
        # draw each tag
        for tag in tags:
            for idx in range(len(tag.corners)):
                cv2.line(color_img, tuple(tag.corners[idx - 1, :].astype(int)),
                         tuple(tag.corners[idx, :].astype(int)), (0, 255, 0))
            # draw the tag ID
            cv2.putText(color_img,
                        str(tag.tag_id),
                        org=(tag.corners[0, 0].astype(int) + 10,
                             tag.corners[0, 1].astype(int) + 10),
                        fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                        fontScale=0.8,
                        color=(0, 0, 255))
        # pack image into a message
        img_msg = self._bridge.cv2_to_compressed_imgmsg(color_img)
        # ---
        self._img_pub.publish(img_msg)
        self._img_pub_busy = False

    def _cinfo_cb(self, data):
        K = np.array(data.K).reshape((3, 3))
        self._camera_parameters = (K[0, 0], K[1, 1], K[0, 2], K[1, 2])
        self._camera_frame = data.header.frame_id
        # once we got the camera info, we can stop the subscriber
        self._cinfo_sub.shutdown()
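
Unlike the other nodes in this collection, this detector blocks inside its own constructor by calling rospy.spin(), so a launch stub (sketched below, illustrative only) just needs to instantiate it.

if __name__ == "__main__":
    # the constructor registers subscribers/publishers and then blocks in rospy.spin()
    node = AprilTagDetector()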