    def step(self):
        stamp = super().step()
        if not stamp:
            return
        # Publish range finder data
        if self._image_publisher.get_subscription_count() > 0 or self._always_publish:
            self._wb_device.enable(self._timestep)

            # Image data
            msg = Image()
            msg.header.stamp = stamp
            msg.header.frame_id = self._frame_id
            msg.height = self._wb_device.getHeight()
            msg.width = self._wb_device.getWidth()
            msg.is_bigendian = False
            msg.step = self._wb_device.getWidth() * 4

            range_image = self._wb_device.getRangeImage()
            if range_image is None:
                return
            image_array = np.array(range_image, dtype='float32')

            # We pass `data` directly so that we avoid the `data` setter.
            # Otherwise ROS 2 converts the data to `array.array`, which slows down the simulation
            # because it copies the memory internally.
            # Both `bytearray` and `array.array` implement the Python buffer protocol,
            # so we should not see unpredictable behavior.
            # deepcode ignore W0212: Avoid conversion from `bytearray` to `array.array`.
            msg._data = image_array.tobytes()
            msg.encoding = '32FC1'

            self._image_publisher.publish(msg)

        else:
            self._wb_device.disable()
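
On the subscriber side, the 32FC1 buffer published above maps straight back onto a NumPy array: the message carries one little-endian float32 per pixel and `step = width * 4`. A minimal listener sketch, assuming a hypothetical `range_finder/image` topic name:

import numpy as np
import rclpy
from rclpy.node import Node
from sensor_msgs.msg import Image


class RangeImageListener(Node):
    def __init__(self):
        super().__init__('range_image_listener')
        # The topic name is an assumption; use whatever the driver actually advertises.
        self.create_subscription(Image, 'range_finder/image', self.on_image, 1)

    def on_image(self, msg):
        # '32FC1' means one float32 per pixel, so the flat buffer reshapes
        # directly into (height, width); out-of-range pixels arrive as inf.
        depth = np.frombuffer(bytes(msg.data), dtype=np.float32)
        depth = depth.reshape(msg.height, msg.width)
        self.get_logger().info('closest point: %.3f m' % float(depth.min()))


def main():
    rclpy.init()
    rclpy.spin(RangeImageListener())
    rclpy.shutdown()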
Example #2
    def step(self):
        stamp = super().step()
        if not stamp:
            return
        # Publish camera data
        if self._image_publisher.get_subscription_count() > 0 or self._always_publish:
            self._wb_device.enable(self._timestep)
            image = self._wb_device.getImage()

            if image is None:
                return

            # Image data
            msg = Image()
            msg.header.stamp = stamp
            msg.header.frame_id = self._frame_id
            msg.height = self._wb_device.getHeight()
            msg.width = self._wb_device.getWidth()
            msg.is_bigendian = False
            msg.step = self._wb_device.getWidth() * 4
            # We pass `data` directly so that we avoid the `data` setter.
            # Otherwise ROS 2 converts the data to `array.array`, which slows down the simulation
            # because it copies the memory internally.
            # Both `bytearray` and `array.array` implement the Python buffer protocol,
            # so we should not see unpredictable behavior.
            # deepcode ignore W0212: Avoid conversion from `bytearray` to `array.array`.
            msg._data = image
            msg.encoding = 'bgra8'
            self._image_publisher.publish(msg)

            self.__message_info.header.stamp = Time(
                seconds=self._node.robot.getTime()).to_msg()
            self._camera_info_publisher.publish(self.__message_info)

            if (self._wb_device.hasRecognition()
                    and self._recognition_publisher.get_subscription_count() > 0):
                self._wb_device.recognitionEnable(self._timestep)
                objects = self._wb_device.getRecognitionObjects()

                if objects is None:
                    return

                # Recognition data
                msg = RecognitionObjects()
                msg.header.stamp = stamp
                msg.header.frame_id = self._frame_id
                for obj in objects:
                    msg_obj = RecognitionObject()
                    msg_obj.position = obj.get_position()
                    msg_obj.position_on_image = obj.get_position_on_image()
                    msg_obj.size_on_image = obj.get_size_on_image()
                    msg_obj.number_of_colors = obj.get_number_of_colors()
                    msg_obj.colors = obj.get_colors()
                    msg_obj.model = str(obj.get_model())
                    msg.objects.append(msg_obj)
                self._recognition_publisher.publish(msg)
            else:
                self._wb_device.recognitionDisable()
        else:
            self._wb_device.disable()
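
The `self.__message_info` published above is the driver's CameraInfo message, prepared elsewhere in the class. For a distortion-free simulated pinhole camera it can be derived from the image size and the horizontal field of view alone; a minimal sketch under that assumption (the helper name and the plumb_bob zero-distortion model are illustrative, not taken from the snippet):

import math

from sensor_msgs.msg import CameraInfo


def make_camera_info(width, height, horizontal_fov, frame_id):
    # Ideal pinhole model: the focal length follows from the horizontal field
    # of view (in radians) as fx = width / (2 * tan(fov / 2)).
    fx = width / (2.0 * math.tan(horizontal_fov / 2.0))
    fy = fx
    cx, cy = width / 2.0, height / 2.0

    info = CameraInfo()
    info.header.frame_id = frame_id
    info.width = width
    info.height = height
    info.distortion_model = 'plumb_bob'
    info.d = [0.0, 0.0, 0.0, 0.0, 0.0]
    info.k = [fx, 0.0, cx,
              0.0, fy, cy,
              0.0, 0.0, 1.0]
    info.r = [1.0, 0.0, 0.0,
              0.0, 1.0, 0.0,
              0.0, 0.0, 1.0]
    info.p = [fx, 0.0, cx, 0.0,
              0.0, fy, cy, 0.0,
              0.0, 0.0, 1.0, 0.0]
    return info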
Example #3
    def imageCallback(self, msg):
        '''!
            Receives the image, segments it and publishes the result
        '''

        # Rebuild the image from the flat byte buffer: splitting into rows and then
        # into columns yields a (width, height, 3) array; np.rot90 turns it back into
        # (height, width, 3), vertically flipped with respect to the raw buffer order.
        img = np.array(msg.data, dtype=np.uint8)
        img = np.array(np.split(img, msg.height))
        img = np.array(np.split(img, msg.width, axis=1))

        img = np.rot90(img)

        img = tf.keras.preprocessing.image.array_to_img(img)
        image_array = tf.keras.preprocessing.image.img_to_array(img)

        result = self.bodypix_model.predict_single(image_array)
        mask = result.get_mask(threshold=0.75)
        colored_mask = result.get_colored_part_mask(mask)

        mask = mask.numpy().astype(np.uint8)
        colored_mask = colored_mask.astype(np.uint8)

        maskMsg = Image()
        maskMsg._data = mask.tobytes()  # bytes, not a Python list, so `_data` keeps its fast path
        maskMsg.height = mask.shape[0]
        maskMsg.width = mask.shape[1]
        maskMsg.encoding = "8UC1"
        maskMsg.is_bigendian = 0
        maskMsg.step = maskMsg.width

        colMsg = Image()
        colMsg._data = colored_mask.tobytes()  # bytes, not a Python list, so `_data` keeps its fast path
        colMsg.height = colored_mask.shape[0]
        colMsg.width = colored_mask.shape[1]
        colMsg.encoding = "8UC3"
        colMsg.is_bigendian = 0
        colMsg.step = colMsg.width * 3

        maskMsg.header = msg.header
        colMsg.header = msg.header

        self.pubMask.publish(maskMsg)
        self.colMask.publish(colMsg)
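
The callback above relies on a self.bodypix_model created elsewhere in the node. A hedged sketch of how such a model is typically constructed with the tf-bodypix package (the concrete model path is an assumption; any BodyPixModelPaths entry is used the same way):

from tf_bodypix.api import download_model, load_model, BodyPixModelPaths

# Usually done once at node construction time and then stored as self.bodypix_model.
bodypix_model = load_model(
    download_model(BodyPixModelPaths.MOBILENET_FLOAT_50_STRIDE_16))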
    def timerCallback(self):
        '''!
            Timer callback that receives and publishes the image
        '''
        frameId = self.get_parameter(
            "frame_id").get_parameter_value().string_value

        tempo1 = cv.getTickCount()

        if self.ret:
            self.ret = False
            imgMsg = Image()

            imgMsg._data = self.flattened

            imgMsg.height = self.height
            imgMsg.width = self.width

            imgMsg.encoding = "8UC3"
            imgMsg.is_bigendian = 0
            imgMsg.step = imgMsg.width * 3

            time = self.get_clock().now().to_msg()
            imgMsg.header.stamp = time

            imgMsg.header.frame_id = frameId

            self.pubImg.publish(imgMsg)

            #compImgMsg = bridge.cv2_to_compressed_imgmsg(frame)
            #compImgMsg.header.stamp = time
            #compImgMsg.header.frame_id = frameId
            #self.pubImgCompress.publish(compImgMsg)

            self.publishInfo(imgMsg.header)

        tempo2 = cv.getTickCount()
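
timerCallback only publishes; it expects self.ret, self.flattened, self.height and self.width to be filled in by another part of the node that is not shown in the snippet. A hedged sketch of what that producer side could look like, assuming a cv.VideoCapture stored as self.cap (both the method name and self.cap are assumptions):

    def captureCallback(self):
        # Hypothetical producer, not part of the original node: grabs a frame
        # and stages it for timerCallback above.
        ok, frame = self.cap.read()  # self.cap: a cv.VideoCapture opened in __init__ (assumed)
        if not ok:
            return
        self.height, self.width = frame.shape[:2]
        # The Image message wants a flat byte sequence; tobytes() produces it in one copy.
        self.flattened = frame.tobytes()
        self.ret = True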
Example #5
    def step(self):
        stamp = super().step()
        if not stamp:
            return
        # Publish camera data
        if self._image_publisher.get_subscription_count() > 0 or self._always_publish:
            self._wb_device.enable(self._timestep)
            image = self._wb_device.getImage()

            if image is None:
                return

            # Image data
            msg = Image()
            msg.header.stamp = stamp
            msg.header.frame_id = self._frame_id
            msg.height = self._wb_device.getHeight()
            msg.width = self._wb_device.getWidth()
            msg.is_bigendian = False
            msg.step = self._wb_device.getWidth() * 4
            # We pass `data` directly so that we avoid the `data` setter.
            # Otherwise ROS 2 converts the data to `array.array`, which slows down the simulation
            # because it copies the memory internally.
            # Both `bytearray` and `array.array` implement the Python buffer protocol,
            # so we should not see unpredictable behavior.
            # deepcode ignore W0212: Avoid conversion from `bytearray` to `array.array`.
            msg._data = image
            msg.encoding = 'bgra8'
            self._image_publisher.publish(msg)

            self.__message_info.header.stamp = Time(
                seconds=self._node.robot.getTime()).to_msg()
            self._camera_info_publisher.publish(self.__message_info)

            if self._wb_device.hasRecognition() and (
                    self._recognition_publisher.get_subscription_count() > 0
                    or self._recognition_webots_publisher.get_subscription_count() > 0):
                self._wb_device.recognitionEnable(self._timestep)
                objects = self._wb_device.getRecognitionObjects()

                if objects is None:
                    return

                # Recognition data
                reco_msg = Detection2DArray()
                reco_msg_webots = WbCameraRecognitionObjects()
                reco_msg.header.stamp = stamp
                reco_msg_webots.header.stamp = stamp
                reco_msg.header.frame_id = self._frame_id
                reco_msg_webots.header.frame_id = self._frame_id
                for obj in objects:
                    # Getting Object Info
                    position = Point()
                    orientation = Quaternion()
                    position.x = obj.get_position()[0]
                    position.y = obj.get_position()[1]
                    position.z = obj.get_position()[2]
                    axangle = obj.get_orientation()
                    quat = axangle2quat(axangle[:-1], axangle[-1])
                    orientation.w = quat[0]
                    orientation.x = quat[1]
                    orientation.y = quat[2]
                    orientation.z = quat[3]
                    obj_model = obj.get_model().decode('UTF-8')
                    obj_center = [
                        float(i) for i in obj.get_position_on_image()
                    ]
                    obj_size = [float(i) for i in obj.get_size_on_image()]
                    obj_id = obj.get_id()
                    obj_colors = obj.get_colors()
                    # Object Info -> Detection2D
                    reco_obj = Detection2D()
                    hyp = ObjectHypothesisWithPose()
                    hyp.id = obj_model
                    hyp.pose.pose.position = position
                    hyp.pose.pose.orientation = orientation
                    reco_obj.results.append(hyp)
                    reco_obj.bbox.center.x = obj_center[0]
                    reco_obj.bbox.center.y = obj_center[1]
                    reco_obj.bbox.size_x = obj_size[0]
                    reco_obj.bbox.size_y = obj_size[1]
                    reco_msg.detections.append(reco_obj)

                    # Object Info -> WbCameraRecognitionObject
                    reco_webots_obj = WbCameraRecognitionObject()
                    reco_webots_obj.id = obj_id
                    reco_webots_obj.model = obj_model
                    reco_webots_obj.pose.pose.position = position
                    reco_webots_obj.pose.pose.orientation = orientation
                    reco_webots_obj.bbox.center.x = obj_center[0]
                    reco_webots_obj.bbox.center.y = obj_center[1]
                    reco_webots_obj.bbox.size_x = obj_size[0]
                    reco_webots_obj.bbox.size_y = obj_size[1]
                    for i in range(0, obj.get_number_of_colors()):
                        color = ColorRGBA()
                        color.r = obj_colors[3 * i]
                        color.g = obj_colors[3 * i + 1]
                        color.b = obj_colors[3 * i + 2]
                        reco_webots_obj.colors.append(color)
                    reco_msg_webots.objects.append(reco_webots_obj)
                self._recognition_webots_publisher.publish(reco_msg_webots)
                self._recognition_publisher.publish(reco_msg)
            else:
                self._wb_device.recognitionDisable()
        else:
            self._wb_device.disable()
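
The axangle2quat call in the loop above presumably comes from the transforms3d package (transforms3d.quaternions.axangle2quat; the import is not shown in the snippet). Webots reports the recognised object's orientation as an axis-angle quadruple, and the helper returns a quaternion in (w, x, y, z) order, which is why quat[0] is assigned to orientation.w. A small standalone check:

import math

from transforms3d.quaternions import axangle2quat

# A 90 degree rotation about the z axis, first as axis-angle, then as a
# quaternion in (w, x, y, z) order.
axis = [0.0, 0.0, 1.0]
angle = math.pi / 2.0
quat = axangle2quat(axis, angle)

print(quat)  # approximately [0.7071, 0.0, 0.0, 0.7071]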