Example #1
def get_environment_objects(planning_scene_map_file):
    object_list = ObjectList()

    scene_file_path = get_package_path('mdr_wrs_tidy_up', 'config',
                                       planning_scene_map_file)
    scene_data = load_yaml_file(scene_file_path)

    frame_id = scene_data['frame_id']
    for obj_data in scene_data['objects']:
        obj = Object()
        obj.name = obj_data['name']
        obj.pose.header.frame_id = frame_id

        euler_orientation = obj_data['orientation']
        quaternion = tf.transformations.quaternion_from_euler(
            *euler_orientation)

        obj.pose.pose = Pose(Point(*obj_data['position']),
                             Quaternion(*quaternion))
        obj.dimensions.header.frame_id = frame_id
        obj.dimensions.vector = Vector3(*obj_data['dimensions'])
        obj.bounding_box.center = Point(*obj_data['position'])
        obj.bounding_box.dimensions = Vector3(*obj_data['dimensions'])
        object_list.objects.append(obj)
    return object_list
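
Example #1 assumes a YAML scene file with a top-level frame_id and a list of objects, each carrying name, position, orientation (Euler angles), and dimensions. A minimal sketch of a file that would parse correctly; the file contents and values here are hypothetical, but the keys match what get_environment_objects() reads:

import yaml

# Hypothetical planning-scene map; keys follow the fields the
# loader above accesses on the parsed YAML.
scene_yaml = """
frame_id: map
objects:
  - name: table_1
    position: [1.0, 0.5, 0.4]       # x, y, z in metres
    orientation: [0.0, 0.0, 1.57]   # roll, pitch, yaw in radians
    dimensions: [0.8, 1.2, 0.05]
"""
scene_data = yaml.safe_load(scene_yaml)
assert scene_data['frame_id'] == 'map'
assert scene_data['objects'][0]['name'] == 'table_1'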
Example #2
def get_obj_msg_from_detection(cloud_msg, bounding_box, category, confidence,
                               frame_id):
    '''Creates an mas_perception_msgs.msg.Object message from the given
    information about an object detected in an image generated from the
    point cloud.

    Keyword arguments:
    cloud_msg: sensor_msgs.msg.PointCloud2
    bounding_box: BoundingBox2D
    category: str
    confidence: float -- detection confidence
    frame_id: str -- frame of the resulting object

    '''
    cropped_cloud = crop_organized_cloud_msg(cloud_msg, bounding_box)
    obj_coords = crop_cloud_to_xyz(
        cropped_cloud,
        BoundingBox2D(box_geometry=(0, 0, cropped_cloud.width,
                                    cropped_cloud.height)))
    obj_coords = np.reshape(obj_coords, (-1, 3))
    min_coord = np.nanmin(obj_coords, axis=0)
    max_coord = np.nanmax(obj_coords, axis=0)
    mean_coord = np.nanmean(obj_coords, axis=0)

    # fill object geometry info
    detected_obj = Object()
    detected_obj.bounding_box.center.x = mean_coord[0]
    detected_obj.bounding_box.center.y = mean_coord[1]
    detected_obj.bounding_box.center.z = mean_coord[2]
    detected_obj.bounding_box.dimensions.x = max_coord[0] - min_coord[0]
    detected_obj.bounding_box.dimensions.y = max_coord[1] - min_coord[1]
    detected_obj.bounding_box.dimensions.z = max_coord[2] - min_coord[2]
    box_msg = detected_obj.bounding_box

    detected_obj.name = category
    detected_obj.category = category
    detected_obj.probability = confidence

    object_view = ObjectView()
    object_view.point_cloud = cropped_cloud
    object_view.image = cloud_msg_to_image_msg(cropped_cloud)
    detected_obj.views.append(object_view)

    detected_obj.pose.header.frame_id = frame_id
    detected_obj.pose.header.stamp = rospy.Time.now()
    detected_obj.pose.pose.position = box_msg.center
    detected_obj.pose.pose.position.x = mean_coord[0]
    detected_obj.pose.pose.orientation.w = 1.0

    detected_obj.roi.x_offset = int(bounding_box.x)
    detected_obj.roi.y_offset = int(bounding_box.y)
    detected_obj.roi.width = int(bounding_box.width)
    detected_obj.roi.height = int(bounding_box.height)

    return detected_obj
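
A minimal sketch of calling the function above from a detection pipeline; the box geometry, category, confidence, and frame name are assumptions, and cloud_msg is taken to be an organized sensor_msgs/PointCloud2 already received from a camera:

# Hypothetical caller: turn one 2D detection into an Object message.
# (120, 80) is the assumed top-left corner of the box, 64x48 its size.
box = BoundingBox2D(box_geometry=(120, 80, 64, 48))
obj = get_obj_msg_from_detection(cloud_msg, box,
                                 category='bottle', confidence=0.87,
                                 frame_id='head_rgbd_sensor_rgb_frame')
rospy.loginfo("detected %s at (%.2f, %.2f, %.2f)",
              obj.name, obj.pose.pose.position.x,
              obj.pose.pose.position.y, obj.pose.pose.position.z)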
Example #3
    def image_recognition_cb(self, img_msg):
        if img_msg.images:
            result_list = ObjectList()
            cavities_labels = []
            rospy.loginfo("[{}] images received: {} ".format(
                len(img_msg.images), self.model_name))
            now = datetime.now()
            concat_images = np.zeros((96, 96))
            font = cv2.FONT_HERSHEY_SIMPLEX
            count = 0

            for img in img_msg.images:

                print("count ", count)

                try:
                    result = Object()
                    time = now.strftime("%H:%M:%S")
                    cv_image = self.cvbridge.imgmsg_to_cv2(img, "bgr8")
                    label = self.model.predict_one_label(cv_image)
                    cv2.imwrite(
                        "/tmp/mobileNet_" + str(label) + str(time) + ".jpg",
                        cv_image)
                    cv2.putText(cv_image, str(label), (0, 20), font, 1,
                                (200, 255, 155), 2, cv2.LINE_AA)
                    '''                    
                    if count == 0:
                        cv2.resize(cv_image, (96,96))
                        concat_images = cv_image
                    else:
                        cv2.resize(cv_image, (96,96))
                        concat_images = np.hstack((concat_images, cv_image))
                    '''
                    #self.publish_debug_image(cv_image)

                    count += 1
                    print("predicted label ", label)
                    # cavities_labels.append(label)
                    result.name = label
                    cavities_labels.append(result)

                except CvBridgeError as e:
                    print("inside except cv bridge")
                    print(e)
                    return

            cv2.imwrite("/tmp/classifier_image.jpg", concat_images)
            #self.publish_debug_img(concat_images)
            #self.pub_classifier_debug_image.publish(concat_images)
            # publish the accumulated classification results
            result_list.objects = cavities_labels
            self.pub_result.publish(result_list)
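
The callback above relies on several attributes initialized elsewhere in the node. A minimal constructor sketch for the pieces it touches; the class name, topic name, and classifier loader are assumptions, not taken from the original source:

from cv_bridge import CvBridge

class CavityClassifierNode(object):      # hypothetical class name
    def __init__(self):
        self.cvbridge = CvBridge()
        self.model = load_classifier()   # hypothetical loader for the CNN
        self.model_name = 'mobileNet'
        self.pub_result = rospy.Publisher('~result', ObjectList,
                                          queue_size=1)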
Example #4
    def _insert_object_in_kb(self, name, category, size):
        obj_msg = Object()
        obj_msg.name = name
        obj_msg.category = category

        gripper_pose = self.gripper_controller.get_gripper_pose('base_link')
        obj_msg.pose = gripper_pose
        obj_msg.bounding_box.center.x = gripper_pose.pose.position.x
        obj_msg.bounding_box.center.y = gripper_pose.pose.position.y
        obj_msg.bounding_box.center.z = gripper_pose.pose.position.z
        obj_msg.bounding_box.dimensions.x = size
        obj_msg.bounding_box.dimensions.y = size

        self.kb_interface.insert_obj_instance(name, obj_msg)
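
Example #4 stamps the object with the current gripper pose, so it is meant to be called right after a successful grasp; note that only the x and y extents of the bounding box are set, so dimensions.z keeps its default of 0.0. A minimal call-site sketch with hypothetical values:

# Hypothetical call site at the end of a pick action; a 5 cm
# footprint is assumed for the grasped object.
self._insert_object_in_kb(name='cup_01', category='cup', size=0.05)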
Example #5
    def execute(self, userdata):
        rospy.loginfo("object_detection_task_list_observer_state executed")

        object_list = []

        for task in userdata.task_list:
            if task.type == "source":
                for object_names in task.object_names:
                    obj = Object()
                    obj.name = object_names
                    obj.pose.header.frame_id = "/base_link"
                    obj.pose.pose.position.x = 0.61
                    object_list.append(obj)

        set_object_list(object_list)

        return "success"
Example #6
    def convert_to_ros_msg(obj):
        '''Converts an 'action_execution.geometry.object.Object3d' object to an
        'mas_perception_msgs.msg.Object' message.

        Keyword arguments:
        obj -- an 'action_execution.geometry.object.Object3d' object

        '''
        obj_msg = Object()

        obj_msg.name = obj.id
        obj_msg.category = obj.type
        obj_msg.pose = PoseStampedConverter.convert_to_ros_msg(obj.pose)
        obj_msg.bounding_box = BoundingBoxConverter.convert_to_ros_msg(
            obj.bbox)

        return obj_msg
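
A minimal usage sketch, assuming this method lives on an ObjectConverter class alongside PoseStampedConverter and BoundingBoxConverter, and that Object3d exposes the id, type, pose, and bbox attributes the method reads; the instance values are hypothetical:

from action_execution.geometry.object import Object3d

obj3d = Object3d()      # hypothetical construction; the real class
obj3d.id = 'mug_02'     # may take constructor arguments instead
obj3d.type = 'mug'
# pose and bbox must also be populated before converting
obj_msg = ObjectConverter.convert_to_ros_msg(obj3d)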
Example #7
    def _get_action_result(cloud_msg, plane_list, bounding_boxes, classes,
                           confidences):
        """
        TODO(minhnh) adapt to multiple planes
        :type cloud_msg: PointCloud2
        :type plane_list: PlaneList
        :type bounding_boxes: list
        :type classes: list
        :type confidences: list
        :rtype: DetectSceneResult
        """
        result = DetectSceneResult()
        plane = plane_list.planes[0]
        for index, box in enumerate(bounding_boxes):
            # TODO(minhnh): BoundingBox code returns bad positions and dimensions
            # check if the object is on the plane
            cropped_cloud = crop_organized_cloud_msg(cloud_msg, box)

            obj_coords = crop_cloud_to_xyz(
                cropped_cloud,
                BoundingBox2D(box_geometry=(0, 0, cropped_cloud.width,
                                            cropped_cloud.height)))
            obj_coords = np.reshape(obj_coords, (-1, 3))
            min_coord = np.nanmin(obj_coords, axis=0)
            max_coord = np.nanmax(obj_coords, axis=0)
            mean_coord = np.nanmean(obj_coords, axis=0)

            # fill object geometry info
            detected_obj = Object()
            detected_obj.bounding_box.center.x = mean_coord[0]
            detected_obj.bounding_box.center.y = mean_coord[1]
            detected_obj.bounding_box.center.z = mean_coord[2]
            detected_obj.bounding_box.dimensions.x = max_coord[0] - min_coord[0]
            detected_obj.bounding_box.dimensions.y = max_coord[1] - min_coord[1]
            detected_obj.bounding_box.dimensions.z = max_coord[2] - min_coord[2]
            box_msg = detected_obj.bounding_box

            if not SceneDetectionActionServer.is_object_on_plane(
                    plane, box_msg):
                rospy.logdebug(
                    "skipping object '%s', position (%.3f, %.3f, %.3f)" %
                    (classes[index], box_msg.center.x, box_msg.center.y,
                     box_msg.center.z))
                continue

            rospy.loginfo(
                "adding object '%s', position (%.3f, %.3f, %.3f), dimensions (%.3f, %.3f, %.3f)"
                % (classes[index], box_msg.center.x, box_msg.center.y,
                   box_msg.center.z, box_msg.dimensions.x,
                   box_msg.dimensions.y, box_msg.dimensions.z))

            detected_obj.name = classes[index]
            detected_obj.category = classes[index]
            detected_obj.probability = confidences[index]
            detected_obj.pointcloud = cropped_cloud
            detected_obj.rgb_image = cloud_msg_to_image_msg(cropped_cloud)
            detected_obj.pose.header = plane.header
            detected_obj.pose.pose.position = box_msg.center
            detected_obj.pose.pose.position.x = mean_coord[0]
            detected_obj.pose.pose.orientation.w = 1.0

            plane.object_list.objects.append(detected_obj)
        result.planes.append(plane)
        return result
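
Example #7 filters detections with SceneDetectionActionServer.is_object_on_plane, whose body is not shown here. A plausible sketch of such a test, assuming the plane provides a reference height and treating an object as supported when the bottom of its bounding box lies close to that height; the function body and tolerance are assumptions, not the original implementation:

def is_object_on_plane(plane_height, box_msg, z_tolerance=0.05):
    # Bottom face of the axis-aligned bounding box.
    box_bottom = box_msg.center.z - box_msg.dimensions.z / 2.0
    # Supported if the bottom sits within z_tolerance of the plane.
    return abs(box_bottom - plane_height) < z_tolerance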