コード例 #1
0
    def _send_request(self):
        """Send a RecognizedObjectArray with two hard-coded dummy boxes to the simulator."""

        def _make_box(px, py, pz, ox, oy, oz, ow):
            # Build one "box" object at the given position/orientation.
            obj = RecognizedObject()
            obj.type.key = "box"
            pose = obj.pose.pose.pose
            pose.position.x = px
            pose.position.y = py
            pose.position.z = pz
            pose.orientation.x = ox
            pose.orientation.y = oy
            pose.orientation.z = oz
            pose.orientation.w = ow
            return obj

        request = RecognizedObjectArray()
        request.objects = [
            _make_box(0.1, 0.7, 0, 0.707, -0.707, 0, 0),
            _make_box(0.3, 1, 0, 0.707, 0, 0, 0.707),
        ]
        self._request_sim(request)
コード例 #2
0
    def ar_marker_callback(self, msg):
        """Convert AR marker detections into a RecognizedObjectArray and publish it.

        A marker is kept only when its id indexes into the global
        ``defined_objects`` table and its detected z distance exceeds 0.3.

        Args:
            msg: marker-array message with ``header`` and ``markers`` fields.
        """
        objects_msg = RecognizedObjectArray()
        objects_msg.header = copy.deepcopy(msg.header)

        for marker in msg.markers:
            # Guard clause: skip unknown marker ids and detections too close
            # to the camera (z <= 0.3 -- presumably meters; TODO confirm).
            if marker.id >= len(defined_objects) or \
                    marker.pose.pose.position.z <= 0.3:
                continue

            # Renamed from `object`, which shadowed the builtin.
            recognized = RecognizedObject()
            recognized.pose = self.get_pose_from_pose(marker.pose)
            recognized.pose.header = copy.deepcopy(marker.header)
            recognized.type.key = defined_objects[marker.id]["name"]
            objects_msg.objects.append(recognized)

        self.ar_object_publisher.publish(objects_msg)
コード例 #3
0
ファイル: measure.py プロジェクト: trinhtrannp/hcrdocker
    def execute(self, userdata):
        """Block until at least one recognized object is reported, then succeed.

        Polls /recognized_object_array with a 2 s timeout; on timeout it keeps
        waiting, any other ROSException is re-raised.

        Returns:
            str: the smach outcome 'succeeded'.
        """
        rospy.loginfo("DETECT: Wait for spotting object..")
        recognized_object_array = RecognizedObjectArray()

        while True:
            try:
                recognized_object_array = rospy.wait_for_message(
                    '/recognized_object_array', RecognizedObjectArray,
                    timeout=2)
                recognized_object = str(recognized_object_array.objects)
            except rospy.ROSException as e:
                # str(e) instead of e.message: .message is Python-2 only and
                # was removed in Python 3.
                if 'timeout exceeded' in str(e):
                    rospy.loginfo('no object detected! ')
                    continue  # no object detected, continue with trying
                raise

            # The serialized object list mentions "position" iff it contains
            # at least one object pose.
            if recognized_object.find("position") > 0:
                rospy.loginfo("DETECT: Spot an Object")
                break

        rospy.loginfo("DETECT: SUCCEEDED")
        return 'succeeded'
コード例 #4
0
 def __init__(self):
     """Create the latched publisher, the name-matching rules and the cached array."""
     self.publisher = rospy.Publisher("recognized_objects",
                                      RecognizedObjectArray,
                                      queue_size=1,
                                      latch=True)
     # Map regex prefix "^<type>_" -> object type key.
     self.regex_to_type = {}
     for type_key in ("utl5_small", "utl5_medium", "utl5_large",
                      "duplo_2x4x1"):
         self.regex_to_type["^%s_" % type_key] = type_key
     self.recognized_object_array = RecognizedObjectArray()
     self.update_recognized_objects_array()
コード例 #5
0
    def run(self):
        '''
        Main run function that constructs the recognised object array message
        from the matched object list and noise params, publishing it (plus a
        tf frame per object) at 30 Hz until ROS shutdown.
        :return:
        '''

        rate = rospy.Rate(30)

        while not rospy.is_shutdown():
            try:
                self.matched_object_lock.acquire()

                object_array = RecognizedObjectArray()
                object_array.header.stamp = rospy.Time.now()
                object_array.header.frame_id = self.camera_link_name

                # `matched` renamed from `object`, which shadowed the builtin.
                # Each entry is [pose_msg, name].
                for matched in self.matched_object_list:
                    object_msg = RecognizedObject()
                    object_pose = PoseWithCovarianceStamped()
                    object_pose.header.stamp = rospy.Time.now()
                    object_pose.header.frame_id = self.camera_link_name
                    # Camera-frame pose = camera transform * model pose * noise.
                    object_pose.pose.pose = posemath.toMsg(
                        self.cam_trans * posemath.fromMsg(matched[0]) *
                        self.generate_noise_trans())
                    object_msg.pose = object_pose
                    object_msg.type.key = matched[1]
                    object_array.objects.append(object_msg)

                    # publish tf frame for this object
                    trans = TransformStamped()
                    trans.header.stamp = rospy.Time.now()
                    trans.header.frame_id = self.camera_link_name
                    trans.child_frame_id = matched[1]
                    trans.transform.translation = object_pose.pose.pose.position
                    trans.transform.rotation = object_pose.pose.pose.orientation
                    self.tf_pub.sendTransform(trans)

                self.object_publisher.publish(object_array)

            except rospy.exceptions.ROSInterruptException:
                break

            finally:
                self.matched_object_lock.release()
                rate.sleep()
コード例 #6
0
    def execute(self, goal):
        """Action server callback.

        Runs in a separate thread for each received goal: grabs a camera
        frame, runs the predictor, publishes debug markers/overlay and the
        recognized-object array, then reports the action result.

        Args:
            goal (ObjectRecognitionGoal): action goal
        """
        if goal.use_roi:
            rospy.logwarn('ROI parameter is not supported.')

        try:
            image = self._camera.image(desired_encoding='rgb8', timeout=1.0)
            # TODO: enable tracking mode (reset=False)
            objects = self._predictor.predict(image, reset=True)

            labels = [label for label, _ in objects]
            rospy.logdebug('Recognized: %s', ','.join(labels))

            # publish debug info
            self._publish_markers(objects)
            self._publish_overlay(image, objects)

            header = Header(stamp=rospy.Time.now(), frame_id=self._frame_id)
            roa = RecognizedObjectArray(header=header)

            for idx, (label, pose) in enumerate(objects):
                recognized = RecognizedObject(header=header)
                recognized.type.db = self._predictor.dataset_name
                recognized.type.key = label
                recognized.confidence = self._predictor.get_score(idx)
                recognized.pose.header = header
                recognized.pose.pose.pose = _pose_to_msg(pose)
                roa.objects.append(recognized)

            self._array_publisher.publish(roa)

            self._server.set_succeeded(
                ObjectRecognitionResult(recognized_objects=roa))
        except Exception as ex:
            rospy.logerr('Recognition failed: %s', ex)
            rospy.logdebug(traceback.format_exc())
            self._server.set_aborted(text=str(ex))
コード例 #7
0
    def execute_callback(self, goal):
        """Action callback: report the pose of every known object that
        currently has a TF frame under /world.

        Objects whose transform lookup fails are silently skipped; each
        reported object gets confidence 1.

        Args:
            goal: ObjectRecognition action goal (use_roi unsupported).
        """
        now = rospy.Time.now()

        r_array = RecognizedObjectArray()
        r_array.header.stamp = now
        r_array.header.frame_id = self.frame_id

        if goal.use_roi:
            # NOTE(review): raising here aborts the callback rather than
            # merely warning -- confirm this is the intended behavior.
            raise Warning("use_roi in ObjectRecognitionRequest ignored")

        for o in self.object_names:
            try:
                object_tf = self.listener.lookupTransform(
                    "/world", o, rospy.Time(0))
                # Single-argument print(...) works identically on
                # Python 2 and 3 (the old statement form is Py2-only).
                print(o)
                print(object_tf)
                print("")

                p = PoseWithCovarianceStamped()
                p.pose.pose = rox_msg.transform2pose_msg(object_tf)
                p.header.stamp = now
                p.header.frame_id = self.frame_id

                r = RecognizedObject()
                r.header.stamp = now
                r.header.frame_id = self.frame_id
                r.type.key = o
                r.confidence = 1
                r.pose = p

                r_array.objects.append(r)

            except (tf.LookupException, tf.ConnectivityException,
                    tf.ExtrapolationException):
                # Object has no TF frame right now -- skip it.
                continue

        result = ObjectRecognitionResult()
        result.recognized_objects = r_array

        self.server.set_succeeded(result=result)
コード例 #8
0
    def execute(self, goal):
        """Fake recognition server: echo one requested object type back.

        Pops a single object type from the goal, wraps it in a
        RecognizedObjectArray, waits ``goal.refine_pose_time`` and succeeds.
        """
        rospy.loginfo("Executing the server -- Goal Detected")
        result = RecognizeResult()

        target_type = goal.objects.pop()
        rospy.loginfo("Goal Received:: " + str(goal))
        rospy.loginfo("One of the Goals are:: " + str(target_type))

        candidate = RecognizedObject()
        candidate.type = target_type
        rospy.loginfo("PossibleObject:: " + str(candidate))

        detected = RecognizedObjectArray()
        detected.objects.append(candidate)
        result.recognized_objects = detected

        rospy.loginfo("Results that are sending to Object Detection SM :: " +
                      str(result))
        rospy.sleep(goal.refine_pose_time)

        self.server.set_succeeded(result)
コード例 #9
0
    def calback_gazebo_state(self, msg):
        '''
        Callback function for getting the model states from gazebo. Matches
        every configured object name against the incoming model names and
        caches the hits as [pose, name] pairs under the lock.

        (The name keeps the original misspelling "calback" so existing
        subscriber registrations are not broken.)

        :param msg: gazebo model states (parallel ``name`` / ``pose`` lists)
        :return:
        '''
        try:
            self.matched_object_lock.acquire()
            self.matched_object_list = []
            # A RecognizedObjectArray used to be built here but was never
            # published or returned -- dead code, removed.
            for object_name in self.object_names:
                try:
                    index = msg.name.index(object_name)
                    self.matched_object_list.append(
                        [msg.pose[index], msg.name[index]])
                except ValueError:
                    # Model not present in this state message.
                    continue

        finally:
            self.matched_object_lock.release()
コード例 #10
0
ファイル: measure.py プロジェクト: trinhtrannp/hcrdocker
    def record_frame_list(self, frame_list):
        """Collect object poses from /recognized_object_array into frame_list.

        Repeatedly waits for a RecognizedObjectArray message, parses object
        positions/orientations out of its *string* representation, and
        appends each pose as a Frame unless an equivalent frame is already
        present. Stops after ``self.time_to_measure`` seconds or once
        ``self.max_objects`` frames are recorded, and returns the list.

        :param frame_list: list of Frame objects to extend in place
        :return: the (possibly extended) frame_list
        """

        recognized_object_array = RecognizedObjectArray()

        start_time = rospy.get_time()
        # Keep sampling until the time budget is spent or enough frames exist.
        while (rospy.get_time() - start_time) < self.time_to_measure and len(frame_list) < self.max_objects:
            try:
                recognized_object_array = rospy.wait_for_message('/recognized_object_array',
                                                                 RecognizedObjectArray, timeout=2)
                # The message is flattened to text and mined with find()/regex
                # below instead of reading the message fields directly.
                recognized_object =  str(recognized_object_array.objects)

            except rospy.ROSException as e:
                # NOTE(review): e.message is Python-2 only; would raise
                # AttributeError under Python 3.
                if 'timeout exceeded' in e.message:
                    rospy.loginfo('no object detected! ')
                    continue  # no object detected, continue with trying

                else:
                    raise e

            index = 0
            index_pos = 1
            # Walk through the text, one "position"/"orientation"/"covariance"
            # triple per iteration; find() returns -1 when no match is left.
            while index_pos > 0:

                index_pos = recognized_object.find("position", index)
                index_ori = recognized_object.find("orientation", index)
                index_cov = recognized_object.find("covariance", index)
                # NOTE(review): find() returns absolute offsets, so adding
                # index_pos to index can jump past later occurrences --
                # looks like it may skip objects; verify against real data.
                index = index + index_pos

                # rospy.loginfo('index: \n{}' .format(index))
                # rospy.loginfo('index_pos: \n{}' .format(index_pos))

                object_pose = PoseStamped()
                object_pose.header.frame_id = "camera_rgb_optical_frame"

                if index_pos > 0:
                    # Slice the text between the section markers and pull the
                    # numbers out with a signed-decimal regex.
                    obj_pos = str(recognized_object[index_pos:index_ori])
                    obj_ori = str(recognized_object[index_ori:index_cov])

                    match = re.findall(r'-?\d+\.?\d*', str(obj_pos))
                    object_pose.pose.position.x = float(match[0])
                    object_pose.pose.position.y = float(match[1])
                    object_pose.pose.position.z = float(match[2])

                    match = re.findall(r'-?\d+\.?\d*', str(obj_ori))
                    object_pose.pose.orientation.x = float(match[0])
                    object_pose.pose.orientation.y = float(match[1])
                    object_pose.pose.orientation.z = float(match[2])
                    object_pose.pose.orientation.w = float(match[3])

                    frame = Frame()
                    frame.set_pose(object_pose)
                    # rospy.loginfo('obj: \n{}' .format(obj))

                    # append only if object not exist
                    for frame_tar in frame_list:
                        if compare_objects(frame_tar, frame, self.max_distance_between_frames):
                            break
                    else:
                        # No existing frame matched -> record the new one.
                        frame_list.append(frame)
                        rospy.loginfo("append object to list")

        return frame_list
コード例 #11
0
    # drop location pose in odom frame
    sm.userdata.drop_target_pose = PoseStamped()
    sm.userdata.drop_target_pose.header.frame_id = 'odom'

    # move_base goal pose in odom frame
    sm.userdata.move_base_odom = PoseStamped()

    # move arm trajectory
    sm.userdata.arm_plan = RobotTrajectory()

    # joints' retracted position in joint space (arm joints only)
    sm.userdata.retract_pos = [0.0, 0.0, 0.80, -1.80, 0.0, 0.0]

    # array of RecognizedObject
    sm.userdata.object_array = RecognizedObjectArray()
    # define a object_recognition goal
    sm.userdata.ork_goal = ObjectRecognitionGoal()
    sm.userdata.ork_goal.use_roi = False
    sm.userdata.ork_frame = ''

    sm.userdata.picked_objects = ['apple', 'big coffee', 'monster']
    sm.userdata.target_object = ''

    with sm:

        def ork_result_cb(userdata, status, result):
            # On a successful recognition goal, stash the detected objects
            # in userdata; empty results are ignored.
            if status != GoalStatus.SUCCEEDED:
                return
            detected = result.recognized_objects.objects
            if len(detected) > 0:
                userdata.ork_object_array = detected
コード例 #12
0
    def execute_overhead_callback(self, goal):
        """Detect aruco-tagged payloads in the overhead camera and report poses.

        Triggers a fresh overhead image, detects aruco grid boards for every
        known payload/link marker, composes camera->tag->object transforms
        into world-frame poses, and succeeds the action with a
        RecognizedObjectArray result.

        Raises:
            Exception: when camera info or a new image never arrives.
            Warning: when goal.use_roi is set (aborts the callback).
        """

        now = rospy.Time.now()

        if self.ros_overhead_cam_info is None:
            raise Exception("Camera Info not received")

        # Remember the last image stamp so we can wait for a *newer* frame.
        self.last_ros_image_stamp = self.ros_image_stamp

        try:
            # Best-effort: ask the camera to capture; ignore trigger failures.
            self.ros_overhead_trigger.wait_for_service(timeout=0.1)
            self.ros_overhead_trigger()
        except:
            pass

        # Poll (up to ~62 s) until a new image shows up.
        wait_count = 0
        while self.ros_overhead_cam_info.ros_image is None or self.ros_image_stamp == self.last_ros_image_stamp:
            if wait_count > 250:
                raise Exception("Image receive timeout")
            time.sleep(0.25)
            wait_count += 1

        print "Past timeout"
        #stored data in local function variable, so changed all references off of self.ros_image
        img = self.ros_overhead_cam_info.ros_image

        if img is None:
            raise Exception("Camera image data not received")
        print "image received"
        r_array = RecognizedObjectArray()
        r_array.header.stamp = now
        r_array.header.frame_id = self.frame_id

        if goal.use_roi:
            # NOTE(review): raising aborts the action instead of warning.
            raise Warning("use_roi in ObjectRecognitionRequest ignored")

        # Collect marker definitions for every payload and link marker,
        # keyed by the object/frame name they belong to.
        search_objects = dict()
        with self.payloads_lock:
            for _, p in self.payloads.items():
                search_objects[p.name] = p.markers
            for _, l in self.link_markers.items():
                search_objects[l.header.frame_id] = l.markers

                #TODO: handle multiple aruco dictionaries, currently only use one
        print "past payload lock"
        # Gather every dictionary referenced by the markers; detection below
        # assumes they all share a single dictionary.
        aruco_dicts = set()
        for m in search_objects.itervalues():
            for m2 in m:
                aruco_dicts.add(m2.marker.dictionary)
        print len(aruco_dicts)
        aruco_dicts.add("DICT_ARUCO_ORIGINAL")
        assert len(
            aruco_dicts
        ) == 1, "Currently all tags must be from the same dictionary"

        # Resolve the dictionary name to the cv2.aruco constant.
        if not hasattr(cv2.aruco, next(iter(aruco_dicts))):
            raise ValueError("Invalid aruco-dict value")
        aruco_dict_id = getattr(cv2.aruco, next(iter(aruco_dicts)))
        aruco_dict = cv2.aruco.Dictionary_get(aruco_dict_id)

        # World -> camera transform for composing detected poses.
        c_pose = self.listener.lookupTransform(
            "/world", self.ros_overhead_cam_info.ros_data.header.frame_id,
            rospy.Time(0))
        print "c_pose"
        print c_pose.p
        print c_pose.R
        # Sub-pixel corner refinement for more accurate board pose estimates.
        parameters = cv2.aruco.DetectorParameters_create()
        parameters.cornerRefinementWinSize = 32

        parameters.cornerRefinementMethod = cv2.aruco.CORNER_REFINE_SUBPIX
        corners, ids, rejectedImgPoints = cv2.aruco.detectMarkers(
            img, aruco_dict, parameters=parameters)
        print ids
        for object_name, payload_markers in search_objects.items():

            # marker name -> world pose of the object, one entry per tag seen.
            tag_object_poses = dict()

            for m in payload_markers:
                board = get_aruco_gridboard(m.marker, aruco_dict)
                #board = cv2.aruco.GridBoard_create(4, 4, .04, .0075, aruco_dict, 16)

                # retval > 0 means the board was found in the image.
                retval, rvec, tvec = cv2.aruco.estimatePoseBoard(
                    corners, ids, board, self.ros_overhead_cam_info.camMatrix,
                    self.ros_overhead_cam_info.distCoeffs)
                print retval
                print rvec
                print tvec
                if (retval > 0):
                    # NOTE(review): hard-coded to two specific marker names --
                    # other detected boards are ignored; confirm intended.
                    if (m.name == "leeward_mid_panel_marker_1"
                            or m.name == "leeward_tip_panel_marker_1"):
                        print "Found tag: " + m.name

                        try:
                            #o_pose = self.listener.lookupTransform(m.name, object_name, rospy.Time(0))
                            # Marker -> object transform (inverse of stored pose).
                            o_pose = rox_msg.msg2transform(m.pose).inv()
                            print object_name
                            print o_pose
                            print ""
                            print "o_pose"
                            print o_pose.p
                            print o_pose.R
                            # Camera -> marker transform from the aruco result.
                            Ra, b = cv2.Rodrigues(rvec)
                            a_pose = rox.Transform(Ra, tvec)
                            print "a_pose"
                            print a_pose.p
                            print a_pose.R
                            # world->object = world->cam * cam->marker * marker->object
                            tag_object_poses[m.name] = c_pose * (a_pose *
                                                                 o_pose)

                        except (tf.LookupException, tf.ConnectivityException,
                                tf.ExtrapolationException):
                            continue

                    #TODO: merge tag data if multiple tags detected
            if len(tag_object_poses) > 0:
                # Only the first tag's pose is reported (no merging yet).
                p = PoseWithCovarianceStamped()
                p.pose.pose = rox_msg.transform2pose_msg(
                    tag_object_poses.itervalues().next())
                p.header.stamp = now
                p.header.frame_id = self.frame_id

                r = RecognizedObject()
                r.header.stamp = now
                r.header.frame_id = self.frame_id
                r.type.key = object_name
                r.confidence = 1
                r.pose = p

                r_array.objects.append(r)
                """for o in self.object_names:
            try:
                (trans,rot) = self.listener.lookupTransform("/world", o, rospy.Time(0))
                print o
                print trans
                print rot
                print ""

                p=PoseWithCovarianceStamped()
                p.pose.pose=Pose(Point(trans[0], trans[1], trans[2]), Quaternion(rot[0], rot[1], rot[2], rot[3]))
                p.header.stamp=now
                p.header.frame_id=self.frame_id

                r=RecognizedObject()
                r.header.stamp=now
                r.header.frame_id=self.frame_id
                r.type.key=o
                r.confidence=1
                r.pose=p

                r_array.objects.append(r)

                except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
                continue"""
        print "Succeeded in finding tags"
        result = ObjectRecognitionResult()
        result.recognized_objects = r_array

        self.overhead_server.set_succeeded(result=result)