def get_pose(self, topic):
        image = self.images[topic]
        cvImage = self.bridge.imgmsg_to_cv(image, 'rgb8')
        img_raw = cv2util.cvmat2np(cvImage)

        camera_matrix = np.matrix(
            np.reshape(self.camera_infos[topic].K, (3, 3)))
        dist_coeffs = np.matrix(self.camera_infos[topic].D)
        return self.detector.calculate_object_pose(img_raw, camera_matrix,
                                                   dist_coeffs, False)
    def get_position(self):
        '''
        Returns the translation (x, y, z) of the recognized checkerboard's
        center, estimated with subpixel accuracy.
        '''
        r = rospy.Rate(10)  # Hz
        while rospy.Time.now() - self.latest_image.header.stamp > rospy.Duration(1) and not rospy.is_shutdown():
            print 'no image received'
            print rospy.Time.now(), self.latest_image.header.stamp
            r.sleep()
        cvImage = self.bridge.imgmsg_to_cv(self.latest_image, "mono8")
        image_raw = cv2util.cvmat2np(cvImage)
        #image_processed = cv2.undistort(image_raw, self.cm, self.dc)

        #points=self.detector.detect_image_points(image_processed,True)
        (rmat, tvec, image_points) = self.detector.calculate_object_pose_ransac(image_raw, self.cm, self.dc, True)
        #print tvec[1]

        return tvec[0][0], tvec[1][0], tvec[2][0]
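A minimal usage sketch for the snippet above, assuming a hypothetical wrapper class (here called CheckerboardMonitor) that owns bridge, detector, cm, dc and the image subscriber; the class, node and variable names below are not from the source:

import rospy

if __name__ == '__main__':
    rospy.init_node('checkerboard_position_node')
    monitor = CheckerboardMonitor()   # hypothetical class providing get_position()
    rospy.sleep(1.0)                  # give the image callback a chance to fire once
    x, y, z = monitor.get_position()  # checkerboard centre translation in metres
    print "checkerboard at x=%.4f y=%.4f z=%.4f" % (x, y, z)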
Example #3
    def _checkHandle(self, req):
        '''
        Service handle for the "Visible" service.
        Grabs the latest images, converts them and checks whether the
        checkerboard is visible in each camera.

        @param req: service request
        @type  req: VisibleRequest() message

        @return: VisibleResponse() message
        '''
        self.image_received = [False] * self.numCams
        visible = []
        while not all(self.image_received):
            print "waiting for images"
            rospy.sleep(1)
        if all(self.image_received):
            print "=== ALL IMAGES RECEIVED ==="
        self.image_received = [False] * self.numCams
        for id in range(self.numCams):
            image = self.image[id]

            cvImage = self.bridge.imgmsg_to_cv(image, 'rgb8')
            img_raw = cv2util.cvmat2np(cvImage)
            try:
                self.detector.detect_image_points(img_raw, False, True)
                visible.append(True)
            except self.detector.NoPatternFoundException:
                rospy.logwarn("No cb found")
                visible.append(False)
            # grab image messages
        #print '%s checkerboards found --> return %s'%(sum(visible),all(visible))
        response = VisibleResponse()
        response.every = False
        response.master = False

        if all(visible):
            response.every = True
            response.master = True

        #elif visible[0]:
        #   response.master = True
        return response
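A sketch of how a handler like _checkHandle could be advertised as a ROS service; the node name, service name and the import path of the Visible service type (the package that defines VisibleResponse) are assumptions:

import rospy
from cob_calibration_srvs.srv import Visible  # assumed import path for Visible/VisibleResponse

def advertise_visibility_service(checker):
    # 'checker' is assumed to be the object that owns _checkHandle, bridge, detector, ...
    rospy.init_node('checkerboard_visibility')
    rospy.Service('/image_capture/visible', Visible, checker._checkHandle)
    rospy.loginfo("visibility service ready")
    rospy.spin()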
Example #4
    def get_position(self):
        '''
        Returns the pose (rotation matrix, translation vector) of the recognized
        checkerboard together with the frame_id of the latest image.
        '''
        r = rospy.Rate(10)  # Hz
        while (rospy.Time.now() - self.latest_image.header.stamp > rospy.Duration(1)
               and not rospy.is_shutdown()):
            print 'no image received'
            print rospy.Time.now(), self.latest_image.header.stamp
            r.sleep()
        cvImage = self.bridge.imgmsg_to_cv(self.latest_image, "mono8")
        image_raw = cv2util.cvmat2np(cvImage)
        #image_processed = cv2.undistort(image_raw, self.cm, self.dc)

        #points=self.detector.detect_image_points(image_processed,True)
        (rmat, tvec) = self.detector.calculate_object_pose(image_raw, self.cm,
                                                           self.dc, True)
        #print tvec[1]

        return (rmat, tvec), self.latest_image.header.frame_id
    def _checkHandle(self, req):
        '''
        Service handle for the "Visible" service.
        Grabs the latest images, converts them and checks whether the
        checkerboard is visible in each camera.

        @param req: service request
        @type  req: VisibleRequest() message

        @return: VisibleResponse() message
        '''
        self.image_received = [False] * self.numCams
        visible = []
        while not all(self.image_received):
            print "waiting for images"
            rospy.sleep(1)
        if all(self.image_received):
            print "=== ALL IMAGES RECEIVED ==="
        self.image_received = [False] * self.numCams
        for id in range(self.numCams):
            image = self.image[id]

            cvImage = self.bridge.imgmsg_to_cv(image, 'rgb8')
            img_raw = cv2util.cvmat2np(cvImage)
            visible.append(self.detector.detect_image_points(
                img_raw, False, True) is not None)
            # grab image messages
        #print '%s checkerboards found --> return %s'%(sum(visible),all(visible))
        response = VisibleResponse()
        response.every = False
        response.master = False

        if all(visible):
            response.every = True
            response.master = True

        elif visible[0]:
            response.master = True
        return response
Example #6
    def _capture_and_pub(self, sample_id, target_id, chain_id, pattern_size, square_size):
        '''
        Main capturing function. Gets a set of recent messages for all needed topics.
        Processes messages and creates RobotMeasurement message which is published.
        
        @param sample_id: Sample identifier (e.g. sample01)
        @type  sample_id: string
        
        @param target_id: Name of checkerboard (e.g. cb_9x6)
        @type  target_id: string
        
        @param chain_id: Name of dh chain to which checkerboard is attached (e.g. arm_chain)
        @type  chain_id: string
        
        @param pattern_size: Size of checkerboard pattern as defined by opencv (e.g. (9, 6))
        @type  pattern_size: tuple(x, y)
        
        @param square_size: Size of checkerboard squares (in m)
        @type  square_size: float
        '''
        # capture measurements
        # --------------------
        self._left_received = False
        self._right_received = False
        self._kinect_rgb_received = False
        start_time = rospy.Time.now()
        while (not self._left_received or not self._right_received or not self._kinect_rgb_received):
            rospy.sleep(0.005)
            # print warning every 2 seconds if one of the messages is still missing...
            if start_time + rospy.Duration(2.0) < rospy.Time.now():
                if not self._left_received: print "--> still waiting for sample from left"
                if not self._right_received: print "--> still waiting for sample from right"
                if not self._kinect_rgb_received: print "--> still waiting for sample from kinect"
                start_time = rospy.Time.now()
        latest_left = self._left
        latest_right = self._right
        latest_kinect_rgb = self._kinect_rgb
        
        self._torso_joint_msg_received = False
        self._arm_joint_msg_received = False
        start_time = rospy.Time.now()
        while (not self._torso_joint_msg_received or not self._arm_joint_msg_received):
            rospy.sleep(0.005)
            # print warning every 2 seconds if one of the messages is still missing...
            if start_time + rospy.Duration(2.0) < rospy.Time.now():
                if not self._torso_joint_msg_received: print "--> still waiting for torso joint states"
                if not self._arm_joint_msg_received: print "--> still waiting for arm joint states"
                start_time = rospy.Time.now()
        latest_torso = self._torso_joint_msg
        latest_arm = self._arm_joint_msg
        
        # set up checkerboard and checkerboard detector
        # ---------------------------------------------
        checkerboard = Checkerboard(pattern_size, square_size)
        checkerboard_detector = CheckerboardDetector(checkerboard)
        
        # detect cb left
        # --------------
        cvImage = self.bridge.imgmsg_to_cv(latest_left["image_rect"], "mono8")
        image = cv2util.cvmat2np(cvImage)
        
        corners = checkerboard_detector.detect_image_points(image, is_grayscale=True)
        if corners is not None:
            print "cb found: left"
            img_points_left = []
            for (x, y) in corners.reshape(-1, 2):
                img_points_left.append(ImagePoint(x, y))
        else:
            # cb not found
            return False
        
        # create camera msg left
        # ----------------------
        cam_msg_left = CameraMeasurement()
        cam_msg_left.camera_id = "left"
        cam_msg_left.header.stamp = latest_left["camera_info"].header.stamp
        cam_msg_left.cam_info     = latest_left["camera_info"]
        cam_msg_left.image_points = img_points_left
        cam_msg_left.verbose      = False
        #cam_msg_left.image        = latest_left["image_color"]
        #cam_msg_left.image_rect   = latest_left["image_rect"]
        #cam_msg_left.features    = # Not implemented here
        
        # detect cb right
        # --------------
        cvImage = self.bridge.imgmsg_to_cv(latest_right["image_rect"], "mono8")
        image = cv2util.cvmat2np(cvImage)
        
        corners = checkerboard_detector.detect_image_points(image, is_grayscale=True)
        if corners is not None:
            print "cb found: right"
            img_points_right = []
            for (x, y) in corners.reshape(-1, 2):
                img_points_right.append(ImagePoint(x, y))
        else:
            # cb not found
            return False
           
        # create camera msg right
        # -----------------------
        cam_msg_right = CameraMeasurement()
        cam_msg_right.camera_id = "right"
        cam_msg_right.header.stamp = latest_right["camera_info"].header.stamp
        cam_msg_right.cam_info     = latest_right["camera_info"]
        cam_msg_right.image_points = img_points_right
        cam_msg_right.verbose      = False
        #cam_msg_right.image        = latest_right["image_color"]
        #cam_msg_right.image_rect   = latest_right["image_rect"]
        #cam_msg_right.features    = # Not implemented here

        # detect cb kinect_rgb
        # --------------------
        cvImage = self.bridge.imgmsg_to_cv(latest_kinect_rgb["image_color"], "mono8")
        image = cv2util.cvmat2np(cvImage)
        
        corners = checkerboard_detector.detect_image_points(image, is_grayscale=True)
        if corners is not None:
            print "cb found: kinect_rgb"
            img_points_kinect_rgb = []
            for (x, y) in corners.reshape(-1, 2):
                img_points_kinect_rgb.append(ImagePoint(x, y))
        else:
            # cb not found
            return False
           
        # create camera msg kinect_rgb
        # ----------------------------
        cam_msg_kinect_rgb = CameraMeasurement()
        cam_msg_kinect_rgb.camera_id = "kinect_rgb"
        cam_msg_kinect_rgb.header.stamp = latest_kinect_rgb["camera_info"].header.stamp
        cam_msg_kinect_rgb.cam_info     = latest_kinect_rgb["camera_info"]
        cam_msg_kinect_rgb.image_points = img_points_kinect_rgb
        cam_msg_kinect_rgb.verbose      = False
        #cam_msg_kinect_rgb.image        = latest_kinect_rgb["image_color"]
        #cam_msg_kinect_rgb.image_rect   = latest_kinect_rgb["image_rect"]
        #cam_msg_kinect_rgb.features    = # Not implemented here
        
        # create torso_chain msg
        # ----------------------
        torso_chain_msg = ChainMeasurement()
        torso_chain_msg.header = latest_torso.header
        torso_chain_msg.chain_id = "torso_chain"
        torso_chain_msg.chain_state = latest_torso
        
        # create arm_chain msg
        # --------------------
        arm_chain_msg = ChainMeasurement()
        arm_chain_msg.header = latest_arm.header
        arm_chain_msg.chain_id = "arm_chain"
        arm_chain_msg.chain_state = latest_arm
        
        # DEBUG publish pic
        # -----------------
        self._image_pub_left.publish(latest_left["image_color"])
        self._image_pub_right.publish(latest_right["image_color"])
        self._image_pub_kinect_rgb.publish(latest_kinect_rgb["image_color"])
        
        # create robot measurement msg and publish
        # -----------------
        robot_msg = RobotMeasurement()
        robot_msg.sample_id = sample_id
        robot_msg.target_id = target_id
        robot_msg.chain_id = chain_id
        robot_msg.M_cam = [cam_msg_left, cam_msg_right, cam_msg_kinect_rgb]
        robot_msg.M_chain = [torso_chain_msg, arm_chain_msg]
        self._robot_measurement_pub.publish(robot_msg)
        
        return True
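A sketch of an outer sampling loop that could drive _capture_and_pub; the sample count, checkerboard parameters and the collector object are assumptions, not part of the source:

import rospy

def run_capture_loop(collector, n_samples=20):
    # 'collector' is assumed to be the object defining _capture_and_pub and its subscribers
    pattern_size = (9, 6)  # inner corners of the board (OpenCV convention)
    square_size = 0.03     # edge length of one square in metres
    for i in range(n_samples):
        raw_input("Move the robot to pose %d and press <Enter> to capture..." % i)
        ok = collector._capture_and_pub("sample%02d" % i, "cb_9x6", "arm_chain",
                                        pattern_size, square_size)
        if not ok:
            rospy.logwarn("checkerboard not visible in all cameras, sample %d skipped" % i)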
    def _capture_and_pub(self, sample_id, target_id, chain_id, pattern_size, square_size):
        '''
        Main capturing function. Gets a set of recent messages for all needed topics.
        Processes messages and creates RobotMeasurement message which is published.

        @param sample_id: Sample identifier (e.g. sample01)
        @type  sample_id: string

        @param target_id: Name of checkerboard (e.g. cb_9x6)
        @type  target_id: string

        @param chain_id: Name of dh chain to which checkerboard is attached (e.g. arm_chain)
        @type  chain_id: string

        @param pattern_size: Size of checkerboard pattern as defined by opencv (e.g. (9, 6))
        @type  pattern_size: tuple(x, y)

        @param square_size: Size of checkerboard squares (in m)
        @type  square_size: float
        '''
        # capture measurements
        # --------------------
        self._left_received = False
        self._right_received = False
        self._kinect_rgb_received = False
        start_time = rospy.Time.now()
        while (not self._left_received):
            rospy.sleep(0.005)
            # print warning every 2 seconds if one of the messages is still missing...
            if start_time + rospy.Duration(2.0) < rospy.Time.now():
                if not self._left_received:
                    print "--> still waiting for sample from left"
                start_time = rospy.Time.now()
	print "got sample"
        latest_left = self._left

        self._torso_joint_msg_received = False
        self._arm_joint_msg_received = False
        # set up checkerboard and checkerboard detector
        # ---------------------------------------------
        checkerboard = Checkerboard(pattern_size, square_size)
        checkerboard_detector = CheckerboardDetector(checkerboard)

        # detect cb left
        # --------------
        cvImage = self.bridge.imgmsg_to_cv(latest_left["image"], "mono8")
        image = cv2util.cvmat2np(cvImage)

        corners = checkerboard_detector.detect_image_points(
            image, is_grayscale=True)
        if corners is not None:
            print "cb found: left"
            img_points_left = []
            for (x, y) in corners.reshape(-1, 2):
                img_points_left.append(ImagePoint(x, y))
        else:
            # cb not found
            return False

        # create camera msg left
        # ----------------------
        cam_msg_left = CameraMeasurement()
        cam_msg_left.camera_id = "left"
        cam_msg_left.header.stamp = latest_left["image"].header.stamp
        #cam_msg_left.cam_info = latest_left["camera_info"]
        cam_msg_left.image_points = img_points_left
        cam_msg_left.verbose = False
        #cam_msg_left.image        = latest_left["image_color"]
        #cam_msg_left.image_rect   = latest_left["image_rect"]
        #cam_msg_left.features    = # Not implemented here

        # create chain msgs
        # ----------------------
        transformations = []
        print self.transformations
        for (key, links) in self.transformations.iteritems():
            chain_msg = ChainMeasurement()
            chain_msg.chain_id = key
            while not rospy.is_shutdown():
                try:
                    (trans, rot) = self.listener.lookupTransform(
                        links[0], links[1], rospy.Time(0))
                    break
                except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
		    print "error looking up transformation from ", links[0], " to ", links[1]
                    rospy.sleep(0.5)
            chain_msg.translation = trans
            chain_msg.rotation = rot
            transformations.append(chain_msg)
        # DEBUG publish pic
        # -----------------
        self._image_pub_left.publish(latest_left["image"])

        # create robot measurement msg and publish
        # -----------------
        robot_msg = RobotMeasurement()
        robot_msg.sample_id = sample_id
        robot_msg.target_id = target_id
        robot_msg.chain_id = chain_id
        robot_msg.M_cam = [cam_msg_left]
        robot_msg.M_chain = transformations
        self._robot_measurement_pub.publish(robot_msg)

        return True
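The transform lookup in the loop above needs a tf listener and a mapping from chain name to a (parent frame, child frame) pair; a sketch of that setup, with the frame names as placeholder assumptions:

import tf

def setup_transform_listener(collector):
    # frame pairs are illustrative; the real ones depend on the robot's URDF/calibration config
    collector.listener = tf.TransformListener()
    collector.transformations = {
        "arm_chain": ("base_link", "arm_7_link"),
        "torso_chain": ("base_link", "head_axis_link"),
    }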
Example #8
    def _capture_and_pub(self, sample_id, target_id, chain_id, pattern_size,
                         square_size):
        '''
        Main capturing function. Gets a set of recent messages for all needed topics.
        Processes messages and creates RobotMeasurement message which is published.

        @param sample_id: Sample identifier (e.g. sample01)
        @type  sample_id: string

        @param target_id: Name of checkerboard (e.g. cb_9x6)
        @type  target_id: string

        @param chain_id: Name of dh chain to which checkerboard is attached (e.g. arm_chain)
        @type  chain_id: string

        @param pattern_size: Size of checkerboard pattern as defined by opencv (e.g. (9, 6))
        @type  pattern_size: tuple(x, y)

        @param square_size: Size of checkerboard squares (in m)
        @type  square_size: float
        '''
        # capture measurements

        # --------------------
        # create robot measurement msg and publish
        # -----------------
        robot_msg = RobotMeasurement()
        robot_msg.sample_id = sample_id
        robot_msg.target_id = target_id
        robot_msg.chain_id = chain_id

        # --------------------
        # receive images
        # -----------------
        for name in self._images_received:
            self._images_received[name] = False
        for name in self._transformations_received:
            self._transformations_received[name] = False
        print self._images_received

        start_time = rospy.Time.now()
        while (not all(self._images_received.values())
               or not all(self._transformations_received.values())):
            rospy.sleep(0.005)
            # print warning every 2 seconds if one of the messages is still
            # missing...
            if start_time + rospy.Duration(2.0) < rospy.Time.now():
                for name, v in self._images_received.iteritems():
                    if not v:
                        print "--> still waiting for sample from %s" % name
                for name, v in self._transformations_received.iteritems():
                    if not v:
                        print "--> still waiting for sample from %s" % name
                start_time = rospy.Time.now()
        print "got sample"
        #latest_left = self._left

        # set up checkerboard and checkerboard detector
        # ---------------------------------------------
        checkerboard = Checkerboard(pattern_size, square_size)
        checkerboard_detector = CheckerboardDetector(checkerboard)

        # detect cb
        # --------------
        for name, image in self._images.iteritems():
            image = image["image"]
            #print image.header
            cvImage = self.bridge.imgmsg_to_cv(image, "mono8")
            imagecv = cv2util.cvmat2np(cvImage)

            try:
                corners = checkerboard_detector.detect_image_points(
                    imagecv, is_grayscale=True)
            except Exception:
                # cb not found
                rospy.logwarn("No calibration pattern found for: '%s'" % name)
                return False
            else:
                print "cb found: %s" % name
                img_points = []
                for (x, y) in corners.reshape(-1, 2):
                    img_points.append(ImagePoint(x, y))

            # create camera msg left
            # ----------------------
            cam_msg = CameraMeasurement()
            cam_msg.camera_id = name
            #print "crash before"
            cam_msg.header.stamp = image.header.stamp
            #print "crash after"
            #cam_msg.cam_info = image["camera_info"]
            cam_msg.image_points = img_points
            cam_msg.verbose = False
            robot_msg.M_cam.append(cam_msg)
            cam_msg.image = image

            # print cam_msg.camera_id
            # print cam_msg.header
            # cam_msg.image_rect   = latest_left["image_rect"]
            # cam_msg.features    = # Not implemented here

        # ----------------------
        # DEBUG publish pic
        # -----------------
        # self._image_pub_left.publish(latest_left["image"])

        # ----------------------
        # Fill robot_msg
        # ----------------------
        robot_msg.M_chain = self.transformations.values()

        self._robot_measurement_pub.publish(robot_msg)

        return True
Example #9
    def _capture_and_pub(self, sample_id, target_id, chain_id, pattern_size,
                         square_size):
        '''
        Main capturing function. Gets a set of recent messages for all needed topics.
        Processes messages and creates RobotMeasurement message which is published.
        
        @param sample_id: Sample identifier (e.g. sample01)
        @type  sample_id: string
        
        @param target_id: Name of checkerboard (e.g. cb_9x6)
        @type  target_id: string
        
        @param chain_id: Name of dh chain to which checkerboard is attached (e.g. arm_chain)
        @type  chain_id: string
        
        @param pattern_size: Size of checkerboard pattern as defined by opencv (e.g. (9, 6))
        @type  pattern_size: tuple(x, y)
        
        @param square_size: Size of checkerboard squares (in m)
        @type  square_size: float
        '''
        # capture measurements
        # --------------------
        self._left_received = False
        self._right_received = False
        self._kinect_rgb_received = False
        start_time = rospy.Time.now()
        while (not self._left_received or not self._right_received
               or not self._kinect_rgb_received):
            rospy.sleep(0.005)
            # print warning every 2 seconds if one of the messages is still missing...
            if start_time + rospy.Duration(2.0) < rospy.Time.now():
                if not self._left_received:
                    print "--> still waiting for sample from left"
                if not self._right_received:
                    print "--> still waiting for sample from right"
                if not self._kinect_rgb_received:
                    print "--> still waiting for sample from kinect"
                start_time = rospy.Time.now()
        latest_left = self._left
        latest_right = self._right
        latest_kinect_rgb = self._kinect_rgb

        self._torso_joint_msg_received = False
        self._arm_joint_msg_received = False
        start_time = rospy.Time.now()
        while (not self._torso_joint_msg_received
               or not self._arm_joint_msg_received):
            rospy.sleep(0.005)
            # print warning every 2 seconds if one of the messages is still missing...
            if start_time + rospy.Duration(2.0) < rospy.Time.now():
                if not self._torso_joint_msg_received:
                    print "--> still waiting for torso joint states"
                if not self._arm_joint_msg_received:
                    print "--> still waiting for srm joint states"
                start_time = rospy.Time.now()
        latest_torso = self._torso_joint_msg
        latest_arm = self._arm_joint_msg

        # set up checkerboard and checkerboard detector
        # ---------------------------------------------
        checkerboard = Checkerboard(pattern_size, square_size)
        checkerboard_detector = CheckerboardDetector(checkerboard)

        # detect cb left
        # --------------
        cvImage = self.bridge.imgmsg_to_cv(latest_left["image_rect"], "mono8")
        image = cv2util.cvmat2np(cvImage)

        corners = checkerboard_detector.detect_image_points(image,
                                                            is_grayscale=True)
        if corners is not None:
            print "cb found: left"
            img_points_left = []
            for (x, y) in corners.reshape(-1, 2):
                img_points_left.append(ImagePoint(x, y))
        else:
            # cb not found
            return False

        # create camera msg left
        # ----------------------
        cam_msg_left = CameraMeasurement()
        cam_msg_left.camera_id = "left"
        cam_msg_left.header.stamp = latest_left["camera_info"].header.stamp
        cam_msg_left.cam_info = latest_left["camera_info"]
        cam_msg_left.image_points = img_points_left
        cam_msg_left.verbose = False
        #cam_msg_left.image        = latest_left["image_color"]
        #cam_msg_left.image_rect   = latest_left["image_rect"]
        #cam_msg_left.features    = # Not implemented here

        # detect cb right
        # --------------
        cvImage = self.bridge.imgmsg_to_cv(latest_right["image_rect"], "mono8")
        image = cv2util.cvmat2np(cvImage)

        corners = checkerboard_detector.detect_image_points(image,
                                                            is_grayscale=True)
        if corners is not None:
            print "cb found: right"
            img_points_right = []
            for (x, y) in corners.reshape(-1, 2):
                img_points_right.append(ImagePoint(x, y))
        else:
            # cb not found
            return False

        # create camera msg right
        # -----------------------
        cam_msg_right = CameraMeasurement()
        cam_msg_right.camera_id = "right"
        cam_msg_right.header.stamp = latest_right["camera_info"].header.stamp
        cam_msg_right.cam_info = latest_right["camera_info"]
        cam_msg_right.image_points = img_points_right
        cam_msg_right.verbose = False
        #cam_msg_right.image        = latest_right["image_color"]
        #cam_msg_right.image_rect   = latest_right["image_rect"]
        #cam_msg_right.features    = # Not implemented here

        # detect cb kinect_rgb
        # --------------------
        cvImage = self.bridge.imgmsg_to_cv(latest_kinect_rgb["image_color"],
                                           "mono8")
        image = cv2util.cvmat2np(cvImage)

        corners = checkerboard_detector.detect_image_points(image,
                                                            is_grayscale=True)
        if corners is not None:
            print "cb found: kinect_rgb"
            img_points_kinect_rgb = []
            for (x, y) in corners.reshape(-1, 2):
                img_points_kinect_rgb.append(ImagePoint(x, y))
        else:
            # cb not found
            return False

        # create camera msg kinect_rgb
        # ----------------------------
        cam_msg_kinect_rgb = CameraMeasurement()
        cam_msg_kinect_rgb.camera_id = "kinect_rgb"
        cam_msg_kinect_rgb.header.stamp = latest_kinect_rgb[
            "camera_info"].header.stamp
        cam_msg_kinect_rgb.cam_info = latest_kinect_rgb["camera_info"]
        cam_msg_kinect_rgb.image_points = img_points_kinect_rgb
        cam_msg_kinect_rgb.verbose = False
        #cam_msg_kinect_rgb.image        = latest_kinect_rgb["image_color"]
        #cam_msg_kinect_rgb.image_rect   = latest_kinect_rgb["image_rect"]
        #cam_msg_kinect_rgb.features    = # Not implemented here

        # create torso_chain msg
        # ----------------------
        torso_chain_msg = ChainMeasurement()
        torso_chain_msg.header = latest_torso.header
        torso_chain_msg.chain_id = "torso_chain"
        torso_chain_msg.chain_state = latest_torso

        # create arm_chain msg
        # --------------------
        arm_chain_msg = ChainMeasurement()
        arm_chain_msg.header = latest_arm.header
        arm_chain_msg.chain_id = "arm_chain"
        arm_chain_msg.chain_state = latest_arm

        # DEBUG publish pic
        # -----------------
        self._image_pub_left.publish(latest_left["image_color"])
        self._image_pub_right.publish(latest_right["image_color"])
        self._image_pub_kinect_rgb.publish(latest_kinect_rgb["image_color"])

        # create robot measurement msg and publish
        # -----------------
        robot_msg = RobotMeasurement()
        robot_msg.sample_id = sample_id
        robot_msg.target_id = target_id
        robot_msg.chain_id = chain_id
        robot_msg.M_cam = [cam_msg_left, cam_msg_right, cam_msg_kinect_rgb]
        robot_msg.M_chain = [torso_chain_msg, arm_chain_msg]
        self._robot_measurement_pub.publish(robot_msg)

        return True
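The waiting loops in these examples rely on callbacks that cache the newest synchronized message set and raise the *_received flags; a sketch of such a subscriber setup for the left camera, assuming message_filters time synchronization (the topic names are assumptions):

import message_filters
from sensor_msgs.msg import Image, CameraInfo

def make_left_callback(collector):
    # returns a callback that caches the newest synchronized left-camera set
    def callback(image_rect, image_color, camera_info):
        collector._left = {"image_rect": image_rect,
                           "image_color": image_color,
                           "camera_info": camera_info}
        collector._left_received = True
    return callback

def setup_left_subscriber(collector):
    subs = [message_filters.Subscriber("/stereo/left/image_rect", Image),
            message_filters.Subscriber("/stereo/left/image_color", Image),
            message_filters.Subscriber("/stereo/left/camera_info", CameraInfo)]
    sync = message_filters.TimeSynchronizer(subs, 5)
    sync.registerCallback(make_left_callback(collector))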