def test_calculate_object_pose(self):
        # create Checkerboard instance and read image
        c = Checkerboard((9, 6), 0.03)
        image = cv2.imread(CHECKERBOARD_IMAGE, 0)
        
        # create CheckerboardDetector instance and detect object pose 
        # (only dummy matrices, does not correspond to real camera used)
        cd = CheckerboardDetector(c)
        camera_matrix   = np.matrix([1200, 0, 600, 0, 1200, 600, 0, 0, 1], dtype=np.float32).reshape((3,3))
        dist_coeffs     = np.matrix([0, 0, 0, 0, 0], dtype=np.float32).reshape((1,5))
        (rvec, tvec) = cd.calculate_object_pose(image, camera_matrix, dist_coeffs, True)
        
        rvec_expected = np.matrix([[0.99905897,  0.035207,   -0.02533079],
                                   [-0.0208742,   0.90224111,  0.43072642],
                                   [ 0.03801906, -0.42979234,  0.90212699]])
        tvec_expected = np.matrix([[-0.03181351], [-0.13593217], [ 0.7021291]])

        self.assertTrue(np.allclose(rvec_expected, rvec))
        self.assertTrue(np.allclose(tvec_expected, tvec))
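
# Hedged note (not part of the original test): calculate_object_pose appears to
# return a full 3x3 rotation matrix here (rvec_expected above is 3x3), not an
# OpenCV Rodrigues vector. If a compact axis-angle vector is needed,
# cv2.Rodrigues converts between the two representations. Minimal sketch,
# assuming only cv2 and numpy:
import cv2
import numpy as np

rmat = np.array([[0.99905897, 0.035207, -0.02533079],
                 [-0.0208742, 0.90224111, 0.43072642],
                 [0.03801906, -0.42979234, 0.90212699]])
rvec, _ = cv2.Rodrigues(rmat)       # 3x1 Rodrigues (axis-angle) vector
rmat_back, _ = cv2.Rodrigues(rvec)  # round trip back to a 3x3 rotation matrix
assert np.allclose(rmat, rmat_back)
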
class Detection():
    def __init__(self):
        '''
        Configures the calibration node
        Reads configuration from parameter server or uses default values
        '''

        # get parameter from parameter server or set defaults
        self.pattern_size = rospy.get_param('~pattern_size', "9x6")
        self.square_size = rospy.get_param('~square_size', 0.03)

        self.alpha = rospy.get_param('~alpha', 0.0)
        self.verbose = rospy.get_param('~verbose', True)

        self.save_result = rospy.get_param('~save_result', False)

        # split the pattern_size string into a tuple, e.g. '9x6' -> (9, 6)
        width, height = self.pattern_size.split("x")
        self.pattern_size = (int(width), int(height))

        self.camera_info = CameraInfo()
        self.camera_info_received = False
        self.latest_image = Image()
        self.bridge = CvBridge()

        # set up Checkerboard and CheckerboardDetector
        self.board = Checkerboard(self.pattern_size, self.square_size)
        self.detector = CheckerboardDetector(self.board)

        # script server interface (used to move the torso during calibration)
        self.sss = simple_script_server()

        topic_name = '/stereo/left/'
        # subscribe to the camera_info topic for camera matrix and distortion coefficients
        rospy.Subscriber(topic_name + 'camera_info', CameraInfo,
                         self.__camera_info_callback__)
        # subscribe to the color image topic for image data
        rospy.Subscriber(topic_name + 'image_color', Image,
                         self.__image_raw_callback__)

        # wait until camera info has been received
        start_time = rospy.Time.now()
        while not (self.camera_info_received or rospy.is_shutdown()):
            rospy.sleep(0.05)
            if start_time + rospy.Duration(2.0) < rospy.Time.now():
                # print a warning every 2 seconds while the message is still missing
                print "--> still waiting for %scamera_info" % topic_name
                start_time = rospy.Time.now()

        # convert camera matrix and distortion coefficients to numpy and store them
        camera_matrix = self.camera_info.K
        cm = np.asarray(camera_matrix)
        self.cm = np.reshape(cm, (3, 3))
        dist_coeffs = self.camera_info.D
        self.dc = np.asarray(dist_coeffs)
        self.frame = self.camera_info.header.frame_id

    def __camera_info_callback__(self, data):
        '''
        executed on new message on the camera_info topic
        stores the received data
        '''
        self.camera_info = data
        self.camera_info_received = True

    def __image_raw_callback__(self, data):
        '''
        executed on new message on the image topic
        stores the newest image
        '''
        self.latest_image = data

    def get_position(self):
        '''
        returns the pose (rotation matrix, translation vector) of the
        detected checkerboard and the frame id of the latest image
        '''
        r = rospy.Rate(10)  # Hz
        while (rospy.Time.now() - self.latest_image.header.stamp > rospy.Duration(1)
               and not rospy.is_shutdown()):
            print 'no image received'
            print rospy.Time.now(), self.latest_image.header.stamp
            r.sleep()
        cvImage = self.bridge.imgmsg_to_cv(self.latest_image, "mono8")
        image_raw = cv2util.cvmat2np(cvImage)
        #image_processed = cv2.undistort(image_raw, self.cm, self.dc)

        #points=self.detector.detect_image_points(image_processed,True)
        (rmat, tvec) = self.detector.calculate_object_pose(
            image_raw, self.cm, self.dc, True)
        #print tvec[1]

        return (rmat, tvec), self.latest_image.header.frame_id
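
# Hedged usage sketch (assumption, not shown in the original example): a
# minimal entry point that starts the node, builds the Detection object and
# queries one checkerboard pose. The node name is illustrative only.
if __name__ == '__main__':
    rospy.init_node('checkerboard_detection')
    detection = Detection()
    (rmat, tvec), frame_id = detection.get_position()
    print "checkerboard pose in frame '%s':" % frame_id
    print rmat
    print tvec
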
class Cb_marker_publish():
    def __init__(self):
        rospy.init_node(NODE)
        pattern = rospy.get_param('~calibration_pattern')
        self.cameras = rospy.get_param('~cameras')
        pattern_string = pattern["pattern_size"].split("x")
        pattern_size = (int(pattern_string[0]), int(pattern_string[1]))
        cb = Checkerboard(pattern_size, pattern["square_size"])

        self.detector = CheckerboardDetector(cb)

        self.bridge = CvBridge()

        self.pub = rospy.Publisher("cb_detections", MarkerArray)
        self.topics = [self.cameras["reference"]["topic"]]
        for camera in self.cameras["further"]:
            self.topics.append(camera["topic"])

        self.info_topics = [
            t.replace("image_raw", "camera_info") for t in self.topics
        ]
        self.info_topics = [
            t.replace("image_color", "camera_info") for t in self.info_topics
        ]
        print self.topics
        print self.info_topics

        self.images = dict.fromkeys(self.topics)
        self.camera_infos = dict.fromkeys(self.topics)
        self.colors = [ColorRGBA() for i in range(len(self.topics))]
        hue_values = np.linspace(0, 1, len(self.topics) + 1)
        for c, hue, topic in zip(self.colors, hue_values, self.topics):
            c.a = 1
            (c.r, c.g, c.b) = hsv_to_rgb(hue, 1, 1)
            print "Listening to: %s - Color RGB: (%d, %d, %d)" % (
                topic, c.r * 255, c.g * 255, c.b * 255)

        self.points = []
        for point in cb.get_pattern_points():
            p = Point()
            p.x = point[0]
            p.y = point[1]
            p.z = point[2]
            self.points.append(p)

        for topic, info_topic in zip(self.topics, self.info_topics):
            rospy.Subscriber(topic, Image, self.callback_image, topic)
            rospy.Subscriber(info_topic, CameraInfo, self.callback_info, topic)
        rospy.sleep(3)

    def callback_image(self, data, topic):
        self.images[topic] = data

    def callback_info(self, data, topic):
        self.camera_infos[topic] = data

    def get_pose(self, topic):
        image = self.images[topic]
        cvImage = self.bridge.imgmsg_to_cv(image, 'rgb8')
        img_raw = cv2util.cvmat2np(cvImage)

        camera_matrix = np.matrix(
            np.reshape(self.camera_infos[topic].K, (3, 3)))
        dist_coeffs = np.matrix(self.camera_infos[topic].D)
        return self.detector.calculate_object_pose(img_raw, camera_matrix,
                                                   dist_coeffs, False)

    def run(self):
        rate = rospy.Rate(10)
        br = tf.TransformBroadcaster()
        print "Everything up and running"

        while not rospy.is_shutdown():
            # build one marker per camera topic and publish them as a MarkerArray
            msgs = MarkerArray()
            for topic in self.topics:
                # pick color as defined before
                c = self.colors[self.topics.index(topic)]
                if self.images[topic] is None:
                    continue

                try:
                    # compute pose of detected pattern

                    p = self.get_pose(topic)
                except CheckerboardDetector.NoPatternFoundException:
                    rospy.logwarn("No pattern found on topic: '%s'" % topic)
                    continue
                else:
                    rmat = np.array(p[0])
                    rmat = np.append(rmat, [[0, 0, 0]], 0)
                    rmat = np.append(rmat, [[0], [0], [0], [1]], 1)
                    q = tf.transformations.quaternion_from_matrix(rmat)
                    msg = Marker()
                    msg.type = Marker.SPHERE_LIST
                    msg.header.frame_id = self.camera_infos[topic].header.frame_id
                    msg.header.stamp = rospy.Time()
                    msg.id = self.topics.index(topic)

                    msg.pose.position.x = p[1][0]
                    msg.pose.position.y = p[1][1]
                    msg.pose.position.z = p[1][2]
                    msg.pose.orientation.x = q[0]
                    msg.pose.orientation.y = q[1]
                    msg.pose.orientation.z = q[2]
                    msg.pose.orientation.w = q[3]
                    msg.scale.x = 0.01
                    msg.scale.y = 0.01
                    msg.scale.z = 0.01
                    msg.color = c
                    msg.points = self.points
                    msgs.markers.append(msg)
                    br.sendTransform(p[1], q, rospy.Time.now(), "cb",
                                     msg.header.frame_id)
            self.pub.publish(msgs)

            rate.sleep()
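
# Hedged usage sketch (assumption, not shown in the original example): the
# class already calls rospy.init_node in __init__, so a typical entry point
# just constructs it and spins via run().
if __name__ == '__main__':
    Cb_marker_publish().run()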