    def __init__(self, Tfm):
        Thread.__init__(self)
        self.trans, self.rot = conv.hmat_to_trans_rot(np.linalg.inv(Tfm))
        self.transform_broadcaster = tf.TransformBroadcaster()
        self.parent_frame = "/camera_depth_optical_frame"
        self.child_frame = "/phasespace_frame"
        self.time_period = 0.001
    def run (self):
        """
        Publishes the transforms stored.
        """
        while True:
            if self.publish_pc and self.cameras is not None:
                data = self.cameras.get_RGBD()
                for i, rgbd in data.items():
                    if rgbd['depth'] is None or rgbd['rgb'] is None:
                        print 'Warning: missing depth or RGB frame; skipping camera %d.'%(i+1)
                        continue
                    camera_frame = 'camera%d_depth_optical_frame'%(i+1)
                    xyz = clouds.depth_to_xyz(rgbd['depth'], asus_xtion_pro_f)
                    pc = ru.xyzrgb2pc(xyz, rgbd['rgb'], camera_frame)
                    ar_cam = gmt.get_ar_marker_poses(None, None, pc=pc)
                    
                    self.pc_pubs[i].publish(pc)
                    marker_frame = 'ar_marker%d_camera%d'
                    for marker in ar_cam:
                        trans, rot = conversions.hmat_to_trans_rot(ar_cam[marker])
                        self.tf_broadcaster.sendTransform(trans, rot,
                                                          rospy.Time.now(),
                                                          marker_frame%(marker,i+1), 
                                                          camera_frame)
                    

            if self.ready:
                for parent, child in self.transforms:
                    trans, rot = self.transforms[parent, child]
                    self.tf_broadcaster.sendTransform(trans, rot,
                                                      rospy.Time.now(),
                                                      child, parent)
            time.sleep(1.0/self.rate)   # float division: 1/self.rate is 0 for integer rates in Python 2
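
    # A minimal usage sketch (assumption: this run() belongs to a Thread
    # subclass whose __init__ sets self.rate, self.ready, self.transforms,
    # self.publish_pc, self.cameras, self.pc_pubs and self.tf_broadcaster,
    # i.e. the names used in the body above):
    #
    #   broadcaster = TransformBroadcasterThread()   # hypothetical class name
    #   broadcaster.daemon = True
    #   broadcaster.start()
    #   rospy.spin()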
def publish_transform_markers(grabber, marker, T, from_frame, to_frame, rate=100):

    from visualization_msgs.msg import Marker
    from sensor_msgs.msg import PointCloud2
    
    print colorize("Transform : ", "yellow", True)
    print T
    
    marker_frame = "ar_frame"
    tf_pub = tf.TransformBroadcaster()
    pc_pub = rospy.Publisher("camera_points", PointCloud2)
    
    trans = T[0:3,3] 
    rot   = tf.transformations.quaternion_from_matrix(T)
    sleeper = rospy.Rate(rate)   # was hardcoded to 10, ignoring the rate argument

    while True:
        try:
            r, d = grabber.getRGBD()
            ar_tfm = get_ar_transform_id(d, r, marker)
            
            pc = ru.xyzrgb2pc(clouds.depth_to_xyz(d, asus_xtion_pro_f), r, from_frame)
            pc_pub.publish(pc)
            
            tf_pub.sendTransform(trans, rot, rospy.Time.now(), to_frame, from_frame)
            
            try:
                trans_m, rot_m = conversions.hmat_to_trans_rot(ar_tfm)
                tf_pub.sendTransform(trans_m, rot_m, rospy.Time.now(), marker_frame, from_frame)
            except Exception:
                # marker not detected this frame; keep publishing the fixed transform
                pass
            
            sleeper.sleep()
        except KeyboardInterrupt:
            break    
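
# Usage sketch for publish_transform_markers, assuming an already-streaming
# RGBD grabber like the cpr.CloudGrabber used in visualize_ar below, and a
# hypothetical calibration matrix T:
#
#   rospy.init_node('publish_transform_markers')
#   grabber = cpr.CloudGrabber()
#   grabber.startRGBD()
#   publish_transform_markers(grabber, marker=2, T=np.eye(4),
#                             from_frame='camera_depth_optical_frame',
#                             to_frame='target_frame', rate=30)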
    def add_transforms(self, transforms):
        """
        Takes a list of dicts with keys 'parent', 'child' and 'tfm'
        (a 4x4 homogeneous matrix) and stores them for publishing.
        """
        if transforms is None:
            return
        for transform in transforms:
            trans, rot = conversions.hmat_to_trans_rot(transform['tfm'])
            self.transforms[transform['parent'], transform['child']] = (trans, rot)
        self.ready = True
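
    # Example of the dict format add_transforms expects (a minimal sketch;
    # 'tfm' is a 4x4 homogeneous matrix, as hmat_to_trans_rot implies):
    #
    #   self.add_transforms([{'parent': '/camera1_link',
    #                         'child':  '/camera2_link',
    #                         'tfm':    np.eye(4)}])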
def publish_transform(T, from_frame, to_frame, rate=100):
    
    print colorize("Transform : ", "yellow", True)
    print T
    
    tf_pub = tf.TransformBroadcaster()
    
    trans, rot = conversions.hmat_to_trans_rot(T)
    sleeper = rospy.Rate(rate)   # was hardcoded to 10, ignoring the rate argument

    while True:
        try:            
            tf_pub.sendTransform(trans, rot, rospy.Time.now(), to_frame, from_frame)
            sleeper.sleep()
        except KeyboardInterrupt:
            break    
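
# Minimal usage sketch, assuming a ROS node is already initialized and T is a
# 4x4 homogeneous matrix (the frame names here are only illustrative):
#
#   publish_transform(np.eye(4), 'base_footprint', 'hydra_base', rate=30)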
def quick_calibrate(NUM_CAM, N_AVG):

    
    rospy.init_node('quick_calibrate')
    
    cameras = RosCameras(NUM_CAM)
    tfm_pub = tf.TransformBroadcaster()
    
    frames = {i:'/camera%i_link'%(i+1) for i in xrange(NUM_CAM)}
    
    calib_tfms = {(frames[0],frames[i]):None for i in xrange(1,NUM_CAM)}
    
    sleeper = rospy.Rate(30)
    
    try:
        while True:
            tfms_found = {i:{} for i in xrange(NUM_CAM)}
            for _ in xrange(N_AVG):
                for i in xrange(NUM_CAM):
                    mtfms = cameras.get_ar_markers(camera=i)
                    for m in mtfms:
                        if m not in tfms_found[i]:
                            tfms_found[i][m] = []
                        tfms_found[i][m].append(mtfms[m])
            
            for i in tfms_found:
                for m in tfms_found[i]:
                    tfms_found[i][m] = utils.avg_transform(tfms_found[i][m])        

            for i in xrange(1,NUM_CAM):
                rof_tfm = find_transform(tfms_found[0], tfms_found[i])
                if rof_tfm is not None:
                    calib_tfms[(frames[0], frames[i])] = tfm_link_rof.dot(rof_tfm).dot(nlg.inv(tfm_link_rof))

            for parent, child in calib_tfms:
                if calib_tfms[parent, child] is not None:
                    trans, rot = conversions.hmat_to_trans_rot(calib_tfms[parent, child])
                    tfm_pub.sendTransform(trans, rot,
                                          rospy.Time.now(),
                                          child, parent)

            sleeper.sleep()

    except KeyboardInterrupt:
        print "got ctrl-c"
    def publish_gripper_tfms(self):

#         self.langle_pub.publish(gmt.get_pot_angle('l'))
#         self.rangle_pub.publish(gmt.get_pot_angle('r'))

        transforms = []
        for gr in self.grippers.values():
            if self.gripper_lite:
                transforms += gr.get_all_transforms(diff_cam=True)
            else:
                parent_frame = self.cameras.parent_frame
                transforms += gr.get_all_transforms(parent_frame, diff_cam=True)

        for transform in transforms:
            (trans, rot) = conversions.hmat_to_trans_rot(transform['tfm'])
            self.tf_broadcaster.sendTransform(trans, rot,
                                              rospy.Time.now(), transform['child'],
                                              transform['parent'])
def visualize_ar ():
    """
    Visualize point clouds from openni grabber and AR marker transforms through ROS.
    """
    global getMarkers
    
    if rospy.get_name() == '/unnamed':
        rospy.init_node("visualize_ar")
    
    camera_frame = "camera_depth_optical_frame"
    
    tf_pub = tf.TransformBroadcaster()
    pc_pub = rospy.Publisher("camera_points", PointCloud2)
    sleeper = rospy.Rate(30)
    
    getMarkers = rospy.ServiceProxy("getMarkers", MarkerPositions)
    
    grabber = cpr.CloudGrabber()
    grabber.startRGBD()
    
    print "Streaming now."
    while True:
        try:
            r, d = grabber.getRGBD()
            ar_tfms = get_ar_transform_id(d, r)
            
            pc = ru.xyzrgb2pc(clouds.depth_to_xyz(d, asus_xtion_pro_f), r, camera_frame)
            pc_pub.publish(pc)
                        
            for i in ar_tfms:
                try:
                    trans, rot = conversions.hmat_to_trans_rot(ar_tfms[i])
                    tf_pub.sendTransform(trans, rot, rospy.Time.now(), "ar_marker_%d"%i, camera_frame)
                except Exception:
                    print "Warning: Problem with AR transform."
            
            sleeper.sleep()
        except KeyboardInterrupt:
            print "Keyboard interrupt. Exiting."
            break
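
# Usage sketch: visualize_ar() expects the "getMarkers" MarkerPositions service
# to be available and an OpenNI device attached; it creates its own node if
# one is not already named:
#
#   if __name__ == '__main__':
#       visualize_ar()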
def publish_phasespace_markers_ros(print_markers=False):

    import rospy
    import roslib; roslib.load_manifest("tf")
    import tf
    from visualization_msgs.msg import Marker, MarkerArray
    from geometry_msgs.msg import Point
    from std_msgs.msg import ColorRGBA

    ph.turn_phasespace_on()

    if rospy.get_name() == '/unnamed':
        rospy.init_node("phasespace")
    
    marker_pub = rospy.Publisher('phasespace_markers', MarkerArray)
    tf_pub = tf.TransformBroadcaster()
    sleeper = rospy.Rate(30)   # assumed rate; the loop below used `sleeper` but never defined it

    while True:
        try:
            marker_pos = ph.get_marker_positions()
            time_stamp = rospy.Time.now()

            mk = MarkerArray()  
            for i in marker_pos:
                m = Marker()
                m.pose.position.x = marker_pos[i][0]
                m.pose.position.y = marker_pos[i][1]
                m.pose.position.z = marker_pos[i][2]
                m.pose.orientation.w = 1
                m.id = i
                m.header.stamp = time_stamp
                m.header.frame_id = ph.PHASESPACE_FRAME
                m.scale.x = m.scale.y = m.scale.z = 0.01
                m.type = Marker.CUBE
                m.color.r = 1
                m.color.a = 1
                mk.markers.append(m)
            
            trans, rot = None, None
            try:
                trans, rot = conv.hmat_to_trans_rot(ph.marker_transform(0,1,2, marker_pos))
            except Exception:
                print "Could not find phasespace transform."

            if print_markers: print marker_pos
            marker_pub.publish(mk)
            if trans is not None:
                tf_pub.sendTransform(trans, rot, rospy.Time.now(), "ps_marker_transform", ph.PHASESPACE_FRAME)

            sleeper.sleep()
        except KeyboardInterrupt:
            break

    ph.turn_phasespace_off()


if __name__ == '__main__':
    rospy.init_node('calib_hydra_pr2')
    
    parser = argparse.ArgumentParser(description="Hydra Kinect Calibration")
    parser.add_argument('--arm', help="in {'right', 'left'} : the pr2 arm to track.", required=True)  
    parser.add_argument('--hydra', help="in {'right', 'left'} : the hydra handle to track.", required=True)
    parser.add_argument('--n_tfm', help="number of transforms to use for calibration.", type=int, default=5)
    parser.add_argument('--n_avg', help="number of estimates of the transform to use for averaging.", type=int, default=5)
    parser.add_argument('--publish_tf', help="whether to publish the transform between hydra_base and camera_link", default=True)
    vals = parser.parse_args()

    arm_tfms, hydra_tfms = get_transforms(vals.arm, vals.hydra, vals.n_tfm, vals.n_avg)
    
    if vals.publish_tf:
        T_ms = solve_sylvester4(arm_tfms, hydra_tfms)
        T_chs = [arm_tfms[i].dot(T_ms).dot(np.linalg.inv(hydra_tfms[i])) for i in xrange(len(arm_tfms))]
        trans_rots = [conversions.hmat_to_trans_rot(tfm) for tfm in T_chs]
        
        trans = np.asarray([trans for (trans, rot) in trans_rots])
        avg_trans = np.sum(trans,axis=0)/trans.shape[0]
        rots = [rot for (trans, rot) in trans_rots]
        avg_rot = avg_quaternions(np.array(rots))
        
        T_ch = conversions.trans_rot_to_hmat(avg_trans, avg_rot)
        
        print T_ch
        publish_tf(T_ch, 'base_footprint', 'hydra_base')                                
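

# avg_quaternions is called throughout these snippets but never shown. A
# minimal sketch of one standard implementation (an assumption, not the
# original code): average unit quaternions via the unit eigenvector of the
# accumulated outer-product matrix for its largest eigenvalue (Markley et al.).
def avg_quaternions_sketch(qs):
    import numpy as np
    M = np.zeros((4, 4))
    for q in qs:
        q = np.asarray(q, dtype=float)
        q = q / np.linalg.norm(q)     # normalize each sample
        M += np.outer(q, q)
    vals, vecs = np.linalg.eigh(M)    # M is symmetric, so eigh applies
    return vecs[:, np.argmax(vals)]   # eigenvector of the largest eigenvalue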
    #subprocess.call("sudo killall XnSensorServer", shell=True)
    device.open()
    colorStream = device.createStream("color", width=640, height=480, fps=30)
    colorStream.start()
    depthStream = device.createStream("depth", width=640, height = 480, fps=30)
    depthStream.start()
    
    print "Streaming now."
    while True:
        try:
            r = colorStream.readFrame().data
            d = depthStream.readFrame().data
            ar_tfm = get_ar_transform_id(d, r, 2)
            print ar_tfm
            #pc = ru.xyzrgb2pc(clouds.depth_to_xyz(d, asus_xtion_pro_f), r, camera_frame)
            #pc_pub.publish(pc)
                        
            
            try:
                trans, rot = conversions.hmat_to_trans_rot(ar_tfm)
                # marker id 2 was requested above; `i` was undefined in the original
                tf_pub.sendTransform(trans, rot, rospy.Time.now(), "ar_marker_2", camera_frame)
            except Exception:
                print "Warning: Problem with AR transform."
            
            sleeper.sleep()
        except KeyboardInterrupt:
            print "Keyboard interrupt. Exiting."
            break
        
def get_transforms(arm, hydra, n_tfm , n_avg):
    """
    Returns a tuple of two list of N_TFM transforms, each
    the average of N_AVG transforms
    corresponding to the left/ right arm of the pr2 and 
    the hydra paddle of the HYDRA ('right' or 'left') side.
    
    Return gripper_tfms, hydra_tfms, kinect_tfms, head_tfms
    """

    pr2_frame = 'base_footprint'
    assert arm in ('right', 'left')   # "arm == 'right' or 'left'" was always true
    arm_frame = '%s_gripper_tool_frame' % {'right':'r', 'left':'l'}[arm]
    head_frame = 'head_plate_frame'
    hydra_frame = 'hydra_base'
    assert hydra in ('right', 'left')
    paddle_frame = 'hydra_%s' % hydra

    tf_sub = tf.TransformListener()

    I_0 = np.eye(4)
    I_0[3, 3] = 0
    
    gripper_tfms = []
    hydra_tfms = []
    kinect_tfms = []
    head_tfms = []
    ar_markers = ARMarkersRos('/camera1_')
    time.sleep(3)
    
    
    marker_id = 11
    i = 0
    while i < n_tfm:
        raw_input(colorize("Transform %d of %d : Press return when ready to capture transforms"%(i+1, n_tfm), "red", True))
       
        ## transforms which need to be averaged.
        gtfms = []
        htfms = []
        ktfms = []
        ptfms = []

        sleeper = rospy.Rate(30)  
        collect_enough_data = True      
        for j in xrange(n_avg):
            print colorize('\tGetting transform %d of %d for averaging ...'%(j+1, n_avg), "blue", True)
            
            kinect_tfm = ar_markers.get_marker_transforms(time_thresh=0.5)
            print kinect_tfm
            if kinect_tfm == {}:
                print "Lost sight of AR marker. Breaking..."
                collect_enough_data = False
                break
            
            ptrans, prot = tf_sub.lookupTransform(pr2_frame, head_frame, rospy.Time(0))
            ptfms.append(conversions.trans_rot_to_hmat(ptrans,prot))   
            gtrans, grot = tf_sub.lookupTransform(pr2_frame, arm_frame, rospy.Time(0))
            gtfms.append(conversions.trans_rot_to_hmat(gtrans, grot))
            htrans, hrot = tf_sub.lookupTransform(hydra_frame, paddle_frame, rospy.Time(0))
            htfms.append(conversions.trans_rot_to_hmat(htrans, hrot))
            ktfms.append(kinect_tfm[marker_id])   # already a 4x4 hmat; no round-trip needed
            sleeper.sleep()
            
        if collect_enough_data:
            gripper_tfms.append(avg_transform(gtfms))
            hydra_tfms.append(avg_transform(htfms))
            kinect_tfms.append(avg_transform(ktfms))
            head_tfms.append(avg_transform(ptfms))
            
            i += 1

    return (gripper_tfms, hydra_tfms, kinect_tfms, head_tfms)
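
# Usage sketch (assumes an initialized ROS node with the PR2, hydra and
# kinect publishing; arguments mirror the CLI flags used in the __main__
# blocks below):
#
#   gripper_tfms, hydra_tfms, kinect_tfms, head_tfms = \
#       get_transforms('left', 'left', n_tfm=5, n_avg=5)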

if __name__ == '__main__':
    rospy.init_node('calib_hydra_kinect_pr2')   # assumed node name for this fragment
    parser = argparse.ArgumentParser(description="Hydra Kinect Calibration")
    parser.add_argument('--arm', help="in {'right', 'left'} : the pr2 arm to track.", required=True)  
    parser.add_argument('--hydra', help="in {'right', 'left'} : the hydra handle to track.", required=True)
    parser.add_argument('--n_tfm', help="number of transforms to use for calibration.", type=int, default=5)
    parser.add_argument('--n_avg', help="number of estimates of the transform to use for averaging.", type=int, default=5)
    parser.add_argument('--publish_tf', help="whether to publish the transform between hydra_base and camera_link", default=True)
    vals = parser.parse_args()

    arm_tfms, hydra_tfms, kinect_tfms, head_tfms = get_transforms(vals.arm, vals.hydra, vals.n_tfm, vals.n_avg)
    
    if vals.publish_tf:
        # Solving for transform between base_footprint and hydra_base

        T_ms = ss.solve4(arm_tfms, hydra_tfms)
        T_chs = [arm_tfms[i].dot(T_ms).dot(np.linalg.inv(hydra_tfms[i])) for i in xrange(len(arm_tfms))]
        trans_rots = [conversions.hmat_to_trans_rot(tfm) for tfm in T_chs]
        
        trans = np.asarray([trans for (trans, rot) in trans_rots])
        avg_trans = np.sum(trans, axis=0) / trans.shape[0]
        rots = [rot for (trans, rot) in trans_rots]
        avg_rot = avg_quaternions(np.array(rots))
        
        T_ch = conversions.trans_rot_to_hmat(avg_trans, avg_rot)


        # Average transform from gripper to hydra (T_gh)
        T_grip_hydra = [np.linalg.inv(arm_tfms[i]).dot(T_ch).dot(hydra_tfms[i]) for i in xrange(vals.n_tfm)]
        trans_rots = [conversions.hmat_to_trans_rot(tfm) for tfm in T_grip_hydra]
        trans = np.asarray([trans for (trans, rot) in trans_rots])
        avg_trans = np.sum(trans, axis=0) / trans.shape[0]
        rots = [rot for (trans, rot) in trans_rots]
        avg_rot = avg_quaternions(np.array(rots))
        # T_gh (named in the comment above) completes the truncated averaging:
        T_gh = conversions.trans_rot_to_hmat(avg_trans, avg_rot)
    parser.add_argument('--publish_tf', help="whether to publish the transform between hydra_base and camera_link", default=True)
    vals = parser.parse_args()


    # model_tfms is used below, so both return values are needed:
    tool_tfms, model_tfms = get_tool_model(vals.n_tfm, vals.n_avg)
    
    tool_to_model_tfms = []
    for i in xrange(vals.n_tfm):
        tool = tool_tfms[i]
        model = model_tfms[i]
        tool_inv = nlg.inv(tool)
        tool_to_model_tfm = tool_inv.dot(model)
        tool_to_model_tfms.append(tool_to_model_tfm)
    
    trans_rots = [conversions.hmat_to_trans_rot(tfm) for tfm in tool_to_model_tfms]
    trans = np.asarray([trans for (trans, rot) in trans_rots])
    avg_trans = np.sum(trans,axis=0)/trans.shape[0]
    rots = [rot for (trans, rot) in trans_rots]
    avg_rot = avg_quaternions(np.array(rots))

    tool_to_model_tfm_avg = conversions.trans_rot_to_hmat(avg_trans, avg_rot)
    print tool_to_model_tfm_avg


    if vals.publish_tf:
        publish_tf(tool_to_model_tfm_avg, 'l_gripper_tool_frame', 'gripper_model_frame')



if __name__ == '__main__':
    rospy.init_node('calib_hydra_pr2')
    
    parser = argparse.ArgumentParser(description="Hydra Kinect Calibration")
    parser.add_argument('--arm', help="in {'right', 'left'} : the pr2 arm to track.", required=True)  
    parser.add_argument('--hydra', help="in {'right', 'left'} : the hydra handle to track.", required=True)
    parser.add_argument('--n_tfm', help="number of transforms to use for calibration.", type=int, default=5)
    parser.add_argument('--n_avg', help="number of estimates of the transform to use for averaging.", type=int, default=5)
    parser.add_argument('--publish_tf', help="whether to publish the transform between hydra_base and camera_link", default=True)
    vals = parser.parse_args()

    arm_tfms, hydra_tfms = get_transforms(vals.arm, vals.hydra, vals.n_tfm, vals.n_avg)
    
    if vals.publish_tf:
        T_ms = ss.solve4(arm_tfms, hydra_tfms)
        T_chs = [arm_tfms[i].dot(T_ms).dot(np.linalg.inv(hydra_tfms[i])) for i in xrange(len(arm_tfms))]
        trans_rots = [conversions.hmat_to_trans_rot(tfm) for tfm in T_chs]
        
        trans = np.asarray([trans for (trans, rot) in trans_rots])
        avg_trans = np.sum(trans, axis=0) / trans.shape[0]
        rots = [rot for (trans, rot) in trans_rots]
        avg_rot = avg_quaternions(np.array(rots))
        
        T_ch = conversions.trans_rot_to_hmat(avg_trans, avg_rot)
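        # T_arm_sensor[i] = T_arm[i]^-1 . T_ch . T_hydra[i] is the (ideally
        # constant) gripper-to-hydra-sensor offset; the samples are averaged
        # below, translation by mean and rotation by quaternion averaging.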
        T_arm_sensor = [np.linalg.inv(arm_tfms[i]).dot(T_ch).dot(hydra_tfms[i]) for i in xrange(len(arm_tfms))]
        trans_rots = [conversions.hmat_to_trans_rot(tfm) for tfm in T_arm_sensor]
        trans = np.asarray([trans for (trans, rot) in trans_rots])
        avg_trans = np.sum(trans, axis=0) / trans.shape[0]
        rots = [rot for (trans, rot) in trans_rots]
        avg_rot = avg_quaternions(np.array(rots))
        T_as = conversions.trans_rot_to_hmat(avg_trans, avg_rot)
# assumed prior context: i = 0, and k1_ts/k1_qs/k2_ts/k2_qs initialized as
# empty (0,3)/(0,4) arrays for the np.r_ stacking below
while i < args.n_tfm:
    rgb1 = cs1.readFrame()
    depth1 = ds1.readFrame()
    tfm1 = get_ar_transform_id(depth1.data, rgb1.data, 2)
    print tfm1
    if tfm1 is None:   # "== None" misbehaves on numpy arrays (elementwise comparison)
        print "got none"
        continue

    k1_t, k1_q = conversions.hmat_to_trans_rot(tfm1)
    k1_ts = np.r_[k1_ts, np.array(k1_t, ndmin=2)]
    k1_qs = np.r_[k1_qs, np.array(k1_q, ndmin=2)]

    rgb2 = cs2.readFrame()
    depth2 = ds2.readFrame()
    tfm2 = get_ar_transform_id(depth2.data, rgb2.data, 2)
    print tfm2
    if tfm2 is None:
        print "got none"
        continue
    k2_t, k2_q = conversions.hmat_to_trans_rot(tfm2)
    k2_ts = np.r_[k2_ts, np.array(k2_t, ndmin=2)]
    k2_qs = np.r_[k2_qs, np.array(k2_q, ndmin=2)]

    i += 1   # the loop condition never advanced in the original snippet
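
# Hedged sketch of a plausible next step (assumption: this two-kinect loop
# feeds an averaging stage like the ones above):
#
#   k1_tfm = conversions.trans_rot_to_hmat(k1_ts.mean(axis=0),
#                                          avg_quaternions(k1_qs))
#   k2_tfm = conversions.trans_rot_to_hmat(k2_ts.mean(axis=0),
#                                          avg_quaternions(k2_qs))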