def get_transforms(arm, hydra, n_tfm, n_avg):
    """
    Returns a tuple of two lists of N_TFM transforms, each entry the
    average of N_AVG transforms, corresponding to the left/right arm
    of the PR2 and the hydra paddle of the HYDRA ('right' or 'left') side.
    """

    pr2_frame = 'base_footprint'
    assert arm in ('right', 'left')
    arm_frame  = '%s_gripper_tool_frame' % {'right':'r', 'left':'l'}[arm]

    hydra_frame  = 'hydra_base'
    assert hydra=='right' or hydra=='left'
    paddle_frame = 'hydra_%s'%hydra

    tf_sub = tf.TransformListener()

    I_0 = np.eye(4)
    I_0[3,3] = 0
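    # I_0 is np.eye(4) with the homogeneous 1 zeroed out: T.dot(I_0).dot(T.T)
    # equals R.dot(R.T) (~identity for a rigid T), a rotation sanity check that
    # some of the examples below print.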
    
    arm_tfms   = []
    hydra_tfms = []

    for i in xrange(n_tfm):
        raw_input(colorize("Transform %d of %d : Press return when ready to capture transforms"%(i+1, n_tfm), "red", True))
        
        ## transforms which need to be averaged.
        p_ts = np.empty((0, 3))
        p_qs = np.empty((0,4))
        h_ts = np.empty((0, 3))
        h_qs = np.empty((0,4))

        sleeper = rospy.Rate(30)        
        for j in xrange(n_avg):            
            print colorize('\tGetting averaging transform : %d of %d ...'%(j+1, n_avg), "blue", True)

            ptrans, prot = tf_sub.lookupTransform(pr2_frame, arm_frame, rospy.Time(0))
            htrans, hrot = tf_sub.lookupTransform(hydra_frame, paddle_frame, rospy.Time(0))

            p_ts = np.r_[p_ts, np.array(ptrans, ndmin=2)]
            h_ts = np.r_[h_ts, np.array(htrans, ndmin=2)]
            p_qs = np.r_[p_qs, np.array(prot, ndmin=2)]
            h_qs = np.r_[h_qs, np.array(hrot, ndmin=2)]
            sleeper.sleep()
          
        ptrans_avg = np.sum(p_ts, axis=0) / n_avg
        prot_avg   = avg_quaternions(p_qs)
        htrans_avg = np.sum(h_ts, axis=0) / n_avg
        hrot_avg   = avg_quaternions(h_qs)
          
        a_tfm = conversions.trans_rot_to_hmat(ptrans_avg,prot_avg)
        h_tfm = conversions.trans_rot_to_hmat(htrans_avg,hrot_avg)
               
        arm_tfms.append(a_tfm)
        hydra_tfms.append(h_tfm)

    return (arm_tfms, hydra_tfms)
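# avg_quaternions and conversions.trans_rot_to_hmat are imported from the
# surrounding codebase and not shown in these examples. A minimal sketch of
# their likely contracts (an assumption for illustration, not the project's
# actual implementations):

import numpy as np
import tf.transformations as tft

def avg_quaternions_sketch(qs):
    """Normalized arithmetic mean of an (N, 4) array of (x, y, z, w) quaternions.
    Adequate when the rotations are tightly clustered, as in these averaging loops."""
    qs = np.array(qs, dtype=float)
    qs[qs.dot(qs[0]) < 0] *= -1          # flip signs into one hemisphere first
    q = qs.sum(axis=0)
    return q / np.linalg.norm(q)

def trans_rot_to_hmat_sketch(trans, rot):
    """4x4 homogeneous matrix from a translation and an (x, y, z, w) quaternion."""
    hmat = tft.quaternion_matrix(rot)    # rotation block
    hmat[:3, 3] = trans                  # translation column
    return hmat
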
def get_tool_model(n_tfm, n_avg):

    listener = tf.TransformListener()

    gripper_frame = 'l_gripper_tool_frame'
    base_frame = 'base_link'
    model_frame = ''
    depth_frame = 'camera_depth_optical_frame'
    head_frame = 'head_plate_frame'
    model_tfms = []
    tool_tfms = []
    

    for i in xrange(n_tfm):
        raw_input(colorize("Transform %d of %d : Press return when ready to capture transforms"%(i + 1, n_tfm), "red", True))
        tool_trans_list = np.empty((0, 3))
        tool_quat_list = np.empty((0, 4))
        marker_trans_list = np.empty((0, 3))
        marker_quat_list = np.empty((0, 4))
        head_trans_list = np.empty((0, 3))
        head_quat_list = np.empty((0, 4))
        time.sleep(3)
        for j in xrange(n_avg):

            tool_trans, tool_quat, model_trans, model_quat, head_trans, head_quat = None, None, None, None, None, None

            while tool_trans is None:
                try:
                    #model_trans, model_quat = listener.lookupTransform(base_frame, model_frame, rospy.Time(0))
                    tool_trans, tool_quat = listener.lookupTransform(base_frame, gripper_frame, rospy.Time(0))
                    head_trans, head_quat = listener.lookupTransform(base_frame, head_frame, rospy.Time(0))
                except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
                    continue

            tool_trans_list = np.r_[tool_trans_list, np.array(tool_trans, ndmin=2)]
            tool_quat_list = np.r_[tool_quat_list, np.array(tool_quat, ndmin=2)]
            #model_trans_list = np.r_[model_trans_list, np.array(model_trans, ndmin=2)]
            #model_quat_list = np.r_[model_quat_list, np.array(model_quat, ndmin=2)]
            head_trans_list = np.r_[head_trans_list, np.array(head_trans, ndmin=2)]
            head_quat_list = np.r_[head_quat_list, np.array(head_quat, ndmin=2)]
        
        tool_trans_avg = np.sum(tool_trans_list, axis=0) / n_avg
        tool_quat_avg = avg_quaternions(tool_quat_list)
        #model_trans_avg = np.sum(model_trans_list, axis=0) / n_avg
        #model_quat_avg = avg_quaternions(model_quat_list)
        head_trans_avg = np.sum(head_trans_list, axis=0) / n_avg
        head_quat_avg = avg_quaternions(head_quat_list)
    
        tool_tfm = conversions.trans_rot_to_hmat(tool_trans_avg, tool_quat_avg)

        head_tfm = conversions.trans_rot_to_hmat(head_trans_avg, head_quat_avg)
        #depth_to_model_tfm = conversions.trans_rot_to_hmat(model_trans_avg, model_quat_avg)
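        # T_h_k: presumably the fixed head_plate_frame -> kinect depth-frame
        # transform, defined at module level elsewhere in the original file.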
        depth_tfm = head_tfm.dot(T_h_k)
        #model_tfm = depth_tfm.dot(depth_to_model_tfm)
        
        tool_tfms.append(tool_tfm)
        #model_tfms.append(model_tfm)
    #return tool_tfms, model_tfms
    return tool_tfms
def make_joint_traj(xyzs, quats, manip, ref_frame, targ_frame, filter_options = 0):
    "do ik and then fill in the points where ik failed"

    n = len(xyzs)
    assert len(quats) == n
    
    robot = manip.GetRobot()
    joint_inds = manip.GetArmIndices()
    robot.SetActiveDOFs(joint_inds)
    orig_joint = robot.GetActiveDOFValues()
    
    joints = []
    inds = []

    for i in xrange(0,n):
        mat4 = conv.trans_rot_to_hmat(xyzs[i], quats[i])
        joint = PR2.cart_to_joint(manip, mat4, ref_frame, targ_frame, filter_options)
        if joint is not None: 
            joints.append(joint)
            inds.append(i)
            robot.SetActiveDOFValues(joint)


    robot.SetActiveDOFValues(orig_joint)
    
    
    LOG.info("found ik soln for %i of %i points",len(inds), n)
    if len(inds) > 2:
        joints2 = mu.interp2d(np.arange(n), inds, joints)
        return joints2, inds
    else:
        return np.zeros((n, len(joint_inds))), []
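# mu.interp2d above fills the skipped indices by interpolating each joint
# column independently; a plausible sketch of that helper (an assumption, not
# necessarily the actual math_utils implementation):

def interp2d_sketch(x, xp, yp):
    """Columnwise linear interpolation: one np.interp per DOF column."""
    yp = np.asarray(yp)
    return np.array([np.interp(x, xp, yp[:, col]) for col in range(yp.shape[1])]).T
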
    def save_calibration(self, cfile):
        """
        Save the transforms and the gripper data from this current calibration.
        This assumes that the entire calibration data is stored in this class.
        """

        calib_data = {}

        gripper_data = {}
        if self.gripper_lite:
            for (lr, gr) in self.grippers.items():
                gripper_data[lr] = {'ar': gr.get_ar_marker(),
                                    'hydra': gr.get_hydra_marker(),
                                    'tfms': gr.get_saveable_transforms()}
        else:
            for (lr, gr) in self.grippers.items():
                gripper_data[lr] = gr.transform_graph

        calib_data['grippers'] = gripper_data

        calib_transforms = []
        for (parent, child) in self.transforms:
            tfm = {}
            tfm['parent'] = parent
            tfm['child'] = child
            (trans, rot) = self.transforms[parent, child]
            tfm['tfm'] = conversions.trans_rot_to_hmat(trans, rot)
            calib_transforms.append(tfm)
        calib_data['transforms'] = calib_transforms

        file_name = osp.join(calib_files_dir, cfile)
        with open(file_name, 'w') as fh:
            cPickle.dump(calib_data, fh)
def get_hydra_transforms(parent_frame, hydras = None):
    """
    Returns hydra transform in hydra base frame.
    hydras is a list which contains 'left', 'right' or both 
    """
    global tf_listener, hydra_initialized
    if not hydra_initialized:
        if tf_listener is None:
            tf_listener = tf.TransformListener()
        hydra_initialized = True
        print "HYDRA INITIALIZED"
        
    if hydras is None:
        hydras = ['left','right']
    
    hydra_transforms = {}
    for hydra in hydras:
        hydra_frame = 'hydra_%s'%hydra
        try:
            trans, rot = tf_listener.lookupTransform(parent_frame, hydra_frame, rospy.Time(0))
            hydra_transforms[hydra] = conversions.trans_rot_to_hmat(trans, rot)
        except (tf.LookupException, tf.ExtrapolationException, tf.ConnectivityException):
            continue
    
    return hydra_transforms
def get_transforms(arm, hydra, n_tfm, n_avg):
    """
    Returns a tuple of two lists of N_TFM transforms, each entry the
    average of N_AVG transforms, corresponding to the left/right arm
    of the PR2 and the hydra paddle of the HYDRA ('right' or 'left') side.

    Returns (gripper_tfms, hydra_tfms).
    """

    pr2_frame = 'base_footprint'
    assert arm in ('right', 'left')
    arm_frame = '%s_gripper_tool_frame' % {'right':'r', 'left':'l'}[arm]

    hydra_frame = 'hydra_base'
    assert hydra == 'right' or hydra == 'left'
    paddle_frame = 'hydra_%s' % hydra

    tf_sub = tf.TransformListener()

    I_0 = np.eye(4)
    I_0[3, 3] = 0
    
    gripper_tfms = []
    hydra_tfms = []

    for i in xrange(n_tfm):
        raw_input(colorize("Transform %d of %d : Press return when ready to capture transforms" % (i, n_tfm - 1), "red", True))
        
        # # transforms which need to be averaged.
        gtfms = []
        htfms = []
        
        sleeper = rospy.Rate(30)        
        for j in xrange(n_avg):            
            print colorize('\tGetting averaging transform : %d of %d ...' % (j + 1, n_avg), "blue", True)

            gtrans, grot = tf_sub.lookupTransform(pr2_frame, arm_frame, rospy.Time(0))
            gtfms.append(conversions.trans_rot_to_hmat(gtrans, grot))
            htrans, hrot = tf_sub.lookupTransform(hydra_frame, paddle_frame, rospy.Time(0))
            htfms.append(conversions.trans_rot_to_hmat(htrans, hrot))
            sleeper.sleep()
            
        gripper_tfms.append(avg_transform(gtfms))
        hydra_tfms.append(avg_transform(htfms))

    return (gripper_tfms, hydra_tfms)
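# avg_transform (used above) likely combines the translation mean with the
# quaternion average; a minimal sketch under that assumption:

def avg_transform_sketch(tfms):
    """Average a list of 4x4 rigid transforms: mean translation, averaged quaternion."""
    trans_rots = [conversions.hmat_to_trans_rot(t) for t in tfms]
    trans = np.mean([t for t, _ in trans_rots], axis=0)
    rot = avg_quaternions(np.array([r for _, r in trans_rots]))
    return conversions.trans_rot_to_hmat(trans, rot)
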
Example 7
    def get_transform_frame(self, cam, frame):
        """
        Gets the transform of cam in frame's frame.
        Stores transforms once found; assumes the offset between frame and
        camera does not change.
        """
        if frame not in self.stored_tfms:
            trans, rot = self.tf_listener.lookupTransform(frame, self.parent_frame, rospy.Time(0))
            self.stored_tfms[frame] = conversions.trans_rot_to_hmat(trans, rot)

        return self.stored_tfms[frame].dot(self.get_camera_transform(0, cam))
def get_transform_frames(parent_frame, child_frame):
    """
    Gets the transform between two frames.
    """
    global tf_listener, tf_initialized
    if not tf_initialized:
        if tf_listener is None:
            tf_listener = tf.TransformListener()
        tf_initialized = True
        print "TF INITIALIZED"

    trans, quat = tf_listener.lookupTransform(parent_frame, child_frame, rospy.Time(0))
    return conversions.trans_rot_to_hmat(trans, quat)
    def get_all_transforms(self):
        '''
        Return [{'parent': parent, 'child': child, 'tfm': hmat}]
        '''

        result_tfms = []
        for (parent, child) in self.transforms:
            tfm = {'parent': parent, 'child': child}
            (trans, rot) = self.transforms[parent, child]
            tfm['tfm'] = conversions.trans_rot_to_hmat(trans, rot)
            result_tfms.append(tfm)

        return result_tfms
def get_ar_transforms(markers, parent_frame):
    """
    Takes in a list of @markers (AR marker ids) and a @parent_frame.
    Returns a dict of transforms with keys as found marker ids and values as transforms.
    """
    if markers is None:
        return {}
    ar_tfms = {}
    for marker in markers:
        try:
            trans, rot = tf_listener.lookupTransform(parent_frame, "ar_marker_%d" % marker, rospy.Time(0))
            ar_tfms[marker] = conversions.trans_rot_to_hmat(trans, rot)
        except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
            continue
    return ar_tfms
    def get_camera_transforms(self):
        '''
        Return a dictionary {(c1, c2): tf1, (c1', c2'): tf2}
        tf1 is a dict with 'parent', 'child' and 4x4 'tfm'
        '''

        result_tfms = {}
        for (parent, child) in self.transforms:
            if 'camera' in parent and 'camera' in child:
                c1 = int(parent[6]) - 1  # assume parent is something like 'camera1'
                c2 = int(child[6]) - 1  # assume child is something like 'camera1'
                tfm = {'parent': parent, 'child': child}
                (trans, rot) = self.transforms[parent, child]
                tfm['tfm'] = conversions.trans_rot_to_hmat(trans, rot)
                result_tfms[c1, c2] = tfm
        return result_tfms
Example 12
def get_robot_kinect_transform():
    global tf_listener

    tfm_base_kinect = None
    for i in xrange(5):
        try:
            now = rospy.Time.now()
            tf_listener.waitForTransform('base_footprint', 'camera_link', now, rospy.Duration(5.0))
            (trans, rot) = tf_listener.lookupTransform('base_footprint', 'camera_link', now)
            tfm_base_kinect = conversions.trans_rot_to_hmat(trans, rot)
            print tfm_base_kinect
            yellowprint("Got the transform for PR2")
            break
        except (tf.Exception, tf.LookupException, tf.ConnectivityException):
            yellowprint("Failed attempt.")

    return tfm_base_kinect
def get_hydra_transforms(parent_frame, hydras=None):
    """
    Returns hydra transform in hydra base frame.
    hydras is a list which contains 'left', 'right' or both 
    """
    global tf_l, hydra_initialized
    if not hydra_initialized:
        if rospy.get_name() == '/unnamed':
            rospy.init_node('hydra_tfm')
        tf_l = tf.TransformListener()
        hydra_initialized = True
        
    if hydras is None:
        hydras = ['left','right']
    
    hydra_transforms = {}
    for hydra in hydras:
        hydra_frame = 'hydra_%s_pivot'%hydra
        trans, rot = tf_l.lookupTransform(parent_frame, hydra_frame, rospy.Time(0))
        hydra_transforms[hydra] = conversions.trans_rot_to_hmat(trans, rot)
    
    return hydra_transforms
def get_transform(parent_frame, child_frame, n_attempts=None):
    """
    Wrapper for getting tf transform.
    """
    global tfl, tf_sleeper
    
    if tfl is None:
        tfl = tf.TransformListener()
        tf_sleeper = rospy.Rate(30)

    while n_attempts is None or n_attempts > 0:
        try:
            trans, rot = tfl.lookupTransform(parent_frame, child_frame, rospy.Time(0))
            tfm = conversions.trans_rot_to_hmat(trans, rot)
            return tfm
        except (tf.LookupException, tf.ExtrapolationException, tf.ConnectivityException):
            if n_attempts is not None:
                n_attempts -= 1
            tf_sleeper.sleep()

    redprint("Unable to find transform from %s to %s."%(parent_frame, child_frame))
    return None
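# Example use: block until the transform appears, or give up after ~3 s at 30 Hz.
# T_cam = get_transform('base_footprint', 'camera_link', n_attempts=90)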
Example 15
def save_hydra_only (demo_type, demo_name, save_file=None):
    """
    Save hydra only data.
    """
    # Temp file to show that data is already being extracted
    demo_dir        = osp.join(demo_files_dir, demo_type, demo_name)
    with open(osp.join(demo_dir, demo_names.extract_hydra_data_temp),'w') as fh: fh.write('Extracting...')
    
    bag_file        = osp.join(demo_dir, demo_names.bag_name)
    calib_file_path = osp.join(demo_dir, demo_names.calib_name)
    
    data = {}

    c1_frame = 'camera1_link'
    hydra_frame = 'hydra_base'
    
    tfm_c1_h = None

    with open(calib_file_path,'r') as fh: calib_data = cPickle.load(fh)
    bag = rosbag.Bag(bag_file)
    
    for tfm in calib_data['transforms']:
        if tfm['parent'] == c1_frame or tfm['parent'] == '/' + c1_frame:
            if tfm['child'] == hydra_frame or tfm['child'] == '/' + hydra_frame:
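                # tfm_link_rof: presumably the fixed camera_link -> rgb-optical-frame
                # rotation, defined elsewhere in the original module.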
                tfm_c1_h = nlg.inv(tfm_link_rof).dot(tfm['tfm'])

    if tfm_c1_h is None:
        redprint("Calibration does not have hydra transform.")
        return
    
    data['T_cam2hbase'] = tfm_c1_h

    # If demo.data file already exists, don't do extra work.
    found_hydra_data = False
    found_pot_data = False
    all_data_file        = osp.join(demo_dir, demo_names.data_name)
    if osp.isfile(all_data_file):
        yellowprint ("%s already exists for %s. Creating hydra file from this info."%(demo_names.data_name,demo_name))
        with open(all_data_file,'r') as fh: all_data = cPickle.load(fh)
        
        for lr in all_data:
            if lr in 'lr':
                data[lr] = {}
                if all_data[lr].get('hydra'):
                    data[lr]['hydra'] = all_data[lr]['hydra']
                    found_hydra_data = True
                if all_data[lr].get('pot_angles'):
                    data[lr]['pot_angles'] = all_data[lr]['pot_angles']
                    found_pot_data = True

        
        if found_hydra_data and found_pot_data:
            if save_file is None:
                save_file = demo_names.hydra_data_name
            save_filename = osp.join(demo_dir, save_file)
            with open(save_filename, 'w') as sfh: cPickle.dump(data, sfh)
            return
    
    if not calib_data.get('grippers'):
        redprint("Gripper not found.")
        return    
    grippers = {}
    for lr,gdata in calib_data['grippers'].items():
        gr = gripper_lite.GripperLite(lr, gdata['ar'], trans_marker_tooltip=gripper_trans_marker_tooltip[lr])
        gr.reset_gripper(lr, gdata['tfms'], gdata['ar'], gdata['hydra'])
        grippers[lr] = gr
        data[lr] ={'hydra':[],
                   'pot_angles':[],
                   'T_tt2hy': gr.get_rel_transform('tool_tip', gr.hydra_marker)}

    if not found_hydra_data:
        if verbose:
            yellowprint('Hydra')
        lr_long = {'l':'left','r':'right'}
        for (_, msg, _) in bag.read_messages(topics=['/tf']):
            hyd_tfm = {}
            found = ''
            for tfm in msg.transforms:
                if found in ['lr','rl']:
                    break
                for lr in grippers:
                    if lr in found:
                        continue
                    elif tfm.header.frame_id == '/' + hydra_frame and tfm.child_frame_id == '/hydra_'+lr_long[lr]:
                        t,r = tfm.transform.translation, tfm.transform.rotation
                        trans = (t.x,t.y,t.z)
                        rot = (r.x, r.y, r.z, r.w)
                        hyd_tfm = tfm_c1_h.dot(conversions.trans_rot_to_hmat(trans, rot))
                        stamp = tfm.header.stamp.to_sec()
                        tt_tfm = grippers[lr].get_tooltip_transform(lr_long[lr], hyd_tfm)
                        data[lr]['hydra'].append((tt_tfm, stamp))
                        found += lr
            if found and verbose:
                blueprint("Got hydra readings %s at time %f"%(found,stamp))

    if not found_pot_data:
        if verbose:
            yellowprint('Potentiometer readings')
        for lr in grippers:
            for (_, msg, ts) in bag.read_messages(topics=['/%s_pot_angle'%lr]):
                angle = msg.data
                stamp = ts.to_sec()
                data[lr]['pot_angles'].append((angle, stamp))
                if verbose: 
                    blueprint("Got a %s potentiometer angle of %f at time %f"%(lr,angle, stamp))

    if verbose:
        for lr in 'lr':
            yellowprint("Gripper %s:"%lr)            
            yellowprint("Found %i transforms from hydra"%len(data[lr]['hydra']))
            yellowprint("Found %i potentiometer readings"%len(data[lr]['pot_angles']))

    if save_file is None:
        save_file = demo_names.hydra_data_name
    save_filename = osp.join(demo_dir, save_file)

    with open(save_filename, 'w') as sfh: cPickle.dump(data, sfh)
    yellowprint("Saved %s."%demo_names.hydra_data_name)
    
    os.remove(osp.join(demo_dir, demo_names.extract_hydra_data_temp))
def get_transform_ros(marker, n_tfm, n_avg, freq=None):
    """
    Returns a tuple of two list of N_TFM transforms, each
    the average of N_AVG transforms
    corresponding to the AR marker with ID = MARKER and 
    the phasespace markers.
    """

    camera_frame = 'camera_depth_optical_frame'
    marker_frame = 'ar_marker_%d'%marker
    ps_frame = 'ps_marker_transform'

    tf_sub = tf.TransformListener()

    I_0 = np.eye(4)
    I_0[3,3] = 0
    
    ar_tfms    = []
    ph_tfms = []

    wait_time = 5
    print "Waiting for %f seconds before collecting data."%wait_time
    time.sleep(wait_time)

    sleeper = rospy.Rate(30)
    for i in xrange(n_tfm):
        if freq is None:
            raw_input(colorize("Transform %d of %d : Press return when ready to capture transforms"%(i + 1, n_tfm), "red", True))
        else:
            print colorize("Transform %d of %d."%(i + 1, n_tfm), "red", True)
        ## transforms which need to be averaged.
        ar_tfm_avgs = []
        ph_tfm_avgs = []
        
        for j in xrange(n_avg):            
            print colorize('\tGetting averaging transform : %d of %d ...'%(j + 1, n_avg), "blue", True)
            
            mtrans, mrot, ptrans, prot = None, None, None, None
            while ptrans is None or mtrans is None:
                try:
                    mtrans, mrot = tf_sub.lookupTransform(camera_frame, marker_frame, rospy.Time(0))
                    ptrans, prot = tf_sub.lookupTransform(ph.PHASESPACE_FRAME, ps_frame, rospy.Time(0))
                except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
                    pass
                sleeper.sleep()

            ar_tfm_avgs.append(conversions.trans_rot_to_hmat(mtrans,mrot))
            ph_tfm_avgs.append(conversions.trans_rot_to_hmat(ptrans,prot))
            
        ar_tfm = utils.avg_transform(ar_tfm_avgs)
        ph_tfm = utils.avg_transform(ph_tfm_avgs)

#         print "\nar:"
#         print ar_tfm
#         print ar_tfm.dot(I_0).dot(ar_tfm.T)
#         print "h:"
#         print ph_tfm
#         print ph_tfm.dot(I_0).dot(ph_tfm.T), "\n"
                
        ar_tfms.append(ar_tfm)
        ph_tfms.append(ph_tfm)
        if freq is not None:
            time.sleep(1.0/freq)

        
    print "Found %i transforms. Calibrating..."%n_tfm
    Tas = ss.solve4(ar_tfms, ph_tfms)
    print "Done."
    
    T_cps = [ar_tfms[i].dot(Tas).dot(np.linalg.inv(ph_tfms[i])) for i in xrange(len(ar_tfms))]
    return utils.avg_transform(T_cps)
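# ss.solve4 above (like solve_sylvester4 in the PR2 example further down)
# apparently solves the hand-eye equation A_i * X = X * B_i for the unknown
# rigid transform X. A bare-bones least-squares sketch of that idea, with no
# rigidity constraints (an illustration, not the project's actual solver):

def solve_axxb_sketch(As, Bs):
    """Least-squares X for A_i X = X B_i over 4x4 transform pairs (As, Bs)."""
    # Row-major vec: vec(A X) = kron(A, I) vec(X) and vec(X B) = kron(I, B.T) vec(X),
    # so each pair contributes (kron(A, I) - kron(I, B.T)) vec(X) = 0.
    M = np.vstack([np.kron(A, np.eye(4)) - np.kron(np.eye(4), B.T)
                   for A, B in zip(As, Bs)])
    _, _, Vt = np.linalg.svd(M)
    X = Vt[-1].reshape(4, 4)          # null-space direction closest to a solution
    return X / X[3, 3]                # rescale so the homogeneous entry is 1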
Example 17

    # Assumes the variant of get_tool_model that also returns model transforms.
    tool_tfms, model_tfms = get_tool_model(vals.n_tfm, vals.n_avg)
    
    tool_to_model_tfms = []
    for i in xrange(vals.n_tfm):
        tool = tool_tfms[i]
        model = model_tfms[i]
        tool_inv = nlg.inv(tool)
        tool_to_model_tfm = tool_inv.dot(model)
        tool_to_model_tfms.append(tool_to_model_tfm)
    
    trans_rots = [conversions.hmat_to_trans_rot(tfm) for tfm in tool_to_model_tfms]
    trans = np.asarray([trans for (trans, rot) in trans_rots])
    avg_trans = np.sum(trans,axis=0)/trans.shape[0]
    rots = [rot for (trans, rot) in trans_rots]
    avg_rot = avg_quaternions(np.array(rots))

    tool_to_model_tfm_avg = conversions.trans_rot_to_hmat(avg_trans, avg_rot)
    print tool_to_model_tfm_avg


    if vals.publish_tf:
        publish_tf(tool_to_model_tfm_avg, 'l_gripper_tool_frame', 'gripper_model_frame')





listener = tf.TransformListener()
time.sleep(3)
i = 0
tool_tfms, head_tfms, ar_tfms = [], [], []  # base_frame, gripper_frame, head_frame assumed defined earlier in this script
while i < args.n_tfm:
        
    rgb = colorStream.readFrame()
    #cv2.imshow("rgb", rgb.data)
    depth = depthStream.readFrame()
    #cv2.imshow("depth", cmap[np.fmin((depth.data*.064).astype('int'), 255)])
    tool_trans, tool_quat = listener.lookupTransform(base_frame, gripper_frame, rospy.Time(0))
    head_trans, head_quat = listener.lookupTransform(base_frame, head_frame, rospy.Time(0))
    ar_tfm = get_ar_transform_id(depth.data, rgb.data, 2)
    if ar_tfm is None:
        print "got none"
        continue
    tool_tfm = conversions.trans_rot_to_hmat(tool_trans, tool_quat)
    head_tfm = conversions.trans_rot_to_hmat(head_trans, head_quat)
    #print head_tfm
    #print tool_tfm
    tool_tfms.append(tool_tfm)
    head_tfms.append(head_tfm)
    ar_tfms.append(ar_tfm)
    #trans, rot = conversions.hmat_to_trans_rot(tfm)
    print i
    i = i+1


ar_in_base_tfms = []
for i in xrange(len(ar_tfms)):
    head_frame_ar = tfm_head_kinect.dot(ar_tfms[i])
    base_frame_ar = head_tfms[i].dot(head_frame_ar)
    ar_in_base_tfms.append(base_frame_ar)

# The loop below is a fragment of a two-kinect capture script whose header and
# camera-1 read were lost in extraction; the reconstruction assumes color/depth
# streams cs1/ds1 and cs2/ds2 and accumulators k1_ts/k1_qs/k2_ts/k2_qs
# initialized as empty (0, 3)/(0, 4) arrays.
i = 0
while i < args.n_tfm:
    rgb1 = cs1.readFrame()
    depth1 = ds1.readFrame()
    tfm1 = get_ar_transform_id(depth1.data, rgb1.data, 2)
    if tfm1 is None:
        print "got none"
        continue
    k1_t, k1_q = conversions.hmat_to_trans_rot(tfm1)
    k1_ts = np.r_[k1_ts, np.array(k1_t, ndmin=2)]
    k1_qs = np.r_[k1_qs, np.array(k1_q, ndmin=2)]

    rgb2 = cs2.readFrame()
    depth2 = ds2.readFrame()
    tfm2 = get_ar_transform_id(depth2.data, rgb2.data, 2)
    print tfm2
    if tfm2 is None:
        print "got none"
        continue
    k2_t, k2_q = conversions.hmat_to_trans_rot(tfm2)
    k2_ts = np.r_[k2_ts, np.array(k2_t, ndmin=2)]
    k2_qs = np.r_[k2_qs, np.array(k2_q, ndmin=2)]
    print i
    i = i + 1
    #cv2.waitKey(30)

k1_trans_avg = np.sum(k1_ts, axis=0) / args.n_tfm
k1_rot_avg = avg_quaternions(k1_qs)
k1_tfm = conversions.trans_rot_to_hmat(k1_trans_avg, k1_rot_avg)
print k1_tfm

#except KeyboardInterrupt:
#    print "got Control-C"


if __name__ == '__main__':
    rospy.init_node('calib_hydra_pr2')
    
    parser = argparse.ArgumentParser(description="Hydra Kinect Calibration")
    parser.add_argument('--arm', help="in {'right', 'left'} : the pr2 arm to track.", required=True)  
    parser.add_argument('--hydra', help="in {'right', 'left'} : the hydra handle to track.", required=True)
    parser.add_argument('--n_tfm', help="number of transforms to use for calibration.", type=int, default=5)
    parser.add_argument('--n_avg', help="number of estimates of the transform to use for averaging.", type=int, default=5)
    parser.add_argument('--publish_tf', help="whether to publish the transform between hydra_base and camera_link", default=True)
    vals = parser.parse_args()

    arm_tfms, hydra_tfms = get_transforms(vals.arm, vals.hydra, vals.n_tfm, vals.n_avg)
    
    if vals.publish_tf:
        T_ms = solve_sylvester4(arm_tfms, hydra_tfms)
        T_chs = [arm_tfms[i].dot(T_ms).dot(np.linalg.inv(hydra_tfms[i])) for i in xrange(len(arm_tfms))]
        trans_rots = [conversions.hmat_to_trans_rot(tfm) for tfm in T_chs]
        
        trans = np.asarray([trans for (trans, rot) in trans_rots])
        avg_trans = np.sum(trans,axis=0)/trans.shape[0]
        rots = [rot for (trans, rot) in trans_rots]
        avg_rot = avg_quaternions(np.array(rots))
        
        T_ch = conversions.trans_rot_to_hmat(avg_trans, avg_rot)
        
        print T_ch
        publish_tf(T_ch, 'base_footprint', 'hydra_base')                                
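# publish_tf is not defined in these snippets; a hedged sketch of what it
# presumably does (broadcast a 4x4 transform on /tf until shutdown):

def publish_tf_sketch(T, parent, child, rate_hz=100):
    trans, rot = conversions.hmat_to_trans_rot(T)
    broadcaster = tf.TransformBroadcaster()
    sleeper = rospy.Rate(rate_hz)
    while not rospy.is_shutdown():
        broadcaster.sendTransform(trans, rot, rospy.Time.now(), child, parent)
        sleeper.sleep()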
def get_transforms(arm, hydra, n_tfm, n_avg):
    """
    Returns a tuple of four lists of N_TFM transforms, each entry the
    average of N_AVG transforms, corresponding to the left/right arm
    of the PR2 and the hydra paddle of the HYDRA ('right' or 'left') side.

    Returns gripper_tfms, hydra_tfms, kinect_tfms, head_tfms
    """

    pr2_frame = 'base_footprint'
    assert arm in ('right', 'left')
    arm_frame = '%s_gripper_tool_frame' % {'right':'r', 'left':'l'}[arm]
    head_frame = 'head_plate_frame'
    hydra_frame = 'hydra_base'
    assert hydra in ('right', 'left')
    paddle_frame = 'hydra_%s' % hydra

    tf_sub = tf.TransformListener()

    I_0 = np.eye(4)
    I_0[3, 3] = 0
    
    gripper_tfms = []
    hydra_tfms = []
    kinect_tfms = []
    head_tfms = []
    ar_markers = ARMarkersRos('/camera1_')
    time.sleep(3)
    
    
    marker_id = 11
    i = 0
    while i < n_tfm:
        raw_input(colorize("Transform %d of %d : Press return when ready to capture transforms"%(i, n_tfm-1), "red", True))
       
        ## transforms which need to be averaged.
        gtfms = []
        htfms = []
        ktfms = []
        ptfms = []

        sleeper = rospy.Rate(30)  
        collect_enough_data = True      
        for j in xrange(n_avg):
            print colorize('\tGetting averaging transform : %d of %d ...'%(j + 1, n_avg), "blue", True)
            
            kinect_tfm = ar_markers.get_marker_transforms(time_thresh=0.5)
            print kinect_tfm
            if not kinect_tfm:
                print "Lost sight of AR marker. Breaking..."
                collect_enough_data = False
                break
            
            ptrans, prot = tf_sub.lookupTransform(pr2_frame, head_frame, rospy.Time(0))
            ptfms.append(conversions.trans_rot_to_hmat(ptrans,prot))   
            gtrans, grot = tf_sub.lookupTransform(pr2_frame, arm_frame, rospy.Time(0))
            gtfms.append(conversions.trans_rot_to_hmat(gtrans, grot))
            htrans, hrot = tf_sub.lookupTransform(hydra_frame, paddle_frame, rospy.Time(0))
            htfms.append(conversions.trans_rot_to_hmat(htrans, hrot))
            ktrans, krot = conversions.hmat_to_trans_rot(kinect_tfm[marker_id])
            ktfms.append(conversions.trans_rot_to_hmat(ktrans, krot))
            sleeper.sleep()
            
        if collect_enough_data:
            gripper_tfms.append(avg_transform(gtfms))
            hydra_tfms.append(avg_transform(htfms))
            kinect_tfms.append(avg_transform(ktfms))
            head_tfms.append(avg_transform(ptfms))
            
            i += 1

    return (gripper_tfms, hydra_tfms, kinect_tfms, head_tfms)
Example 22
def calibrate_cameras ():
    global tfm_pub, cameras, tf_listener
    
    tfm_base_kinect = get_robot_kinect_transform()
    if yes_or_no('Calibrate again?'):
        greenprint("Step 1. Calibrating multiple cameras.")
        cam_calib = CameraCalibrator(cameras)
        done = False
        while not done:
            cam_calib.calibrate(CAM_N_OBS, CAM_N_AVG)
            if cameras.num_cameras == 1:
                break
            if not cam_calib.calibrated:
                redprint("Camera calibration failed.")
                cam_calib.reset_calibration()
            else:
                tfm_reference_camera = cam_calib.get_transforms()[0]
                tfm_reference_camera['child'] = 'camera_link'
    
                tfm_base_reference = {}
                tfm_base_reference['child'] = tfm_reference_camera['parent']
                tfm_base_reference['parent'] = 'base_footprint'
                tfm_base_reference['tfm'] = nlg.inv(tfm_reference_camera['tfm'].dot(nlg.inv(tfm_base_kinect)))
                tfm_pub.add_transforms([tfm_base_reference])
                if yes_or_no("Are you happy with the calibration? Check RVIZ."):
                    done = True
                else:
                    yellowprint("Calibrating cameras again.")
                    cam_calib.reset_calibration()
    
        greenprint("Cameras calibrated.")
    else:
        tfm_pub.load_calibration('cam_pr2')

    
    if yes_or_no('Get PR2 Gripper?'):
        # Finding transform between PR2's gripper and the tool_tip
        marker_id = 1
        camera_id = 1
        n_avg = 100
        tfms_camera_marker = []
        for i in xrange(n_avg):
            tfms_camera_marker.append(cameras.get_ar_markers([marker_id], camera=camera_id)[marker_id])
            time.sleep(0.03)
        tfm_camera_marker = utils.avg_transform(tfms_camera_marker)
    
        calib_file = 'calib_cam_hydra_gripper1'
        file_name = osp.join(calib_files_dir, calib_file)
        with open(file_name, 'r') as fh: calib_data = cPickle.load(fh)
    
        lr, graph = calib_data['grippers'].items()[0]
        gr = gripper.Gripper(lr, graph)
        assert 'tool_tip' in gr.mmarkers
        gr.tt_calculated = True
        
        tfm_marker_tooltip = gr.get_rel_transform(marker_id, 'tool_tip', 0)
    
        i = 10
        tfm_base_gripper = None
        while i > 0:
            try:
                now = rospy.Time.now()
                tf_listener.waitForTransform('base_footprint', 'l_gripper_tool_frame', now, rospy.Duration(5.0))
                (trans, rot) = tf_listener.lookupTransform('base_footprint', 'l_gripper_tool_frame', rospy.Time(0))
                tfm_base_gripper = conversions.trans_rot_to_hmat(trans, rot)
                break
            except (tf.Exception, tf.LookupException, tf.ConnectivityException):
                yellowprint("Failed attempt.")            
                i -= 1
        if tfm_base_gripper is not None:
            tfm_gripper_tooltip = {'parent':'l_gripper_tool_frame',
                                   'child':'pr2_lgripper_tooltip',
                                   'tfm':nlg.inv(tfm_base_gripper).dot(tfm_base_kinect).dot(tfm_link_rof).dot(tfm_camera_marker).dot(tfm_marker_tooltip)
                                   }
            tfm_pub.add_transforms([tfm_gripper_tooltip])
            greenprint("Gripper to marker found.")
        else:
            redprint("Gripper to marker not found.")
Example 23
def get_transforms(marker, hydra, n_tfm, n_avg):
    """
    Returns a tuple of two lists of N_TFM transforms, each entry the
    average of N_AVG transforms, corresponding to the AR marker with
    ID = MARKER and the hydra paddle of the HYDRA ('right' or 'left') side.
    """

    camera_frame = 'camera1_link'
    hydra_frame = 'hydra_base'
    marker_frame = 'ar_marker%d_camera1' % marker    
    assert hydra == 'right' or hydra == 'left'
    paddle_frame = 'hydra_%s' % hydra

    tf_sub = tf.TransformListener()

    I_0 = np.eye(4)
    I_0[3, 3] = 0
    
    ar_tfms = []
    hydra_tfms = []
    
    
    for i in xrange(n_tfm):
        raw_input(colorize("Transform %d of %d : Press return when ready to capture transforms" % (i + 1, n_tfm), "red", True))
        
        # # transforms which need to be averaged.
        m_ts = np.empty((0, 3))
        m_qs = np.empty((0, 4))
        h_ts = np.empty((0, 3))
        h_qs = np.empty((0, 4))
        
        for j in xrange(n_avg):            
            print colorize('\tGetting averaging transform : %d of %d ...' % (j + 1, n_avg), "blue", True)
            
            mtrans, mrot, htrans, hrot = None, None, None, None

            sleeper = rospy.Rate(30)
            while htrans is None:
                try:
                    mtrans, mrot = tf_sub.lookupTransform(camera_frame, marker_frame, rospy.Time(0))
                    htrans, hrot = tf_sub.lookupTransform(hydra_frame, paddle_frame, rospy.Time(0))
                except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
                    pass
                sleeper.sleep()

            m_ts = np.r_[m_ts, np.array(mtrans, ndmin=2)]
            h_ts = np.r_[h_ts, np.array(htrans, ndmin=2)]
            m_qs = np.r_[m_qs, np.array(mrot, ndmin=2)]
            h_qs = np.r_[h_qs, np.array(hrot, ndmin=2)]
          
        mtrans_avg = np.sum(m_ts, axis=0) / n_avg
        mrot_avg = avg_quaternions(m_qs)
        htrans_avg = np.sum(h_ts, axis=0) / n_avg
        hrot_avg = avg_quaternions(h_qs)
          
#         ar_tfm = tf_sub.fromTranslationRotation(mtrans_avg, mrot_avg)
#         h_tfm = tf_sub.fromTranslationRotation(htrans_avg, hrot_avg)
        ar_tfm = conversions.trans_rot_to_hmat(mtrans_avg, mrot_avg)
        h_tfm = conversions.trans_rot_to_hmat(htrans_avg, hrot_avg)
         
        print "\nar:"
        print ar_tfm
        print ar_tfm.dot(I_0).dot(ar_tfm.T)
        print "h:"
        print h_tfm
        print h_tfm.dot(I_0).dot(h_tfm.T), "\n"
         
        
        ar_tfms.append(ar_tfm)
        hydra_tfms.append(h_tfm)
        
    return (ar_tfms, hydra_tfms)
Example 24
def save_observations_rgbd(demo_type, demo_name, save_file=None):
    """
    Extract data from all sensors and store into demo.data file.
    """
    global setCalib
    # Temp file to show that data is already being extracted
    demo_dir        = osp.join(demo_files_dir, demo_type, demo_name)
    with open(osp.join(demo_dir, demo_names.extract_data_temp),'w') as fh: fh.write('Extracting...')
    
    calib_file_path = osp.join(demo_dir, demo_names.calib_name)
    bag_file        = osp.join(demo_dir, demo_names.bag_name)
    
    with open(osp.join(demo_dir, demo_names.camera_types_name)) as fh:
        camera_types = yaml.load(fh)
    with open(osp.join(demo_dir, demo_names.camera_models_name)) as fh:
        camera_models = yaml.load(fh)
    
    num_cameras = len(camera_types)

    video_dirs = {}
    for i in range(1, num_cameras + 1):
        video_dirs[i] = osp.join(demo_dir, demo_names.video_dir%i)

    c_frames = {}
    for i in range(1, num_cameras + 1):
        c_frames[i]= 'camera%i_link'%(i)
    hydra_frame = 'hydra_base'
    
    tfm_c1 = {i:None for i in range (1,num_cameras+1)}
    tfm_c1[1] = np.eye(4)
    tfm_c1_h = None

    with open(calib_file_path,'r') as fh: calib_data = cPickle.load(fh)
    bag = rosbag.Bag(bag_file)
    
    for tfm in calib_data['transforms']:
        if tfm['parent'] == c_frames[1] or tfm['parent'] == '/' + c_frames[1]:
            if tfm['child'] == hydra_frame or tfm['child'] == '/' + hydra_frame:
                tfm_c1_h = nlg.inv(tfm_link_rof).dot(tfm['tfm'])
            else:
                for i in range(2, num_cameras+1):
                    if tfm['child'] == c_frames[i] or tfm['child'] == '/' + c_frames[i]:
                        tfm_c1[i] = nlg.inv(tfm_link_rof).dot(tfm['tfm']).dot(tfm_link_rof)

    if tfm_c1_h is None or not all(tfm_c1[s] is not None for s in tfm_c1):
        redprint("Calibration does not have required transforms")
        return

    if not calib_data.get('grippers'):
        redprint("Gripper not found.")
        return

    grippers = {}
    data = {}
    data['T_cam2hbase'] = tfm_c1_h
    for lr,gdata in calib_data['grippers'].items():
        gr = gripper_lite.GripperLite(lr, gdata['ar'], trans_marker_tooltip=gripper_trans_marker_tooltip[lr])
        gr.reset_gripper(lr, gdata['tfms'], gdata['ar'], gdata['hydra'])
        grippers[lr] = gr
        data[lr] ={'hydra':[],
                   'pot_angles':[],
                   'T_tt2hy': gr.get_rel_transform('tool_tip', gr.hydra_marker)}
        ## place-holder for AR marker transforms:
        for i in xrange(num_cameras):
            data[lr]['camera%d'%(i+1)] = []


    winname = 'cam_image'    
    cam_counts = []
    for i in range(1,num_cameras+1):
        if verbose:
            yellowprint('Camera%i'%i)
        if camera_types[i] == "rgbd":
            rgb_fnames, depth_fnames, stamps = eu.get_rgbd_names_times(video_dirs[i])
        else:
            rgb_fnames, stamps = eu.get_rgbd_names_times(video_dirs[i], depth = False)
        cam_count = len(stamps)
        cam_counts.append(cam_count)

        if camera_types[i] == "rgbd":
            for ind in rgb_fnames:
                rgb = cv2.imread(rgb_fnames[ind])
                
                if displayImages:
                    cv2.imshow(winname, rgb)
                    cv2.waitKey(1)

                assert rgb is not None
                depth = cv2.imread(depth_fnames[ind],2)
                assert depth is not None
                xyz = clouds.depth_to_xyz(depth, asus_xtion_pro_f)
                pc = ru.xyzrgb2pc(xyz, rgb, frame_id='', use_time_now=False)
                ar_tfms = get_ar_marker_poses(pc,track=True)
                if ar_tfms:
                    if verbose:
                        blueprint("Got markers " + str(ar_tfms.keys()) + " at time %f"%stamps[ind])

                    for lr,gr in grippers.items():
                        ar = gr.get_ar_marker() 
                        if ar in ar_tfms:
                            tt_tfm = gr.get_tooltip_transform(ar, np.asarray(ar_tfms[ar]))
                            data[lr]['camera%i'%i].append((tfm_c1[i].dot(tt_tfm),stamps[ind]))
        else:
#             if setCalib is None: 
#                 setCalib = rospy.ServiceProxy("setCalibInfo", SetCalibInfo)
#             reqCalib.camera_model = camera_models[i]
#             setCalib(reqCalib)
#             
#             if verbose:
#                 yellowprint("Changed camera calibration parameters to model %s"%camera_models[i])

            for ind in rgb_fnames:                
                rgb = cv.LoadImage(rgb_fnames[ind])
                
                if displayImages:
                    cv.ShowImage(winname, rgb)
                    cv.WaitKey(1)

                assert rgb is not None
                ar_tfms = get_ar_marker_poses(rgb,use_pc_service=False, model = camera_models[i], track=True)

                if ar_tfms:
                    if verbose:
                        blueprint("Got markers " + str(ar_tfms.keys()) + " at time %f"%stamps[ind])
    
                    for lr,gr in grippers.items():
                        ar = gr.get_ar_marker() 
                        if ar in ar_tfms:
                            tt_tfm = gr.get_tooltip_transform(ar, np.asarray(ar_tfms[ar]))
                            data[lr]['camera%i'%i].append((tfm_c1[i].dot(tt_tfm),stamps[ind]))


    found_hydra_data = False
    found_pot_data = False
    # If hydra_only.data file already exists, don't do extra work.
    hydra_data_file        = osp.join(demo_dir, demo_names.hydra_data_name)
    if osp.isfile(hydra_data_file):
        yellowprint ("%s already exists for %s. Getting hydra data from this info."%(demo_names.hydra_data_name,demo_name))
        with open(hydra_data_file,'r') as fh: hdata = cPickle.load(fh)
         
        for lr in hdata:
            if lr in 'lr':
                if hdata[lr].get('hydra'):
                    data[lr]['hydra'] = hdata[lr]['hydra']
                    found_hydra_data = True
                if hdata[lr].get('pot_angles'):
                    data[lr]['pot_angles'] = hdata[lr]['pot_angles']
                    found_pot_data = True


    if not found_hydra_data:
        if verbose:
            yellowprint('Hydra')
        lr_long = {'l':'left','r':'right'}
        for (_, msg, _) in bag.read_messages(topics=['/tf']):
            hyd_tfm = {}
            found = ''
            for tfm in msg.transforms:
                if found in ['lr','rl']:
                    break
                for lr in grippers:
                    if lr in found:
                        continue
                    elif tfm.header.frame_id == '/' + hydra_frame and tfm.child_frame_id == '/hydra_'+lr_long[lr]:
                        t,r = tfm.transform.translation, tfm.transform.rotation
                        trans = (t.x,t.y,t.z)
                        rot = (r.x, r.y, r.z, r.w)
                        hyd_tfm = tfm_c1_h.dot(conversions.trans_rot_to_hmat(trans, rot))
                        stamp = tfm.header.stamp.to_sec()
                        tt_tfm = grippers[lr].get_tooltip_transform(lr_long[lr], hyd_tfm)
                        data[lr]['hydra'].append((tt_tfm, stamp))
                        found += lr
            if found and verbose:
                blueprint("Got hydra readings %s at time %f"%(found,stamp))

    if not found_pot_data:
        if verbose:
            yellowprint('Potentiometer readings')
        for lr in grippers:
            for (_, msg, ts) in bag.read_messages(topics=['/%s_pot_angle'%lr]):
                angle = msg.data
                stamp = ts.to_sec()
                data[lr]['pot_angles'].append((angle, stamp))
                if verbose: 
                    blueprint("Got a %s potentiometer angle of %f at time %f"%(lr,angle, stamp))

    if verbose:
        for lr in 'lr':
            yellowprint("Gripper %s:"%lr)
            for i in range(num_cameras):
                yellowprint("Found %i transforms out of %i rgb/rgbd images from camera%i"%(len(data[lr]['camera%i'%(i+1)]), cam_counts[i], i+1))
            
            yellowprint("Found %i transforms from hydra"%len(data[lr]['hydra']))
            yellowprint("Found %i potentiometer readings"%len(data[lr]['pot_angles']))

    if save_file is None:
        save_file = demo_names.data_name
    save_filename = osp.join(demo_dir, save_file)

    with open(save_filename, 'w') as sfh: cPickle.dump(data, sfh)
    yellowprint("Saved %s."%demo_names.data_name)
    
    os.remove(osp.join(demo_dir, demo_names.extract_data_temp))