Example #1
def camTlaser(res=np.zeros(7)):
    # @ Duke, res = np.array([0.8, 0.9, -1.7, 3.1, 0.061, 0.032, -0.035 ])
    rot = tr.Ry(math.radians(0.0 + res[0])) * tr.Rz(
        math.radians(0.0 + res[1])) * tr.Rx(
            math.radians(-90.0 + res[2])) * tr.Rz(math.radians(-90.0 + res[3]))
    disp = np.matrix([res[4], res[5], res[6]]).T + np.matrix([0.0, 0.0, 0.0]).T
    return tr.composeHomogeneousTransform(rot, disp)
def createMecanumTransform():
    ''' mecanum frame -> global frame (ignores the zenither)
    '''
    disp = np.matrix([-0.25,0.,0.0]).T
    rot = np.matrix(np.eye(3))
    t = tr.composeHomogeneousTransform(rot,disp)
    _globalT['mecanum'] = t
def createTorsoTransform():
    ''' torso frame -> global frame
    '''
    disp = np.matrix([0.,0.,0.]).T
    rot = np.matrix(np.eye(3))
    t = tr.composeHomogeneousTransform(rot,disp)
    _globalT['torso'] = t
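Every snippet on this page leans on a transforms utility module (imported as tr, htf, or hrl_tr) whose core helper is composeHomogeneousTransform. The module itself is not shown here; as a point of reference, a minimal sketch of what it presumably does, assuming a 3x3 numpy rotation matrix and a 3-element displacement:

import math
import numpy as np

def composeHomogeneousTransform(rot, disp):
    # Sketch: pack a 3x3 rotation R and a displacement d into the 4x4
    # homogeneous transform [[R, d], [0 0 0 1]].
    t = np.matrix(np.eye(4))
    t[0:3, 0:3] = rot
    t[0:3, 3] = np.matrix(disp).reshape(3, 1)
    return t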
Example #4
def process_read( pf, p_start, m ):
    # Note: p_start is a numpy array (Nx3) and its weights ([:,2]) will be modified in-place!
    # m is of the form: RFIDread, PoseStamped (ant in map), PoseStamped (robot base in map)
    #   We assume it is a positive reading for the desired tagID!

    xy_map = np.copy( p_start[:,0:2].T )  # 2xN
    
    # Antenna is at position given by m.ps_ant_map.
    #   IMPORTANT NOTE: I'm assuming that the antenna XY plane is parallel to the map XY plane!
    o = m.ps_ant_map.pose.orientation
    d = m.ps_ant_map.pose.position
    
    roll, pitch, yaw = tft.euler_from_quaternion([ o.x, o.y, o.z, o.w ])
    dx, dy = d.x, d.y

    # Transform xy_map points into antenna frame
    t_new = tr.composeHomogeneousTransform( tr.rotZ( yaw ),
                                            np.matrix([dx,dy,0.0]).T )
    trans = tr.invertHomogeneousTransform( t_new )

    xy_new = (trans * tr.xyToHomogenous( xy_map ))[0:2]  # This is a matrix!  (2xN)
    xy_new = np.array( xy_new ) # Convert it back to array!

    # Build new particles using old weights (Nx3)
    p_new = np.column_stack([ xy_new.T, p_start[:,2] ])  # take weights from last iteration

    rv = pf.step( 0.0, m.read.rssi, p_new, should_resample_func = pfilter.retFalse )

    # Put back the new weights:
    p_start[:,2] = rv[:,2]
    return
def createThok0Transform():
    ''' thok0 frame -> global frame
    '''
    disp = np.matrix([0.,0.,0.09]).T
    rot = np.matrix(np.eye(3))
    t = tr.composeHomogeneousTransform(rot,disp)
    _globalT['thok0'] = t
def createUtm0Transform():
    ''' utm0 frame -> global frame
    '''
    disp = copy.copy(tr.getDispSubMat(_globalT['thok0']))
    disp[2,0] += 0.055
    rot = np.matrix(np.eye(3))
    t = tr.composeHomogeneousTransform(rot,disp)
    _globalT['utm0'] = t
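The create*Transform helpers above populate a module-level registry of frame transforms. A usage sketch (the _globalT dict is assumed to be declared at module scope; note that createUtm0Transform reads _globalT['thok0'], so ordering matters):

_globalT = {}

createTorsoTransform()
createMecanumTransform()
createThok0Transform()
createUtm0Transform()   # must run after createThok0Transform

thok0Tglobal = _globalT['thok0']   # 4x4 homogeneous matrix, thok0 -> global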
def utmcam0Tglobal_mat(ang):
    thok0Tglobal_mat = tr.invertHomogeneousTransform(_globalT['thok0'])
    # servo angle.
    disp = np.matrix([0.,0.,0.]).T
    tmat = tr.composeHomogeneousTransform(tr.Ry(ang),disp)*thok0Tglobal_mat

    # cameraTlaser from thok_cam_calib.py
    x = 0.012
    y = -0.056
    z = 0.035
    r1 = 0.
    r2 = 0.
    r3 = -0.7
    disp = np.matrix([-x,-y,-z]).T
    r = tr.Rz(math.radians(-90))*tr.Ry(math.radians(90.))
    disp = r*disp
    r = r*tr.Rx(math.radians(r1))
    r = r*tr.Ry(math.radians(r2))
    r = r*tr.Rz(math.radians(r3))

    t = tr.composeHomogeneousTransform(r, disp)
    tmat = t*tmat
    return tmat
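utmcam0Tglobal_mat, like process_read above, also calls tr.invertHomogeneousTransform. For a rigid transform the inverse has a closed form; a sketch consistent with the composeHomogeneousTransform sketch above:

def invertHomogeneousTransform(t):
    # Sketch: inv([[R, d], [0, 1]]) = [[R.T, -R.T * d], [0, 1]].
    rot = t[0:3, 0:3]
    disp = t[0:3, 3]
    return composeHomogeneousTransform(rot.T, -rot.T * disp)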
Example #8
def process_read(pf, p_start, m):
    # Note: p_start is a numpy array (Nx3) and its weights ([:,2]) will be modified in-place!
    # m is of the form: RFIDread, PoseStamped (ant in map), PoseStamped (robot base in map)
    #   We assume it is a positive reading for the desired tagID!

    xy_map = np.copy(p_start[:, 0:2].T)  # 2xN

    # Antenna is at position given by m.ps_ant_map.
    #   IMPORTANT NOTE: I'm assuming that the antenna XY plane is parallel to the map XY plane!
    o = m.ps_ant_map.pose.orientation
    d = m.ps_ant_map.pose.position

    roll, pitch, yaw = tft.euler_from_quaternion([o.x, o.y, o.z, o.w])
    dx, dy = d.x, d.y

    # Transform xy_map points into antenna frame
    t_new = tr.composeHomogeneousTransform(tr.rotZ(yaw),
                                           np.matrix([dx, dy, 0.0]).T)
    trans = tr.invertHomogeneousTransform(t_new)

    xy_new = (trans *
              tr.xyToHomogenous(xy_map))[0:2]  # This is a matrix!  (2xN)
    xy_new = np.array(xy_new)  # Convert it back to array!

    # Build new particles using old weights (Nx3)
    p_new = np.column_stack([xy_new.T, p_start[:, 2]])  # take weights from last iteration

    rv = pf.step(0.0,
                 m.read.rssi,
                 p_new,
                 should_resample_func=pfilter.retFalse)

    # Put back the new weights:
    p_start[:, 2] = rv[:, 2]
    return
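Before the 4x4 transform can be applied, the 2xN particle coordinates are lifted to homogeneous form with tr.xyToHomogenous. A sketch of that lift (z = 0, w = 1), under the same imports as the sketches above and assuming a 2xN input:

def xyToHomogenous(xy):
    # Sketch: 2xN points -> 4xN homogeneous coordinates (z = 0, w = 1).
    xy = np.asarray(xy)
    n = xy.shape[1]
    return np.matrix(np.row_stack([xy, np.zeros(n), np.ones(n)]))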
def make_pr2_gripper_marker(ps,
                            color,
                            orientation=None,
                            marker_array=None,
                            control=None,
                            mesh_id_start=0,
                            ns="pr2_gripper",
                            offset=-0.19):
    mesh_id = mesh_id_start
    # this is the palm piece
    mesh = Marker()
    mesh.ns = ns
    #mesh.mesh_use_embedded_materials = True
    mesh.scale = Vector3(1., 1., 1.)
    mesh.color = ColorRGBA(*color)
    mesh.mesh_resource = "package://pr2_description/meshes/gripper_v0/gripper_palm.dae"
    mesh.type = Marker.MESH_RESOURCE
    if orientation != None:
        mesh.pose.orientation = Quaternion(*orientation)
    else:
        mesh.pose.orientation = Quaternion(0, 0, 0, 1)
    mesh.pose.position.x = offset
    if control != None:
        control.markers.append(mesh)
    elif marker_array != None:
        mesh.pose.position = ps.pose.position
        mesh.header.frame_id = ps.header.frame_id
        mesh.id = mesh_id
        mesh_id = mesh_id + 1
        marker_array.markers.append(mesh)

    # amount to open the gripper for each finger
    angle_open = 0.4
    if orientation == None:
        rot0 = np.matrix(np.eye(3))
    else:
        rot0 = tr.quaternion_to_matrix(orientation)

    if marker_array != None:
        T0 = tr.composeHomogeneousTransform(
            rot0, [ps.point.x, ps.point.y, ps.point.z])
    else:
        T0 = tr.composeHomogeneousTransform(rot0, [offset, 0.0, 0.])

    #transforming the left finger and finger tip
    rot1 = tr.rot_angle_direction(angle_open, np.matrix([0, 0, 1]))
    T1 = tr.composeHomogeneousTransform(rot1, [0.07691, 0.01, 0.])
    rot2 = tr.rot_angle_direction(-1 * angle_open, np.matrix([0, 0, 1]))
    T2 = tr.composeHomogeneousTransform(rot2, [0.09137, 0.00495, 0.])

    T_proximal = T0 * T1
    T_distal = T0 * T1 * T2

    finger_pos = tr.tft.translation_from_matrix(T_proximal)
    finger_rot = tr.tft.quaternion_from_matrix(T_proximal)

    tip_pos = tr.tft.translation_from_matrix(T_distal)
    tip_rot = tr.tft.quaternion_from_matrix(T_distal)

    #making the marker for the left finger
    mesh = Marker()
    mesh.ns = ns
    #mesh.mesh_use_embedded_materials = True
    mesh.scale = Vector3(1., 1., 1.)
    mesh.mesh_resource = "package://pr2_description/meshes/gripper_v0/l_finger.dae"
    mesh.color = ColorRGBA(*color)
    mesh.type = Marker.MESH_RESOURCE
    mesh.pose.orientation = Quaternion(*finger_rot)
    mesh.pose.position = Point(*finger_pos)
    if control != None:
        control.markers.append(mesh)
    elif marker_array != None:
        mesh.header.frame_id = ps.header.frame_id
        mesh.id = mesh_id
        mesh_id = mesh_id + 1
        marker_array.markers.append(mesh)

    mesh = Marker()
    mesh.ns = ns
    #mesh.mesh_use_embedded_materials = True
    mesh.scale = Vector3(1., 1., 1.)
    mesh.color = ColorRGBA(*color)
    mesh.mesh_resource = "package://pr2_description/meshes/gripper_v0/l_finger_tip.dae"
    mesh.type = Marker.MESH_RESOURCE
    mesh.pose.orientation = Quaternion(*tip_rot)
    mesh.pose.position = Point(*tip_pos)
    if control != None:
        control.markers.append(mesh)
    elif marker_array != None:
        mesh.header.frame_id = ps.header.frame_id
        mesh.id = mesh_id
        mesh_id = mesh_id + 1
        marker_array.markers.append(mesh)

    #transforming the right finger and finger tip
    rot1 = tr.rot_angle_direction(3.14, np.matrix([1, 0, 0])) * \
           tr.rot_angle_direction(angle_open, np.matrix([0, 0, 1]))
    T1 = tr.composeHomogeneousTransform(rot1, [0.07691, -0.01, 0.])
    rot2 = tr.rot_angle_direction(-1 * angle_open, np.matrix([0, 0, 1]))
    T2 = tr.composeHomogeneousTransform(rot2, [0.09137, 0.00495, 0.])

    T_proximal = T0 * T1
    T_distal = T0 * T1 * T2

    finger_pos = tr.tft.translation_from_matrix(T_proximal)
    finger_rot = tr.tft.quaternion_from_matrix(T_proximal)

    tip_pos = tr.tft.translation_from_matrix(T_distal)
    tip_rot = tr.tft.quaternion_from_matrix(T_distal)

    #making the marker for the right finger
    mesh = Marker()
    mesh.ns = ns
    #mesh.mesh_use_embedded_materials = True
    mesh.scale = Vector3(1., 1., 1.)
    mesh.color = ColorRGBA(*color)
    mesh.mesh_resource = "package://pr2_description/meshes/gripper_v0/l_finger.dae"
    mesh.type = Marker.MESH_RESOURCE
    mesh.pose.orientation = Quaternion(*finger_rot)
    mesh.pose.position = Point(*finger_pos)
    if control != None:
        control.markers.append(mesh)
    elif marker_array != None:
        mesh.header.frame_id = ps.header.frame_id
        mesh.id = mesh_id
        mesh_id = mesh_id + 1
        marker_array.markers.append(mesh)

    mesh = Marker()
    mesh.ns = ns
    #mesh.mesh_use_embedded_materials = True
    mesh.scale = Vector3(1., 1., 1.)
    mesh.color = ColorRGBA(*color)
    mesh.mesh_resource = "package://pr2_description/meshes/gripper_v0/l_finger_tip.dae"
    mesh.type = Marker.MESH_RESOURCE
    mesh.pose.orientation = Quaternion(*tip_rot)
    mesh.pose.position = Point(*tip_pos)
    if control != None:
        control.markers.append(mesh)
    elif marker_array != None:
        mesh.header.frame_id = ps.header.frame_id
        mesh.id = mesh_id
        mesh_id = mesh_id + 1
        marker_array.markers.append(mesh)

    if control != None:
        control.interaction_mode = InteractiveMarkerControl.BUTTON
        return control
    elif marker_array != None:
        return marker_array
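make_pr2_gripper_marker builds its finger rotations with tr.rot_angle_direction(angle, direction). A plausible sketch via the Rodrigues formula, assuming the direction is a 3-vector to be normalized (imports as in the sketches above):

def rot_angle_direction(angle, direction):
    # Sketch: 3x3 rotation by `angle` radians about axis `direction`,
    # R = I + sin(a) K + (1 - cos(a)) K^2, K the skew matrix of the axis.
    d = np.asarray(direction, dtype=float).flatten()
    d = d / np.linalg.norm(d)
    k = np.matrix([[0., -d[2], d[1]],
                   [d[2], 0., -d[0]],
                   [-d[1], d[0], 0.]])
    return np.matrix(np.eye(3)) + math.sin(angle) * k + (1. - math.cos(angle)) * k * k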
Example #10
def tiltTpan(panAng, residuals = np.zeros(6) ):
    rotResid, dispResid = residualXform( residuals )
    rot = rotResid * tr.Rz( -1.0 * panAng )
    disp = dispResid + np.matrix([ 0.07124, 0.0, 0.02243 ]).T
    return tr.composeHomogeneousTransform(rot, disp)
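tiltTpan, together with the pan/roll/tilt helpers in Examples #11, #20 and #22, calls residualXform, which this page never shows. It evidently splits a 6-vector of calibration residuals into a small rotation and a displacement; a hypothetical sketch, with the axis order and units as assumptions:

def residualXform(residuals):
    # Hypothetical sketch: residuals[0:3] are small rotations (degrees)
    # about x, y, z; residuals[3:6] are a displacement in meters.
    rotResid = tr.Rx(math.radians(residuals[0])) * \
               tr.Ry(math.radians(residuals[1])) * \
               tr.Rz(math.radians(residuals[2]))
    dispResid = np.matrix([residuals[3], residuals[4], residuals[5]]).T
    return rotResid, dispResid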
Example #11
def rollTtool_MA( residuals = np.zeros(6) ):
    rotResid, dispResid = residualXform( residuals )
    rot = rotResid * tr.Ry( math.radians( -90.0 ))
    disp = dispResid + np.matrix([ 0.0476, 0.0, 0.0 ]).T
    return tr.composeHomogeneousTransform(rot, disp)
Example #12
X,Y = np.meshgrid( np.arange(-10,10,0.1),
                   np.arange(-10,10,0.1) )
xyw = np.row_stack([ X.flatten(),  # Build Nx3
                     Y.flatten(),
                     np.ones( X.shape ).flatten() ]).T # weights (multiplicative)

p_set = np.copy( xyw )

pf = pfilter.PFilter( rfid_model.NoMotion(), rfid_model.RfidModel( rfid_model.yaml_fname ))

t0 = time.time()

rv = pf.step( 0.0, 83, p_set, should_resample_func = pfilter.retFalse ) # rv are the new weights

# New antenna is at position (5,-5) and rotated 90-deg. relative to MAP frame
t_new = tr.composeHomogeneousTransform( tr.rotZ( math.radians(90) ), np.matrix([5,-5,0]).T )
# This transform takes points from map frame into t_new:
trans = tr.invertHomogeneousTransform( t_new )
xy_new = np.array((trans * tr.xyToHomogenous( xyw[:,0:2].T) )[0:2].T)
xyw_new = np.column_stack([ xy_new, rv[:,2] ])  # take weights from last iteration

rv = pf.step( 0.0, 83, xyw_new, should_resample_func = pfilter.retFalse ) # rv are the new weights

t1 = time.time()
print 'Time: %3.2f ' % (t1 - t0)

to_print = np.copy( xyw )
to_print[:,2] = rv[:,2]  # Just keep weights, but use original points in "map" frame
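The frame-change idiom in this script (compose the antenna pose in the map, invert it, then apply the inverse to map-frame points) can be checked numerically. A small sanity check built on the helper sketches above, with rotZ(90 deg) written out by hand:

t_new = composeHomogeneousTransform(
    np.matrix([[0., -1., 0.],
               [1.,  0., 0.],
               [0.,  0., 1.]]),     # rotZ(90 deg)
    np.matrix([5., -5., 0.]).T)     # antenna at (5, -5) in the map
trans = invertHomogeneousTransform(t_new)

# The antenna's own position must land at the antenna-frame origin:
origin = trans * np.matrix([5., -5., 0., 1.]).T
assert np.allclose(origin[0:3], 0.0)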


def make_pr2_gripper_marker(ps, color, orientation = None,
                            marker_array=None, control=None, 
                            mesh_id_start = 0, ns = "pr2_gripper",
                            offset=-0.19):
    mesh_id = mesh_id_start
    # this is the palm piece
    mesh = Marker()
    mesh.ns = ns
    #mesh.mesh_use_embedded_materials = True
    mesh.scale = Vector3(1.,1.,1.)
    mesh.color = ColorRGBA(*color)
    mesh.mesh_resource = "package://pr2_description/meshes/gripper_v0/gripper_palm.dae"
    mesh.type = Marker.MESH_RESOURCE
    if orientation != None:
        mesh.pose.orientation = Quaternion(*orientation)
    else:
        mesh.pose.orientation = Quaternion(0,0,0,1)
    mesh.pose.position.x = offset
    if control != None:
        control.markers.append( mesh )
    elif marker_array != None:
        mesh.pose.position = ps.pose.position
        mesh.header.frame_id = ps.header.frame_id
        mesh.id = mesh_id
        mesh_id = mesh_id+1
        marker_array.markers.append(mesh)

    # amount to open the gripper for each finger
    angle_open = 0.4
    if orientation == None:
        rot0 = np.matrix(np.eye(3))
    else:
        rot0 = tr.quaternion_to_matrix(orientation)

    if marker_array != None:
        T0 = tr.composeHomogeneousTransform(rot0, [ps.point.x, ps.point.y, ps.point.z])
    else:
        T0 = tr.composeHomogeneousTransform(rot0, [offset, 0.0, 0.])

    #transforming the left finger and finger tip
    rot1 = tr.rot_angle_direction(angle_open, np.matrix([0, 0, 1]))
    T1 = tr.composeHomogeneousTransform(rot1, [0.07691, 0.01, 0.])
    rot2 = tr.rot_angle_direction(-1*angle_open, np.matrix([0, 0, 1]))
    T2 = tr.composeHomogeneousTransform(rot2, [0.09137, 0.00495, 0.])
    
    T_proximal = T0*T1
    T_distal = T0*T1*T2

    finger_pos = tr.tft.translation_from_matrix(T_proximal)
    finger_rot = tr.tft.quaternion_from_matrix(T_proximal)

    tip_pos = tr.tft.translation_from_matrix(T_distal)
    tip_rot = tr.tft.quaternion_from_matrix(T_distal)

    #making the marker for the left finger
    mesh = Marker()
    mesh.ns = ns
    #mesh.mesh_use_embedded_materials = True
    mesh.scale = Vector3(1.,1.,1.)
    mesh.mesh_resource = "package://pr2_description/meshes/gripper_v0/l_finger.dae"
    mesh.color = ColorRGBA(*color)
    mesh.type = Marker.MESH_RESOURCE
    mesh.pose.orientation = Quaternion(*finger_rot)
    mesh.pose.position = Point(*finger_pos)
    if control != None:
        control.markers.append( mesh )
    elif marker_array != None:
        mesh.header.frame_id = ps.header.frame_id
        mesh.id = mesh_id
        mesh_id = mesh_id+1
        marker_array.markers.append(mesh)

    mesh = Marker()
    mesh.ns = ns
    #mesh.mesh_use_embedded_materials = True
    mesh.scale = Vector3(1.,1.,1.)
    mesh.color = ColorRGBA(*color)
    mesh.mesh_resource = "package://pr2_description/meshes/gripper_v0/l_finger_tip.dae"
    mesh.type = Marker.MESH_RESOURCE
    mesh.pose.orientation = Quaternion(*tip_rot)
    mesh.pose.position = Point(*tip_pos)
    if control != None:
        control.markers.append( mesh )
    elif marker_array != None:
        mesh.header.frame_id = ps.header.frame_id
        mesh.id = mesh_id
        mesh_id = mesh_id+1
        marker_array.markers.append(mesh)

    #transforming the right finger and finger tip
    rot1 = tr.rot_angle_direction(3.14, np.matrix([1, 0, 0]))*tr.rot_angle_direction(angle_open, np.matrix([0, 0, 1]))
    T1 = tr.composeHomogeneousTransform(rot1, [0.07691, -0.01, 0.])
    rot2 = tr.rot_angle_direction(-1*angle_open, np.matrix([0, 0, 1]))
    T2 = tr.composeHomogeneousTransform(rot2, [0.09137, 0.00495, 0.])

    T_proximal = T0*T1
    T_distal = T0*T1*T2

    finger_pos = tr.tft.translation_from_matrix(T_proximal)
    finger_rot = tr.tft.quaternion_from_matrix(T_proximal)

    tip_pos = tr.tft.translation_from_matrix(T_distal)
    tip_rot = tr.tft.quaternion_from_matrix(T_distal)

    #making the marker for the right finger
    mesh = Marker()
    mesh.ns = ns
    #mesh.mesh_use_embedded_materials = True
    mesh.scale = Vector3(1.,1.,1.)
    mesh.color = ColorRGBA(*color)
    mesh.mesh_resource = "package://pr2_description/meshes/gripper_v0/l_finger.dae"
    mesh.type = Marker.MESH_RESOURCE
    mesh.pose.orientation = Quaternion(*finger_rot)
    mesh.pose.position = Point(*finger_pos)
    if control != None:
        control.markers.append( mesh )
    elif marker_array != None:
        mesh.header.frame_id = ps.header.frame_id
        mesh.id = mesh_id
        mesh_id = mesh_id+1
        marker_array.markers.append(mesh)

    mesh = Marker()
    mesh.ns = ns
    #mesh.mesh_use_embedded_materials = True
    mesh.scale = Vector3(1.,1.,1.)
    mesh.color = ColorRGBA(*color)
    mesh.mesh_resource = "package://pr2_description/meshes/gripper_v0/l_finger_tip.dae"
    mesh.type = Marker.MESH_RESOURCE
    mesh.pose.orientation = Quaternion(*tip_rot)
    mesh.pose.position = Point(*tip_pos)
    if control != None:
        control.markers.append( mesh )
    elif marker_array != None:
        mesh.header.frame_id = ps.header.frame_id
        mesh.id = mesh_id
        mesh_id = mesh_id+1
        marker_array.markers.append(mesh)

    if control != None:
        control.interaction_mode = InteractiveMarkerControl.BUTTON
        return control
    elif marker_array != None:
        return marker_array
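When an orientation is supplied, the gripper code converts it with tr.quaternion_to_matrix. A sketch assuming an [x, y, z, w] unit quaternion:

def quaternion_to_matrix(q):
    # Sketch: 3x3 rotation from a unit quaternion [x, y, z, w].
    x, y, z, w = q
    return np.matrix([
        [1 - 2*(y*y + z*z), 2*(x*y - z*w),     2*(x*z + y*w)],
        [2*(x*y + z*w),     1 - 2*(x*x + z*z), 2*(y*z - x*w)],
        [2*(x*z - y*w),     2*(y*z + x*w),     1 - 2*(x*x + y*y)]])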
Example #14
X, Y = np.meshgrid(np.arange(-10, 10, 0.1), np.arange(-10, 10, 0.1))
xyw = np.row_stack([
    X.flatten(),  # Build Nx3
    Y.flatten(),
    np.ones(X.shape).flatten()
]).T  # weights (multiplicative)

p_set = np.copy(xyw)

pf = pfilter.PFilter(rfid_model.NoMotion(),
                     rfid_model.RfidModel(rfid_model.yaml_fname))

t0 = time.time()

rv = pf.step(0.0, 83, p_set,
             should_resample_func=pfilter.retFalse)  # rv are the new weights

# New antenna is at position (5,-5) and rotated 90-deg. relative to MAP frame
t_new = tr.composeHomogeneousTransform(tr.rotZ(math.radians(90)),
                                       np.matrix([5, -5, 0]).T)
# This transform takes points from map frame into t_new:
trans = tr.invertHomogeneousTransform(t_new)
xy_new = np.array((trans * tr.xyToHomogenous(xyw[:, 0:2].T))[0:2].T)
xyw_new = np.column_stack([xy_new, rv[:, 2]])  # take weights from last iteration

rv = pf.step(0.0, 83, xyw_new,
             should_resample_func=pfilter.retFalse)  # rv are the new weights

t1 = time.time()
print 'Time: %3.2f ' % (t1 - t0)

to_print = np.copy(xyw)
to_print[:, 2] = rv[:, 2]  # Just keep weights, but use original points in "map" frame

    def omni_pose_cb(self, msg):
        self.publish_rviz_markers()
        if self.enabled:
            #Get the omni's tip pose in the PR2's torso frame
            tip_omni, msg_frame = tfu.posestamped_as_matrix(msg)
            self.torso_lift_link_T_omni(tip_omni, msg_frame)
            #self.torso_T_omni(tip_omni, msg_frame)

            #Publish new arm pose
            ps = PoseStamped()
            ps.header.frame_id = '/torso_lift_link'
            ps.header.stamp = rospy.get_rostime()
            ps.pose.position.x = self.tip_tt[0]
            ps.pose.position.y = self.tip_tt[1]
            ps.pose.position.z = self.tip_tt[2]
            ps.pose.orientation.x = self.tip_tq[0]
            ps.pose.orientation.y = self.tip_tq[1]
            ps.pose.orientation.z = self.tip_tq[2]
            ps.pose.orientation.w = self.tip_tq[3]

            self.set_goal_pub.publish(ps)

            if self.zero_out_forces:
                wr = OmniFeedback()
                wr.force.x = 0 
                wr.force.y = 0 
                wr.force.z = 0 
                self.omni_fb.publish(wr)
                self.zero_out_forces = False
        else:
            #this is a zero order hold publishing the last received values until the control loop is active again
            tip_tt = self.tip_tt
            tip_tq = self.tip_tq
            ps = PoseStamped()
            ps.header.frame_id = '/torso_lift_link'
            ps.header.stamp = rospy.get_rostime()
            ps.pose.position.x = tip_tt[0]
            ps.pose.position.y = tip_tt[1]
            ps.pose.position.z = tip_tt[2]
            ps.pose.orientation.x = tip_tq[0]
            ps.pose.orientation.y = tip_tq[1]
            ps.pose.orientation.z = tip_tq[2]
            ps.pose.orientation.w = tip_tq[3]

            self.set_goal_pub.publish(ps)

            #this is to make the omni force well move if the arm has moved but the commanded
            #position of the arm has not changed
            tip_omni, msg_frame = tfu.posestamped_as_matrix(msg)
            m_o1 = tfu.transform(self.omni_name, msg_frame, self.tflistener) * tip_omni
            ee_point = np.matrix(tr.translation_from_matrix(m_o1)).T
            q = self.robot.get_joint_angles()
            pos, rot = self.robot.kinematics.FK(q, self.robot.kinematics.n_jts)
            # tip_torso = tfu.transform('/torso_lift_link', self.gripper_tip_frame, self.tflistener) \
            #                       * tfu.tf_as_matrix(([0.,0.,0.], tr.quaternion_from_euler(0,0,0)))

            tip_torso = hrl_tr.composeHomogeneousTransform(rot, pos)
            #tip_torso = tfu.tf_as_matrix(([pos, rot]))
            #def composeHomogeneousTransform(rot, disp):

            center_t, center_q = self.omni_T_torso(tip_torso)
            center_col_vec = np.matrix(center_t).T

            #Send force control info
            wr = OmniFeedback()
            # offsets (0, -.268, -.15) introduced by Hai in phantom driver
            # should be cleaned up at some point so that it is consistent with position returned by phantom -marc
            lock_pos = tr.translation_matrix(np.matrix([0, -.268, -.150])) * \
                       tfu.transform(self.omni_name + '_sensable', self.omni_name, self.tflistener) * \
                       np.row_stack((center_col_vec, np.matrix([1.])))

            wr.position.x = (lock_pos[0,0])*1000.0  #multiply by 1000 mm/m to get units phantom expects
            wr.position.y = (lock_pos[1,0])*1000.0 
            wr.position.z = (lock_pos[2,0])*1000.0 

            # wr.position.x = tip_tt[0]  #multiply by 1000 mm/m to get units phantom expects
            # wr.position.y = tip_tt[1]
            # wr.position.z = tip_tt[2]
            self.omni_fb.publish(wr)
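omni_pose_cb leans on tfu.posestamped_as_matrix to get the stylus tip as a 4x4 matrix plus the frame it lives in. A sketch of what such a conversion plausibly looks like, built on tf.transformations (tft) as used earlier on this page:

def posestamped_as_matrix(ps):
    # Sketch: PoseStamped -> (4x4 homogeneous matrix, frame id).
    p = ps.pose.position
    o = ps.pose.orientation
    t = np.matrix(tft.translation_matrix([p.x, p.y, p.z]))
    r = np.matrix(tft.quaternion_matrix([o.x, o.y, o.z, o.w]))
    return t * r, ps.header.frame_id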
Example #16
def process_bag(full_bag_name, prosilica_image_file, model_image_file, experiment_start_condition_pkl, arm_used='left'):
    bag_path, bag_name_ext = os.path.split(full_bag_name)
    filename, ext = os.path.splitext(bag_name_ext)
    
    ###############################################################################
    # Playback the bag
    bag_playback = Process(target=playback_bag, args=(full_bag_name,))
    bag_playback.start()

    ###############################################################################
    ## Listen for transforms using rosbag
    rospy.init_node('bag_processor')

    tl = tf.TransformListener()

    print 'waiting for transform'
    tl.waitForTransform('map', 'base_footprint', rospy.Time(), rospy.Duration(20))
    # Extract the starting location map_T_bf
    p_base = tfu.transform('map', 'base_footprint', tl) \
            * tfu.tf_as_matrix(([0., 0., 0., 1.], tr.quaternion_from_euler(0,0,0)))
    t, r = tfu.matrix_as_tf(p_base)
    pose_base = (t, r)
    print 'done with tf'

    ##########################################################
    ## Find contact locations
    start_conditions = ut.load_pickle(experiment_start_condition_pkl)
    start_conditions['highdef_image'] = prosilica_image_file
    start_conditions['model_image'] = model_image_file
    rospy.loginfo('extracting object localization features')
    start_conditions['pose_parameters'] = extract_object_localization_features2(start_conditions, tl, arm_used, p_base)

    if bag_playback.is_alive():
        rospy.loginfo('Terminating playback process')
        bag_playback.terminate()
        time.sleep(1)
        bag_playback.terminate()
        time.sleep(1)
        rospy.loginfo('Playback process terminated? %s' % str(not bag_playback.is_alive()))


    ###############################################################################
    #Read bag using programmatic API
    pr2_kinematics = pr2k.PR2Kinematics(tl)
    converter = JointMsgConverter()
    rospy.loginfo('opening bag, reading state topics')
    topics_dict = ru.bag_sel(full_bag_name, ['/joint_states', '/l_cart/command_pose', 
                                             '/r_cart/command_pose', '/torso_controller/state',
                                             '/pressure/l_gripper_motor', '/pressure/r_gripper_motor'])

    ## Select the arm that has been moving, segment joint states based on contact states.
    if arm_used == 'left':
        pressures = topics_dict['/pressure/l_gripper_motor']
    elif arm_used == 'right':
        pressures = topics_dict['/pressure/r_gripper_motor']
    else:
        raise RuntimeError('arm_used invalid')

    ## find contact times
    rospy.loginfo('Finding contact times')
    left_f, right_f, ptimes = hpr2.pressure_state_to_mat(pressures['msg'])

    ## create segments based on contacts
    # TODO: make this accept more contact stages
    contact_times = find_contact_times(left_f, right_f, ptimes, 250)
    if len(contact_times) > 2:
        time_segments = [['start', contact_times[0]], [contact_times[0], contact_times[-1]], [contact_times[-1], 'end']]
    else:
        time_segments = [['start', 'end']]

    rospy.loginfo('Splitting messages based on contact times')
    ## split pressure readings based on contact times
    pressure_lseg = segment_msgs(time_segments, topics_dict['/pressure/l_gripper_motor']['msg'])
    pressure_rseg = segment_msgs(time_segments, topics_dict['/pressure/r_gripper_motor']['msg'])

    ## split cartesian commands based on contact times
    lcart_seg = segment_msgs(time_segments, topics_dict['/l_cart/command_pose']['msg'])
    rcart_seg = segment_msgs(time_segments, topics_dict['/r_cart/command_pose']['msg'])

    ## split joint states
    joint_states = topics_dict['/joint_states']['msg']
    print 'there are %d joint state messages in bag' % len(joint_states)

    j_segs     = segment_msgs(time_segments, topics_dict['/joint_states']['msg'])
    jseg_dicts = [converter.msgs_to_dict(seg) for seg in j_segs]
    # find the first set of joint states
    j0_dict    = jseg_dicts[0][0]

    ## perform FK
    rospy.loginfo('Performing FK to find tip locations')
    bf_T_obj = htf.composeHomogeneousTransform(start_conditions['pose_parameters']['frame_bf'], 
                                               start_conditions['pose_parameters']['center_bf'])
    obj_T_bf = np.linalg.inv(bf_T_obj)
    for jseg_dict in jseg_dicts:
        for d in jseg_dict:
            rtip_bf = pr2_kinematics.right.fk('base_footprint',
                    'r_wrist_roll_link', 'r_gripper_tool_frame',
                    d['poses']['rarm'].A1.tolist())
            ltip_bf =  pr2_kinematics.left.fk('base_footprint',
                    'l_wrist_roll_link', 'l_gripper_tool_frame',
                    d['poses']['larm'].A1.tolist())
            rtip_obj = obj_T_bf * rtip_bf
            ltip_obj = obj_T_bf * ltip_bf

            d['rtip_obj'] = tfu.matrix_as_tf(rtip_obj)
            d['ltip_obj'] = tfu.matrix_as_tf(ltip_obj)

            d['rtip_bf'] = tfu.matrix_as_tf(rtip_bf)
            d['ltip_bf'] = tfu.matrix_as_tf(ltip_bf)
    
    ###############################################################################
    # make movement state dictionaries, one for each state
    movement_states = []
    for i, seg in enumerate(time_segments):
        name = "state_%d" % i
        start_times = [lcart_seg[i][0].header.stamp.to_time(), 
                       rcart_seg[i][0].header.stamp.to_time(), 
                       jseg_dicts[i][0]['time'],
                       pressure_lseg[i][0].header.stamp.to_time(), 
                       pressure_rseg[i][0].header.stamp.to_time()]

        sdict = {'name': name,
                 'start_time': np.min(start_times),
                 'cartesian': [[ru.ros_to_dict(ps) for ps in lcart_seg[i]], 
                               [ru.ros_to_dict(ps) for ps in rcart_seg[i]]],
                 'joint_states': jseg_dicts[i]
                 #'pressure': [pressure_lseg[i], pressure_rseg[i]]
                 } 

        movement_states.append(sdict)

    # store in a dict
    data = {'start_conditions': start_conditions, # ['camera_info', 'map_T_bf', 'pro_T_bf', 'points' (in base_frame), 
                                                  # 'highdef_image', 'model_image',
                                                    ## 'pose_parameters'
                                                        ## 'descriptors'
                                                        ## 'directions' (wrt to canonical orientation)
                                                        ## 'closest_feature'
            'base_pose': pose_base, 
            'robot_pose': j0_dict,
            'arm': arm_used,
            'movement_states': movement_states}

    # save dicts to pickles
    processed_bag_name = '%s_processed.pkl' % os.path.join(bag_path, filename)
    rospy.loginfo('saving to %s' % processed_bag_name)
    ut.save_pickle(data, processed_bag_name)
    bag_playback.join()
    rospy.loginfo('finished!')
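find_contact_times is not defined on this page; from its call site (left and right fingertip pressure matrices, their timestamps, and a threshold of 250) it presumably flags the times where the gripper is in contact. A hypothetical sketch; the real segmentation logic may well be more involved:

def find_contact_times(left_f, right_f, ptimes, threshold):
    # Hypothetical sketch: report timestamps where the summed fingertip
    # pressures exceed `threshold`.
    total = np.asarray(np.abs(left_f).sum(axis=0) +
                       np.abs(right_f).sum(axis=0)).flatten()
    return np.array([t for t, f in zip(ptimes, total) if f > threshold])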
Example #17
X,Y = np.meshgrid( np.arange(-10,10,0.1),
                   np.arange(-10,10,0.1) )
xyw = np.row_stack([ X.flatten(),  # Build Nx3
                     Y.flatten(),
                     np.ones( X.shape ).flatten() ]).T # weights (multiplicative)

xyw_orig = np.copy( xyw )

rm = rfid_model.RfidModel( rfid_model.yaml_fname )
# pf = lib_pfilter.PFilter( lib_pfilter.NoMotion(), rm, xyw )
# pf.measurement( 80 )


# Antenna reading at origin (max near 4,0)
# rv = np.copy( xyw )
rv = rm.weight_set( 83, xyw )

# Move antenna: transform the particles by rotZ(-90 deg) plus a (5, 5) offset
xy = xyw[:,0:2]
trans = tr.composeHomogeneousTransform( tr.rotZ( math.radians(-90) ), np.matrix([5,5,0]).T )
xy_new = np.array((trans * tr.xyToHomogenous( xy.T) )[0:2].T)

xyw_new = np.column_stack([ xy_new, rv[:,2] ])  # take weights from last iteration
rv_new = rm.weight_set( 83, xyw_new )

to_print = np.copy( xyw_orig )
to_print[:,2] = rv_new[:,2]  # Just keep weights, but use original points in "map" frame



dw = DisplayWeights()

while not rospy.is_shutdown():
    print 'UPDATING'
    dw.update( to_print )
Example #18
def camTlaser( res = np.zeros(7) ):
    # @ Duke, res = np.array([0.8, 0.9, -1.7, 3.1, 0.061, 0.032, -0.035 ])
    rot = tr.Ry( math.radians( 0.0 + res[0] )) * tr.Rz( math.radians( 0.0 + res[1] )) * \
          tr.Rx( math.radians( -90.0 + res[2] )) * tr.Rz( math.radians( -90.0 + res[3] ))
    disp = np.matrix([ res[4], res[5], res[6] ]).T + np.matrix([ 0.0, 0.0, 0.0 ]).T
    return tr.composeHomogeneousTransform(rot, disp)
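camTlaser chains the elementary rotations tr.Rx, tr.Ry, tr.Rz (tr.rotZ elsewhere on this page is presumably the z case). Sketches assuming the standard right-handed, counter-clockwise convention; the sign convention in the actual module may differ:

def Rx(t):
    c, s = math.cos(t), math.sin(t)
    return np.matrix([[1., 0., 0.], [0., c, -s], [0., s, c]])

def Ry(t):
    c, s = math.cos(t), math.sin(t)
    return np.matrix([[c, 0., s], [0., 1., 0.], [-s, 0., c]])

def Rz(t):
    c, s = math.cos(t), math.sin(t)
    return np.matrix([[c, -s, 0.], [s, c, 0.], [0., 0., 1.]])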
Example #19
X, Y = np.meshgrid(np.arange(-10, 10, 0.1), np.arange(-10, 10, 0.1))
xyw = np.row_stack([
    X.flatten(),  # Build Nx3
    Y.flatten(),
    np.ones(X.shape).flatten()
]).T  # weights (multiplicative)

xyw_orig = np.copy(xyw)

rm = rfid_model.RfidModel(rfid_model.yaml_fname)
# pf = lib_pfilter.PFilter( lib_pfilter.NoMotion(), rm, xyw )
# pf.measurement( 80 )

# Antenna reading at origin (max near 4,0)
# rv = np.copy( xyw )
rv = rm.weight_set(83, xyw)

# Move antenna: transform the particles by rotZ(-90 deg) plus a (5, 5) offset
xy = xyw[:, 0:2]
trans = tr.composeHomogeneousTransform(tr.rotZ(math.radians(-90)),
                                       np.matrix([5, 5, 0]).T)
xy_new = np.array((trans * tr.xyToHomogenous(xy.T))[0:2].T)

xyw_new = np.column_stack([xy_new, rv[:, 2]])  # take weights from last iteration
rv_new = rm.weight_set(83, xyw_new)

to_print = np.copy(xyw_orig)
to_print[:, 2] = rv_new[:, 2]  # Just keep weights, but use original points in "map" frame

dw = DisplayWeights()

while not rospy.is_shutdown():
    print 'UPDATING'
    dw.update(to_print)
Example #20
def panTroll(rollAng, residuals = np.zeros(6) ):
    rotResid, dispResid = residualXform( residuals )
    rot = rotResid * tr.Rx( -1.0 * rollAng )
    disp = dispResid + np.matrix([0.02021, 0.0, 0.04236 ]).T
    return tr.composeHomogeneousTransform(rot, disp)
Example #21
def process_bag(full_bag_name,
                prosilica_image_file,
                model_image_file,
                experiment_start_condition_pkl,
                arm_used='left'):
    bag_path, bag_name_ext = os.path.split(full_bag_name)
    filename, ext = os.path.splitext(bag_name_ext)

    ###############################################################################
    # Playback the bag
    bag_playback = Process(target=playback_bag, args=(full_bag_name, ))
    bag_playback.start()

    ###############################################################################
    ## Listen for transforms using rosbag
    rospy.init_node('bag_processor')

    tl = tf.TransformListener()

    print 'waiting for transform'
    tl.waitForTransform('map', 'base_footprint', rospy.Time(),
                        rospy.Duration(20))
    # Extract the starting location map_T_bf
    p_base = tfu.transform('map', 'base_footprint', tl) \
            * tfu.tf_as_matrix(([0., 0., 0., 1.], tr.quaternion_from_euler(0,0,0)))
    t, r = tfu.matrix_as_tf(p_base)
    pose_base = (t, r)
    print 'done with tf'

    ##########################################################
    ## Find contact locations
    start_conditions = ut.load_pickle(experiment_start_condition_pkl)
    start_conditions['highdef_image'] = prosilica_image_file
    start_conditions['model_image'] = model_image_file
    rospy.loginfo('extracting object localization features')
    start_conditions['pose_parameters'] = extract_object_localization_features2(
        start_conditions, tl, arm_used, p_base)

    if bag_playback.is_alive():
        rospy.loginfo('Terminating playback process')
        bag_playback.terminate()
        time.sleep(1)
        bag_playback.terminate()
        time.sleep(1)
        rospy.loginfo('Playback process terminated? %s' %
                      str(not bag_playback.is_alive()))

    ###############################################################################
    #Read bag using programmatic API
    pr2_kinematics = pr2k.PR2Kinematics(tl)
    converter = JointMsgConverter()
    rospy.loginfo('opening bag, reading state topics')
    topics_dict = ru.bag_sel(full_bag_name, [
        '/joint_states', '/l_cart/command_pose', '/r_cart/command_pose',
        '/torso_controller/state', '/pressure/l_gripper_motor',
        '/pressure/r_gripper_motor'
    ])

    ## Select the arm that has been moving, segment joint states based on contact states.
    if arm_used == 'left':
        pressures = topics_dict['/pressure/l_gripper_motor']
    elif arm_used == 'right':
        pressures = topics_dict['/pressure/r_gripper_motor']
    else:
        raise RuntimeError('arm_used invalid')

    ## find contact times
    rospy.loginfo('Finding contact times')
    left_f, right_f, ptimes = hpr2.pressure_state_to_mat(pressures['msg'])

    ## create segments based on contacts
    # TODO: make this accept more contact stages
    contact_times = find_contact_times(left_f, right_f, ptimes, 250)
    if len(contact_times) > 2:
        time_segments = [['start', contact_times[0]],
                         [contact_times[0], contact_times[-1]],
                         [contact_times[-1], 'end']]
    else:
        time_segments = [['start', 'end']]

    rospy.loginfo('Splitting messages based on contact times')
    ## split pressure readings based on contact times
    pressure_lseg = segment_msgs(
        time_segments, topics_dict['/pressure/l_gripper_motor']['msg'])
    pressure_rseg = segment_msgs(
        time_segments, topics_dict['/pressure/r_gripper_motor']['msg'])

    ## split cartesian commands based on contact times
    lcart_seg = segment_msgs(time_segments,
                             topics_dict['/l_cart/command_pose']['msg'])
    rcart_seg = segment_msgs(time_segments,
                             topics_dict['/r_cart/command_pose']['msg'])

    ## split joint states
    joint_states = topics_dict['/joint_states']['msg']
    print 'there are %d joint state messages in bag' % len(joint_states)

    j_segs = segment_msgs(time_segments, topics_dict['/joint_states']['msg'])
    jseg_dicts = [converter.msgs_to_dict(seg) for seg in j_segs]
    # find the first set of joint states
    j0_dict = jseg_dicts[0][0]

    ## perform FK
    rospy.loginfo('Performing FK to find tip locations')
    bf_T_obj = htf.composeHomogeneousTransform(
        start_conditions['pose_parameters']['frame_bf'],
        start_conditions['pose_parameters']['center_bf'])
    obj_T_bf = np.linalg.inv(bf_T_obj)
    for jseg_dict in jseg_dicts:
        for d in jseg_dict:
            rtip_bf = pr2_kinematics.right.fk('base_footprint',
                                              'r_wrist_roll_link',
                                              'r_gripper_tool_frame',
                                              d['poses']['rarm'].A1.tolist())
            ltip_bf = pr2_kinematics.left.fk('base_footprint',
                                             'l_wrist_roll_link',
                                             'l_gripper_tool_frame',
                                             d['poses']['larm'].A1.tolist())
            rtip_obj = obj_T_bf * rtip_bf
            ltip_obj = obj_T_bf * ltip_bf

            d['rtip_obj'] = tfu.matrix_as_tf(rtip_obj)
            d['ltip_obj'] = tfu.matrix_as_tf(ltip_obj)

            d['rtip_bf'] = tfu.matrix_as_tf(rtip_bf)
            d['ltip_bf'] = tfu.matrix_as_tf(ltip_bf)

    ###############################################################################
    # make movement state dictionaries, one for each state
    movement_states = []
    for i, seg in enumerate(time_segments):
        name = "state_%d" % i
        start_times = [
            lcart_seg[i][0].header.stamp.to_time(),
            rcart_seg[i][0].header.stamp.to_time(), jseg_dicts[i][0]['time'],
            pressure_lseg[i][0].header.stamp.to_time(),
            pressure_rseg[i][0].header.stamp.to_time()
        ]

        sdict = {
            'name': name,
            'start_time': np.min(start_times),
            'cartesian': [[ru.ros_to_dict(ps) for ps in lcart_seg[i]],
                          [ru.ros_to_dict(ps) for ps in rcart_seg[i]]],
            'joint_states': jseg_dicts[i]
            #'pressure': [pressure_lseg[i], pressure_rseg[i]]
        }

        movement_states.append(sdict)

    # store in a dict
    data = {
        'start_conditions': start_conditions,
        # ['camera_info', 'map_T_bf', 'pro_T_bf', 'points' (in base_frame),
        #  'highdef_image', 'model_image',
        ## 'pose_parameters'
        ## 'descriptors'
        ## 'directions' (wrt to canonical orientation)
        ## 'closest_feature'
        'base_pose': pose_base,
        'robot_pose': j0_dict,
        'arm': arm_used,
        'movement_states': movement_states
    }

    # save dicts to pickles
    processed_bag_name = '%s_processed.pkl' % os.path.join(bag_path, filename)
    rospy.loginfo('saving to %s' % processed_bag_name)
    ut.save_pickle(data, processed_bag_name)
    bag_playback.join()
    rospy.loginfo('finished!')
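segment_msgs, used throughout process_bag, is also not shown on this page. From its call sites it takes the [start, end] windows and a list of stamped messages; a hypothetical sketch of the binning, treating 'start'/'end' as unbounded:

def segment_msgs(time_segments, msgs):
    # Hypothetical sketch: bin stamped messages into the given windows.
    segs = []
    for start, end in time_segments:
        lo = start if start != 'start' else -np.inf
        hi = end if end != 'end' else np.inf
        segs.append([m for m in msgs
                     if lo <= m.header.stamp.to_time() < hi])
    return segs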
Example #22
def laserTtilt(tiltAng, residuals = np.zeros(6) ):
    rotResid, dispResid = residualXform( residuals )
    rot = rotResid * tr.Ry( +1.0 * tiltAng )
    disp = dispResid + np.matrix([ 0.03354, 0.0, 0.23669 ]).T
    return tr.composeHomogeneousTransform(rot, disp)
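laserTtilt completes the pan/tilt/roll chain of Examples #10, #11 and #20. Each helper returns one A-from-B link, so chaining them maps tool-frame points into the laser frame; a usage sketch, with the composition order an assumption based on the naming:

panAng, tiltAng, rollAng = math.radians(10.), math.radians(-5.), math.radians(20.)

laserTtool = laserTtilt(tiltAng) * tiltTpan(panAng) * \
             panTroll(rollAng) * rollTtool_MA()
tool_origin_in_laser = laserTtool * np.matrix([0., 0., 0., 1.]).T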
Example #23
    def omni_pose_cb(self, msg):
        self.publish_rviz_markers()
        if self.enabled:
            #Get the omni's tip pose in the PR2's torso frame
            tip_omni, msg_frame = tfu.posestamped_as_matrix(msg)
            self.torso_lift_link_T_omni(tip_omni, msg_frame)
            #self.torso_T_omni(tip_omni, msg_frame)

            #Publish new arm pose
            ps = PoseStamped()
            ps.header.frame_id = '/torso_lift_link'
            ps.header.stamp = rospy.get_rostime()
            ps.pose.position.x = self.tip_tt[0]
            ps.pose.position.y = self.tip_tt[1]
            ps.pose.position.z = self.tip_tt[2]
            ps.pose.orientation.x = self.tip_tq[0]
            ps.pose.orientation.y = self.tip_tq[1]
            ps.pose.orientation.z = self.tip_tq[2]
            ps.pose.orientation.w = self.tip_tq[3]

            self.set_goal_pub.publish(ps)

            if self.zero_out_forces:
                wr = OmniFeedback()
                wr.force.x = 0
                wr.force.y = 0
                wr.force.z = 0
                self.omni_fb.publish(wr)
                self.zero_out_forces = False
        else:
            #this is a zero order hold publishing the last received values until the control loop is active again
            tip_tt = self.tip_tt
            tip_tq = self.tip_tq
            ps = PoseStamped()
            ps.header.frame_id = '/torso_lift_link'
            ps.header.stamp = rospy.get_rostime()
            ps.pose.position.x = tip_tt[0]
            ps.pose.position.y = tip_tt[1]
            ps.pose.position.z = tip_tt[2]
            ps.pose.orientation.x = tip_tq[0]
            ps.pose.orientation.y = tip_tq[1]
            ps.pose.orientation.z = tip_tq[2]
            ps.pose.orientation.w = tip_tq[3]

            self.set_goal_pub.publish(ps)

            #this is to make the omni force well move if the arm has moved but the commanded
            #position of the arm has not changed
            tip_omni, msg_frame = tfu.posestamped_as_matrix(msg)
            m_o1 = tfu.transform(self.omni_name, msg_frame,
                                 self.tflistener) * tip_omni
            ee_point = np.matrix(tr.translation_from_matrix(m_o1)).T
            q = self.robot.get_joint_angles()
            pos, rot = self.robot.kinematics.FK(q, self.robot.kinematics.n_jts)
            # tip_torso = tfu.transform('/torso_lift_link', self.gripper_tip_frame, self.tflistener) \
            #                       * tfu.tf_as_matrix(([0.,0.,0.], tr.quaternion_from_euler(0,0,0)))

            tip_torso = hrl_tr.composeHomogeneousTransform(rot, pos)
            #tip_torso = tfu.tf_as_matrix(([pos, rot]))
            #def composeHomogeneousTransform(rot, disp):

            center_t, center_q = self.omni_T_torso(tip_torso)
            center_col_vec = np.matrix(center_t).T

            #Send force control info
            wr = OmniFeedback()
            # offsets (0, -.268, -.15) introduced by Hai in phantom driver
            # should be cleaned up at some point so that it is consistent with position returned by phantom -marc
            lock_pos = tr.translation_matrix(np.matrix([0, -.268, -.150])) * \
                       tfu.transform(self.omni_name + '_sensable',
                                     self.omni_name, self.tflistener) * \
                       np.row_stack((center_col_vec, np.matrix([1.])))

            #multiply by 1000 mm/m to get units phantom expects
            wr.position.x = lock_pos[0, 0] * 1000.0
            wr.position.y = lock_pos[1, 0] * 1000.0
            wr.position.z = lock_pos[2, 0] * 1000.0

            # wr.position.x = tip_tt[0]  #multiply by 1000 mm/m to get units phantom expects
            # wr.position.y = tip_tt[1]
            # wr.position.z = tip_tt[2]
            self.omni_fb.publish(wr)