Example #1
def space_from_joints(joint_angles):
    T01, T12, T23, T34, T4e = direct_kinematics(joint_angles)
    T = tfs.concatenate_matrices(T01, T12, T23, T34, T4e)
    rz1, ry, rz2 = tfs.euler_from_matrix(T, 'szyz')
    trans = tfs.translation_from_matrix(T)
    S = np.append(trans, rz2)
    return S
Example #2
def pack_pose(time, child, parent, matrix=None, trans=None, quat=None):

    if matrix is not None and (trans is None and quat is None):
        trans = tfs.translation_from_matrix(matrix)
        quat = tfs.quaternion_from_matrix(matrix)
    elif matrix is None and trans is not None and quat is not None:
        pass  # trans and quat were supplied directly; nothing to derive
    else:
        print 'invalid use'

    t = TransformStamped()
    t.header.frame_id = parent
    t.header.stamp = time
    t.child_frame_id = child
    t.transform.translation.x = trans[0]
    t.transform.translation.y = trans[1]
    t.transform.translation.z = trans[2]

    quat = numpy.array(quat)
    quat = quat / numpy.linalg.norm(quat, ord=2)
    t.transform.rotation.x = quat[0]
    t.transform.rotation.y = quat[1]
    t.transform.rotation.z = quat[2]
    t.transform.rotation.w = quat[3]

    return t
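A minimal usage sketch for pack_pose (not part of the original source), assuming tfs is tf.transformations and an initialized ROS node; the frame names and offsets are illustrative.

import rospy
import tf.transformations as tfs

# Build a 4x4 pose matrix and pack it into a TransformStamped.
M = tfs.concatenate_matrices(tfs.translation_matrix([0.1, 0.0, 0.2]),
                             tfs.quaternion_matrix([0.0, 0.0, 0.0, 1.0]))
msg = pack_pose(rospy.Time.now(), 'tool0', 'base_link', matrix=M)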
Example #3
    def wrench_callback(self, state):
        #calculating force estimate from position error (does not compensate for motion error due to dynamics)
        q = self.robot.get_joint_angles()
        pos,_ = self.robot.kinematics.FK(q)
        x_err = np.matrix([state.point.x-pos[0,0], state.point.y-pos[1,0], state.point.z-pos[2,0]]).T
                                  
        ##########This was for debugging purposes
        # ps = PointStamped()
        # ps.header.frame_id = '/torso_lift_link'
        # ps.header.stamp = rospy.get_rostime()
        # ps.point.x = x_dot_err[0,0]
        # ps.point.y = x_dot_err[0,0]
        # ps.point.z = 0

        #self.err_pub.publish(ps)

        feedback = -1.0*self.kp*x_err  # - self.kd*x_dot_err  This term is now included in real-time omni driver

        #find and use the rotation matrix from wrench to torso                                                                   
        df_R_ee = tfu.rotate(self.dest_frame, 'torso_lift_link', self.tflistener) 

        wr_df = self.force_scaling*np.array(tr.translation_from_matrix(df_R_ee * tfu.translation_matrix([feedback[0,0], feedback[1,0], feedback[2,0]])))

        #limiting the max and min force feedback sent to omni                                                                    
        wr_df = np.where(wr_df>self.omni_max_limit, self.omni_max_limit, wr_df)
        wr_df = np.where(wr_df<self.omni_min_limit, self.omni_min_limit, wr_df)

        wr = OmniFeedback()
        wr.force.x = wr_df[0]
        wr.force.y = wr_df[1]
        wr.force.z = wr_df[2]
        if self.enable == True:
            self.omni_fb.publish(wr)
Example #4
 def _makePreGraspPose(self, boxMat, axis):
     if axis==0: #x axis
         alpha = 0
         gamma = 0
     else: #y axis
         alpha = pi/2
         gamma = -pi/2
     ik_request = PositionIKRequest()
     ik_request.ik_link_name = self.toolLinkName
     with self._joint_state_lock:
         ik_request.ik_seed_state.joint_state = copy.deepcopy(self._joint_states)
     ik_request.pose_stamped = PoseStamped()
     ik_request.pose_stamped.header.stamp = rospy.Time.now()
     ik_request.pose_stamped.header.frame_id = self.frameID
     beta = pi/2
     while beta < 3*pi/2:
         rotationMatrix = transformations.euler_matrix(alpha, beta, gamma, 'rzyz')
         distance = self.preGraspDistance + self.gripperFingerLength
         preGraspMat = transformations.translation_matrix([0,0,-distance])
         fullMat = transformations.concatenate_matrices(boxMat, rotationMatrix, preGraspMat)
         p = transformations.translation_from_matrix(fullMat)
         q = transformations.quaternion_from_matrix(fullMat)
         print "trying {} radians".format(beta)
         try:
             ik_resp = self._ik_server(ik_request, Constraints(), rospy.Duration(5.0))
         except rospy.service.ServiceException:
             beta += 0.1
         else:
             if ik_resp.error_code.val > 0:
                 return beta
             else:
                 #print ik_resp.error_code.val
                 beta += 0.01
     rospy.logerr('No way to pick this up. All IK solutions failed.')
     return 7 * pi / 6
Example #5
def space_from_joints_small(joint_angles):
    T01, T12, T23, T34, T45, T5e, Tec, Tem = direct_kinematics(joint_angles)
    T = tfs.concatenate_matrices(T01, T12, T23, T34, T45, T5e, Tem)
    rx, ry, rz = tfs.euler_from_matrix(T, 'sxyz')
    trans = tfs.translation_from_matrix(T)
    S = np.concatenate((trans, [rz, ry]))  # 1-D inputs, so no axis argument
    return S
Example #6
    def transformPose(self, target_frame, ps):
        """
        :param target_frame: the tf target frame, a string
        :param ps: the geometry_msgs.msg.PoseStamped message
        :return: new geometry_msgs.msg.PoseStamped message, in frame target_frame
        :raises: any of the exceptions that :meth:`~tf.Transformer.lookupTransform` can raise

        Transforms a geometry_msgs PoseStamped message to frame target_frame, returns a new PoseStamped message.
        """
        # mat44 is frame-to-frame transform as a 4x4
        mat44 = self.asMatrix(target_frame, ps.header)

        # pose44 is the given pose as a 4x4
        pose44 = numpy.dot(xyz_to_mat44(ps.pose.position), xyzw_to_mat44(ps.pose.orientation))

        # txpose is the new pose in target_frame as a 4x4
        txpose = numpy.dot(mat44, pose44)

        # xyz and quat are txpose's position and orientation
        xyz = tuple(transformations.translation_from_matrix(txpose))[:3]
        quat = tuple(transformations.quaternion_from_matrix(txpose))

        # assemble return value PoseStamped
        r = geometry_msgs.msg.PoseStamped()
        r.header.stamp = ps.header.stamp
        r.header.frame_id = target_frame
        r.pose = geometry_msgs.msg.Pose(geometry_msgs.msg.Point(*xyz), geometry_msgs.msg.Quaternion(*quat))
        return r
Example #7
def computeTransformation(world, slam, camera):
    # Here we do not care about the slamMworld transformation
    # timing as it is constant.
    tWorld = listener.getLatestCommonTime(world, camera)
    tMap = listener.getLatestCommonTime(slam, camera)
    t = min(tWorld, tMap)

    # Current pose given by the SLAM.
    (slamMcam_T, slamMcam_Q) = listener.lookupTransform(camera, slam, t)
    slamMcam = np.matrix(transformer.fromTranslationRotation(slamMcam_T,
                                                             slamMcam_Q))

    # Estimation of the camera position given by control.
    #FIXME: order is weird but it works.
    (worldMcam_T, worldMcam_Q) = listener.lookupTransform(world, camera, t)
    worldMcam = np.matrix(transformer.fromTranslationRotation(worldMcam_T,
                                                              worldMcam_Q))

    (slamMworld_T, slamMworld_Q) = listener.lookupTransform(slam, world, t)
    slamMworld = np.matrix(transformer.fromTranslationRotation(slamMworld_T,
                                                               slamMworld_Q))
    slamMcamEstimated = slamMworld * worldMcam

    # Inverse frames.
    camMslam = np.linalg.inv(slamMcam)
    camMslamEstimated = np.linalg.inv(slamMcamEstimated)

    # Split.
    camMslam_T = translation_from_matrix(camMslam)
    camMslam_Q = quaternion_from_matrix(camMslam)
    camMslam_E = euler_from_matrix(camMslam)

    camMslamEstimated_T = translation_from_matrix(camMslamEstimated)
    camMslamEstimated_Q = quaternion_from_matrix(camMslamEstimated)
    camMslamEstimated_E = euler_from_matrix(camMslamEstimated)

    # Compute correction.
    camMslamCorrected_T = [camMslam_T[0],
                           camMslamEstimated_T[1],
                           camMslam_T[2]]
    camMslamCorrected_E = [camMslamEstimated_E[0],
                           camMslam_E[1],
                           camMslamEstimated_E[2]]

    camMslamCorrected_Q = quaternion_from_euler(*camMslamCorrected_E)

    return (camMslamCorrected_T, camMslamCorrected_Q, t)
Example #8
	def getUnitTranslationVector(self):
		d = self.getPositionDistance()
		
		if(d == 0):
			return (0,0,0)
		else:
			t = translation_from_matrix(self._matrix)
			return (t[0]/d, t[1]/d, t[2]/d)
Example #9
    def _makeGraspPath(self, preGraspGoal):
        '''Given a pregrasp MoveArmGoal message, generate a MoveArmGoal message to perform the final
        approach to the object to grasp it.'''
        
        graspGoal = MoveArmGoal()
        graspGoal.planner_service_name = self.plannerServiceName
        motion_plan_request = MotionPlanRequest()
        motion_plan_request.group_name = self.armGroupName
        motion_plan_request.num_planning_attempts = 25
        motion_plan_request.planner_id = ""
        motion_plan_request.allowed_planning_time = rospy.Duration(5,0)
        
        #Orientation constraint is the same as for pregrasp
        motion_plan_request.goal_constraints.orientation_constraints = copy.deepcopy(preGraspGoal.motion_plan_request.goal_constraints.orientation_constraints)
        #motion_plan_request.goal_constraints.orientation_constraints[0].orientation = Quaternion(0.656778, 0.261999, 0.648401, -0.282093) #stow orientation for debug
        graspGoal.motion_plan_request = motion_plan_request
        
        #Translate from pregrasp position to final position in a roughly straight line
        o = motion_plan_request.goal_constraints.orientation_constraints[0].orientation
        p = preGraspGoal.motion_plan_request.goal_constraints.position_constraints[0].position
        preGraspMat = transformations.quaternion_matrix([o.x,o.y,o.z,o.w])
        preGraspMat[:3, 3] = [p.x,p.y,p.z]
        distance = self.preGraspDistance * .5 + self.gripperFingerLength * .5
        graspTransMat = transformations.translation_matrix([0,0,distance])
        graspMat = transformations.concatenate_matrices(preGraspMat, graspTransMat)
        #print preGraspMat
        #print graspTransMat
        #print graspMat
        p = transformations.translation_from_matrix(graspMat)
       
        #Publish grasp transform for visualization
        self._tf_broadcaster.sendTransform(
                (p[0],p[1],p[2]),
                (o.x, o.y, o.z, o.w),
                motion_plan_request.goal_constraints.orientation_constraints[0].header.stamp,
                "grasp",
                motion_plan_request.goal_constraints.orientation_constraints[0].header.frame_id)
 
        pos_constraint = PositionConstraint()
        pos_constraint.header = motion_plan_request.goal_constraints.orientation_constraints[0].header
        pos_constraint.link_name = self.toolLinkName
        pos_constraint.position = Point(p[0],p[1],p[2])
        #pos_constraint.position = Point(-0.0644721, 0.609922, 0) #Stow position for debug
        pos_constraint.constraint_region_shape.type = Shape.SPHERE
        pos_constraint.constraint_region_shape.dimensions = [0.01]
        pos_constraint.weight = 1
        motion_plan_request.goal_constraints.position_constraints.append(pos_constraint)
        #TODO: Add path constraint to require a (roughly) cartesian move
        
        #Turn off collision operations between the gripper and all objects
        for collisionName in self.gripperCollisionNames:
            collisionOperation = CollisionOperation(collisionName, 
                                    CollisionOperation.COLLISION_SET_ALL,
                                    0.0,
                                    CollisionOperation.DISABLE)
            graspGoal.operations.collision_operations.append(collisionOperation)
        return graspGoal
Example #10
def get_relative_pose(object_a, object_b):
    object_a_mat = get_object_pose_as_matrix(object_a)
    object_b_mat = get_object_pose_as_matrix(object_b)
    rel_mat = numpy.linalg.inv(object_a_mat).dot(object_b_mat)
    trans = transformations.translation_from_matrix(rel_mat)
    rot = transformations.quaternion_from_matrix(rel_mat)
    rot = [rot[3], rot[0], rot[1], rot[2]]  # reorder from (x, y, z, w) to (w, x, y, z)
    print "pose: [%f, %f, %f, %f, %f, %f, %f]" % (trans[0], trans[1], trans[2], rot[0], rot[1], rot[2], rot[3])
    return (trans, rot)
Example #11
    def _makeMove(self):
        '''Move forward a small distance'''
        print "Making move goal"
        graspGoal = MoveArmGoal()
        graspGoal.planner_service_name = self.plannerServiceName
        motion_plan_request = MotionPlanRequest()
        motion_plan_request.group_name = self.armGroupName
        motion_plan_request.num_planning_attempts = 5
        motion_plan_request.planner_id = ""
        motion_plan_request.allowed_planning_time = rospy.Duration(5,0)
        #Orientation constraint is the same as for previous
        #Create Orientation constraint object
        o_constraint = OrientationConstraint()
        o_constraint.header.frame_id = self.frameID
        o_constraint.header.stamp = rospy.Time.now()
        o_constraint.link_name = self.toolLinkName
        o_constraint.orientation = Quaternion(0.656788, 0.261971, 0.648416, -0.282059)
        o_constraint.absolute_roll_tolerance = 0.04
        o_constraint.absolute_pitch_tolerance = 0.04
        o_constraint.absolute_yaw_tolerance = 0.04
        o_constraint.weight = 1
        motion_plan_request.goal_constraints.orientation_constraints.append(o_constraint)
        
        #Translate from pregrasp position to final position in a roughly straight line
        o = o_constraint.orientation
        p = Point(-0.064433, 0.609915, 0)
        preGraspMat = transformations.quaternion_matrix([o.x,o.y,o.z,o.w])
        preGraspMat[:3, 3] = [p.x,p.y,p.z]
        distance = .3  # self.preGraspDistance + self.gripperFingerLength/2
        graspTransMat = transformations.translation_matrix([0,0,distance])
        graspMat = transformations.concatenate_matrices(preGraspMat, graspTransMat)
        #print preGraspMat
        #print graspTransMat
        #print graspMat
        p = transformations.translation_from_matrix(graspMat)
       
        #Publish grasp transform for visualization
        self._tf_broadcaster.sendTransform(
                (p[0],p[1],p[2]),
                (o.x, o.y, o.z, o.w),
                o_constraint.header.stamp,
                "grasp",
                o_constraint.header.frame_id)
 
        pos_constraint = PositionConstraint()
        pos_constraint.header = o_constraint.header
        pos_constraint.link_name = self.toolLinkName
        pos_constraint.position = Point(p[0],p[1],p[2])
        pos_constraint.constraint_region_shape.type = Shape.SPHERE
        pos_constraint.constraint_region_shape.dimensions = [0.01]  # [0.01, 0.01, 0.01]
        pos_constraint.weight = 1
        motion_plan_request.goal_constraints.position_constraints.append(pos_constraint)
        graspGoal.motion_plan_request = motion_plan_request
        return graspGoal
Example #12
    def run(self):
        r = rospy.Rate(50)
        while not rospy.is_shutdown():
            try:
                common_link = "/base_link"
                c_T_rgrip = tfu.transform(common_link, "/r_gripper_tool_frame", self.tflistener)
                c_T_lgrip = tfu.transform(common_link, "/l_gripper_tool_frame", self.tflistener)
                gripper_right_c = np.matrix(tr.translation_from_matrix(c_T_rgrip * tr.translation_matrix([0, 0, 0.0])))
                gripper_left_c = np.matrix(tr.translation_from_matrix(c_T_lgrip * tr.translation_matrix([0, 0, 0.0])))
                look_at_point_c = ((gripper_right_c + gripper_left_c) / 2.0).A1.tolist()

                g = PointHeadGoal()
                g.target.header.frame_id = "/base_link"
                g.target.point = Point(*look_at_point_c)
                g.min_duration = rospy.Duration(1.0)
                g.max_velocity = 10.0
                self.head_client.send_goal(g)
                self.head_client.wait_for_result(rospy.Duration(1.0))
                r.sleep()
            except tf.LookupException, e:
                print e
Example #13
def mat2pose(m):
    trans = tr.translation_from_matrix(m)
    rot   = tr.quaternion_from_matrix(m)
    p = Pose()
    p.position.x = trans[0]
    p.position.y = trans[1]
    p.position.z = trans[2]
    p.orientation.x = rot[0]
    p.orientation.y = rot[1]
    p.orientation.z = rot[2]
    p.orientation.w = rot[3]
    return p
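A quick sanity check of mat2pose on a pure translation (a sketch, not from the original source), assuming tr is tf.transformations as in the example above:

import tf.transformations as tr

m = tr.translation_matrix([0.5, 0.0, 1.0])  # identity rotation, offset in x and z
p = mat2pose(m)                             # Pose with position (0.5, 0, 1) and orientation (0, 0, 0, 1)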
Example #14
def transformPose(mat44, ps):
    # pose44 is the given pose as a 4x4
    pose44 = numpy.dot(tf.listener.xyz_to_mat44(ps.pose.position), tf.listener.xyzw_to_mat44(ps.pose.orientation))

    # txpose is the new pose in target_frame as a 4x4
    txpose = numpy.dot(mat44, pose44)

    # xyz and quat are txpose's position and orientation
    xyz = tuple(transformations.translation_from_matrix(txpose))[:3]
    quat = tuple(transformations.quaternion_from_matrix(txpose))

    # assemble return value Pose
    return geometry_msgs.msg.Pose(geometry_msgs.msg.Point(*xyz), geometry_msgs.msg.Quaternion(*quat))
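A sketch of calling this standalone transformPose with an identity frame-to-frame transform; geometry_msgs and numpy are assumed available, and the values are illustrative.

import numpy
import geometry_msgs.msg

ps = geometry_msgs.msg.PoseStamped()
ps.pose.position.x = 0.3
ps.pose.orientation.w = 1.0
mat44 = numpy.eye(4)                 # frame-to-frame transform (identity here)
new_pose = transformPose(mat44, ps)  # Pose unchanged apart from the (identity) transform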
Example #15
 def get_current_pose(self):
     frame_described_in = str(self.frame_box.currentText())
     arm = tu.selected_radio_button(self.arm_radio_buttons).lower()
     if arm == 'right':
         arm_tip_frame = VelocityPriorityMoveTool.RIGHT_TIP
     else:
         arm_tip_frame = VelocityPriorityMoveTool.LEFT_TIP
     self.tf_listener.waitForTransform(frame_described_in, arm_tip_frame, rospy.Time(), rospy.Duration(2.))
     p_arm = tfu.tf_as_matrix(self.tf_listener.lookupTransform(frame_described_in, arm_tip_frame, rospy.Time(0)))
     trans, rotation = tr.translation_from_matrix(p_arm), tr.quaternion_from_matrix(p_arm)
     for value, vr in zip(trans, [self.xline, self.yline, self.zline]):
         vr.setValue(value)
     for value, vr in zip(tr.euler_from_quaternion(rotation), [self.phi_line, self.theta_line, self.psi_line]):
         vr.setValue(np.degrees(value))
Example #16
    def calculate_bridge_and_base_pose(self, table_state, phi):
        # Calculate bridge pose from cue position and phi
        phi_radians = math.pi * phi / 180.0
        cue_x,    cue_y    = self.get_cue_ball_pos(table_state)
        bridge_x, bridge_y = self.get_bridge_pos(cue_x, cue_y, phi)
        (qx, qy, qz, qw)   = transformations.quaternion_about_axis(phi_radians, (0, 0, 1))

        bridge_pos         = transformations.translation_matrix([bridge_x, bridge_y, 0.0])
        bridge_orient      = transformations.quaternion_matrix([qx, qy, qz, qw])
        bridge_pose        = transformations.concatenate_matrices(bridge_pos, bridge_orient)
        bridge_pose_pos    = transformations.translation_from_matrix(bridge_pose)
        bridge_pose_orient = transformations.quaternion_from_matrix(bridge_pose)

        # Produce base pose via fixed transform from bridge pose
        bridge_to_base_trans = transformations.translation_matrix([Constants.BRIDGE_IN_BASE_X, Constants.BRIDGE_IN_BASE_Y, Constants.BRIDGE_IN_BASE_Z])
        bridge_to_base_rot   = transformations.quaternion_matrix([Constants.BRIDGE_IN_BASE_QX, Constants.BRIDGE_IN_BASE_QY, Constants.BRIDGE_IN_BASE_QZ, Constants.BRIDGE_IN_BASE_QW])
        bridge_to_base       = transformations.concatenate_matrices(bridge_to_base_trans, bridge_to_base_rot)
        base_to_bridge       = transformations.inverse_matrix(bridge_to_base)
        base_pose            = transformations.concatenate_matrices(bridge_pose, base_to_bridge)  # correct order?
        base_pose_pos        = transformations.translation_from_matrix(base_pose)
        base_pose_orient     = transformations.quaternion_from_matrix(base_pose)
        print "BASE_POSE", base_pose, "BRIDGE_POSE", bridge_pose 
        return ((bridge_pose_pos, bridge_pose_orient), (base_pose_pos, base_pose_orient))
Example #17
	def calENU(self):
		if(self.body.isNone() or self.headingAngle is None):
			return False
		else:
			br = self.body.getRotationAroundZ()
			
			q = rotation_matrix(br+self.headingAngle, (0,0,1))
			
			t = translation_from_matrix(self.body.getMatrix())
			t = translation_matrix(t)
			
			self.enu.setMatrix(t).transformByMatrix(q)
			return True
#eoc
Example #18
    def move(self, newPlan):

        br = tf.TransformBroadcaster()
        rate = rospy.Rate(1.0/self.dt)
        for joints in newPlan:

            for k in range(0, len(joints)):
                self.pubs[k].publish(joints[k])
            self.joints = joints
            rate.sleep()

        T = self.matrix_from_joint(joints)
        q = tfs.quaternion_from_matrix(T)
        t = tfs.translation_from_matrix(T)
        br.sendTransform(t, q, rospy.Time.now(), "new pose", "base")
Example #19
def publish_result(gmodel):
    result = gmodel.grasps
    print "the total grasp is %d" % len(result)
    i = 0
    pose_array = geometry_msgs.msg.PoseArray()
    for grasp in result:
        Tgrasp = gmodel.getGlobalGraspTransform(grasp)
        xyz = tuple(transformations.translation_from_matrix(Tgrasp))[:3]
        quat = tuple(transformations.quaternion_from_matrix(Tgrasp))
        # assemble Pose and append it to the PoseArray
        pose_array.poses.append(geometry_msgs.msg.Pose(geometry_msgs.msg.Point(*xyz), geometry_msgs.msg.Quaternion(*quat)))
    pose_array.header.frame_id = "/kinfu_origin"
    pose_array.header.stamp = rospy.Time.now()
    grasp_array_pub.publish(pose_array)
    rospy.on_shutdown(shut_down_hook)
Example #20
def tfm(matrix, parent, child, stamp):
    rotation = quaternion_from_matrix(matrix)
    translation = translation_from_matrix(matrix)
    t = TransformStamped()
    t.header.frame_id = parent
    t.header.stamp = stamp
    t.child_frame_id = child
    t.transform.translation.x = translation[0]
    t.transform.translation.y = translation[1]
    t.transform.translation.z = translation[2]
    t.transform.rotation.x = rotation[0]
    t.transform.rotation.y = rotation[1]
    t.transform.rotation.z = rotation[2]
    t.transform.rotation.w = rotation[3]
    return tfMessage([t])
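A usage sketch for tfm (not from the original source), assuming the tf.transformations helpers seen above, a ROS node, and a publisher on /tf; the frame names and offset are illustrative.

import rospy
from tf.msg import tfMessage
from tf.transformations import translation_matrix

tf_pub = rospy.Publisher('/tf', tfMessage, queue_size=1)
msg = tfm(translation_matrix([0.0, 0.0, 0.5]), 'base_link', 'sensor_link', rospy.Time.now())
tf_pub.publish(msg)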
Example #21
    def __init__(self, robot,     
            center_in_torso_frame, #=[1,.3,-1], 
            scaling_in_base_frame, #=[3.5, 3., 5.],
            omni_name, #='omni1', 
            set_goal_pose_topic, # = 'l_cart/command_pose',
            tfbroadcast=None, #=None,
            tflistener=None): #=None):

        #threading.Thread.__init__(self)
        self.enabled = False
        self.zero_out_forces = True

        self.robot = robot
        #self.X_BOUNDS = [.3, 1.5] #Bound translation of PR2 arms in the X direction (in torso link frame)
        self.kPos = 15.
        self.kVel = 0.5
        self.kPos_close = 70.
        self.omni_name = omni_name
        self.center_in_torso_frame = center_in_torso_frame
        self.scaling_in_base_frame = scaling_in_base_frame
        self.tflistener = tflistener
        self.tfbroadcast = tfbroadcast
        self.prev_dt = 0.0
        q = self.robot.get_joint_angles()
        self.tip_tt, self.tip_tq = self.robot.kinematics.FK(q, self.robot.kinematics.n_jts)

        # self.tip_tt = np.zeros((3,1))
        # self.tip_tq = np.zeros((4,1))
        self.teleop_marker_pub = rospy.Publisher('/teleop/viz/goal', Marker)

        rate = rospy.Rate(100.0)

        self.omni_fb = rospy.Publisher(self.omni_name + '_force_feedback', OmniFeedback)
        self.set_goal_pub = rospy.Publisher(set_goal_pose_topic, PoseStamped)

        rospy.loginfo('Attempting to calculate scaling from omni to arm workspace')
        success = False
        while not success and (not rospy.is_shutdown()):
            rate.sleep()
            try:
                m = tfu.translation_matrix(self.scaling_in_base_frame)
                vec_l0 = tr.translation_from_matrix(tfu.rotate(self.omni_name + '_center', '/torso_lift_link',  self.tflistener)*m)
                self.scale_omni_l0 = np.abs(vec_l0)  
                success = True
            except tf.LookupException, e:
                pass
            except tf.ConnectivityException, e:
                pass
Example #22
def pose_cb(ps):
    m_f, frame = tfu.posestamped_as_matrix(ps)
    m_o1 = tfu.transform('/omni1', frame, listener) * m_f
    ee_point = np.matrix(tr.translation_from_matrix(m_o1)).T
    center = np.matrix([-.10, 0, .30]).T
    dif = 30*(center - ee_point)
    #force_dir = dif / np.linalg.norm(dif)
    force_o1 = dif #force_dir * np.sum(np.power(dif, 2))

    force_s = tfu.transform('/omni1_sensable', '/omni1', listener) * np.row_stack((force_o1, np.matrix([1.])))
    print np.linalg.norm(center - ee_point)

    wr = Wrench()
    wr.force.x = force_s[0]
    wr.force.y = force_s[1]
    wr.force.z = force_s[2]
    wpub.publish(wr)
Example #23
  def publish_relative_frames2(self, data):
    tmp_dict = {}
    for segment in data.segments:
      if(segment.is_quaternion):
        tmp_dict[segment.id] = (segment.position,segment.quat,self.tr.fromTranslationRotation(segment.position,segment.quat))
      else:	  
        tmp_dict[segment.id] = (segment.position,segment.euler)
  
    for idx in tmp_dict.keys():
      p_idx = xsens_client.get_segment_parent_id(idx)
      child_data = tmp_dict[idx]
      if(p_idx==-1):
        continue
      elif(p_idx==0):
        helper = tft.quaternion_about_axis(1,(-1,0,0))
        new_quat = tft.quaternion_multiply(tft.quaternion_inverse(helper),child_data[1])#if(segment.is_quaternion): TODO Handle Euler
        #self.sendTransform(child_data[0],
        self.sendTransform(child_data[0],
            child_data[1],
            #(1.0,0,0,0),#FIXME
            rospy.Time.now(),
            xsens_client.get_segment_name(idx),
            self.ref_frame)
      elif(p_idx>0):
        
        parent_data = tmp_dict[p_idx]
        new_quat = tft.quaternion_multiply(tft.quaternion_inverse(parent_data[1]), child_data[1])
        tmp_trans = (parent_data[0][0] - child_data[0][0],
                     parent_data[0][1] - child_data[0][1],
                     parent_data[0][2] - child_data[0][2])
        new_trans = qv_mult(parent_data[1], tmp_trans)
        self.sendTransform(
            new_trans,
            new_quat,
            rospy.Time.now(),
            xsens_client.get_segment_name(idx),
            xsens_client.get_segment_name(p_idx))

      else:
        parent_data = tmp_dict[p_idx]
        
        this_data = np.multiply(tft.inverse_matrix(child_data[2]) , parent_data[2])
        self.sendTransform(
            tft.translation_from_matrix(this_data),
            tft.quaternion_from_matrix(this_data),
            rospy.Time.now(),
            xsens_client.get_segment_name(idx),
            xsens_client.get_segment_name(p_idx))
Example #24
def change_frame(box):
    listener = tf.TransformListener()
    print "frame %s" % box.header.frame_id
    try:
        now = rospy.Time(0)
        listener.waitForTransform(box.header.frame_id, 'kinfu_origin', now, rospy.Duration(2.0))
        (trans,rot) = listener.lookupTransform('kinfu_origin', box.header.frame_id, now)
        mat44 = numpy.dot(transformations.translation_matrix(trans), transformations.quaternion_matrix(rot))
        pose44 = numpy.dot(tf.listener.xyz_to_mat44(box.pose.position), tf.listener.xyzw_to_mat44(box.pose.orientation))
        txpose = numpy.dot(mat44, pose44)
        xyz = tuple(transformations.translation_from_matrix(txpose))[:3]
        quat = tuple(transformations.quaternion_from_matrix(txpose))
        # assemble transformed Pose
        box.pose = geometry_msgs.msg.Pose(geometry_msgs.msg.Point(*xyz), geometry_msgs.msg.Quaternion(*quat))
        box.header.frame_id = "kinfu_origin"
    except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException), e:
        print "tf error: %s" % e
        return
Example #25
    def get_current_pose(self):
        frame_described_in = str(self.frameline.text())
        left = ('Left' == str(self.arm_box.currentText()))
        right = False
        if not left:
            right = True
            arm_tip_frame = LinearMoveTool.RIGHT_TIP
        else:
            arm_tip_frame = LinearMoveTool.LEFT_TIP
        
        self.tf_listener.waitForTransform(frame_described_in, arm_tip_frame, rospy.Time(), rospy.Duration(2.))
        p_arm = tfu.tf_as_matrix(self.tf_listener.lookupTransform(frame_described_in, arm_tip_frame, rospy.Time(0))) * tr.identity_matrix()
        trans, rotation = tr.translation_from_matrix(p_arm), tr.quaternion_from_matrix(p_arm)

        for value, vr in zip(trans, [self.xline, self.yline, self.zline]):
            vr.setText(str(value))
        for value, vr in zip(tr.euler_from_quaternion(rotation), [self.phi_line, self.theta_line, self.psi_line]):
            vr.setText(str(np.degrees(value)))
Example #26
    def calc_transformation(self, name, relative_to=None):
        calc_from_joint = False
        if relative_to:
            if relative_to in self.urdf.link_map:
                parent_link_name = relative_to
            elif relative_to in self.urdf.joint_map:
                parent_link_name = self.urdf.joint_map[name].parent
                calc_from_joint = True
        else:
            parent_link_name = self.urdf.get_root()

        calc_to_joint = False
        if name in self.urdf.link_map:
            child_link_name = name
        elif name in self.urdf.joint_map:
            child_link_name = self.urdf.joint_map[name].child
            calc_to_joint = True

        chains = self.urdf.get_chain(parent_link_name, child_link_name,
                                     joints=True, links=True)
        if calc_from_joint:
            chains = chains[1:]
        if calc_to_joint:
            chains = chains[:-1]

        poses = []
        for name in chains:
            if name in self.urdf.joint_map:
                joint = self.urdf.joint_map[name]
                if joint.origin is not None:
                    poses.append(joint.origin)
            elif name in self.urdf.link_map:
                link = self.urdf.link_map[name]
                if link.visual is not None and link.visual.origin is not None:
                    poses.append(link.visual.origin)
        m = np.dot(T.translation_matrix((0,0,0)),
                   T.euler_matrix(0,0,0))
        for p in poses:
            n = np.dot(T.translation_matrix(tuple(p.xyz)),
                       T.euler_matrix(*tuple(p.rpy)))
            m = np.dot(m, n)
        t = T.translation_from_matrix(m)
        q = T.quaternion_from_matrix(m)
        return tuple(t), (q[3], q[0], q[1], q[2])
Example #27
def get_trans_to_trans(trans1, rot1, trans2, rot2):
    quat_mat1 = transformations.quaternion_matrix(rot1)
    quat_mat2 = transformations.quaternion_matrix(rot2)
    trans_mat1 = transformations.translation_matrix(trans1)
    trans_mat2 = transformations.translation_matrix(trans2)
    mat1 = transformations.concatenate_matrices(trans_mat1, quat_mat1)
    mat2 = transformations.concatenate_matrices(trans_mat2, quat_mat2)
    rel_mat = numpy.linalg.inv(mat1).dot(mat2)
    rel_trans = transformations.translation_from_matrix(rel_mat)
    rel_rot = transformations.quaternion_from_matrix(rel_mat)
    rel_rot = [rel_rot[3], rel_rot[0], rel_rot[1], rel_rot[2]]  # reorder from (x, y, z, w) to (w, x, y, z)
    print "rel_pose: [%f, %f, %f, %f, %f, %f, %f]" % (
        rel_trans[0],
        rel_trans[1],
        rel_trans[2],
        rel_rot[0],
        rel_rot[1],
        rel_rot[2],
        rel_rot[3],
    )
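A self-contained sketch exercising get_trans_to_trans (not from the original source); the inputs are two illustrative (translation, xyzw quaternion) pairs, and the printed quaternion is reordered to wxyz by the function above.

import numpy
from tf import transformations

trans1, rot1 = [0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 1.0]   # identity pose
trans2, rot2 = [1.0, 0.0, 0.0], list(transformations.quaternion_about_axis(numpy.pi / 2, (0, 0, 1)))
get_trans_to_trans(trans1, rot1, trans2, rot2)          # prints the pose of frame 2 relative to frame 1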
Example #28
def matrixToPose(matrix):
    """Converts a Matrix  to a Pose message by extracting the translation and
    rotation specified by the Matrix.

    Parameters:
    matrix: a 4x4 transformation matrix
    """
    pos = Pose()
    T = transformations.translation_from_matrix(matrix)
    pos.position.x = T[0]
    pos.position.y = T[1]
    pos.position.z = T[2]
    
    q = transformations.quaternion_from_matrix(matrix)
    pos.orientation.x = q[0]
    pos.orientation.y = q[1]
    pos.orientation.z = q[2]
    pos.orientation.w = q[3]

    return pos
Example #29
    def torso_lift_link_T_omni(self, tip_omni, msg_frame):
        center_T_6 = tfu.transform(self.omni_name+'_center', msg_frame, self.tflistener)
        tip_center = center_T_6*tip_omni
        tip_center_t = (np.array(tr.translation_from_matrix(tip_center))*np.array(self.scale_omni_l0)).tolist()
        tip_center_q = tr.quaternion_from_matrix(tip_center)

        # z_T_6 = tfu.transform(self.omni_name+'_link0', msg_frame, self.tflistener)
        # tip_0 = z_T_6*tip_omni
        # tip_0t = (np.array(tr.translation_from_matrix(tip_0))*np.array(self.scale_omni_l0)).tolist()
        # tip_0q = tr.quaternion_from_matrix(tip_0)

    #     #Transform into torso frame so we can bound arm workspace
        tll_T_0 = tfu.transform('/torso_lift_link', self.omni_name + '_center', self.tflistener)
        tip_torso_mat = tll_T_0 * tfu.tf_as_matrix([tip_center_t, tip_center_q])
        tip_tt, tip_tq = tfu.matrix_as_tf(tip_torso_mat)

        #don't need to bound, arm controller should do this
    #     tip_tt[0] = limit_range(tip_tt[0], self.X_BOUNDS[0], self.X_BOUNDS[1])
        self.tip_tt = tip_tt
        self.tip_tq = tip_tq
Example #30
    def wrench_callback(self, state):
        # calculating force estimate from position error (does not compensate for motion error due to dynamics)
        x_err = np.matrix([state.x_err.linear.x, state.x_err.linear.y, state.x_err.linear.z]).T
        x_dot = np.matrix([state.xd.linear.x, state.xd.linear.y, state.xd.linear.z]).T
        feedback = -1.0 * self.kp * x_err - self.kd * x_dot

        # currently the calculated force feedback is published but not to omni, we use the velocity limited state from the controller
        #        wr_ee = [state.F.force.x, state.F.force.y, state.F.force.z]
        wr_ee = [feedback[0, 0], feedback[1, 0], feedback[2, 0]]

        # this is a simple FIR filter designed in Matlab to smooth the force estimate
        shift_right = np.array(self.history[:, 0 : self.FIR.size - 1])
        new_col = np.array(wr_ee).reshape((3, 1))
        self.history = np.matrix(np.hstack((new_col, shift_right)))
        wr_ee_filt = self.history * self.FIR

        # find and use the rotation matrix from wrench to torso
        df_R_ee = tfu.rotate(self.dest_frame, "torso_lift_link", self.tflistener) * tfu.rotate(
            "torso_lift_link", self.wrench_frame, self.tflistener
        )
        wr_df = self.force_scaling * np.array(
            tr.translation_from_matrix(
                df_R_ee * tfu.translation_matrix([wr_ee_filt[0, 0], wr_ee_filt[1, 0], wr_ee_filt[2, 0]])
            )
        )

        # limiting the max and min force feedback sent to omni
        wr_df = np.where(wr_df > self.omni_max_limit, self.omni_max_limit, wr_df)
        wr_df = np.where(wr_df < self.omni_min_limit, self.omni_min_limit, wr_df)

        wr = Wrench()
        wr.force.x = wr_df[0, 0]
        wr.force.y = wr_df[1, 0]
        wr.force.z = wr_df[2, 0]

        # publishing of two different wrenches calculated, DELETE first when all finished
        if self.enable == False:
            self.filtered_fb.publish(wr)

        if self.enable == True:
            self.omni_fb.publish(wr)
Example #31
def transformBack(tf_xyzquat, pose):
    T_mat = tfm.concatenate_matrices( tfm.translation_matrix(tf_xyzquat[0:3]), tfm.quaternion_matrix(tf_xyzquat[3:7]))
    pose_mat = tfm.concatenate_matrices( tfm.translation_matrix(pose[0:3]),  tfm.quaternion_matrix(pose[3:7]) )
    new_pose_mat = np.dot(pose_mat, tfm.inverse_matrix(T_mat))
    return tfm.translation_from_matrix(new_pose_mat).tolist() + tfm.quaternion_from_matrix(new_pose_mat).tolist()
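A short sketch of transformBack with hand-picked values (not from the original source), assuming the [x, y, z, qx, qy, qz, qw] layout used above; tfm and np are tf.transformations and numpy as in the example.

tf_xyzquat = [1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0]   # transform to be removed
pose       = [1.5, 0.0, 0.2, 0.0, 0.0, 0.0, 1.0]   # pose that already includes it
result = transformBack(tf_xyzquat, pose)            # approximately [0.5, 0.0, 0.2, 0, 0, 0, 1]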
Example #32
    pose.position.y = point[1]
    pose.position.z = point[2]
    pose.orientation.x = 0
    pose.orientation.y = 0
    pose.orientation.z = 0
    pose.orientation.w = 1

    pos = pose.position
    ori = pose.orientation
    mat = np.dot(translation_matrix((pos.x, pos.y, pos.z)),
                 quaternion_matrix((ori.x, ori.y, ori.z, ori.w)))
    transform_mat = np.dot(translation_matrix((0.017438, 0.110540, -0.003173)),
                           quaternion_matrix((0, 0, 1, 0)))
    new_mat = np.dot(transform_mat, mat)

    new_trans = translation_from_matrix(new_mat)
    new_quat = quaternion_from_matrix(new_mat)

    new_pose = Pose()
    new_pose.position.x = new_trans[0]
    new_pose.position.y = new_trans[1]
    new_pose.position.z = new_trans[2]
    new_pose.orientation.x = new_quat[0]
    new_pose.orientation.y = new_quat[1]
    new_pose.orientation.z = new_quat[2]
    new_pose.orientation.w = new_quat[3]
    util.send_traj_point_marker(marker_pub=marker_pub,
                                pose=new_pose,
                                id=idx + 300,
                                rgba_tuple=rgba_tuple)
Example #33
def transform_back(pose1, frame):
    pose1_mat = tfm.concatenate_matrices( tfm.translation_matrix(pose1[0:3]), tfm.quaternion_matrix(pose1[3:7]))
    frame_mat = tfm.concatenate_matrices( tfm.translation_matrix(frame[0:3]),  tfm.quaternion_matrix(frame[3:7]) )
    new_pose_mat = np.dot(frame_mat, pose1_mat)
    return tfm.translation_from_matrix(new_pose_mat).tolist() + tfm.quaternion_from_matrix(new_pose_mat).tolist()
Example #34
                         odom_tf.transform.rotation.w)

            tmat_odom = translation_matrix(odom_trans)
            qmat_odom = quaternion_matrix(odom_quat)
            odom_matrix = np.dot(tmat_odom, qmat_odom)

            height = goal.z  # Store/maintain given height
            # Goal in map coords
            tmat_goal = translation_matrix((goal.x, goal.y, goal.z))
            qmat_goal = quaternion_matrix(
                quaternion_from_euler(0, 0, math.radians(goal.yaw)))
            goal_matrix = np.dot(tmat_goal, qmat_goal)

            # Goal in odom coords
            final_mat = np.dot(odom_matrix, goal_matrix)
            trans = translation_from_matrix(final_mat)
            goal.x = trans[0]
            goal.y = trans[1]
            goal.z = height

            _, _, yaw = euler_from_quaternion(
                quaternion_from_matrix(final_mat))
            goal.yaw = math.degrees(yaw)
            # goal.header.frame_id = "cf1/odom"
            print("Transformed goal 'cf1/odom'")
            print(goal)

            hover_publisher.publish(goal)
            goal_pose = goal
            goal = None
Example #35
def matrix_to_transform(matrix):
    t_list = t.translation_from_matrix(matrix)
    q_list = t.quaternion_from_matrix(matrix)
    translation = Vector3(t_list[0], t_list[1], t_list[2])
    orientation = Quaternion(q_list[0], q_list[1], q_list[2], q_list[3])
    return Transform(translation, orientation)
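A usage sketch for matrix_to_transform (not from the original source), assuming t is tf.transformations and geometry_msgs provides Transform/Vector3/Quaternion as in the example; the offset and yaw are illustrative.

import tf.transformations as t

m = t.concatenate_matrices(t.translation_matrix([0.0, 0.1, 0.0]),
                           t.euler_matrix(0.0, 0.0, 1.5708))
tf_msg = matrix_to_transform(m)   # Transform with translation (0, 0.1, 0) and roughly 90 degrees of yaw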
Example #36
def fk(dhs, qs):
    Ts = [dh2T(*dh, q=q) for (dh, q) in zip(dhs, qs)]
    T = reduce(lambda a, b: np.matmul(a, b), Ts)  # base_link -> object
    txn = tx.translation_from_matrix(T)
    rxn = tx.quaternion_from_matrix(T)
    return txn, rxn
Example #37
    def estimate(self,
                 t,
                 q,
                 camera_info,
                 target_position=None,
                 occlusion_treshold=0.01,
                 output_viz=True):
        perspective_timer = cv2.getTickCount()
        visible_detections = []
        rot = quaternion_matrix(q)
        trans = translation_matrix(t)
        if target_position is None:
            target = translation_matrix([0.0, 0.0, 1000.0])
            target_position = translation_from_matrix(
                np.dot(np.dot(trans, rot), target))
        view_matrix = p.computeViewMatrix(t, target_position, [0, 0, 1])

        width = HumanVisualModel.WIDTH
        height = HumanVisualModel.HEIGHT

        projection_matrix = p.computeProjectionMatrixFOV(
            HumanVisualModel.FOV, HumanVisualModel.ASPECT,
            HumanVisualModel.CLIPNEAR, HumanVisualModel.CLIPFAR)

        rendered_width = int(width / 3.0)
        rendered_height = int(height / 3.0)

        width_ratio = width / rendered_width
        height_ratio = height / rendered_height

        camera_image = p.getCameraImage(rendered_width,
                                        rendered_height,
                                        viewMatrix=view_matrix,
                                        renderer=p.ER_BULLET_HARDWARE_OPENGL,
                                        projectionMatrix=projection_matrix)

        rgb_image = cv2.resize(np.array(camera_image[2]), (width, height))
        depth_image = np.array(camera_image[3], np.float32).reshape(
            (rendered_height, rendered_width))

        far = HumanVisualModel.CLIPFAR
        near = HumanVisualModel.CLIPNEAR
        real_depth_image = far * near / (far - (far - near) * depth_image)

        if self.use_saliency is not False:
            saliency_map, saliency_heatmap = self.saliency_estimator.estimate(
                camera_image[2], real_depth_image)
            saliency_heatmap_resized = cv2.resize(saliency_heatmap,
                                                  (width, height))

        mask_image = camera_image[4]
        unique, counts = np.unique(np.array(mask_image).flatten(),
                                   return_counts=True)

        for sim_id, count in zip(unique, counts):
            if sim_id > 0:
                cv_mask = np.array(mask_image.copy())
                cv_mask[cv_mask != sim_id] = 0
                cv_mask[cv_mask == sim_id] = 255
                xmin, ymin, w, h = cv2.boundingRect(cv_mask.astype(np.uint8))

                visible_area = w * h + 1
                screen_area = rendered_width * rendered_height + 1
                if screen_area - visible_area == 0:
                    confidence = 1.0
                else:
                    confidence = visible_area / float(screen_area -
                                                      visible_area)
                #TODO compute occlusion score as a ratio between visible 2d bbox and projected 2d bbox areas
                depth = real_depth_image[int(ymin + h / 2.0)][int(xmin +
                                                                  w / 2.0)]

                xmin = int(xmin * width_ratio)
                ymin = int(ymin * height_ratio)
                w = int(w * width_ratio)
                h = int(h * height_ratio)

                id = self.simulator.reverse_entity_id_map[sim_id]
                if confidence > occlusion_treshold:
                    det = Detection(int(xmin),
                                    int(ymin),
                                    int(xmin + w),
                                    int(ymin + h),
                                    id,
                                    confidence,
                                    depth=depth)
                    visible_detections.append(det)

        # for subject_detection in visible_detections:
        #     for object_detection in visible_detections:
        #         if subject_detection != object_detection:
        #             pass #TODO create inference batch for egocentric relation detection

        perspective_fps = cv2.getTickFrequency() / (cv2.getTickCount() -
                                                    perspective_timer)

        if output_viz is True:
            viz_frame = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
            if self.use_saliency is not False:
                viz_frame = cv2.addWeighted(saliency_heatmap_resized, 0.4,
                                            viz_frame, 0.7, 0)
            cv2.rectangle(viz_frame, (0, 0), (250, 40), (200, 200, 200), -1)
            perspective_fps_str = "Perspective taking fps: {:0.1f}hz".format(
                perspective_fps)
            cv2.putText(viz_frame,
                        "Nb detections: {}".format(len(visible_detections)),
                        (5, 15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1)
            cv2.putText(viz_frame, perspective_fps_str, (5, 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1)
            for detection in visible_detections:
                detection.draw(viz_frame, (0, 200, 0))

            return rgb_image, real_depth_image, visible_detections, viz_frame
        else:
            return rgb_image, real_depth_image, visible_detections
Example #38
def matrixToPose(matrix):
    t = tuple(translation_from_matrix(matrix))
    q = tuple(quaternion_from_matrix(matrix))

    return Pose(Point(t[0], t[1], t[2]), Quaternion(q[0], q[1], q[2], q[3]))
Example #39
 def __get_cartesian_from_matrix(cls, h_matrix):
     # Returns a list of [roll, pitch, yaw, X, Y, Z]
     euler_from_matrix = list(TF.euler_from_matrix(h_matrix))
     translation_from_matrix = list(TF.translation_from_matrix(h_matrix))
     return euler_from_matrix + translation_from_matrix
Example #40
 def getPositionDistance(self):
     t = translation_from_matrix(self._matrix)
     return sqrt(pow(t[0], 2) + pow(t[1], 2) + pow(t[2], 2))
Example #41
 def getTranslation(self):
     return translation_from_matrix(self._matrix)
Example #42
    def callback(self, msg):

        tag_infos = []

        new_tag_data = AprilTagsWithInfos()

        # Load tag detections message
        for detection in msg.detections:

            # ------ start tag info processing

            new_info = TagInfo()
            # Can use id 1 as long as no bundles are used
            new_info.id = int(detection.tag_id)
            id_info = self.tags_dict[new_info.id]
            # rospy.loginfo("[%s] report detected id=[%s] for april tag!",self.node_name,new_info.id)
            # Check yaml file to fill in ID-specific information
            new_info.tag_type = self.sign_types[id_info["tag_type"]]
            if new_info.tag_type == self.info.S_NAME:
                new_info.street_name = id_info["street_name"]
            elif new_info.tag_type == self.info.SIGN:
                new_info.traffic_sign_type = self.traffic_sign_types[
                    id_info["traffic_sign_type"]]

                # publish for FSM
                # parking apriltag event
                msg_parking = BoolStamped()
                msg_parking.header.stamp = rospy.Time(0)
                if new_info.traffic_sign_type == TagInfo.PARKING:
                    msg_parking.data = True
                else:
                    msg_parking.data = False
                self.pub_postPros_parking.publish(msg_parking)

                # intersection apriltag event
                msg_intersection = BoolStamped()
                msg_intersection.header.stamp = rospy.Time(0)
                if ((new_info.traffic_sign_type == TagInfo.FOUR_WAY) or
                    (new_info.traffic_sign_type == TagInfo.RIGHT_T_INTERSECT)
                        or
                    (new_info.traffic_sign_type == TagInfo.LEFT_T_INTERSECT) or
                    (new_info.traffic_sign_type == TagInfo.T_INTERSECTION)):
                    msg_intersection.data = True
                else:
                    msg_intersection.data = False
                self.pub_postPros_intersection.publish(msg_intersection)

            elif new_info.tag_type == self.info.VEHICLE:
                new_info.vehicle_name = id_info["vehicle_name"]

            # TODO: Implement location more than just a float like it is now.
            # location is now 0.0 if no location is set which is probably not that smart
            if self.loc == 226:
                l = id_info["location_226"]
                if l is not None:
                    new_info.location = l
            elif self.loc == 316:
                l = id_info["location_316"]
                if l is not None:
                    new_info.location = l

            tag_infos.append(new_info)
            # --- end tag info processing

            # Define the transforms
            veh_t_camxout = tr.translation_matrix(
                (self.camera_x, self.camera_y, self.camera_z))
            veh_R_camxout = tr.euler_matrix(0, self.camera_theta * np.pi / 180,
                                            0, "rxyz")
            veh_T_camxout = tr.concatenate_matrices(
                veh_t_camxout,
                veh_R_camxout)  # 4x4 Homogeneous Transform Matrix

            camxout_T_camzout = tr.euler_matrix(-np.pi / 2, 0, -np.pi / 2,
                                                "rzyx")
            veh_T_camzout = tr.concatenate_matrices(veh_T_camxout,
                                                    camxout_T_camzout)

            tagzout_T_tagxout = tr.euler_matrix(-np.pi / 2, 0, np.pi / 2,
                                                "rxyz")

            # Load translation
            trans = detection.transform.translation
            rot = detection.transform.rotation
            header = Header()
            header.stamp = rospy.Time.now()
            camzout_t_tagzout = tr.translation_matrix(
                (trans.x * self.scale_x, trans.y * self.scale_y,
                 trans.z * self.scale_z))
            camzout_R_tagzout = tr.quaternion_matrix(
                (rot.x, rot.y, rot.z, rot.w))
            camzout_T_tagzout = tr.concatenate_matrices(
                camzout_t_tagzout, camzout_R_tagzout)

            veh_T_tagxout = tr.concatenate_matrices(veh_T_camzout,
                                                    camzout_T_tagzout,
                                                    tagzout_T_tagxout)

            # Overwrite transformed value
            (trans.x, trans.y,
             trans.z) = tr.translation_from_matrix(veh_T_tagxout)
            (rot.x, rot.y, rot.z,
             rot.w) = tr.quaternion_from_matrix(veh_T_tagxout)

            new_tag_data.detections.append(detection)

        new_tag_data.infos = tag_infos
        # Publish Message
        self.pub_postPros.publish(new_tag_data)
Example #43
#/map-/base_footprint
a0 = [0, 0, 1]
b0 = [0.0, 0.0, 0.0, 1]

#/odom-/base_footprint
a1 = [0, 0, 1]
b1 = [0.0, 0.0, 0.0, 1]

c0 = np.dot(transformations.translation_matrix(a0),
            transformations.quaternion_matrix(b0))

c1 = np.linalg.inv(c0)

mat44 = np.dot(transformations.translation_matrix(a1),
               transformations.quaternion_matrix(b1))
# txpose is the new pose in target_frame as a 4x4
txpose = np.dot(mat44, c1)
# xyz and quat are txpose's position and orientation
txpose = np.linalg.inv(txpose)

xyz = tuple(transformations.translation_from_matrix(txpose))[:3]
quat = tuple(transformations.quaternion_from_matrix(txpose))

print xyz
print quat

abc = np.dot(transformations.translation_matrix([0, 0, 0]),
             transformations.quaternion_matrix(quat))
print abc
Example #44
    def callback(self, pose_ws, transform_cs, c_plane_array):

        # convert messages to numpy
        G_ws = poseMsgToMatrix(pose_ws.pose)
        G_cs = transformMsgToMatrix(transform_cs.transform)

        # initialization
        if not self._is_init:

            # save for next iteration
            self._G_ws = G_ws
            self._G_cs = G_cs

            # anchor starting point
            t0 = transformations.translation_from_matrix(G_ws)
            prior_factor = factor.PriorFactor(self._point_var, np.eye(3), t0, 1e-6*np.ones(3))
            self._fg.add_factor(prior_factor)

            self._point_stamp_map[self._point_var] = pose_ws.header.stamp

            self._is_init = True
            return

        # frame-to-frame translation 
        G_ss = np.dot(np.linalg.inv(self._G_ws), G_ws)
        t_s = transformations.translation_from_matrix(G_ss)

        # generate odometry factor
        point_var = variable.PointVariable(3)
        odom_factor = factor.OdometryFactor(self._point_var, point_var, 
                self._G_cs[:3, :3], t_s, self.sigma_t*np.ones(3))

        # generate observation factors
        obsv_factors = []
        for plane in c_plane_array.planes:

            plane_var = variable.LandmarkVariable(1, plane.label.data, plane.intensity.data)
            v_c = np.array([[plane.plane.coef[0], plane.plane.coef[1], plane.plane.coef[2]]])
            d_c = -plane.plane.coef[3]
            plane_var.facing = -1 if d_c > 0 else 1
            plane_factor = factor.ObservationFactor(point_var, plane_var, v_c, d_c, np.array([self.sigma_d]))
            obsv_factors.append(plane_factor)

            # bookkeeping
            self._var_id_map[plane_var] = plane.id.data
            if plane.label.data not in self._label_orientation_map:
                self._label_orientation_map[plane.label.data] = v_c

        # insert into factorgraph
        factor_list = [odom_factor] + obsv_factors
        self._lock.acquire(True)
        for f in factor_list:
            self._fg.add_factor(f)
        self._lock.release()

        # save for next iteration
        self._G_ws = G_ws
        self._G_cs = G_cs
        self._point_var = point_var

        # save point timestamp
        self._point_stamp_map[point_var] = pose_ws.header.stamp
Example #45
def mat2poselist(mat):
    pos = tfm.translation_from_matrix(mat)
    quat = tfm.quaternion_from_matrix(mat)
    return pos.tolist() + quat.tolist()
Example #46
    def check_const_mating(self, ref_consts, move_consts, tr, quat):
        tf_mat = utils.get_tf_matrix(tr, quat)
        for ref_const, move_const in zip(ref_consts, move_consts):
            ref_const_copy = copy.deepcopy(ref_const)
            move_const_copy = copy.deepcopy(move_const)
            move_entry = move_const_copy["entryCoordinate"]
            move_end = move_const_copy["endCoordinate"]
            move_const_copy["entryCoordinate"] = tf.concatenate_matrices(
                tf_mat, move_entry)
            move_const_copy["endCoordinate"] = tf.concatenate_matrices(
                tf_mat, move_end)

            ref_type = ref_const["type"]
            move_type = move_const["type"]
            if ref_type == "hole":
                hole_const = ref_const_copy
                pin_const = move_const_copy
            else:
                hole_const = move_const_copy
                pin_const = ref_const_copy

            hole_entry = hole_const["entryCoordinate"]
            hole_end = hole_const["endCoordinate"]

            pin_entry = pin_const["entryCoordinate"]
            pin_end = pin_const["endCoordinate"]

            hole_entry_tr = tf.translation_from_matrix(hole_entry)
            hole_end_tr = tf.translation_from_matrix(hole_end)
            pin_entry_tr = tf.translation_from_matrix(pin_entry)
            pin_end_tr = tf.translation_from_matrix(pin_end)

            ref_axis = tf.unit_vector((hole_end_tr - hole_entry_tr).round(6))

            eps = 1e-03
            ref_axis[np.abs(ref_axis) < eps] = 0

            inter_entry_diff = (hole_entry_tr - pin_entry_tr).round(6)
            #inter_entry_diff[np.abs(inter_entry_diff) < eps] = 0
            inter_end_diff = (hole_end_tr - pin_end_tr).round(6)
            #inter_end_diff[np.abs(inter_end_diff) < eps] = 0
            entry_end_diff = (pin_end_tr - hole_entry_tr).round(6)
            #entry_end_diff[np.abs(entry_end_diff) < eps] = 0

            print("\n---")
            print("parent: {}, child: {}".format(ref_const["name"],
                                                 move_const["name"]))
            print(inter_entry_diff)
            print(inter_end_diff)
            print(entry_end_diff)
            '''if np.linalg.norm(inter_entry_diff) == 0. or np.array_equal(np.sign(inter_entry_diff), ref_axis):
                pass
            else:
                print("1")
                return False

            if pin_const["feature"] == "screw":
                continue
            else:
                if np.array_equal(np.sign(entry_end_diff), ref_axis):
                    pass
                else:
                    print("2")
                    return False
                
                if np.linalg.norm(inter_end_diff) == 0. or np.array_equal(np.sign(inter_end_diff), ref_axis):
                    pass
                else:
                    print("3")
                    return False'''
        return True
Example #47
    def _adjust_camera_position(self):
        """
        Recalculate position of the camera to match robot frames with markers
        """
        if not self._is_robot_camera_frames_of_the_same_figure():
            return

        positions = []
        for i in range(len(self._camera_markers_ids)):
            transformation = self.get_recent_transformation(
                self._camera_frame, self._camera_markers_ids[i])
            point = TransformationManager.point_from_tf(transformation)
            positions.append(point)

        markers_positions = numpy.array(positions)

        positions = []
        for i in range(len(self._marker_holder_frames)):
            transformation = self.get_recent_filtered_transformation(
                self._camera_frame, self._marker_holder_frames[i])
            point = TransformationManager.point_from_tf(transformation)
            positions.append(point)

        robot_marker_holders_positions = numpy.array(positions)

        if not numpy.allclose(robot_marker_holders_positions,
                              markers_positions,
                              atol=self._distance_error_tolerance):

            affine_transformation_matrix = superimposition_matrix(
                markers_positions.T, robot_marker_holders_positions.T)

            translation = translation_from_matrix(affine_transformation_matrix)

            needed_rotation = quaternion_from_matrix(
                affine_transformation_matrix)

            euler_angles = euler_from_quaternion(needed_rotation)

            apply_translation = not numpy.allclose(
                translation, numpy.zeros(3),
                atol=self._distance_error_tolerance)
            apply_rotation = not numpy.allclose(
                euler_angles, numpy.zeros(3), atol=self._angle_error_tolerance)

            if apply_translation or apply_rotation:
                rospy.loginfo("Applying camera correction")

            if apply_translation:
                rospy.loginfo("  Translation => " + str(translation))

                self._camera_pose.transform.translation.x += translation[0]
                self._camera_pose.transform.translation.y += translation[1]
                self._camera_pose.transform.translation.z += translation[2]

            if apply_rotation:
                rospy.loginfo("  Rotation => " + str(needed_rotation))

                current_rotation = [self._camera_pose.transform.rotation.x,
                                    self._camera_pose.transform.rotation.y,
                                    self._camera_pose.transform.rotation.z,
                                    self._camera_pose.transform.rotation.w]

                new_rotation = quaternion_multiply(current_rotation,
                                                   needed_rotation)

                self._camera_pose.transform.rotation.x = new_rotation[0]
                self._camera_pose.transform.rotation.y = new_rotation[1]
                self._camera_pose.transform.rotation.z = new_rotation[2]
                self._camera_pose.transform.rotation.w = new_rotation[3]
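    # A minimal, self-contained sketch (added for illustration, not part of the
    # original class) of the correction math used above: superimposition_matrix
    # takes two (3, N) point sets and returns the 4x4 rigid transform that best
    # maps the first onto the second, from which the translation and quaternion
    # are extracted exactly as in _adjust_camera_position.
    #
    #   import numpy
    #   from tf.transformations import (superimposition_matrix,
    #                                   translation_from_matrix,
    #                                   quaternion_from_matrix)
    #
    #   markers = numpy.array([[0.1, 0.0, 0.2],
    #                          [0.4, 0.1, 0.2],
    #                          [0.2, 0.3, 0.5]]).T             # measured markers
    #   holders = markers + numpy.array([[0.02], [0.0], [0.0]])  # shifted 2 cm in x
    #   M = superimposition_matrix(markers, holders)
    #   translation_from_matrix(M)    # ~[0.02, 0, 0]
    #   quaternion_from_matrix(M)     # ~[0, 0, 0, 1] (identity rotation)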
    def get_AsmPose_by_HolePin(self, ref_obj, ref_const_names, move_obj,
                               move_const_names):
        target_tr = np.array([0, 0, 0])
        target_quat = np.array([0, 0, 0, 1])
        ref_axis = np.array([0, 0, -1])
        criterion = []
        success = False

        ref_consts = [ref_obj.assemConsts[const] for const \
            in ref_const_names]
        move_consts = [move_obj.assemConsts[const] for const \
            in move_const_names]
        criterion = [self.HolePin_criterion(ref, move) for (ref, move) \
            in zip(ref_consts, move_consts)]
        if False in criterion:
            pprint(ref_consts)
            pprint(move_consts)
            rospy.logwarn(
                "Hole and pin's specs are not satispying given citerion")
            pass
        else:
            ref_coors = [const[cri+"Coordinate"] for (const, cri) \
                in zip(ref_consts, criterion)]
            ref_quats = [tf.quaternion_from_matrix(coor) for coor in ref_coors]
            ref_trs = [tf.translation_from_matrix(coor) for coor in ref_coors]
            ref_zs = [utils.get_transformed_zAxis(quat) for quat in ref_quats]
            ref_z_diffs = [
                utils.zAxis_difference(ref_zs[0], zAxis) for zAxis in ref_zs
            ]
            ref_tr_diffs = [tr - ref_trs[0] for tr in ref_trs]

            move_coors = [const[cri+"Coordinate"] for (const, cri) \
                in zip(move_consts, criterion)]
            move_quats = [
                tf.quaternion_from_matrix(coor) for coor in move_coors
            ]
            move_trs = [
                tf.translation_from_matrix(coor) for coor in move_coors
            ]
            move_zs = [
                utils.get_transformed_zAxis(quat) for quat in move_quats
            ]
            move_z_diffs = [
                utils.zAxis_difference(move_zs[0], zAxis) for zAxis in move_zs
            ]
            move_tr_diffs = [tr - move_trs[0] for tr in move_trs]

            is_same_z_diffs = [np.allclose(ref_z_diff, move_z_diff, rtol=0.005, atol=0.005) \
                for (ref_z_diff, move_z_diff) in zip(ref_z_diffs, move_z_diffs)]
            if not all(is_same_z_diffs):
                print(ref_z_diffs)
                print(move_z_diffs)
                print(is_same_z_diffs)
                rospy.logwarn(
                    "Target hole and pin directions are not aligned properly for assembly!"
                )
                pass
            else:
                if len(ref_consts) > 1:
                    move_quat_inv1 = tf.quaternion_inverse(move_quats[0])
                    temp_quat = tf.quaternion_multiply(ref_quats[0],
                                                       move_quat_inv1)

                    ref_axis = ref_zs[0]

                    move2_tr_from_ref1_temp = \
                        utils.get_translated_origin([0,0,0], temp_quat, move_tr_diffs[1])[:3, 3]
                    move2_tr_from_ref1_temp_re = \
                        utils.get_translated_origin([0,0,0], tf.quaternion_inverse(ref_quats[0]), move2_tr_from_ref1_temp)[:3, 3]
                    ref2_tr_from_ref1_temp = \
                        utils.get_translated_origin([0,0,0], tf.quaternion_inverse(ref_quats[0]), ref_tr_diffs[1])[:3, 3]

                    grad_move2_from_ref1_temp = \
                        np.arctan2(move2_tr_from_ref1_temp_re[1], move2_tr_from_ref1_temp_re[0])
                    grad_ref2_from_ref1_temp = \
                        np.arctan2(ref2_tr_from_ref1_temp[1], ref2_tr_from_ref1_temp[0])

                    temp_grad1 = tf.quaternion_about_axis(
                        grad_move2_from_ref1_temp, ref_axis)
                    temp_grad1_inv = tf.quaternion_inverse(temp_grad1)
                    temp_grad2 = tf.quaternion_about_axis(
                        grad_ref2_from_ref1_temp, ref_axis)

                    target_quat = tf.quaternion_multiply(
                        temp_grad1_inv, temp_quat)
                    target_quat = tf.quaternion_multiply(
                        temp_grad2, target_quat)
                    target_tr = utils.get_translated_origin(
                        ref_trs[0], target_quat, -move_trs[0])[:3, 3]
                else:
                    ref_axis = ref_zs[0]

                    move_quat_inv = tf.quaternion_inverse(move_quats[0])
                    target_quat = tf.quaternion_multiply(
                        ref_quats[0], move_quat_inv)
                    target_tr = utils.get_translated_origin(
                        ref_trs[0], target_quat, -move_trs[0])[:3, 3]

                if "pin" in ref_const_names[0]:
                    ref_axis = -ref_zs[0]
                else:
                    ref_axis = ref_zs[0]

                edit_quats = [tf.quaternion_multiply(target_quat, move_quat) \
                    for move_quat in move_quats]
                edit_zs = [
                    utils.get_transformed_zAxis(quat) for quat in edit_quats
                ]
                edit_quat_mat = tf.quaternion_matrix(target_quat)
                edit_coors = [tf.concatenate_matrices(edit_quat_mat, coor) \
                    for coor in move_coors]
                edit_trs = [
                    tf.translation_from_matrix(coor) for coor in edit_coors
                ]
                edit_tr_diffs = [tr - edit_trs[0] for tr in edit_trs]

                edit_tr_min = self.get_min_vector_from_ref(
                    edit_zs, edit_trs, edit_tr_diffs)
                ref_tr_min = self.get_min_vector_from_ref(
                    ref_zs, ref_trs, ref_tr_diffs)
                is_same_arange = [np.allclose(ref, move, rtol=0.05, atol=0.05) \
                    for (ref, move) in zip(ref_tr_min, edit_tr_min)]
                arange_success = all(is_same_arange)

                stuck_success = self.check_const_mating(
                    ref_consts, move_consts, target_tr, target_quat)
                success = arange_success and stuck_success

        return target_tr, target_quat, ref_axis, criterion, success
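    # Hypothetical sketch (added; the real utils module is not shown in this
    # snippet) of the helper used above: utils.get_transformed_zAxis(quat) is
    # assumed to rotate the unit z-axis by the given quaternion, e.g.
    #
    #   import numpy as np
    #   import tf.transformations as tf
    #
    #   def get_transformed_zAxis(quat):
    #       # rotate [0, 0, 1] into the frame described by quat ([x, y, z, w])
    #       return tf.quaternion_matrix(quat)[:3, :3].dot(np.array([0., 0., 1.]))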
Example #49
0
    def set_pose(self,
                 joint_group,
                 position,
                 rpy=None,
                 task_duration=7.0,
                 do_wait=True,
                 ref_frame_name=None):
        '''
        @deprecated: Use set_pose_target (from MoveGroupCommander) directly.
        Accept pose defined by position and RPY in Cartesian format.

        @type joint_group: str
        @type position: [float]
        @param position: x, y, z.
        @type rpy: [float]
        @param rpy: If None, keep the current orientation by using
                    MoveGroupCommander.set_position_target. See:
                     'http://moveit.ros.org/doxygen/' +
                     'classmoveit__commander_1_1move__group_1_1' +
                     'MoveGroupCommander.html#acfe2220fd85eeb0a971c51353e437753'
        @param ref_frame_name: reference frame for target pose, i.e. "LARM_JOINT5_Link".
        '''
        # convert tuple to list
        position = list(position)
        if rpy is not None:
            rpy = list(rpy)
        #
        # Check if MoveGroup is instantiated.
        if not self.MG_LARM or not self.MG_RARM:
            try:
                self._init_moveit_commanders()
            except RuntimeError as e:
                rospy.logerr(self._MSG_NO_MOVEGROUP_FOUND)
                raise e

        # Locally assign the specified MoveGroup
        movegr = None
        if Constant.GRNAME_LEFT_ARM == joint_group:
            movegr = self.MG_LARM
        elif Constant.GRNAME_RIGHT_ARM == joint_group:
            movegr = self.MG_RARM
        else:
            rospy.logerr('joint_group must be either %s, %s or %s' %
                         (Constant.GRNAME_LEFT_ARM, Constant.GRNAME_RIGHT_ARM,
                          Constant.GRNAME_BOTH_ARMS))
            return

        # set reference frame
        if ref_frame_name:
            ref_pose = movegr.get_current_pose(ref_frame_name).pose
            ref_mat = compose_matrix(translate=[
                ref_pose.position.x, ref_pose.position.y, ref_pose.position.z
            ],
                                     angles=list(
                                         euler_from_quaternion([
                                             ref_pose.orientation.x,
                                             ref_pose.orientation.y,
                                             ref_pose.orientation.z,
                                             ref_pose.orientation.w
                                         ])))
            target_mat = numpy.dot(
                ref_mat,
                compose_matrix(translate=position, angles=rpy or [0, 0, 0]))
            position = list(translation_from_matrix(target_mat))
            rpy = list(euler_from_matrix(target_mat))

        # If no RPY specified, give position and return the method.
        if not rpy:
            try:
                movegr.set_position_target(position)
            except MoveItCommanderException as e:
                rospy.logerr(str(e))
            (movegr.go(do_wait) or movegr.go(do_wait) or rospy.logerr(
                'MoveGroup.go fails; jointgr={}'.format(joint_group)))
            return

        # Not necessary to convert from rpy to quaternion, since
        # MoveGroupCommander.set_pose_target can take rpy format too.
        pose = copy.deepcopy(position)
        pose.extend(rpy)
        rospy.loginfo('setpose jntgr={} mvgr={} pose={} posi={} rpy={}'.format(
            joint_group, movegr, pose, position, rpy))
        try:
            movegr.set_pose_target(pose)
        except MoveItCommanderException as e:
            rospy.logerr(str(e))
        except Exception as e:
            rospy.logerr(str(e))

        (movegr.go(do_wait) or movegr.go(do_wait)
         or rospy.logerr('MoveGroup.go fails; jointgr={}'.format(joint_group)))
Example #50
0
def numpy_to_pose(P):
    xyz = tuple(transformations.translation_from_matrix(P))[:3]
    quat = tuple(transformations.quaternion_from_matrix(P))
    return geometry_msgs.msg.Pose(geometry_msgs.msg.Point(*xyz),
                                  geometry_msgs.msg.Quaternion(*quat))
        return res_1.x
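
# Usage sketch (added for illustration, not part of the original example):
# build a homogeneous transform and convert it to a geometry_msgs/Pose with
# numpy_to_pose defined above.
def _demo_numpy_to_pose():
    P = transformations.concatenate_matrices(
        transformations.translation_matrix([0.1, 0.0, 0.3]),
        transformations.rotation_matrix(0.5, [0, 0, 1]))
    pose = numpy_to_pose(P)
    print(pose)
    return pose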
        

if __name__ == '__main__':
    color1 = (0,255,255)
    color2 = (0,0,255)
    color3 = (255,0,255)
    
    # x0_ext_old = [1.08592196e-01, -5.96469288e-01, 6.09164354e-01] + [9.05378371e-01, 3.06042346e-02, -5.82887034e-02, -4.19470873e-01]  # viewer
    x0_ext_old = [7.16834068e-01, -7.68849430e-02, 3.78838750e-01] + [6.89344221e-01, 6.44350485e-01, -2.20748124e-01, -2.46753445e-01]  # observer
    
    
    transform = tfm.concatenate_matrices(tfm.translation_matrix(x0_ext_old[0:3]), tfm.quaternion_matrix(x0_ext_old[3:7]))
    inversed_transform = tfm.inverse_matrix(transform)
    translation = tfm.translation_from_matrix(inversed_transform)
    quaternion = tfm.quaternion_from_matrix(inversed_transform)

    x0_ext =  translation.tolist() + quat_to_rod(quaternion.tolist())

    if sys.argv[1] == 'observer':
        if sys.argv[3] == '640':
            x0_int = [617.605, 617.605, 305.776, 238.914, 0.0,0.0,0.0,0.0,0.0]  # observer
        elif sys.argv[3] == '1080':
            x0_int = [1389.61, 1389.61, 927.997, 537.558, 0.0,0.0,0.0,0.0,0.0]  # observer
    else:
        if sys.argv[3] == '640':
            x0_int = [618.337, 618.337, 310.987, 236.15, 0.0,0.0,0.0,0.0,0.0] # viewer 
        elif sys.argv[3] == '1080':
            x0_int = [1391.26, 1391.26, 939.721, 531.336, 0.0,0.0,0.0,0.0,0.0]  # viewer
            
def dh_2_rpy(table_name, config_file):
    lines_read = 0  # do not count the header row

    with open(join('config', table_name), 'r') as f:
        reader = csv.DictReader(f, delimiter=' ')
        for row in reader: 
            for key in list(row):
                table[key].append(float(row[key]))
            lines_read+=1

    print(table)
    print(lines_read)
    assert(lines_read == 3)
    
    roll = []
    pitch = []
    yaw = []

    xes = []
    yes = []
    zes = []

    x_axis = (1, 0, 0)
    z_axis = (0, 0, 1)
    params = []
    link_len = []



    for i in range(0, lines_read):
        tz = translation_matrix((0, 0, table['d'][i]))  
        rz = rotation_matrix(table['theta'][i], z_axis)    
        tx = translation_matrix((table['a'][i], 0, 0))  
        rx = rotation_matrix(table['alpha'][i], x_axis) 

        params.append({'theta': table['theta'][i], 'alpha': table['alpha'][i], 'a': table['a'][i], 'd': table['d'][i]})

        #params = table

        dh_matrix = concatenate_matrices(tx, rx, tz, rz)

        (r, p, y) = euler_from_matrix(dh_matrix)
        (x_, y_, z_) = translation_from_matrix(dh_matrix)

        roll.append(r)
        pitch.append(p)
        yaw.append(y)
        xes.append(x_)
        yes.append(y_)
        zes.append(z_)
        link_len.append(table['d'][i])

    #params = []

    with open(join('config', config_file), 'w+') as f:
        for i in range(0, lines_read):
            f.write("roll%d: %.2f\n" % ((i + 1), roll[i]))
            f.write("pitch%d: %.2f\n" % ((i + 1), pitch[i]))
            f.write("yaw%d: %.2f\n" % ((i + 1), yaw[i]))
            f.write("x%d: %.2f\n" % ((i + 1), xes[i]))
            f.write("y%d: %.2f\n" % ((i + 1), yes[i]))
            f.write("z%d: %.2f\n" % ((i + 1), zes[i]))
            f.write("link%d_length: %.2f\n" % ((i + 1), float(link_len[i])))

    return params 
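
# Usage sketch (added for illustration; parameter values are made up): build a
# single link transform with the same composition order used in dh_2_rpy
# (Tx * Rx * Tz * Rz, equivalent to the modified/Craig DH convention since the
# x- and z-pairs commute) and read back the RPY + xyz values that the function
# writes to the config file.
def _demo_single_dh_link():
    theta, alpha, a, d = 1.57, 0.0, 0.0, 0.2
    tz = translation_matrix((0, 0, d))
    rz = rotation_matrix(theta, (0, 0, 1))
    tx = translation_matrix((a, 0, 0))
    rx = rotation_matrix(alpha, (1, 0, 0))
    dh_matrix = concatenate_matrices(tx, rx, tz, rz)
    print(euler_from_matrix(dh_matrix))        # -> roll, pitch, yaw
    print(translation_from_matrix(dh_matrix))  # -> x, y, z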
Example #53
0
def prepare_corner_grasp_parameter(ec_frame, selected_wall_names, objects,
                                   current_object_idx, object_params,
                                   ifco_in_base_transform, params):

    # hand pose above and behind the object
    pre_approach_transform = params['pre_approach_transform']

    corner_frame = np.copy(ec_frame)
    print("Prepare Corner: ", pu.tf_dbg_call_to_string(corner_frame,
                                                       "prepare"))
    used_ec_frame, corner_frame_alpha_zero = GraspFrameRecipes.get_derived_corner_grasp_frames(
        corner_frame, object_params['frame'])
    pre_approach_pose = used_ec_frame.dot(params['hand_transform'].dot(
        pre_approach_transform))  # TODO order of hand and pre_approach

    # down_dist = params['down_dist']  #  dist lower than ifco bottom: behavior of the high level planner
    # dist = z difference to ifco bottom minus hand frame offset (dist from hand frame to collision point)
    # (more realistic behavior since we have a force threshold when going down to the bottom)
    bounded_down_dist = pre_approach_pose[2, 3] - ifco_in_base_transform[2, 3]
    hand_frame_to_bottom_offset = 0.07  # 7cm TODO maybe move to handarm_parameters.py
    bounded_down_dist = min(params['down_dist'],
                            bounded_down_dist - hand_frame_to_bottom_offset)

    # goal pose for go down movement
    go_down_pose = tra.translation_matrix([0, 0, -bounded_down_dist
                                           ]).dot(pre_approach_pose)

    # pose after lifting. This is somewhat fake, since the real go_down_pose will be determined by
    # the FT-Switch during go_down and the actual lifted distance by the TimeSwitch (or a pose switch in case
    # the robot allows precise small movements) TODO better solution?
    fake_lift_up_dist = np.min([params['lift_dist'], 0.01])  # 1cm
    corrective_lift_pose = tra.translation_matrix([0, 0, fake_lift_up_dist
                                                   ]).dot(go_down_pose)

    sliding_dist = params['sliding_dist']
    wall_dir = tra.translation_matrix([0, 0, -sliding_dist])
    # slide direction is given by the corner_frame_alpha_zero
    wall_dir[:3, 3] = corner_frame_alpha_zero[:3, :3].dot(wall_dir[:3, 3])

    slide_to_wall_pose = wall_dir.dot(corrective_lift_pose)

    # now project it into the wall plane!
    z_projection = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 0],
                             [0, 0, 0, 1]])
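    # This conjugation expresses the pose in corner_frame_alpha_zero, zeroes its
    # z-component via z_projection, and maps it back to world, i.e. it projects
    # the slide goal onto the corner frame's x-y plane.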

    to_wall_plane_transform = corner_frame_alpha_zero.dot(
        z_projection.dot(
            tra.inverse_matrix(corner_frame_alpha_zero).dot(
                slide_to_wall_pose)))
    slide_to_wall_pose[:3, 3] = tra.translation_from_matrix(
        to_wall_plane_transform)

    checked_motions = [
        'pre_approach', 'go_down', 'corrective_lift', 'slide_to_wall'
    ]

    goals = [
        pre_approach_pose, go_down_pose, corrective_lift_pose,
        slide_to_wall_pose
    ]

    # Take orientation of object but translation of pre grasp pose
    pre_grasp_pos_manifold = np.copy(object_params['frame'])
    pre_grasp_pos_manifold[:3,
                           3] = tra.translation_from_matrix(pre_approach_pose)

    slide_pos_manifold = np.copy(slide_to_wall_pose)

    goal_manifold_frames = {
        'pre_approach': pre_grasp_pos_manifold,

        # Use object frame for sampling
        'go_down': np.copy(go_down_pose),
        'corrective_lift': np.copy(corrective_lift_pose),
        # should always be the same frame as go_down # TODO use world orientation?

        # Use wall frame for sampling. Keep in mind that the wall frame has different orientation, than world.
        'slide_to_wall': slide_pos_manifold,
    }

    goal_manifold_orientations = {
        # use hand orientation
        'pre_approach': tra.quaternion_from_matrix(pre_approach_pose),

        # Use object orientation
        'go_down': tra.quaternion_from_matrix(
            go_down_pose),  # TODO use hand orientation instead?

        # should always be the same orientation as go_down
        'corrective_lift': tra.quaternion_from_matrix(corrective_lift_pose),

        # use wall orientation
        'slide_to_wall': tra.quaternion_from_matrix(
            corner_frame),  # TODO is that the right one?
    }

    allowed_collisions = {

        # 'init_joint': [],

        # no collisions are allowed during going to pre_grasp pose
        'pre_approach': [],

        # Only allow touching the bottom of the ifco
        'go_down': [
            AllowedCollision(type=AllowedCollision.ENV_CONSTRAINT,
                             constraint_name='bottom',
                             terminating=False),
        ],
        'corrective_lift': [
            AllowedCollision(type=AllowedCollision.ENV_CONSTRAINT,
                             constraint_name='bottom',
                             terminating=False),
        ],
        'slide_to_wall': [
            # Allow all other objects to be touched as well
            # (since hand will go through them in simulation)
            AllowedCollision(type=AllowedCollision.BOUNDING_BOX,
                             box_id=obj_idx,
                             terminating=False,
                             required=obj_idx == current_object_idx)
            for obj_idx in range(0, len(objects))
        ] + [
            AllowedCollision(type=AllowedCollision.ENV_CONSTRAINT,
                             constraint_name=selected_wall_names[0],
                             terminating=False),
            AllowedCollision(type=AllowedCollision.ENV_CONSTRAINT,
                             constraint_name=selected_wall_names[1],
                             terminating=False),
            AllowedCollision(type=AllowedCollision.ENV_CONSTRAINT,
                             constraint_name='bottom',
                             terminating=False),
        ],
    }

    return FeasibilityQueryParameters(checked_motions, goals,
                                      allowed_collisions, goal_manifold_frames,
                                      goal_manifold_orientations)
Example #54
0
def xyzquat_from_matrix(matrix):
    return tfm.translation_from_matrix(matrix).tolist() + tfm.quaternion_from_matrix(matrix).tolist()
Example #55
0
    def step(self):
        t = rospy.Time.now()
        _, _, oldwidth, oldheight = glGetFloatv(GL_VIEWPORT)
        height = min(oldheight, int(oldwidth / self.aspect + .5))
        width = int(self.aspect * height + .5)
        glViewport(0, 0, width, height)

        msg = CameraInfo()
        msg.header.stamp = t
        msg.header.frame_id = '/' + self.name
        msg.height = height
        msg.width = width
        f = 1 / math.tan(math.radians(self.fovy) / 2) * height / 2
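        # CameraInfo.P is the 3x4 projection matrix in row-major order:
        # [fx 0 cx Tx; 0 fy cy Ty; 0 0 1 0], with the principal point at the
        # image center.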
        msg.P = [
            f,
            0,
            width / 2 - .5,
            0,
            0,
            f,
            height / 2 - .5,
            0,
            0,
            0,
            1,
            0,
        ]
        self.info_pub.publish(msg)

        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()
        perspective(self.fovy, width / height, 0.1)
        glMatrixMode(GL_MODELVIEW)
        glLoadIdentity()
        # rotates into the FLU coordinate system
        glMultMatrixf([[0., 0., -1., 0.], [-1., 0., 0., 0.], [0., 1., 0., 0.],
                       [0., 0., 0., 1.]])
        # after that, +x is forward, +y is left, and +z is up
        self.set_pose_func()

        with GLMatrix:
            rotate_to_body(self.base_link_body)
            glcamera_from_body = glGetFloatv(GL_MODELVIEW_MATRIX).T
        camera_from_body = numpy.array([  # camera from glcamera
            [1, 0, 0, 0],
            [0, -1, 0, 0],
            [0, 0, -1, 0],
            [0, 0, 0, 1],
        ]).dot(glcamera_from_body)
        body_from_camera = numpy.linalg.inv(camera_from_body)
        tf_br.sendTransform(
            transformations.translation_from_matrix(body_from_camera),
            transformations.quaternion_from_matrix(body_from_camera), t,
            "/" + self.name, "/base_link")

        if not self.image_pub.get_num_connections():
            glViewport(0, 0, oldwidth, oldheight)
            return

        self.world.draw()

        x = glReadPixels(0,
                         0,
                         width,
                         height,
                         GL_RGBA,
                         GL_UNSIGNED_BYTE,
                         outputType=None)
        x = numpy.reshape(x, (height, width, 4))
        x = x[::-1]

        msg = Image()
        msg.header.stamp = t
        msg.header.frame_id = '/' + self.name
        msg.height = height
        msg.width = width
        msg.encoding = 'rgba8'
        msg.is_bigendian = 0
        msg.step = width * 4
        msg.data = x.tostring()
        self.image_pub.publish(msg)

        glViewport(0, 0, oldwidth, oldheight)
Example #56
0
def prepare_surface_grasp_parameter(objects, current_object_idx, object_params,
                                    params):
    # use kinematic checks
    # TODO create proxy; make it a persistent connection?

    # Code duplication from planner.py TODO put at a shared location

    # Set the initial pose above the object
    goal_ = np.copy(
        object_params['frame'])  # TODO: this should be support_surface_frame
    goal_[:3, 3] = tra.translation_from_matrix(object_params['frame'])
    goal_ = goal_.dot(params['hand_transform'])

    # the grasp frame is symmetrical - check which side is nicer to reach
    # this is a hacky first version for our WAM
    zflip_transform = tra.rotation_matrix(math.radians(180.0), [0, 0, 1])
    if goal_[0][0] < 0:
        goal_ = goal_.dot(zflip_transform)

    # hand pose above object
    pre_grasp_pose = goal_.dot(params['pre_approach_transform'])

    # down_dist = params['down_dist']  #  dist lower than ifco bottom: behavior of the high level planner
    # dist = z difference to object centroid (both transformations are w.r.t. to world frame
    # (more realistic behavior since we have to touch the object for a successful grasp)
    down_dist = pre_grasp_pose[2, 3] - object_params['frame'][
        2, 3]  # get z-translation difference

    # goal pose for go down movement
    go_down_pose = tra.translation_matrix([0, 0,
                                           -down_dist]).dot(pre_grasp_pose)

    post_grasp_pose = params['post_grasp_transform'].dot(
        go_down_pose
    )  # TODO it would be better to allow relative motion as goal frames

    checked_motions = [
        "pre_approach", "go_down"
    ]  # , "post_grasp_rot"] ,go_up, go_drop_off  # TODO what about remaining motions? (see wallgrasp)

    goals = [pre_grasp_pose, go_down_pose]  # , post_grasp_pose]

    # TODO what about using the bounding boxes as for automatic goal manifold calculation?

    # Take orientation of object but translation of pre grasp pose
    pre_grasp_pos_manifold = np.copy(object_params['frame'])
    pre_grasp_pos_manifold[:3, 3] = tra.translation_from_matrix(pre_grasp_pose)

    goal_manifold_frames = {
        'pre_approach': pre_grasp_pos_manifold,

        # Use object frame for resampling
        'go_down': np.copy(
            object_params['frame'])  # TODO change that again to go_down_pose!?
    }

    goal_manifold_orientations = {
        # use hand orientation
        'pre_approach': tra.quaternion_from_matrix(pre_grasp_pose),

        # Use object orientation
        'go_down': tra.quaternion_from_matrix(go_down_pose),
        # tra.quaternion_from_matrix(object_params['frame'])  # TODO use hand orientation instead?
    }

    # The collisions that are allowed per motion in message format
    allowed_collisions = {
        # no collisions are allowed during going to pre_grasp pose
        'pre_approach': [],
        'go_down': [
            AllowedCollision(type=AllowedCollision.BOUNDING_BOX,
                             box_id=current_object_idx,
                             terminating=True,
                             required=True),
            AllowedCollision(type=AllowedCollision.ENV_CONSTRAINT,
                             constraint_name='bottom',
                             terminating=False)
        ] + [
            AllowedCollision(type=AllowedCollision.BOUNDING_BOX,
                             box_id=obj_idx,
                             terminating=False)
            for obj_idx, o in enumerate(objects)
            if obj_idx != current_object_idx
            and params['go_down_allow_touching_other_objects']
        ],

        # TODO also account for the additional object in a way?
        'post_grasp_rot': [
            AllowedCollision(type=AllowedCollision.BOUNDING_BOX,
                             box_id=current_object_idx,
                             terminating=True),
            AllowedCollision(type=AllowedCollision.ENV_CONSTRAINT,
                             constraint_name='bottom',
                             terminating=False)
        ]
    }

    print("ALLOWED COLLISIONS:", allowed_collisions)

    return FeasibilityQueryParameters(checked_motions, goals,
                                      allowed_collisions, goal_manifold_frames,
                                      goal_manifold_orientations)
Example #57
0
def matrix_as_tf(mat):
    return (tr.translation_from_matrix(mat), tr.quaternion_from_matrix(mat))
Example #58
0
def create_wall_grasp(object_frame,
                      bounding_box,
                      wall_frame,
                      handarm_params,
                      object_type,
                      pre_grasp_pose=None,
                      alternative_behavior=None):

    # Get the parameters from the handarm_parameters.py file
    obj_type_params = {}
    obj_params = {}
    if object_type in handarm_params['wall_grasp']:
        obj_type_params = handarm_params['wall_grasp'][object_type]
    if 'object' in handarm_params['wall_grasp']:
        obj_params = handarm_params['wall_grasp']['object']

    hand_transform = getParam(obj_type_params, obj_params, 'hand_transform')
    downward_force = getParam(obj_type_params, obj_params, 'downward_force')
    wall_force = getParam(obj_type_params, obj_params, 'wall_force')
    slide_IFCO_speed = getParam(obj_type_params, obj_params, 'slide_speed')
    pre_approach_transform = getParam(obj_type_params, obj_params,
                                      'pre_approach_transform')
    scooping_angle_deg = getParam(obj_type_params, obj_params,
                                  'scooping_angle_deg')

    init_joint_config = handarm_params['init_joint_config']

    thumb_pos_preshape = getParam(obj_type_params, obj_params,
                                  'thumb_pos_preshape')
    post_grasp_transform = getParam(obj_type_params, obj_params,
                                    'post_grasp_transform')

    rotate_time = handarm_params['rotate_duration']
    down_IFCO_speed = handarm_params['down_IFCO_speed']

    # Set the twists to use TRIK controller with

    # Down speed is negative because it is defined in the world frame
    down_IFCO_twist = np.array([0, 0, -down_IFCO_speed, 0, 0, 0])

    # Slide speed is positive because it is defined in the EE frame, plus a rotation by the scooping angle
    slide_IFCO_twist_matrix = tra.rotation_matrix(
        math.radians(scooping_angle_deg),
        [1, 0, 0]).dot(tra.translation_matrix([0, 0, slide_IFCO_speed]))
    slide_IFCO_twist = np.array([
        slide_IFCO_twist_matrix[0, 3], slide_IFCO_twist_matrix[1, 3],
        slide_IFCO_twist_matrix[2, 3], 0, 0, 0
    ])

    rviz_frames = []

    if pre_grasp_pose is None:
        # this is the EC frame. It is positioned like object and oriented to the wall
        ec_frame = np.copy(wall_frame)
        ec_frame[:3, 3] = tra.translation_from_matrix(object_frame)
        ec_frame = ec_frame.dot(hand_transform)

        pre_approach_pose = ec_frame.dot(pre_approach_transform)

    else:
        pre_approach_pose = pre_grasp_pose

    # Rviz debug frames
    rviz_frames.append(object_frame)
    rviz_frames.append(pre_approach_pose)
    # rviz_frames.append(pm.toMatrix(pm.fromMsg(res.reachable_hand_pose)))

    control_sequence = []

    # # 0. Go to initial nice mid-joint configuration
    # control_sequence.append(ha.JointControlMode(goal = init_joint_config, goal_is_relative = '0', name = 'init', controller_name = 'GoToInitController'))

    # # 0b. Switch when config is reached
    # control_sequence.append(ha.JointConfigurationSwitch('init', 'Pregrasp', controller = 'GoToInitController', epsilon = str(math.radians(1.0))))

    # 1. Go above the object
    control_sequence.append(
        ha.InterpolatedHTransformControlMode(pre_approach_pose,
                                             controller_name='GoAboveObject',
                                             goal_is_relative='0',
                                             name="Pregrasp"))

    # 1b. Switch when hand reaches the goal pose
    control_sequence.append(
        ha.FramePoseSwitch('Pregrasp',
                           'StayStill',
                           controller='GoAboveObject',
                           epsilon='0.01'))

    # 2. Go to gravity compensation
    control_sequence.append(
        ha.CartesianVelocityControlMode(np.array([0, 0, 0, 0, 0, 0]),
                                        controller_name='StayStillCtrl',
                                        name="StayStill",
                                        reference_frame="EE"))

    # 2b. Wait for a bit to allow vibrations to attenuate
    control_sequence.append(
        ha.TimeSwitch('StayStill',
                      'softhand_pretension',
                      duration=handarm_params['stay_still_duration']))

    # 3. Pretension
    speed = np.array([20])
    thumb_pos = np.array([0, 0, 0])
    diff_pos = np.array([0, 0, 15])
    thumb_contact_force = np.array([0])
    thumb_grasp_force = np.array([0])
    diff_contact_force = np.array([0])
    diff_grasp_force = np.array([0])
    thumb_pretension = np.array([0])
    diff_pretension = np.array([15])
    force_feedback_ratio = np.array([0])
    prox_level = np.array([0])
    touch_level = np.array([0])
    mode = np.array([0])
    command_count = np.array([0])

    control_sequence.append(
        ha.ros_CLASHhandControlMode(goal=np.concatenate(
            (speed, thumb_pos, diff_pos, thumb_contact_force,
             thumb_grasp_force, diff_contact_force, diff_grasp_force,
             thumb_pretension, diff_pretension, force_feedback_ratio,
             prox_level, touch_level, mode, command_count)),
                                    name='softhand_pretension'))

    # 3b. Switch when hand closing time ends
    control_sequence.append(
        ha.TimeSwitch('softhand_pretension',
                      'GoDown',
                      duration=handarm_params['hand_closing_duration']))

    # 4. Go down onto the object/table, in world frame
    control_sequence.append(
        ha.CartesianVelocityControlMode(down_IFCO_twist,
                                        controller_name='GoDown',
                                        name="GoDown",
                                        reference_frame="world"))

    # 4b. Switch when force threshold is exceeded
    force = np.array([0, 0, downward_force, 0, 0, 0])
    control_sequence.append(
        ha.ForceTorqueSwitch('GoDown',
                             'CloseBeforeSlide',
                             goal=force,
                             norm_weights=np.array([0, 0, 1, 0, 0, 0]),
                             jump_criterion="THRESH_UPPER_BOUND",
                             goal_is_relative='1',
                             frame_id='world',
                             port='2'))

    # 5. Close a bit before sliding
    speed = np.array([30])
    thumb_pos = thumb_pos_preshape
    diff_pos = np.array([10, 15, 0])
    thumb_contact_force = np.array([0])
    thumb_grasp_force = np.array([0])
    diff_contact_force = np.array([0])
    diff_grasp_force = np.array([0])
    thumb_pretension = np.array([15])
    diff_pretension = np.array([15])
    force_feedback_ratio = np.array([0])
    prox_level = np.array([0])
    touch_level = np.array([0])
    mode = np.array([0])
    command_count = np.array([1])

    control_sequence.append(
        ha.ros_CLASHhandControlMode(goal=np.concatenate(
            (speed, thumb_pos, diff_pos, thumb_contact_force,
             thumb_grasp_force, diff_contact_force, diff_grasp_force,
             thumb_pretension, diff_pretension, force_feedback_ratio,
             prox_level, touch_level, mode, command_count)),
                                    name='CloseBeforeSlide'))

    # 5b. Time switch
    control_sequence.append(
        ha.TimeSwitch('CloseBeforeSlide',
                      'SlideToWall',
                      duration=handarm_params['hand_closing_duration']))

    # 6. Go towards the wall to slide object to wall
    control_sequence.append(
        ha.CartesianVelocityControlMode(slide_IFCO_twist,
                                        controller_name='SlideToWall',
                                        name="SlideToWall",
                                        reference_frame="EE"))

    # 6b. Switch when the f/t sensor is triggered with normal force from wall
    force = np.array([0, 0, wall_force, 0, 0, 0])
    control_sequence.append(
        ha.ForceTorqueSwitch('SlideToWall',
                             'softhand_close',
                             'ForceSwitch',
                             goal=force,
                             norm_weights=np.array([0, 0, 1, 0, 0, 0]),
                             jump_criterion="THRESH_UPPER_BOUND",
                             goal_is_relative='1',
                             frame_id='world',
                             frame=wall_frame,
                             port='2'))

    # 7. Close the hand
    speed = np.array([30])
    thumb_pos = np.array([0, 50, 30])
    diff_pos = np.array([55, 50, 20])
    thumb_contact_force = np.array([0])
    thumb_grasp_force = np.array([0])
    diff_contact_force = np.array([0])
    diff_grasp_force = np.array([0])
    thumb_pretension = np.array([15])
    diff_pretension = np.array([15])
    force_feedback_ratio = np.array([0])
    prox_level = np.array([0])
    touch_level = np.array([0])
    mode = np.array([0])
    command_count = np.array([1])

    control_sequence.append(
        ha.ros_CLASHhandControlMode(goal=np.concatenate(
            (speed, thumb_pos, diff_pos, thumb_contact_force,
             thumb_grasp_force, diff_contact_force, diff_grasp_force,
             thumb_pretension, diff_pretension, force_feedback_ratio,
             prox_level, touch_level, mode, command_count)),
                                    name='softhand_close'))

    # 7b. Switch when hand closing time ends
    control_sequence.append(
        ha.TimeSwitch('softhand_close',
                      'PostGraspRotate',
                      duration=handarm_params['hand_closing_duration']))

    # 8. Rotate hand after closing and before lifting it up relative to current hand pose
    control_sequence.append(
        ha.CartesianVelocityControlMode(post_grasp_transform,
                                        controller_name='PostGraspRotate',
                                        name='PostGraspRotate',
                                        reference_frame='EE'))

    # 8b. Switch when hand rotated
    control_sequence.append(
        ha.TimeSwitch('PostGraspRotate', 'GoUp', duration=rotate_time))

    return control_sequence, rviz_frames
    def data_cb(self, joint_msg, detection_msgs):
        try:
            jorder = ['waist', 'shoulder', 'elbow', 'hand', 'wrist']
            j_idx = [joint_msg.name.index(j) for j in jorder]
            j = [joint_msg.position[i] for i in j_idx]
            j = np.concatenate((j, [0]))  # assume final joint is fixed
        except Exception as e:
            rospy.logerr_throttle(
                1.0, 'Joint Positions Failed : {} \n {}'.format(e, joint_msg))
            return

        Xs = [np.eye(4) for _ in range(self._num_markers)]
        vis = [False for _ in range(self._num_markers)]

        if len(detection_msgs.detections) <= 0:
            return

        try:
            for pm in detection_msgs.detections:
                # pm.pose = pwcs
                # pm.pose.pose = pwc
                # pm.pose.pose.pose = p

                # TODO : technically requires tf.transformPose(...) for robustness.
                #pose = tf.transformPose(

                ps = PoseStamped(header=pm.pose.header, pose=pm.pose.pose.pose)
                # this transform should theoretically be painless + static
                # Optionally, store the transform beforehand and apply it
                ps = self._tfl.transformPose('stereo_optical_link', ps)

                q = ps.pose.orientation
                p = ps.pose.position
                m_id = pm.id[0]
                X = tx.compose_matrix(angles=tx.euler_from_quaternion(
                    [q.x, q.y, q.z, q.w]),
                                      translate=[p.x, p.y, p.z])
                i = self._m2i[m_id]  # transform tag id to index
                if i >= self._num_markers:
                    continue
                self._i2m[i] = m_id
                Xs[i] = X
                vis[i] = True
                self._seen[i] = True
        except Exception as e:
            rospy.logerr_throttle(1.0, 'TF Failed : {}'.format(e))

        Xs = np.float32(Xs)

        self._data.append((j, Xs, vis))

        self._step += 1
        rospy.loginfo_throttle(
            1.0, 'Current Step : {}; vis : {}'.format(self._step, vis))

        T = self._calib.eval_1(j, Xs, vis)  # == (1, M, 4, 4)

        now = rospy.Time.now()

        m_msg = AprilTagDetectionArray()
        m_msg.header.frame_id = 'base_link'
        m_msg.header.stamp = now

        pv_msg = PoseArray()
        pv_msg.header.stamp = now
        pv_msg.header.frame_id = 'base_link'

        for i in range(self._num_markers):
            if vis[i]:
                txn = tx.translation_from_matrix(T[i])
                rxn = tx.quaternion_from_matrix(T[i])

                msg = AprilTagDetection()
                msg.id = [self._i2m[i]]
                msg.size = [0.0]  # not really a thing

                pwcs = msg.pose
                pwcs.header.frame_id = 'base_link'
                pwcs.header.stamp = now
                fill_pose_msg(pwcs.pose.pose, txn, rxn)

                m_msg.detections.append(msg)
                pv_msg.poses.append(pwcs.pose.pose)
        self._pub.publish(m_msg)
        self._vpub.publish(pv_msg)
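    # Hypothetical sketch (added; the real helper is not shown in this snippet)
    # of fill_pose_msg used above, assumed to copy a translation and quaternion
    # into a geometry_msgs/Pose:
    #
    #   def fill_pose_msg(pose, txn, rxn):
    #       pose.position.x, pose.position.y, pose.position.z = txn
    #       (pose.orientation.x, pose.orientation.y,
    #        pose.orientation.z, pose.orientation.w) = rxn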
Example #60
0
def pack_matrix(matrix, time, child, parent):
    trans = tfs.translation_from_matrix(matrix)
    rot = tfs.quaternion_from_matrix(matrix)
    return pack_transrot(trans, rot, time, child, parent)