def publish_state(self, t):
        state_msg = TransformStamped()
        t_ned_imu = tft.translation_matrix(self.model.get_position())
        R_ned_imu = tft.quaternion_matrix(self.model.get_orientation())
        T_ned_imu = tft.concatenate_matrices(t_ned_imu, R_ned_imu)
        #rospy.loginfo("T_ned_imu = \n" + str(T_ned_imu))
        if self.T_imu_vicon is None:
            # grab the static transform from imu to vicon frame from param server:
            frames = rospy.get_param("frames", None)
            ypr = frames['vicon_to_imu']['rotation']
            q_vicon_imu = tft.quaternion_from_euler(*[radians(x) for x in ypr], axes='rzyx') # xyzw
            R_vicon_imu = tft.quaternion_matrix(q_vicon_imu)
            t_vicon_imu = tft.translation_matrix(frames['vicon_to_imu']['translation'])
#            rospy.loginfo(str(R_vicon_imu))
#            rospy.loginfo(str(t_vicon_imu))
            self.T_vicon_imu = tft.concatenate_matrices(t_vicon_imu, R_vicon_imu)
            self.T_imu_vicon = tft.inverse_matrix(self.T_vicon_imu)
            self.T_enu_ned = tft.euler_matrix(radians(90), 0, radians(180), 'rzyx')
            rospy.loginfo(str(self.T_enu_ned))
            rospy.loginfo(str(self.T_imu_vicon))
            #rospy.loginfo(str(T_vicon_imu))
        # we have /ned -> /imu, need to output /enu -> /vicon:
        T_enu_vicon = tft.concatenate_matrices(self.T_enu_ned, T_ned_imu, self.T_imu_vicon)
        state_msg.header.stamp = t
        state_msg.header.frame_id = "/enu"
        state_msg.child_frame_id = "/simflyer1/flyer_vicon"
        stt = state_msg.transform.translation
        stt.x, stt.y, stt.z = T_enu_vicon[:3,3]
        stro = state_msg.transform.rotation
        stro.x, stro.y, stro.z, stro.w = tft.quaternion_from_matrix(T_enu_vicon)
        
        self.pub_state.publish(state_msg)
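
A recurring pattern on this page: build a 4x4 homogeneous matrix from a translation and an xyzw quaternion, compose with concatenate_matrices, and invert with inverse_matrix. A minimal standalone sketch (frame names and values are illustrative):

import numpy as np
import tf.transformations as tft

# Build /ned -> /imu from a translation and an xyzw quaternion
t_ned_imu = tft.translation_matrix([1.0, 2.0, 0.5])
R_ned_imu = tft.quaternion_matrix(tft.quaternion_from_euler(0, 0, np.pi / 4))
T_ned_imu = tft.concatenate_matrices(t_ned_imu, R_ned_imu)

# Inverting gives the opposite transform; composing both yields the identity
T_imu_ned = tft.inverse_matrix(T_ned_imu)
assert np.allclose(tft.concatenate_matrices(T_ned_imu, T_imu_ned), np.eye(4))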
Example #2
    def tag_callback(self, msg_tag):
        # Listen for the transform of the tag in the world
        avg = PoseAverage.PoseAverage()
        for tag in msg_tag.detections:
            try:
                Tt_w = self.tfbuf.lookup_transform(self.world_frame, "tag_{id}".format(id=tag.id), rospy.Time(), rospy.Duration(1))
                Mtbase_w=self.transform_to_matrix(Tt_w.transform)
                Mt_tbase = tr.concatenate_matrices(tr.translation_matrix((0,0,0.17)), tr.euler_matrix(0,0,np.pi))
                Mt_w = tr.concatenate_matrices(Mtbase_w,Mt_tbase)
                Mt_r=self.pose_to_matrix(tag.pose)
                Mr_t=np.linalg.inv(Mt_r)
                Mr_w=np.dot(Mt_w,Mr_t)
                Tr_w = self.matrix_to_transform(Mr_w)
                avg.add_pose(Tr_w)
                self.publish_sign_highlight(tag.id)
            except(tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException) as ex:
                rospy.logwarn("Error looking up transform for tag_%s", tag.id)
                rospy.logwarn(ex.message)

        Tr_w =  avg.get_average() # Average of the opinions

        # Broadcast the robot transform
        if Tr_w is not None:
            # Set the z translation, and x and y rotations to 0
            Tr_w.translation.z = 0
            rot = Tr_w.rotation
            rotz=tr.euler_from_quaternion((rot.x, rot.y, rot.z, rot.w))[2]
            (rot.x, rot.y, rot.z, rot.w) = tr.quaternion_from_euler(0, 0, rotz)
            T = TransformStamped()
            T.transform = Tr_w
            T.header.frame_id = self.world_frame
            T.header.stamp = rospy.Time.now()
            T.child_frame_id = self.duckiebot_frame
            self.pub_tf.publish(TFMessage([T]))
            self.lifetimer = rospy.Time.now()
Example #3
def space_from_joints(joint_angles):
    T01, T12, T23, T34, T4e = direct_kinematics(joint_angles)
    T = tfs.concatenate_matrices(T01, T12, T23, T34, T4e)
    rz1, ry, rz2 = tfs.euler_from_matrix(T, 'szyz')
    trans = tfs.translation_from_matrix(T)
    S = np.append(trans, rz2)
    return S
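
For reference, euler_from_matrix with the 'szyz' axes string recovers static Z-Y-Z Euler angles, matching the convention above. A hedged round-trip sketch (angle values are arbitrary):

import numpy as np
import tf.transformations as tfs

angles = (0.3, 0.7, -0.2)
T = tfs.euler_matrix(*angles, axes='szyz')
assert np.allclose(tfs.euler_from_matrix(T, 'szyz'), angles)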
Example #4
 def transformation_matrix(rot_angle, rot_dir, trans):
     # print "rot_angle: ", rot_angle
     # print "rot_dir: ", rot_dir
     # print "trans: ", trans
     return tfs.concatenate_matrices(
         tfs.rotation_matrix(rot_angle, rot_dir),
         tfs.translation_matrix(trans))
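
A hedged usage sketch for the helper above. Because the product is R * T, the translation is expressed in the rotated frame (as in DH chains); the example values are made up:

import numpy as np
import tf.transformations as tfs

# 90 degrees about z, then a unit step along the (pre-rotation) x-axis
M = tfs.concatenate_matrices(
    tfs.rotation_matrix(np.pi / 2, (0, 0, 1)),
    tfs.translation_matrix((1.0, 0.0, 0.0)))
# The x-offset ends up along the parent's y-axis after the rotation
assert np.allclose(tfs.translation_from_matrix(M), [0.0, 1.0, 0.0])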
Example #5
def space_from_joints_small(joint_angles):
    T01, T12, T23, T34, T45, T5e, Tec, Tem = direct_kinematics(joint_angles)
    T = tfs.concatenate_matrices(T01, T12, T23, T34, T45, T5e, Tem)
    rx, ry, rz = tfs.euler_from_matrix(T, 'sxyz')
    trans = tfs.translation_from_matrix(T)
    S = np.concatenate((trans, [rz, ry]))  # 1-D inputs: no axis argument needed
    return S
Example #6
 def _makePreGraspPose(self, boxMat, axis):
     if axis==0: #x axis
         alpha = 0
         gamma = 0
     else: #y axis
         alpha = pi/2
         gamma = -pi/2
     ik_request = PositionIKRequest()
     ik_request.ik_link_name = self.toolLinkName
     with self._joint_state_lock:
         ik_request.ik_seed_state.joint_state = copy.deepcopy(self._joint_states)
     ik_request.pose_stamped = PoseStamped()
     ik_request.pose_stamped.header.stamp = rospy.Time.now()
     ik_request.pose_stamped.header.frame_id = self.frameID
     beta = pi/2
     while beta < 3*pi/2:
         rotationMatrix = transformations.euler_matrix(alpha, beta, gamma, 'rzyz')
         distance = self.preGraspDistance + self.gripperFingerLength
         preGraspMat = transformations.translation_matrix([0,0,-distance])
         fullMat = transformations.concatenate_matrices(boxMat, rotationMatrix, preGraspMat)
         p = transformations.translation_from_matrix(fullMat)
         q = transformations.quaternion_from_matrix(fullMat)
         print "trying {} radians".format(beta)
         try:
             ik_resp = self._ik_server(ik_request, Constraints(), rospy.Duration(5.0))  # keep the response for the error-code check below
         except rospy.service.ServiceException:
             beta += 0.1
         else:
             if ik_resp.error_code.val > 0:
                 return beta
             else:
                 #print ik_resp.error_code.val
                 beta += 0.01
     rospy.logerr('No way to pick this up. All IK solutions failed.')
     return 7 * pi / 6
Example #7
def flipOrbToNDT (orbPose):
    qOrb = [orbPose.qx, orbPose.qy, orbPose.qz, orbPose.qw]
    orbFlip = trafo.concatenate_matrices(
        trafo.quaternion_matrix(qOrb),
        trafo.rotation_matrix(np.pi/2, (1,0,0)),
        trafo.rotation_matrix(np.pi/2, (0,0,1))
    )
    return trafo.quaternion_from_matrix(orbFlip)
Example #8
def matrix_from_StampedTransform(msg):
    T = msg.transform
    trans = [T.translation.x, T.translation.y, T.translation.z]
    quat = [T.rotation.x, T.rotation.y, T.rotation.z, T.rotation.w]

    return tfs.concatenate_matrices(
        tfs.translation_matrix(trans),
        tfs.quaternion_matrix(quat))
Example #9
    def _makeGraspPath(self, preGraspGoal):
        '''Given a pregrasp MoveArmGoal message, generate a MoveArmGoal message to perform the final
        approach to the object to grasp it.'''
        
        graspGoal = MoveArmGoal()
        graspGoal.planner_service_name = self.plannerServiceName
        motion_plan_request = MotionPlanRequest()
        motion_plan_request.group_name = self.armGroupName
        motion_plan_request.num_planning_attempts = 25
        motion_plan_request.planner_id = ""
        motion_plan_request.allowed_planning_time = rospy.Duration(5,0)
        
        #Orientation constraint is the same as for pregrasp
        motion_plan_request.goal_constraints.orientation_constraints = copy.deepcopy(preGraspGoal.motion_plan_request.goal_constraints.orientation_constraints)
        #motion_plan_request.goal_constraints.orientation_constraints[0].orientation = Quaternion(0.656778, 0.261999, 0.648401, -0.282093) #stow orientation for debug
        graspGoal.motion_plan_request = motion_plan_request
        
        #Translate from pregrasp position to final position in a roughly straight line
        o = motion_plan_request.goal_constraints.orientation_constraints[0].orientation
        p = preGraspGoal.motion_plan_request.goal_constraints.position_constraints[0].position
        preGraspMat = transformations.quaternion_matrix([o.x,o.y,o.z,o.w])
        preGraspMat[:3, 3] = [p.x,p.y,p.z]
        distance = self.preGraspDistance * .5 + self.gripperFingerLength * .5
        graspTransMat = transformations.translation_matrix([0,0,distance])
        graspMat = transformations.concatenate_matrices(preGraspMat, graspTransMat)
        #print preGraspMat
        #print graspTransMat
        #print graspMat
        p = transformations.translation_from_matrix(graspMat)
       
        #Publish grasp transform for visualization
        self._tf_broadcaster.sendTransform(
                (p[0],p[1],p[2]),
                (o.x, o.y, o.z, o.w),
                motion_plan_request.goal_constraints.orientation_constraints[0].header.stamp,
                "grasp",
                motion_plan_request.goal_constraints.orientation_constraints[0].header.frame_id)
 
        pos_constraint = PositionConstraint()
        pos_constraint.header = motion_plan_request.goal_constraints.orientation_constraints[0].header
        pos_constraint.link_name = self.toolLinkName
        pos_constraint.position = Point(p[0],p[1],p[2])
        #pos_constraint.position = Point(-0.0644721, 0.609922, 0) #Stow position for debug
        pos_constraint.constraint_region_shape.type = Shape.SPHERE
        pos_constraint.constraint_region_shape.dimensions = [0.01]
        pos_constraint.weight = 1
        motion_plan_request.goal_constraints.position_constraints.append(pos_constraint)
        #TODO: Add path constraint to require a (roughly) cartesian move
        
        #Turn off collision operations between the gripper and all objects
        for collisionName in self.gripperCollisionNames:
            collisionOperation = CollisionOperation(collisionName, 
                                    CollisionOperation.COLLISION_SET_ALL,
                                    0.0,
                                    CollisionOperation.DISABLE)
            graspGoal.operations.collision_operations.append(collisionOperation)
        return graspGoal
Example #10
def get_trans_to_trans(trans1, rot1, trans2, rot2):
    quat_mat1 = transformations.quaternion_matrix(rot1)
    quat_mat2 = transformations.quaternion_matrix(rot2)
    trans_mat1 = transformations.translation_matrix(trans1)
    trans_mat2 = transformations.translation_matrix(trans2)
    mat1 = transformations.concatenate_matrices(trans_mat1, quat_mat1)
    mat2 = transformations.concatenate_matrices(trans_mat2, quat_mat2)
    rel_mat = numpy.linalg.inv(mat1).dot(mat2)
    rel_trans = transformations.translation_from_matrix(rel_mat)
    rel_rot = transformations.quaternion_from_matrix(rel_mat)
    rel_rot = [rel_rot[3], rel_rot[0], rel_rot[1], rel_rot[2]]
    print "rel_pose: [%f, %f, %f, %f, %f, %f, %f]" % (
        rel_trans[0],
        rel_trans[1],
        rel_trans[2],
        rel_rot[0],
        rel_rot[1],
        rel_rot[2],
        rel_rot[3],
    )
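
Note the reindexing of rel_rot above: tf.transformations returns quaternions in [x, y, z, w] order, and the printed pose rearranges them to [w, x, y, z]. A tiny helper for the same conversion (the name is illustrative):

def quat_xyzw_to_wxyz(q):
    # tf.transformations uses [x, y, z, w]; some consumers expect [w, x, y, z]
    return [q[3], q[0], q[1], q[2]]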
Example #11
    def _makeMove(self):
        '''Move forward a small distance'''
        print "Making move goal"
        graspGoal = MoveArmGoal()
        graspGoal.planner_service_name = self.plannerServiceName
        motion_plan_request = MotionPlanRequest()
        motion_plan_request.group_name = self.armGroupName
        motion_plan_request.num_planning_attempts = 5
        motion_plan_request.planner_id = ""
        motion_plan_request.allowed_planning_time = rospy.Duration(5,0)
        #Orientation constraint is the same as for previous
        #Create Orientation constraint object
        o_constraint = OrientationConstraint()
        o_constraint.header.frame_id = self.frameID
        o_constraint.header.stamp = rospy.Time.now()
        o_constraint.link_name = self.toolLinkName
        o_constraint.orientation = Quaternion(0.656788, 0.261971, 0.648416, -0.282059)
        o_constraint.absolute_roll_tolerance = 0.04
        o_constraint.absolute_pitch_tolerance = 0.04
        o_constraint.absolute_yaw_tolerance = 0.04
        o_constraint.weight = 1
        motion_plan_request.goal_constraints.orientation_constraints.append(o_constraint)
        
        #Translate from pregrasp position to final position in a roughly straight line
        o = o_constraint.orientation
        p = Point(-0.064433, 0.609915, 0)
        preGraspMat = transformations.quaternion_matrix([o.x,o.y,o.z,o.w])
        preGraspMat[:3, 3] = [p.x,p.y,p.z]
        distance = .3  # self.preGraspDistance + self.gripperFingerLength/2
        graspTransMat = transformations.translation_matrix([0,0,distance])
        graspMat = transformations.concatenate_matrices(preGraspMat, graspTransMat)
        #print preGraspMat
        #print graspTransMat
        #print graspMat
        p = transformations.translation_from_matrix(graspMat)
       
        #Publish grasp transform for visualization
        self._tf_broadcaster.sendTransform(
                (p[0],p[1],p[2]),
                (o.x, o.y, o.z, o.w),
                o_constraint.header.stamp,
                "grasp",
                o_constraint.header.frame_id)
 
        pos_constraint = PositionConstraint()
        pos_constraint.header = o_constraint.header
        pos_constraint.link_name = self.toolLinkName
        pos_constraint.position = Point(p[0],p[1],p[2])
        pos_constraint.constraint_region_shape.type = Shape.SPHERE
        pos_constraint.constraint_region_shape.dimensions = [0.01]#[0.01, 0.01, 0.01]
        pos_constraint.weight = 1
        motion_plan_request.goal_constraints.position_constraints.append(pos_constraint)
        graspGoal.motion_plan_request = motion_plan_request
        return graspGoal
Example #12
 def convert_pose_inverse_transform(self, pose):
     """ This is a helper method to invert a transform (this is built into
         the tf C++ classes, but omitted from Python) """
     transform = t.concatenate_matrices(
         t.translation_matrix(
             [pose.position.x, pose.position.y, pose.position.z]),
         t.quaternion_matrix([
             pose.orientation.x, pose.orientation.y, pose.orientation.z,
             pose.orientation.w
         ]))
     inverse_transform_matrix = t.inverse_matrix(transform)
     return (t.translation_from_matrix(inverse_transform_matrix),
             t.quaternion_from_matrix(inverse_transform_matrix))
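
A quick sanity check for the inversion above: composing the forward and inverted transforms should give the identity (pose values are made up):

import numpy as np
import tf.transformations as t

forward = t.concatenate_matrices(
    t.translation_matrix([1.0, -2.0, 0.3]),
    t.quaternion_matrix(t.quaternion_from_euler(0.1, 0.2, 0.3)))
assert np.allclose(np.dot(forward, t.inverse_matrix(forward)), np.eye(4))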
Example #13
 def update_real_const_pose(self):
     if self.real_pose_mat is None:
         #rospy.logwarn("Real pose of '{}' is not detected!".format(self.referencePart))
         pass
     else:
         for const_name in self.assemConsts.keys():
             const = self.assemConsts[const_name]
             const_end_pose = const["endCoordinate"]
             real_const_end_pose = tf.concatenate_matrices(
                 self.real_pose_mat, const_end_pose)
             const["real_endCoordinate"] = real_const_end_pose
             const_quat = tf.quaternion_from_matrix(real_const_end_pose)
             const["real_zAxis"] = utils.get_transformed_zAxis(const_quat)
Example #14
def odomVisualPublisher(v, th, time):
    global last_time, voX, voY, voT
    current_time = time

    dt = (current_time.to_sec() - last_time.to_sec())
    voX += v * math.cos(voT) * dt
    voY += v * math.sin(voT) * dt
    voT += th * dt

    msg = Odometry()
    msg.header.stamp = current_time
    msg.pose.pose.position = Point(voX, voY, 0.0)

    tx, ty, tz, tw = tf.transformations.quaternion_from_euler(0.0, 0.0, voT)
    msg.pose.pose.orientation = Quaternion(tx, ty, tz, tw)
    pub1.publish(msg)

    pos = msg.pose.pose
    inversed_transform = t.concatenate_matrices(
        t.translation_matrix((pos.position.x, pos.position.y, pos.position.z)),
        t.quaternion_matrix((pos.orientation.x, pos.orientation.y,
                             pos.orientation.z, pos.orientation.w)))
    inversed_transform = t.inverse_matrix(inversed_transform)

    (position1,
     orientation1) = tfListener.lookupTransform("/odom", "/base_footprint",
                                                rospy.Time(0))
    inversed_transform_1 = t.concatenate_matrices(
        t.translation_matrix(position1), t.quaternion_matrix(orientation1))
    inversed_transform_1 = t.inverse_matrix(inversed_transform_1)

    multiply = np.dot(inversed_transform, inversed_transform_1)
    tran = t.translation_from_matrix(multiply)
    rot = t.quaternion_from_matrix(multiply)

    antena.sendTransform(tran, rot, current_time, 'odom_visual',
                         'base_footprint')
    last_time = current_time
Example #15
def get_object_pose_as_matrix(object_name):
    global object_poses
    obj = object_poses[object_name]
    obj_trans = (obj.pose.pose.pose.position.x, obj.pose.pose.pose.position.y, obj.pose.pose.pose.position.z)
    obj_quat = (
        obj.pose.pose.pose.orientation.x,
        obj.pose.pose.pose.orientation.y,
        obj.pose.pose.pose.orientation.z,
        obj.pose.pose.pose.orientation.w,
    )
    obj_quat_mat = transformations.quaternion_matrix(obj_quat)
    obj_trans_mat = transformations.translation_matrix(obj_trans)
    obj_mat = transformations.concatenate_matrices(obj_trans_mat, obj_quat_mat)
    return obj_mat
Example #16
def speed_bump_y(point):

    # Publish a rotated plane using a numpy transform matrix
    radian = 0.523599  # 30 degrees
    sin = 0.5
    cos = 0.866
    depth = 0.3
    width = 2.0

    R_x_1 = transformations.rotation_matrix(radian, (1,0,0)) # Rotate around the x-axis by +30 degrees
    T0_1 = transformations.translation_matrix((point.x, point.y-0.5*cos*depth,0.5*sin*depth))
    T_1 = transformations.concatenate_matrices(T0_1, R_x_1)
    markers.publishPlane(T_1, width, depth, 'yellow', 5.0) # pose, width, depth, color, lifetime

    R_x_2 = transformations.rotation_matrix(-1*radian, (1,0,0)) # Rotate around the x-axis by -30 degrees
    T0_2 = transformations.translation_matrix((point.x,point.y+ 0.5*cos*depth,0.5*sin*depth))
    T_2 = transformations.concatenate_matrices(T0_2, R_x_2)
    markers.publishPlane(T_2, width, depth, 'yellow', 5.0) # pose, width, depth, color, lifetime

    center = Point(point.x, point.y, sin*depth + 0.1)
    P = Pose(center,Quaternion(0,0,0,1))
    scale = Vector3(0.15,0.15,0.2)
    markers.publishText(P, 'Speed Bump', 'white', scale, 5.0) # pose, text, color, scale, lifetime
Example #17
    def get_twisted_zAxis(self, rot):
        initial_zAxis = np.array([0, 0, 1])
        if len(rot) == 3:
            rot_mat = tf.euler_matrix(rot[0], rot[1], rot[2])
        elif len(rot) == 4:
            rot_mat = tf.quaternion_matrix(rot)
        else:
            print("Given rot component's number is wrong!")
            return TypeError
        initial_tf = tf.translation_matrix(initial_zAxis)
        twisted_tf = tf.concatenate_matrices(rot_mat, initial_tf)
        twisted_tf[np.abs(twisted_tf)<1e-5] = 0

        return twisted_tf[:3, 3]
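
Rotating the unit z-axis by embedding it as a translation (rot_mat * T) is equivalent to reading the third column of the rotation matrix directly; a sketch of that equivalence (angles are arbitrary; tft aliases tf.transformations):

import numpy as np
import tf.transformations as tft

rot_mat = tft.euler_matrix(0.4, -0.2, 1.1)
via_translation = tft.concatenate_matrices(
    rot_mat, tft.translation_matrix([0, 0, 1]))[:3, 3]
assert np.allclose(via_translation, rot_mat[:3, 2])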
Example #18
    def odom_cb(self, msg):
        stamp = msg.header.stamp

        try:
            self._tfl.waitForTransform('base_footprint', 'odom', stamp, timeout=self._timeout)
            o2b_p, o2b_q = self._tfl.lookupTransform('odom', 'base_footprint', stamp)#'base_footprint', 'odom', stamp)
        except tf.Exception as e:
            rospy.loginfo('w2o tf error  : {}'.format(e))
            return

        o2b_T = tx.concatenate_matrices(
                tx.translation_matrix(o2b_p), tx.quaternion_matrix(o2b_q)
                )
        o2b_T_i = tx.inverse_matrix(o2b_T)

        w2b = msg.pose.pose
        w2b_T = pm.toMatrix(pm.fromMsg(w2b))

        w2o_T = tx.concatenate_matrices(w2b_T, o2b_T_i)
        
        txn, rxn = pm.toTf(pm.fromMatrix(w2o_T))

        self._tfb.sendTransform(txn, rxn, stamp, 'odom', 'world')
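
The correction broadcast above follows the usual frame algebra: world->odom = world->base . (odom->base)^-1. A numpy-only sketch of the same composition (values are illustrative):

import numpy as np
import tf.transformations as tx

w2b_T = tx.euler_matrix(0, 0, 0.5)   # world -> base, e.g. from localization
o2b_T = tx.euler_matrix(0, 0, 0.4)   # odom  -> base, e.g. from odometry
w2o_T = tx.concatenate_matrices(w2b_T, tx.inverse_matrix(o2b_T))
# Composing the result with odom -> base must reproduce world -> base
assert np.allclose(tx.concatenate_matrices(w2o_T, o2b_T), w2b_T)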
Example #19
    def calculate_bridge_and_base_pose(self, table_state, phi):
        # Calculate bridge pose from cue position and phi
        phi_radians = math.pi * phi / 180.0
        cue_x,    cue_y    = self.get_cue_ball_pos(table_state)
        bridge_x, bridge_y = self.get_bridge_pos(cue_x, cue_y, phi)
        (qx, qy, qz, qw)   = transformations.quaternion_about_axis(phi_radians, (0, 0, 1))

        bridge_pos         = transformations.translation_matrix([bridge_x, bridge_y, 0.0])
        bridge_orient      = transformations.quaternion_matrix([qx, qy, qz, qw])
        bridge_pose        = transformations.concatenate_matrices(bridge_pos, bridge_orient)
        bridge_pose_pos    = transformations.translation_from_matrix(bridge_pose)
        bridge_pose_orient = transformations.quaternion_from_matrix(bridge_pose)

        # Produce base pose via fixed transform from bridge pose
        bridge_to_base_trans = transformations.translation_matrix([Constants.BRIDGE_IN_BASE_X, Constants.BRIDGE_IN_BASE_Y, Constants.BRIDGE_IN_BASE_Z])
        bridge_to_base_rot   = transformations.quaternion_matrix([Constants.BRIDGE_IN_BASE_QX, Constants.BRIDGE_IN_BASE_QY, Constants.BRIDGE_IN_BASE_QZ, Constants.BRIDGE_IN_BASE_QW])
        bridge_to_base       = transformations.concatenate_matrices(bridge_to_base_trans, bridge_to_base_rot)
        base_to_bridge       = transformations.inverse_matrix(bridge_to_base)
        base_pose            = transformations.concatenate_matrices(bridge_pose, base_to_bridge)  # correct order?
        base_pose_pos        = transformations.translation_from_matrix(base_pose)
        base_pose_orient     = transformations.quaternion_from_matrix(base_pose)
        print "BASE_POSE", base_pose, "BRIDGE_POSE", bridge_pose 
        return ((bridge_pose_pos, bridge_pose_orient), (base_pose_pos, base_pose_orient))
Example #20
    def set_relative_pose_object(self,
                                 workspace,
                                 x_rel,
                                 y_rel,
                                 yaw_rel,
                                 yaw_center=None):
        """
        Updates the transform base_link -> object_base in local tfBuffer

        :param workspace: reference workspace object
        :param x_rel: object base x position relative to workspace
        :param y_rel: object base y position relative to workspace
        :param yaw_rel: object base rotation on z relative to workspace
        :param yaw_center: Avoid over rotation
        """
        position = np.dot(workspace.position_matrix,
                          np.array([x_rel, y_rel, 1]))
        camera_rotation = transformations.euler_matrix(0, 0, yaw_rel)
        # Here we correct the object orientation to be similar to base_link if
        # the object is on the ground. Not necessarily needed, to be honest...
        convention_rotation = np.array([[0, -1, 0, 0], [-1, 0, 0, 0],
                                        [0, 0, -1, 0], [0, 0, 0, 1]])

        object_rotation = transformations.concatenate_matrices(
            workspace.rotation_matrix, camera_rotation, convention_rotation)
        roll, pitch, yaw = transformations.euler_from_matrix(object_rotation)

        # Correcting yaw to avoid out of reach targets
        if yaw_center is not None:
            if yaw < yaw_center - np.pi / 2:
                yaw += np.pi
            elif yaw > yaw_center + np.pi / 2:
                yaw -= np.pi

        q = transformations.quaternion_from_euler(roll, pitch, yaw)

        t = TransformStamped()
        t.transform.translation.x = position[0]
        t.transform.translation.y = position[1]
        t.transform.translation.z = position[2]

        t.transform.rotation.x = q[0]
        t.transform.rotation.y = q[1]
        t.transform.rotation.z = q[2]
        t.transform.rotation.w = q[3]

        t.header.frame_id = "base_link"
        t.child_frame_id = "object_base"

        self.__tf_buffer.set_transform(t, "default_authority")
Example #21
def main():
    #quaternion = numpy.empty((4, ), dtype=numpy.float64)
    print qx
    quaternion = [qx, qy, qz, qw]
    euler = t.euler_from_quaternion(t.quaternion_inverse(quaternion))
    transform = t.concatenate_matrices(t.translation_matrix([-x, -y, -z]),
                                       t.quaternion_matrix(quaternion))
    inversed_transform = t.inverse_matrix(transform)
    print "Inversed tf: "
    print inversed_transform
    print "HOW TO GO ON with that????? Just get the simple transform which might be wrong"
    print "Trans (m): "
    print[-x, -y, -z]
    print "Rot (rad): "
    print euler
Example #22
def updateCoilPoseFromTF():
    global transListener, leftCoilPose

    now = rospy.Time.now()
    # Change left coil position into world position
    try:
        transListener.waitForTransform('minefield', 'left_coil', now, rospy.Duration(3.0)) 
        (trans,rot) = transListener.lookupTransform('minefield', 'left_coil', now)
    except:
        return

    tr = transformations.concatenate_matrices(transformations.translation_matrix(trans), transformations.quaternion_matrix(rot))

    leftCoilPose = PoseStamped()
    leftCoilPose.pose = pose_msg_from_matrix(tr)
Example #23
 def effector_target_pose(self, target_pose, offset):
     T = tfs.concatenate_matrices(
         self.listener.fromTranslationRotation(
             (target_pose.pose.position.x, target_pose.pose.position.y,
              target_pose.pose.position.z),
             (target_pose.pose.orientation.x,
              target_pose.pose.orientation.y,
              target_pose.pose.orientation.z,
              target_pose.pose.orientation.w)),
         self.listener.fromTranslationRotation(
             offset, tfs.quaternion_from_euler(0, radians(90), 0)))
     pose = gmsg.PoseStamped()
     pose.header.frame_id = target_pose.header.frame_id
     pose.pose = gmsg.Pose(gmsg.Point(*tfs.translation_from_matrix(T)),
                           gmsg.Quaternion(*tfs.quaternion_from_matrix(T)))
     return pose
Example #24
def updateRobotPose():
    global robotPose

    # This function does not get the true robot pose, but only the pose of 'base_link' in the TF;
    # you should replace it with the robot pose resulting from a proper localization process

    robotPose = PoseStamped()
    now = rospy.Time.now()
    # Get left coil position in relation to robot
    try:
        transListener.waitForTransform('minefield', 'base_link', now, rospy.Duration(2.0))    
        (trans,rot) = transListener.lookupTransform('minefield', 'base_link', now)
    except:
        return

    tr2 = transformations.concatenate_matrices(transformations.translation_matrix(trans), transformations.quaternion_matrix(rot))
    robotPose.pose = pose_msg_from_matrix(tr2)
Example #25
    def send_reset_to_rovio(self):
        rospy.wait_for_service('rovio/reset_to_pose')

        try:
            rovio_reset_srv = rospy.ServiceProxy('rovio/reset_to_pose',
                                                 SrvResetToPose)

            # compute pose from local ENU (East-North-Up frame) to
            # IMU frame of the ViSensor (== C frame, according to MSF)
            T_Enu_C = tf.concatenate_matrices(self._T_Enu_I, self._T_I_C)
            q_Enu_C = tf.quaternion_from_matrix(T_Enu_C)

            # set new Sensor IMU position and orientation respect to World frame
            # (which is now aligned to local ENU)
            self._pose_world_imu_msg.position.x = T_Enu_C[0, 3]
            self._pose_world_imu_msg.position.y = T_Enu_C[1, 3]
            self._pose_world_imu_msg.position.z = T_Enu_C[2, 3]
            self._pose_world_imu_msg.orientation.w = q_Enu_C[3]
            self._pose_world_imu_msg.orientation.x = q_Enu_C[0]
            self._pose_world_imu_msg.orientation.y = q_Enu_C[1]
            self._pose_world_imu_msg.orientation.z = q_Enu_C[2]

            rovio_reset_srv(self._pose_world_imu_msg)

            if self._verbose:
                q_Enu_I = tf.quaternion_from_matrix(self._T_Enu_I)
                (yaw, pitch, roll) = tf.euler_from_quaternion(q_Enu_I, 'rzyx')
                rospy.loginfo(rospy.get_name() +
                              ": body frame of MAV assumed with " +
                              str(math.degrees(roll)) + " (deg) roll, " +
                              str(math.degrees(pitch)) + " (deg) pitch, " +
                              str(math.degrees(yaw)) +
                              " (deg) yaw from local ENU (local axis, ZYX)")

            self.create_rovio_init_info()

            success = True
            message = "Service call to reset Rovio internal state sent"

            return (success, message)

        except rospy.ServiceException, e:
            success = False
            message = "Service call to reset Rovio internal state failed: %s" % e
            return (success, message)
Example #26
def get_inverse_trans_rot(pose):
    # Car transform
    transT_car = (pose.pose.position.x, pose.pose.position.y,
                  pose.pose.position.z)
    rotT_car = (pose.pose.orientation.x, pose.pose.orientation.y,
                pose.pose.orientation.z, pose.pose.orientation.w)

    # World transform
    transform = t.concatenate_matrices(t.translation_matrix(transT_car),
                                       t.quaternion_matrix(rotT_car))
    inversed_transform = t.inverse_matrix(transform)
    translation = t.translation_from_matrix(inversed_transform)
    quaternion = t.quaternion_from_matrix(inversed_transform)
    # rospy.loginfo('transT_world = {}'.format(translation))
    # rospy.loginfo('rotT_world = {}'.format(quaternion))
    # transT = translation
    # rotT = quaternion
    return translation, quaternion
Example #27
def callback(msg):
	global distance_z,distance_y,marker_yaw,flag,count
	markerID = 'marker_'+str(msg.id)
	try:
		(trans,rot) = listener.lookupTransform( markerID, 'camera', rospy.Time(0))
		transform = t.concatenate_matrices(t.translation_matrix(trans), t.quaternion_matrix(rot))
		inv_tf = t.inverse_matrix(transform)

		distance_z = trans[2]
		distance_y = trans[1]
		marker_yaw = math.atan(inv_tf[0][2]/inv_tf[2][2])
		
		camera2base()
		if(distance_z<0.75):
			if(flag==1):
				if( msg.id <= 214 and msg.id>= 200):
					for i in database:
						if(i[0] == msg.id):
							substance = i[1]
							break
				else:
					substance = 'OBSTACLE'
				prev_site = (soi_visited[-1][1],soi_visited[-1][2])
				for obj in SOI:
					if(obj[0]==msg.id):
						serial_no = SOI.index(obj)
						current_site = (obj[1],obj[2])
						soi_visited.append(obj)
						break
				if(msg.id==219):
					msg.id = 189		
				print str(serial_no)+':'+str(prev_site)+':'+str(current_site)+':'+str(msg.id)+':'+substance
				count+=1
				if(count==5):
					print 'Mission Accomplished'		
				
				pub.publish()
				flag = 0
			localize(msg.id)

		
	except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
		pass	
Example #28
def read_aruco_transformation():

    with open(
            '/home/roya/catkin_ws/src/pam_manipulation_planning/config/aruco_camera.yaml'
    ) as f:
        aruco = yaml.safe_load(f)

    kinect_T_aruco_rot = quaternion_matrix([
        aruco["kinect_orientation_x"], aruco["kinect_orientation_y"],
        aruco["kinect_orientation_z"], aruco["kinect_orientation_w"]
    ])
    kinect_T_aruco_trans = translation_matrix([
        aruco["kinect_position_x"], aruco["kinect_position_y"],
        aruco["kinect_position_z"]
    ])
    transform = t.concatenate_matrices(kinect_T_aruco_trans,
                                       kinect_T_aruco_rot)

    return transform
Example #29
def updateCoilPoseFromTF():
    global transListener, leftCoilPose

    now = rospy.Time.now()
    # Change left coil position into world position
    try:
        transListener.waitForTransform('minefield', 'left_coil', now,
                                       rospy.Duration(3.0))
        (trans, rot) = transListener.lookupTransform('minefield', 'left_coil',
                                                     now)
    except:
        return

    tr = transformations.concatenate_matrices(
        transformations.translation_matrix(trans),
        transformations.quaternion_matrix(rot))

    leftCoilPose = PoseStamped()
    leftCoilPose.pose = pose_msg_from_matrix(tr)
Example #30
def updateRobotPose():
    global robotPose

    # This function does not get the true robot pose, but only the pose of 'base_link' in the TF;
    # you should replace it with the robot pose resulting from a proper localization process

    robotPose = PoseStamped()
    now = rospy.Time.now()
    # Get left coil position in relation to robot
    try:
        transListener.waitForTransform('minefield', 'base_link', now,
                                       rospy.Duration(2.0))
        (trans, rot) = transListener.lookupTransform('minefield', 'base_link',
                                                     now)
    except:
        return

    tr2 = transformations.concatenate_matrices(
        transformations.translation_matrix(trans),
        transformations.quaternion_matrix(rot))
    robotPose.pose = pose_msg_from_matrix(tr2)
Example #31
def updateCoilPoseManually(referencePose):
    global transListener, leftCoilPose

    now = rospy.Time.now()
    # Get left coil pose in relation to robot
    try:
        transListener.waitForTransform('base_link', 'left_coil', now, rospy.Duration(2.0))    
        (trans,rot) = transListener.lookupTransform('base_link', 'left_coil', now)
    except:
        return

    localCoil_Mat = transformations.concatenate_matrices(transformations.translation_matrix(trans), transformations.quaternion_matrix(rot))

    # Use reference robot pose
    robot_Mat = matrix_from_pose_msg(referencePose)

    # Compute corrected coil pose
    corrected_Mat = np.dot(robot_Mat, localCoil_Mat)

    leftCoilPose = PoseStamped()
    leftCoilPose.pose = pose_msg_from_matrix(corrected_Mat)
Example #32
def callback(msg):
    global distance_z, distance_y, marker_yaw, localized, Marker_ID
    markerID = 'marker_' + str(msg.id)
    try:
        (trans, rot) = listener.lookupTransform(markerID, 'camera',
                                                rospy.Time(0))
        transform = t.concatenate_matrices(t.translation_matrix(trans),
                                           t.quaternion_matrix(rot))
        inv_tf = t.inverse_matrix(transform)

        distance_z = trans[2]
        distance_y = trans[1]
        marker_yaw = math.atan(inv_tf[0][2] / inv_tf[2][2])

        #convert readings to base_link
        camera2base()

        #If the detected marker is within 50 cm of the robot then it will do three things:
        # 1. Localize
        # 2. Tell the node goal_pt to cancel the goal because ArUco is visible from here.
        #    eyes.publish() sends an Empty ROS message to do this.
        # 3. Finally it will display the output.
        # Step 2 is carried out only if the ArUco was not previously detected and the ID has been added to the SOI list.
        # This is done by the if statement checking the variable 'localized'. It is set to 1 in the localize() function
        # if ID has been successfully added to the SOI list.
        if (distance_z < 0.5):

            localize(msg.id)

            if (not visited.has_key(msg.id)) and localized:
                eyes.publish()
                localized = 0
                Marker_ID = msg.id
                print Marker_ID

            display_output(msg.id)

    except (tf.LookupException, tf.ConnectivityException,
            tf.ExtrapolationException):
        pass
Example #33
    def find_error_to_target(self):
        ## find the offset between the target point and the drone using transforms
        try:
            (trans_drone, rot_drone) = self.listener.lookupTransform(
                '/end_effector', '/camera_optical', rospy.Time(0))
            x, y = self.pixel_to_meter()
            print("x", x)
            print("y", y)
            trans_target = [x, y, self.trans[2]]
            rot_target = [0.11320321, 0, 0, 0.99357186]
            euler_target = euler_from_quaternion(rot_target)
            euler_drone = euler_from_quaternion(rot_drone)

            matrix_target = compose_matrix(None, None, euler_target,
                                           trans_target, None)
            matrix_drone = compose_matrix(None, None, euler_drone, trans_drone,
                                          None)

            matrix_result = concatenate_matrices(matrix_drone, matrix_target)

            trans_result = translation_from_matrix(matrix_result)
            rot_result = quaternion_from_matrix(matrix_result)

            roll_result, pitch_result, yaw_result = euler_from_quaternion(
                rot_result)

            #for the .csv file
            self.array_x.append(trans_result[0])
            self.array_y.append(trans_result[1])
            self.array_z.append(trans_result[2])

            self.img_x.append(self.trans[0] + C_MID[0])
            self.img_y.append(self.trans[1] + C_MID[1])
            return True, trans_result

        except (tf.LookupException, tf.ConnectivityException,
                tf.ExtrapolationException) as e:
            print(e)
            return False, None
Example #34
    def attach_obj(self, move_obj, rel_tr, rel_quat, subassembly_name):
        self.attach_cm(move_obj.cm, move_obj.assem_mass, rel_tr, rel_quat)
        self.attach_consts(move_obj.assemConsts, rel_tr, rel_quat)
        self.attach_dict(move_obj.assemDict, rel_tr, rel_quat, move_obj.referencePart)
        self.assemXML = xmltodict.unparse(self.assemDict)
        self.assem_mass += move_obj.assem_mass

        rel_tf = utils.get_tf_matrix(rel_tr, rel_quat)
        for comp_name, comp in move_obj.components.items():
            comp_tr = comp["tr"]
            comp_quat = comp["quat"]
            comp_mass = comp["mass"]
            comp_tf = utils.get_tf_matrix(comp_tr, comp_quat)
            new_tf = tf.concatenate_matrices(rel_tf,comp_tf)
            new_tr = tf.translation_from_matrix(new_tf)
            new_quat = tf.quaternion_from_matrix(new_tf)
            self.components[comp_name] = {"tr": new_tr, "quat": new_quat, "mass": comp_mass}
        
        self.subassem_components[self.obj_name]["mass"] = self.assem_mass
        self.subassem_components[move_obj.assem_name] = {"tr": rel_tr, "quat": rel_quat, "mass": move_obj.assem_mass}

        self.assem_name = subassembly_name
Example #35
    def get_transform_wrt_odom_frame(self):
        try:
            (trans,
             rot) = self.transformer.lookupTransform("map", "odom",
                                                     rospy.Time(0))  # latest available transform
        except:
            return None

        cur_pose = self.get_cur_ros_pose()

        transform = tftrans.concatenate_matrices(
            tftrans.translation_matrix(trans), tftrans.quaternion_matrix(rot))
        inversed_transform = tftrans.inverse_matrix(transform)

        inv_translation = tftrans.translation_from_matrix(inversed_transform)
        inv_quaternion = tftrans.quaternion_from_matrix(inversed_transform)

        transformStamped = geometry_msgs.msg.TransformStamped()
        transformStamped.transform.translation.x = inv_translation[0]
        transformStamped.transform.translation.y = inv_translation[1]
        transformStamped.transform.translation.z = inv_translation[2]
        transformStamped.transform.rotation.x = inv_quaternion[0]
        transformStamped.transform.rotation.y = inv_quaternion[1]
        transformStamped.transform.rotation.z = inv_quaternion[2]
        transformStamped.transform.rotation.w = inv_quaternion[3]

        cur_transform_wrt_odom = tf2_geometry_msgs.do_transform_pose(
            cur_pose, transformStamped)

        translation = cur_transform_wrt_odom.pose.position

        quaternion = (cur_transform_wrt_odom.pose.orientation.x,
                      cur_transform_wrt_odom.pose.orientation.y,
                      cur_transform_wrt_odom.pose.orientation.z,
                      cur_transform_wrt_odom.pose.orientation.w)

        _, _, yaw = tf.transformations.euler_from_quaternion(quaternion)

        return translation, yaw
Example #36
def sigmoid(p, vertices, gt_rebased):
    # Get the current parameter set and build the transform
    roll, pitch, yaw = p
    q = quaternion_from_rpy(roll, pitch, yaw)
    cur_delta = to_transform([0.0, 0.0, 0.0, 0.0, q[0], q[1], q[2], q[3]])

    # Compute the quadratic error for the current delta transformation
    err = 0.0
    for i in range(len(vertices)):
        # Compute the corrected ground truth
        tf_gt = to_transform(gt_rebased[i])
        tf_gt_t = tf.translation_from_matrix(tf_gt)
        tf_gt_t = np.array([tf_gt_t[0], tf_gt_t[1], tf_gt_t[2], 1.0])
        tf_gt_corrected = tf.concatenate_matrices(cur_delta, tf_gt_t)
        tf_gt_corr_vect = [
            0.0, tf_gt_corrected[0], tf_gt_corrected[1], tf_gt_corrected[2],
            0.0, 0.0, 0.0, 1.0
        ]

        # Compute the error
        err += np.power(calc_dist(vertices[i], tf_gt_corr_vect), 2)

    return np.sqrt(err)
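
Since concatenate_matrices simply chains numpy dot products, passing a homogeneous 4-vector as the final argument (as done with tf_gt_t above) transforms a point rather than composing matrices. An explicit sketch:

import numpy as np
import tf.transformations as tft

delta = tft.rotation_matrix(np.pi / 2, (0, 0, 1))    # 90 degrees about z
point_h = np.array([1.0, 0.0, 0.0, 1.0])             # homogeneous point
rotated = tft.concatenate_matrices(delta, point_h)   # == np.dot(delta, point_h)
assert np.allclose(rotated[:3], [0.0, 1.0, 0.0])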
Example #37
    def _callback(self, fiducial_transforms):
        header = fiducial_transforms.header
        child_frame_id = self._frame_id

        if self._invert:
            child_frame_id = header.frame_id
            header.frame_id = self._frame_id

        obj = filter(lambda a: a.fiducial_id == self._fiducial_id,
                     fiducial_transforms.transforms)

        if len(obj) > 0:
            obj = obj[0]

            if self._invert:
                trans = obj.transform.translation
                rot = obj.transform.rotation
                trans = [trans.x, trans.y, trans.z]
                rot = [rot.x, rot.y, rot.z, rot.w]
                m_trans = t.translation_matrix(trans)
                m_rot = t.quaternion_matrix(rot)
                transform = t.concatenate_matrices(m_trans, m_rot)
                inverse_transform = t.inverse_matrix(transform)
                trans = t.translation_from_matrix(inverse_transform)
                rot = t.quaternion_from_matrix(inverse_transform)
                obj.transform.translation = Vector3(x=trans[0],
                                                    y=trans[1],
                                                    z=trans[2])
                obj.transform.rotation = Quaternion(x=rot[0],
                                                    y=rot[1],
                                                    z=rot[2],
                                                    w=rot[3])

            self._tf_brd.sendTransformMessage(
                TransformStamped(header=header,
                                 child_frame_id=child_frame_id,
                                 transform=obj.transform))
Example #38
    def execute(self, userdata):
        target_pos_from_orig_pos_local_frame = tft.translation_matrix(
            (self.grid[userdata.search_count][0] * self.grid_scale_x,
             self.grid[userdata.search_count][1] * self.grid_scale_y, 0.0))
        grasp_yaw_rotation_mat = tft.euler_matrix(0, 0, self.grasping_yaw)
        uav_target_coords = tft.concatenate_matrices(
            userdata.orig_global_trans, grasp_yaw_rotation_mat,
            target_pos_from_orig_pos_local_frame)
        uav_target_pos = tft.translation_from_matrix(uav_target_coords)

        self.robot.goPosWaitConvergence('global',
                                        [uav_target_pos[0], uav_target_pos[1]],
                                        self.robot.getTargetZ(),
                                        self.robot.getTargetYaw(),
                                        pos_conv_thresh=0.2,
                                        yaw_conv_thresh=0.1,
                                        vel_conv_thresh=0.1,
                                        timeout=self.grid_search_timeout)

        userdata.search_count += 1
        if userdata.search_count == len(self.grid):
            userdata.search_count = 0
            userdata.search_failed = True

            orig_pos = tft.translation_from_matrix(userdata.orig_global_trans)
            rospy.logwarn(self.__class__.__name__ + ": return to original pos")
            self.robot.goPosWaitConvergence('global',
                                            [orig_pos[0], orig_pos[1]],
                                            self.robot.getTargetZ(),
                                            self.robot.getTargetYaw(),
                                            pos_conv_thresh=0.1,
                                            yaw_conv_thresh=0.1,
                                            vel_conv_thresh=0.1,
                                            timeout=7)

        return 'succeeded'
Example #39
def matrix_from_trans_rot(trans, rot):
    trans_mat = transformations.translation_matrix(trans)
    rot_mat = transformations.quaternion_matrix(rot)
    return transformations.concatenate_matrices(trans_mat, rot_mat)
Example #40
def matrix_from_pose_msg(pose):
    t = transformations.translation_matrix((pose.position.x, pose.position.y, pose.position.z))
    q = [pose.orientation.x, pose.orientation.y, pose.orientation.z, pose.orientation.w]
    r = transformations.quaternion_matrix(q)
    return transformations.concatenate_matrices(t, r)
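
A hedged usage sketch for the two helpers above, rebuilding a matrix from a lookupTransform-style (trans, rot) pair; the values are made up:

trans = (0.5, 0.0, 0.1)                # translation, e.g. from lookupTransform
rot = (0.0, 0.0, 0.0, 1.0)             # identity quaternion, xyzw order
M = matrix_from_trans_rot(trans, rot)  # 4x4 homogeneous transform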
Example #41
    def __init__(self, agent_topic_names=['ridgeback2', 'ridgeback1']):
        print("")
        print("Initializiation starting.\n")

        rospy.init_node('transform_publisher', anonymous=True)

        self.n_agent_topics = len(agent_topic_names)
        self.awaiting_msg_agent = [True] * self.n_agent_topics
        self.data_agent = [None] * self.n_agent_topics

        for ii in range(self.n_agent_topics):
            rospy.Subscriber(
                "/vrpn_client_node/" + agent_topic_names[ii] + "/pose",
                PoseStamped, self.callback_agent_optitrack, ii)

        # Use ridgeback-time stamp for messages
        self.ridgeback_stamp = None
        self.awaiting_msg_imu = True
        rospy.Subscriber("/imu/data", Imu, self.callback_imu)  # Get timestamp

        self.tf_listener = tf.TransformListener()
        self.tf_broadcast = tf2.TransformBroadcaster()

        # START LOOP
        while ((self.awaiting_msg_imu or np.sum(self.awaiting_msg_agent))
               and not rospy.is_shutdown()):
            # import pdb; pdb.set_trace() # BREAKPOINT
            if self.awaiting_msg_imu:
                print("Waiting for imu-stamp message ...")
            if np.sum(self.awaiting_msg_agent):
                print("Waiting for robot-message ...")
            rospy.sleep(0.25)

        rate = rospy.Rate(100)  # Hz
        print("Entering main loop....")

        while not rospy.is_shutdown():
            # lock.acquire()
            use_ridgeback_stamp = True
            if use_ridgeback_stamp:
                time_stamp = self.ridgeback_stamp
            else:
                print(
                    "Warning: A time synchronization problem might occur due to clock asynchronization."
                )
                print("We advise using the recorded ridgeback time.")

            try:
                (self.trans_odom2base,
                 self.rot_odom2base) = self.tf_listener.lookupTransform(
                     '/base_link', '/odom', rospy.Time(0))
            except (tf.LookupException, tf.ConnectivityException,
                    tf.ExtrapolationException):
                continue

            transform = trafo.concatenate_matrices(
                trafo.translation_matrix(self.trans_odom2base),
                trafo.quaternion_matrix(self.rot_odom2base))
            inversed_transform = trafo.inverse_matrix(transform)

            # TODO: inverse in the other sense to have optitrack as 'master'
            trafo_odom2world_child = TransformStamped()
            # trafo_odom2world_child.header.stamp = rospy.Time.now()
            trafo_odom2world_child.header.stamp = time_stamp
            trafo_odom2world_child.header.frame_id = "base_link_clone"
            trafo_odom2world_child.child_frame_id = "odom"

            trafo_odom2world_child.transform.translation.x = self.trans_odom2base[
                0]
            trafo_odom2world_child.transform.translation.y = self.trans_odom2base[
                1]
            trafo_odom2world_child.transform.translation.z = self.trans_odom2base[
                2]

            trafo_odom2world_child.transform.rotation.x = self.rot_odom2base[0]
            trafo_odom2world_child.transform.rotation.y = self.rot_odom2base[1]
            trafo_odom2world_child.transform.rotation.z = self.rot_odom2base[2]
            trafo_odom2world_child.transform.rotation.w = self.rot_odom2base[3]
            self.tf_broadcast.sendTransform(trafo_odom2world_child)

            # Optitrack data - smoothed; the quaternion is calculated through multiple markers
            marker_positions = np.zeros((3, self.n_agent_topics))
            for ii in range(self.n_agent_topics):
                marker_positions[:, ii] = [
                    self.data_agent[ii].pose.position.x,
                    self.data_agent[ii].pose.position.y,
                    self.data_agent[ii].pose.position.z
                ]

            center_position = np.sum(marker_positions,
                                     axis=1) / self.n_agent_topics

            if self.n_agent_topics == 1:
                quat = [
                    self.data_agent[0].pose.orientation.w,
                    self.data_agent[0].pose.orientation.x,
                    self.data_agent[0].pose.orientation.y,
                    self.data_agent[0].pose.orientation.z
                ]
            elif self.n_agent_topics == 2:
                y_dir_ridgeback = marker_positions[:, 1] - marker_positions[:,
                                                                            0]
                y_dir_ridgeback[2] = 0  # remove z deviation
                # x_direction = np.cross(y_direction, [0, 0, 1])

                y_dir_init = np.array([0, 1, 0])
                quat = quaternion_from_2vectors(y_dir_init, y_dir_ridgeback)
            else:
                raise NotImplementedError()

            delta_dist = [0.395, 0.06, 0]
            delta_dist = quat_vec_multiplication(quat, delta_dist)
            trafo_optitrack2base_link_clone = TransformStamped()
            trafo_optitrack2base_link_clone.header.stamp = time_stamp  # Time delay of clocks - use ridgeback-message stamp to bridge this
            # trafo_optitrack2base_link_clone.header.stamp = rospy.Time.now()
            trafo_optitrack2base_link_clone.header.frame_id = "world_optitrack"
            trafo_optitrack2base_link_clone.child_frame_id = "base_link_clone"
            trafo_optitrack2base_link_clone.transform.translation.x = center_position[
                0] + delta_dist[0]
            trafo_optitrack2base_link_clone.transform.translation.y = center_position[
                1] + delta_dist[1]
            trafo_optitrack2base_link_clone.transform.translation.z = 0

            trafo_optitrack2base_link_clone.transform.rotation.w = quat[0]
            trafo_optitrack2base_link_clone.transform.rotation.x = quat[1]
            trafo_optitrack2base_link_clone.transform.rotation.y = quat[2]
            trafo_optitrack2base_link_clone.transform.rotation.z = quat[3]

            self.tf_broadcast.sendTransform(trafo_optitrack2base_link_clone)

            # LAB to optitrack  [ set values MANUALLY]
            trafo_lab2optitrack = TransformStamped()
            trafo_lab2optitrack.header.stamp = time_stamp
            # trafo_lab2optitrack.header.stamp = rospy.Time.now()
            trafo_lab2optitrack.header.frame_id = "world_lab"
            trafo_lab2optitrack.child_frame_id = "world_optitrack"
            trafo_lab2optitrack.transform.translation.x = 0.0
            trafo_lab2optitrack.transform.translation.y = 0.0
            trafo_lab2optitrack.transform.translation.z = 0.0

            # q = tf.transformations.quaternion_from_euler(0, 0, -8./180*pi)
            q = tf.transformations.quaternion_from_euler(0, 0, 0)
            trafo_lab2optitrack.transform.rotation.x = q[0]
            trafo_lab2optitrack.transform.rotation.y = q[1]
            trafo_lab2optitrack.transform.rotation.z = q[2]
            trafo_lab2optitrack.transform.rotation.w = q[3]
            self.tf_broadcast.sendTransform(trafo_lab2optitrack)

            # Zero TF
            # TODO: is this really needed?
            trafo_0 = TransformStamped()
            trafo_0.header.stamp = time_stamp
            trafo_0.header.frame_id = "world_lab"
            trafo_0.child_frame_id = "world"
            trafo_0.transform.rotation.w = 1
            self.tf_broadcast.sendTransform(trafo_0)

            # lock.release()
            # return True

            rate.sleep()
Example #42
 def poseToTransform(self, pose):
     # Convert the pose to a 4x4 homogeneous transform
     pos = (pose.position.x, pose.position.y, pose.position.z)
     rot = (pose.orientation.x, pose.orientation.y, pose.orientation.z, pose.orientation.w)
     trans = tr.concatenate_matrices(tr.translation_matrix(pos), tr.quaternion_matrix(rot))
     return trans
Example #43
def create_surface_grasp(object_frame, support_surface_frame, handarm_params,
                         object_type):

    # Get the relevant parameters for hand object combination
    print(object_type)
    if (object_type in handarm_params['surface_grasp']):
        params = handarm_params['surface_grasp'][object_type]
    else:
        params = handarm_params['surface_grasp']['object']

    hand_transform = params['hand_transform']
    pregrasp_transform = params['pregrasp_transform']
    post_grasp_transform = params['post_grasp_transform']  # TODO: USE THIS!!!

    drop_off_config = params['drop_off_config']
    downward_force = params['downward_force']
    hand_closing_time = params['hand_closing_duration']
    hand_synergy = params['hand_closing_synergy']
    down_speed = params['down_speed']
    up_speed = params['up_speed']

    # Set the initial pose above the object
    goal_ = np.copy(object_frame)  #TODO: this should be support_surface_frame
    goal_[:3, 3] = tra.translation_from_matrix(object_frame)
    goal_ = goal_.dot(hand_transform)

    #the grasp frame is symmetrical - check which side is nicer to reach
    #this is a hacky first version for our WAM
    zflip_transform = tra.rotation_matrix(math.radians(180.0), [0, 0, 1])
    if goal_[0][0] < 0:
        goal_ = goal_.dot(zflip_transform)

    # hand pose above object
    pre_grasp_pose = goal_.dot(pregrasp_transform)

    # Set the directions to use TRIK controller with
    dirDown = tra.translation_matrix([0, 0, -down_speed])
    dirUp = tra.translation_matrix([0, 0, up_speed])

    # Set the frames to visualize with RViz
    rviz_frames = []
    #rviz_frames.append(object_frame)
    #rviz_frames.append(goal_)
    #rviz_frames.append(pre_grasp_pose)

    # assemble controller sequence
    control_sequence = []

    # # 1. Go above the object - Pregrasp
    # control_sequence.append(
    #     ha.InterpolatedHTransformControlMode(pre_grasp_pose, controller_name='GoAboveIFCO', goal_is_relative='0',
    #                                          name='Pre_preGrasp'))
    #
    # # 1b. Switch when hand reaches the goal pose
    # control_sequence.append(ha.FramePoseSwitch('Pre_preGrasp', 'Pregrasp', controller='GoAboveIFCO', epsilon='0.01'))

    #Joao test
    #control_sequence.append(
    #     ha.JointControlMode(np.zeros(7), goal_is_relative='0', completion_times=np.array([10, 10, 10, 10, 10, 10, 10]), controller_name='GoToDropJointConfig1', name='GoDropOff1'))
    #
    # # 7.b  Switch when joint is reached
    # #control_sequence.append(ha.TimeSwitch('GoDropOff1', 'Pregrasp', duration = hand_closing_time))
    #control_sequence.append(ha.JointConfigurationSwitch('GoDropOff1', 'Pregrasp', controller='GoToDropJointConfig1',epsilon=str(math.radians(17.))))
    #
    # control_sequence.append(
    #     ha.JointControlMode(np.array([1.1, 1.2, 0.3, 0.4, 0.5, 1.6, 0.7]), goal_is_relative='0', completion_times=np.array([10, 10, 10, 10, 10, 10, 10]),
    #                         controller_name='GoToDropJointConfig2', name='GoDropOff2'))
    #
    # control_sequence.append(ha.JointConfigurationSwitch('GoDropOff2', 'Pregrasp', controller='GoToDropJointConfig2',epsilon=str(math.radians(7.))))

    # 2. Go above the object - Pregrasp

    control_sequence.append(
        ha.InterpolatedHTransformControlMode(tra.translation_matrix(
            [0, 0, 1.0]),
                                             controller_name='GoStart',
                                             goal_is_relative='0',
                                             name='Initial'))
    control_sequence.append(
        ha.FramePoseSwitch('Initial',
                           'Pregrasp',
                           controller='GoStart',
                           epsilon='0.05'))
    print(tra.translation_matrix([0, 0, 1.0]))
    #rviz_frames.append(object_frame)

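    # (Clarifying note, added) This frame composes inverses: it is presumably the
    # pose the arm must reach so that, after applying hand_transform and
    # pregrasp_transform, the hand ends up at the object, i.e.
    #   pregrasp_frame . hand_transform . pregrasp_transform = object_frame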
    pregrasp_frame = tra.concatenate_matrices(
        object_frame, tra.inverse_matrix(pregrasp_transform),
        tra.inverse_matrix(hand_transform))
    print(pregrasp_frame)
    rviz_frames.append(tra.concatenate_matrices(pregrasp_frame,
                                                hand_transform))
    #rviz_frames.append(pregrasp_frame)
    publish_rviz_markers(rviz_frames, 'world', handarm_params)

    # 2. Go above the object - Pregrasp
    control_sequence.append(
        ha.InterpolatedHTransformControlMode(pregrasp_frame,
                                             controller_name='GoAboveObject',
                                             goal_is_relative='0',
                                             name='Pregrasp'))
    # 2b. Switch when hand reaches the goal pose
    control_sequence.append(
        ha.FramePoseSwitch('Pregrasp',
                           'finished',
                           controller='GoAboveObject',
                           epsilon='0.01'))
    #
    # # 3. Go down onto the object (relative in world frame) - Godown
    # control_sequence.append(
    #     ha.InterpolatedHTransformControlMode(dirDown, controller_name='GoDown', goal_is_relative='1', name="GoDown",
    #                                          reference_frame="world"))
    #
    # force  = np.array([0, 0, 0.5*downward_force, 0, 0, 0])
    # # 3b. Switch when goal is reached
    # #JOAO CHANGED
    # #control_sequence.append(ha.ForceTorqueSwitch('GoDown', 'softhand_close',  goal = force,norm_weights = np.array([0, 0, 1, 0, 0, 0]), jump_criterion = "THRESH_UPPER_BOUND", goal_is_relative = '1', frame_id = 'world', port = '2'))
    # control_sequence.append(ha.FramePoseSwitch('Pregrasp', 'GoDown', controller = 'GoAboveObject', epsilon = '0.01'))
    #
    #
    # # 4. Maintain the position
    # desired_displacement = np.array([[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0 ], [0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 1.0]])
    # force_gradient = np.array([[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0 ], [0.0, 0.0, 1.0, 0.005], [0.0, 0.0, 0.0, 1.0]])
    # desired_force_dimension = np.array([0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
    #
    # if handarm_params['isForceControllerAvailable']:
    #     control_sequence.append(ha.HandControlMode_ForceHT(name  = 'softhand_close', synergy = hand_synergy,
    #                                                        desired_displacement = desired_displacement,
    #                                                        force_gradient = force_gradient,
    #                                                        desired_force_dimension = desired_force_dimension))
    # else:
    #     # if hand is not RBO then create general hand closing mode?
    #     control_sequence.append(ha.SimpleRBOHandControlMode(goal = np.array([1])))
    #
    #
    # # 4b. Switch when hand closing time ends
    # # joao changed
    # #control_sequence.append(ha.TimeSwitch('softhand_close', 'PostGraspRotate', duration = hand_closing_time))
    # control_sequence.append(ha.FramePoseSwitch('Pregrasp', 'GoDown', controller = 'GoAboveObject', epsilon = '0.01'))
    #
    #
    # # 5. Rotate hand after closing and before lifting it up
    # # relative to current hand pose
    # control_sequence.append(
    #     ha.HTransformControlMode(post_grasp_transform, controller_name='PostGraspRotate', name='PostGraspRotate', goal_is_relative='1', ))
    #
    # # 5b. Switch when hand rotated
    # control_sequence.append(ha.FramePoseSwitch('PostGraspRotate', 'GoUp', controller='PostGraspRotate', epsilon='0.01', goal_is_relative='1', reference_frame = 'EE'))
    #
    # # 6. Lift upwards
    # control_sequence.append(ha.InterpolatedHTransformControlMode(dirUp, controller_name = 'GoUpHTransform', name = 'GoUp', goal_is_relative='1', reference_frame="world"))
    #
    # # 6b. Switch when joint is reached
    # control_sequence.append(ha.FramePoseSwitch('GoUp', 'GoDropOff', controller = 'GoUpHTransform', epsilon = '0.01', goal_is_relative='1', reference_frame="world"))
    #
    # # 7. Go to dropOFF
    # control_sequence.append(ha.JointControlMode(drop_off_config, controller_name = 'GoToDropJointConfig', name = 'GoDropOff'))
    #
    # # 7.b  Switch when joint is reached
    # control_sequence.append(ha.JointConfigurationSwitch('GoDropOff', 'finished', controller = 'GoToDropJointConfig', epsilon = str(math.radians(7.))))

    # 8. Block joints to finish motion and hold object in air
    finishedMode = ha.ControlMode(name='finished')
    finishedSet = ha.ControlSet()
    finishedSet.add(
        ha.Controller(name='JointSpaceController',
                      type='InterpolatedJointController',
                      goal=np.ones(7) * 0.2,
                      goal_is_relative=1,
                      v_max='[0,0]',
                      a_max='[0,0]'))
    finishedMode.set(finishedSet)
    control_sequence.append(finishedMode)

    return cookbook.sequence_of_modes_and_switches_with_saftyOn(
        control_sequence), rviz_frames
Exemplo n.º 44
    def callback(self, msg):
        if not self.active:
            return
        #print msg
        tag_infos = []
        trans_x_buf = 0.0
        trans_y_buf = 0.0
        trans_z_buf = 0.0
        rot_z_buf = 0.0
        yaw_buf = 0.0

        # Load tag detections message
        for detection in msg.detections:
            tag_id = int(detection.id)
            self.checkProtocol(tag_id)

            # Define the transforms
            veh_t_camxout = tr.translation_matrix(
                (self.camera_x, self.camera_y, self.camera_z))
            veh_R_camxout = tr.euler_matrix(0, self.camera_theta * np.pi / 180,
                                            0, 'rxyz')
            veh_T_camxout = tr.concatenate_matrices(
                veh_t_camxout,
                veh_R_camxout)  # 4x4 Homogeneous Transform Matrix

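            # (Clarifying note, added) camxout is the camera frame with x pointing
            # forward (vehicle convention); camzout is the optical frame with z
            # pointing out of the lens. The fixed euler_matrix rotations below
            # convert between these two axis conventions for the camera and the tag.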
            camxout_T_camzout = tr.euler_matrix(-np.pi / 2, 0, -np.pi / 2,
                                                'rzyx')
            veh_T_camzout = tr.concatenate_matrices(veh_T_camxout,
                                                    camxout_T_camzout)

            tagzout_T_tagxout = tr.euler_matrix(-np.pi / 2, 0, np.pi / 2,
                                                'rxyz')

            #Load translation
            trans = detection.pose.pose.position
            rot = detection.pose.pose.orientation

            camzout_t_tagzout = tr.translation_matrix(
                (trans.x * self.scale_x, trans.y * self.scale_y,
                 trans.z * self.scale_z))
            camzout_R_tagzout = tr.quaternion_matrix(
                (rot.x, rot.y, rot.z, rot.w))
            camzout_T_tagzout = tr.concatenate_matrices(
                camzout_t_tagzout, camzout_R_tagzout)

            veh_T_tagxout = tr.concatenate_matrices(veh_T_camzout,
                                                    camzout_T_tagzout,
                                                    tagzout_T_tagxout)

            # Overwrite transformed value
            (trans.x, trans.y,
             trans.z) = tr.translation_from_matrix(veh_T_tagxout)
            (rot.x, rot.y, rot.z,
             rot.w) = tr.quaternion_from_matrix(veh_T_tagxout)

            frame_name = '/tag' + str(tag_id)
            try:
                (trans_tf, rot_tf) = self.listener.lookupTransform(
                    '/ducky1', frame_name, rospy.Time(0))
            except (tf.LookupException, tf.ConnectivityException,
                    tf.ExtrapolationException):
                continue
            rot_tf_rpy = tf.transformations.euler_from_quaternion(rot_tf)
            yaw = rot_tf_rpy[2] / np.pi
            # Check phase
            if tag_id == 3:
                # Back of the object, phase confusion
                if rot.z < 0.0:
                    # turn around the coordinate frame to avoid confusion
                    yaw = -yaw

            # Publish tag id
            tag_id_msg = Int8()
            tag_id_msg.data = tag_id
            self.pub_tagId.publish(tag_id_msg)

            # Publish ack
            ack_msg = BoolStamped()
            if trans.x < 0.7:
                ack_msg.data = True
            else:
                ack_msg.data = False
            self.pub_ack.publish(ack_msg)

            print(detection)  # trans.x, trans.y, trans.z
            #self.checkProtocol(tag_id)
            #print rot.z - yaw
            #print tag_id, trans, rot
            rot_z_buf += rot.z
            yaw_buf += yaw
        if len(msg.detections) > 0:
            # Broadcast tranform
            self.broadcaster.sendTransform(
                (trans.x, trans.y, trans.z),
                tf.transformations.quaternion_from_euler(
                    0, 0,
                    np.pi *
                    (rot_z_buf - yaw_buf) / float(len(msg.detections))),
                rospy.Time.now(), "ducky1", "camera")
Exemplo n.º 45
    width = 0.02
    markers.publishPath(path, 'orange', width, 5.0) # path, color, width, lifetime


    # Plane / Rectangle:

    # Publish a rectangle between two points (thin, planar surface)
    # If the z-values are different, this will produce a cuboid
    point1 = Point(-1,0,0)
    point2 = Point(-2,-1,0) 
    markers.publishRectangle(point1, point2, 'blue', 5.0)

    # Publish a rotated plane using a numpy transform matrix
    R_y = transformations.rotation_matrix(0.3, (0,1,0)) # Rotate around y-axis by 0.3 radians
    T0 = transformations.translation_matrix((-3,-1.5,0))
    T = transformations.concatenate_matrices(T0, R_y)
    depth = 1.1
    width = 1.5
    markers.publishPlane(T, depth, width, 'purple', 5.0) # pose, depth, width, color, lifetime

    # Publish a plane using a ROS Pose Msg
    P = Pose(Point(-3,0,0),Quaternion(0,0,0,1))
    depth = 1.3
    width = 1.3
    markers.publishPlane(P, depth, width, 'brown', 5.0) # pose, depth, width, color, lifetime


    # Polygon:

    # Publish a polygon using a ROS Polygon Msg
    polygon = Polygon()
Exemplo n.º 46
    def space_coordinates(self, joints):
        T = tfs.identity_matrix()
        for idx in range(0, self.dof):
            T = tfs.concatenate_matrices(T, self.transformations[idx](joints[idx]))
        return self.space_from_matrix(T)
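
    # A minimal FK usage sketch (illustrative addition, not from the original
    # source): each entry of self.transformations maps one joint value to a 4x4
    # transform, so a 2-DOF planar arm with unit links could be modeled as:
    #
    #   import tf.transformations as tfs
    #   link = tfs.translation_matrix((1.0, 0.0, 0.0))
    #   self.transformations = [
    #       lambda q: tfs.concatenate_matrices(tfs.rotation_matrix(q, (0, 0, 1)), link),
    #       lambda q: tfs.concatenate_matrices(tfs.rotation_matrix(q, (0, 0, 1)), link),
    #   ]
    #
    # space_coordinates([0.0, 0.0]) would then report the end effector at x = 2.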
Exemplo n.º 47
    def callback(self, msg):

        tag_infos = []

        # Load tag detections message
        for detection in msg.detections:

            # ------ start tag info processing

            new_info = TagInfo()
            new_info.id = int(detection.id)
            id_info = self.tags_dict[new_info.id]
            
            # Check yaml file to fill in ID-specific information
            new_info.tag_type = self.sign_types[id_info['tag_type']]
            if new_info.tag_type == self.info.S_NAME:
                new_info.street_name = id_info['street_name']
            elif new_info.tag_type == self.info.SIGN:
                new_info.traffic_sign_type = self.traffic_sign_types[id_info['traffic_sign_type']]
            elif new_info.tag_type == self.info.VEHICLE:
                new_info.vehicle_name = id_info['vehicle_name']
            
            # TODO: Implement location more than just a float like it is now.
            # location is now 0.0 if no location is set which is probably not that smart
            if self.loc == 226:
                l = (id_info['location_226'])
                if l is not None:
                    new_info.location = l
            elif self.loc == 316:
                l = (id_info['location_316'])
                if l is not None:
                    new_info.location = l
            
            tag_infos.append(new_info)
            # --- end tag info processing


            # Define the transforms
            veh_t_camxout = tr.translation_matrix((self.camera_x, self.camera_y, self.camera_z))
            veh_R_camxout = tr.euler_matrix(0, self.camera_theta*np.pi/180, 0, 'rxyz')
            veh_T_camxout = tr.concatenate_matrices(veh_t_camxout, veh_R_camxout)   # 4x4 Homogeneous Transform Matrix

            camxout_T_camzout = tr.euler_matrix(-np.pi/2,0,-np.pi/2,'rzyx')
            veh_T_camzout = tr.concatenate_matrices(veh_T_camxout, camxout_T_camzout)

            tagzout_T_tagxout = tr.euler_matrix(-np.pi/2, 0, np.pi/2, 'rxyz')

            #Load translation
            trans = detection.pose.pose.position
            rot = detection.pose.pose.orientation

            camzout_t_tagzout = tr.translation_matrix((trans.x*self.scale_x, trans.y*self.scale_y, trans.z*self.scale_z))
            camzout_R_tagzout = tr.quaternion_matrix((rot.x, rot.y, rot.z, rot.w))
            camzout_T_tagzout = tr.concatenate_matrices(camzout_t_tagzout, camzout_R_tagzout)

            veh_T_tagxout = tr.concatenate_matrices(veh_T_camzout, camzout_T_tagzout, tagzout_T_tagxout)

            # Overwrite transformed value
            (trans.x, trans.y, trans.z) = tr.translation_from_matrix(veh_T_tagxout)
            (rot.x, rot.y, rot.z, rot.w) = tr.quaternion_from_matrix(veh_T_tagxout)

            detection.pose.pose.position = trans
            detection.pose.pose.orientation = rot

        new_tag_data = AprilTagsWithInfos()
        new_tag_data.detections = msg.detections
        new_tag_data.infos = tag_infos
        # Publish Message
        self.pub_postPros.publish(new_tag_data)
Exemplo n.º 48
    objects_path = objects_path[0]

# Load the kitchen environment
kitchen_file = os.path.join(objects_path, 'pr_kitchen.env.xml')
env.Load(kitchen_file)

# Remove the wall kinbody
env.Remove(env.GetKinBody('walls'))

# Move the robot near the fridge
Rot_z = transformations.rotation_matrix(numpy.pi/4.0, (0,0,1))
trans = numpy.array([[1., 0., 0., 0.2],
                     [0., 1., 0., 0.5],
                     [0., 0., 1., 0.0],
                     [0., 0., 0., 1.]])
robot_pose = transformations.concatenate_matrices(trans, Rot_z)
robot.SetTransform(robot_pose)

# Get transform of the lower handle link,
# this is actually at the origin of the fridge
fridge = env.GetRobot('refrigerator')
fridge_handle = fridge.GetLink('lower_handle')
T_lower_handle = fridge_handle.GetTransform()

# Translate the lower handle axis to be at the actual handle
height = 0.925
forward = 0.427
sideways = -0.308
T_handle_offset = numpy.eye(4)
T_handle_offset[0:3,3] = numpy.array([forward, sideways, height])
T_handle = transformations.concatenate_matrices(T_lower_handle, T_handle_offset)
Exemplo n.º 49
def transform_from_quat(pos, quat):
    return tfms.concatenate_matrices(tfms.translation_matrix(pos),
                                     tfms.quaternion_matrix(quat))
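
# Round-trip sanity sketch (illustrative addition; assumes numpy and ROS tf are
# importable as below): the translation and rotation used to build the matrix can
# be recovered with the matching tf helpers.
import numpy as np
import tf.transformations as tfms

q = tfms.quaternion_about_axis(0.5, (0, 0, 1))          # 0.5 rad about z, xyzw
M = transform_from_quat([1.0, 2.0, 3.0], q)
assert np.allclose(tfms.translation_from_matrix(M), [1.0, 2.0, 3.0])
assert np.allclose(M[:3, :3], tfms.quaternion_matrix(q)[:3, :3])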
Exemplo n.º 50
 def _makePreGrasp(self, box, objectName):
     '''Given a bounding box, identify an 
     approach vector, and designate a pregrasp position along that vector. Returns a MoveArmGoal
     message to move to the preGrasp position'''
     
     useX = not(box.box_dims.x > self.gripperOpenWidth or box.box_dims.x < self.gripperClosedWidth)
     useY = not(box.box_dims.y > self.gripperOpenWidth or box.box_dims.y < self.gripperClosedWidth)
      
     #Do all geometry in robot's base_link frame
     #Turn box pose into a tf for visualization
     self._tf_broadcaster.sendTransform(
             (box.pose.pose.position.x,box.pose.pose.position.y,box.pose.pose.position.z),
             (box.pose.pose.orientation.x,box.pose.pose.orientation.y,box.pose.pose.orientation.z,box.pose.pose.orientation.w),
             box.pose.header.stamp,
             objectName,
             box.pose.header.frame_id)
     #Turn box pose into a tf matrix in the proper frame
     boxPose = self._tf_listener.transformPose(self.frameID, box.pose)
     boxMat = transformations.quaternion_matrix([boxPose.pose.orientation.x,boxPose.pose.orientation.y,boxPose.pose.orientation.z,boxPose.pose.orientation.w])
     boxMat[:3, 3] = [boxPose.pose.position.x,boxPose.pose.position.y,boxPose.pose.position.z+.01] #Lift 1 cm above box CoM
     #Approach Vector is in the principal plane of the box most closely aligned with robot XZ. This plane will be tool YZ
     #Vector is at a downward 30 degree angle
     #Orientation of box should be (approximately) a pure z rotation from the robot base_link
     #Therefore the angle of rotation about the z axis is appoximately
     # 2 * acos(q_w)
     boxPose_base = self._tf_listener.transformPose('/base_link', boxPose)
     theta = 2 * math.acos(boxPose_base.pose.orientation.w)
     rospy.logdebug('Angle is %f',theta)
     #Determine what to rotate the pose quaternion by to get the vector quaternion
     #Start with a 30 degree downward rotation
     if useX and not useY:
         rospy.loginfo("Can only grab box along x axis")
         width = box.box_dims.x
         if theta >= pi/2 and theta <= 3*pi/2:
             rotationMatrix = transformations.euler_matrix(0, 5*pi/6, 0, 'rzyz')
         else:
             rotationMatrix = transformations.euler_matrix(pi, 5*pi/6, 0, 'rzyz')
     elif useY and not useX:
         rospy.loginfo("Can only grab box along y axis")
         width = box.box_dims.y
         if theta >= pi/4 and theta <= 3*pi/4:
             rospy.loginfo("Grabbing box along -y axis")
             rotationMatrix = transformations.euler_matrix(pi/2, 7*pi/6, -pi/2, 'rzyz')
         else:
             rospy.loginfo("Grabbing box along y axis")
             rotationMatrix = transformations.euler_matrix(pi/2, self._makePreGraspPose(boxMat, 1), -pi/2, 'rzyz')
     else:
         #Can use either face for pickup, so pick the one best aligned to the robot
         #TODO only -x and -y are currently correct
         rospy.loginfo("Theta is %f",theta)
         if theta >= pi/4 and theta <= 3*pi/4:
             rospy.loginfo("Grabbing box along -y axis")
             width = box.box_dims.x
             rotationMatrix = transformations.euler_matrix(pi/2, 7*pi/6, -pi/2, 'rzyz')
         elif theta >= 3*pi/4 and theta <= 5*pi/4:
             rospy.loginfo("Grabbing box along -x axis")
             width = box.box_dims.y
             rotationMatrix = transformations.euler_matrix(pi, 5*pi/6, pi/2, 'rzyz')
         elif theta >= 5*pi/4 and theta <= 7*pi/4:
             rospy.loginfo("Grabbing box along y axis")
             width = box.box_dims.x
             rotationMatrix = transformations.euler_matrix(pi/2, self._makePreGraspPose(boxMat, 1), -pi/2, 'rzyz')
         else:
             rospy.loginfo("Grabbing box along x axis")
             width = box.box_dims.y
             rotationMatrix = transformations.euler_matrix(0, 5*pi/6, pi/2, 'rzyz')
     #Rotated TF for visualization
     self._tf_broadcaster.sendTransform(
             (0,0,0), 
             transformations.quaternion_from_matrix(rotationMatrix),
             boxPose.header.stamp,
             objectName+'_rotated',
             objectName)
     #Pregrasp TF is rotated box TF translated back along the z axis
     distance = self.preGraspDistance + self.gripperFingerLength
     preGraspMat = transformations.translation_matrix([0,0,-distance])
     fullMat = transformations.concatenate_matrices(boxMat, rotationMatrix, preGraspMat)
     p = transformations.translation_from_matrix(fullMat)
     q = transformations.quaternion_from_matrix(fullMat)
     self._tf_broadcaster.sendTransform(
             p,
             q,
             boxPose.header.stamp,
             objectName+'_pregrasp',
             self.frameID)
     
     #Create Orientation constraint object
     o_constraint = OrientationConstraint()
     o_constraint.header.frame_id = self.frameID
     o_constraint.header.stamp = rospy.Time.now()
     o_constraint.link_name = self.toolLinkName
     o_constraint.orientation = Quaternion(q[0],q[1],q[2],q[3])
     o_constraint.absolute_roll_tolerance = 0.04
     o_constraint.absolute_pitch_tolerance = 0.04
     o_constraint.absolute_yaw_tolerance = 0.04
     
     #Determine position and tolerance from vector and box size
     pos_constraint = PositionConstraint()
     pos_constraint.header = o_constraint.header
     pos_constraint.link_name = self.toolLinkName
     pos_constraint.position = Point(p[0],p[1],p[2])
     #Position tolerance (in final tool frame) is:
     #  x = gripper open width - box x width
     #  y = object height * precision multiplier (<=1)
     #  z = 1 cm
     #Note that this is probably in the wrong frame, negating the advantage of all of the thinking in the preceding 4 lines
     #TODO: If you're feeling ambitious, turn this into a mesh and make it the right shape
     pos_constraint.constraint_region_shape.type = Shape.SPHERE
     pos_constraint.constraint_region_shape.dimensions = [0.01]
     #    self.gripperOpenWidth - width, 
     #    box.box_dims.z*0.2,
     #    0.01]
     
     preGraspGoal = MoveArmGoal()
     preGraspGoal.planner_service_name = self.plannerServiceName
     motion_plan_request = MotionPlanRequest()
     motion_plan_request.group_name = self.armGroupName
     motion_plan_request.num_planning_attempts = 10
     motion_plan_request.planner_id = ""
     motion_plan_request.allowed_planning_time = rospy.Duration(5,0)
     pos_constraint.weight = 1
     motion_plan_request.goal_constraints.position_constraints.append(pos_constraint)
     o_constraint.weight = 1
     motion_plan_request.goal_constraints.orientation_constraints.append(o_constraint)
     preGraspGoal.motion_plan_request = motion_plan_request
     
     #Turn off collision operations between the gripper and all objects
     for collisionName in self.gripperCollisionNames:
         collisionOperation = CollisionOperation(collisionName, 
                                 CollisionOperation.COLLISION_SET_ALL,
                                 0.0,
                                 CollisionOperation.DISABLE)
         preGraspGoal.operations.collision_operations.append(collisionOperation)
     return preGraspGoal
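
 # The pregrasp composition above follows a common back-off pattern (sketch for
 # clarity; the numbers are illustrative, not from the original code):
 #
 #   import math
 #   from tf import transformations
 #   object_mat   = transformations.translation_matrix([0.5, 0.0, 0.3])
 #   approach_rot = transformations.euler_matrix(0, 5 * math.pi / 6, 0, 'rzyz')
 #   back_off     = transformations.translation_matrix([0, 0, -0.15])
 #   pregrasp     = transformations.concatenate_matrices(object_mat, approach_rot, back_off)
 #
 # Because back_off is the right-most factor, its -z translation is expressed in
 # the already-rotated tool frame, which backs the gripper off along its own
 # approach axis rather than along world z.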
def generate_grasps(box,
                    num_grasps=4,
                    desired_approach_distance = 0.10,
                    min_approach_distance = 0.05,
                    effort = 50):
    """
    Generates grasps 

    Parameters:
    box: bounding box to generate grasps for
    num_grasps: number of grasps to generate, spaced equally around the box
    desired_approach_distance: how far the pre-grasp should ideally be away from the grasp
    min_approach_distance: how much distance between pre-grasp and grasp must actually be feasible for the grasp not to be rejected
    effort: joint effort to request in the pre-grasp and grasp postures

    Return: a list of grasps
    """

    rospy.loginfo("Generating grasps")
    grasps = []
    for i in range(num_grasps):
        g = Grasp()

        # align z-axis rotation to box
        euler = transformations.euler_from_quaternion([
            box.pose.pose.orientation.x,
            box.pose.pose.orientation.y,
            box.pose.pose.orientation.z,
            box.pose.pose.orientation.w])
        rot_ang = euler[2]

        # apply transformations based on rotations
        rot_ang += i * (2 * math.pi) / num_grasps
        t1 = transformations.translation_matrix([
            box.pose.pose.position.x,
            box.pose.pose.position.y,
            box.pose.pose.position.z + \
                box.box_dims.z * 0.05])
        r1 = transformations.rotation_matrix(rot_ang, [0, 0, 1])
        box_width = max(box.box_dims.x, box.box_dims.y)
        t2 = transformations.translation_matrix([-(box_width/2 + 0.12), 0, 0])
        mat = transformations.concatenate_matrices(t1, r1, t2)

        translation = transformations.translation_from_matrix(mat)
        g.grasp_pose.position.x = translation[0]
        g.grasp_pose.position.y = translation[1]
        g.grasp_pose.position.z = translation[2]

        q = transformations.quaternion_from_matrix(mat)
        g.grasp_pose.orientation.x = q[0]
        g.grasp_pose.orientation.y = q[1]
        g.grasp_pose.orientation.z = q[2]
        g.grasp_pose.orientation.w = q[3]

        g.desired_approach_distance = desired_approach_distance
        g.min_approach_distance = min_approach_distance

        pre_grasp_posture = JointState()
        pre_grasp_posture.header.stamp = rospy.Time.now()
        pre_grasp_posture.header.frame_id = 'base_link'
        pre_grasp_posture.name = ['r_wrist_flex_link']
        pre_grasp_posture.position = [0.8]
        pre_grasp_posture.effort = [effort]
        g.pre_grasp_posture = pre_grasp_posture

        grasp_posture = JointState()
        grasp_posture.header.stamp = rospy.Time.now()
        grasp_posture.header.frame_id = 'base_link'
        grasp_posture.name = ['r_wrist_flex_link']
        grasp_posture.position = [0.45]
        grasp_posture.effort = [effort]
        g.grasp_posture = grasp_posture

        grasps.append(g)
    np.random.shuffle(grasps)
    return grasps
Exemplo n.º 52
def transformBack(tf_xyzquat, pose):
    T_mat = tfm.concatenate_matrices( tfm.translation_matrix(tf_xyzquat[0:3]), tfm.quaternion_matrix(tf_xyzquat[3:7]))
    pose_mat = tfm.concatenate_matrices( tfm.translation_matrix(pose[0:3]),  tfm.quaternion_matrix(pose[3:7]) )
    new_pose_mat = np.dot(pose_mat, tfm.inverse_matrix(T_mat))
    return tfm.translation_from_matrix(new_pose_mat).tolist() + tfm.quaternion_from_matrix(new_pose_mat).tolist()
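
# Sanity-check sketch (illustrative addition; assumes numpy and tf.transformations
# imported as tfm, as above): transformBack right-multiplies by the inverse of the
# transform, so it undoes a pose that was built as pose_mat . T_mat.
import numpy as np
import tf.transformations as tfm

tf_xyzquat = [0.1, 0.0, 0.0] + list(tfm.quaternion_about_axis(np.pi / 2, (0, 0, 1)))
base = [1.0, 2.0, 0.0, 0.0, 0.0, 0.0, 1.0]              # x y z qx qy qz qw
base_mat = tfm.concatenate_matrices(tfm.translation_matrix(base[0:3]),
                                    tfm.quaternion_matrix(base[3:7]))
T_mat = tfm.concatenate_matrices(tfm.translation_matrix(tf_xyzquat[0:3]),
                                 tfm.quaternion_matrix(tf_xyzquat[3:7]))
moved_mat = np.dot(base_mat, T_mat)
moved = tfm.translation_from_matrix(moved_mat).tolist() + \
        tfm.quaternion_from_matrix(moved_mat).tolist()
assert np.allclose(transformBack(tf_xyzquat, moved)[0:3], base[0:3])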
Exemplo n.º 53
def PathCalculations(bag):
    # here we get the tranformation between map and world reference and robot poses according to world coordinates
    tf_static = bag.read_messages(topics=['/tf_static', 'tf2_msgs/TFMessage'])
    vrpn_robot = bag.read_messages(
        topics=['/vrpn_client_node/husky/pose', 'geometry_msgs/PoseStamped'])

    for topic, msg, t in tf_static:
        # we invert the matrix to get world to map
        rot_w2m = msg.transforms[0].transform.rotation
        tras_w2m = msg.transforms[0].transform.translation

    m2w = tff.concatenate_matrices(
        tff.translation_matrix([tras_w2m.x, tras_w2m.y, tras_w2m.z]),
        tff.quaternion_matrix([rot_w2m.x, rot_w2m.y, rot_w2m.z, rot_w2m.w]))
    w2m = tff.inverse_matrix(m2w)

    world2map = TransformStamped()
    world2map.transform.rotation = rot_w2m
    world2map.transform.rotation.w = -1 * rot_w2m.w  # negating w yields a quaternion equivalent to the conjugate, i.e. the inverse rotation
    world2map.transform.translation.x = w2m[0][3]
    world2map.transform.translation.y = w2m[1][3]
    world2map.transform.translation.z = w2m[2][3]

    t_old = rospy.Time(0)
    init = True
    dist = 0
    deltax = 0
    dfx_old = 0
    dfx = 0
    deltay = 0
    dfy_old = 0
    dfy = 0
    bte = 0

    global obs_dist_path
    global all_k
    print "start processing like crazy"

    for topic, msg, t in vrpn_robot:
        #sys.stdout.flush()
        robot_w = msg
        robot_m = tf2_geometry_msgs.do_transform_pose(
            robot_w,
            world2map)  #transform robot pose from world ref to map ref
        robot_px_m = robot_m.pose.position.x / map_scale  # robot pose in map image coordinates
        robot_py_m = robot_m.pose.position.y / map_scale

        robot_px_m_dist = int(robot_px_m + map_w / 2)
        robot_py_m_dist = int(robot_py_m + map_h / 2)

        robot_px_mc = (
            robot_px_m + dx / 2 - cx
        )  #*map_scale # robot pose in map cropped image coordinates [meters]
        robot_py_mc = (robot_py_m + dy / 2 - cy)  #*map_scale

        dt = abs((t.secs + t.nsecs / 1e9) -
                 (t_old.secs + t_old.nsecs / 1e9))

        if t > rospy.Time(t_stop):
            break

        if t < rospy.Time(t_start):
            pass
        else:
            # calculating total traveled distance and curvature
            if init:
                init = False
            else:
                #deltax = robot_m.pose.position.x - x_old
                deltax = robot_px_mc - x_old
                #deltay = robot_m.pose.position.y - y_old
                deltay = robot_py_mc - y_old
                dist = dist + math.sqrt(
                    deltax**2 + deltay**2)  # distance traveled

                if dt > 0.0:
                    dfx = deltax / dt  # first derivate for the curvature
                    dfy = deltay / dt
                    ddfx = (dfx - dfx_old) / dt  # second derivate
                    ddfy = (dfy - dfy_old) / dt
                    if abs(dfx) > 0.0 and abs(dfy) > 0.0:
                        # curvature of a parametric curve:
                        # k = |x'y'' - y'x''| / (x'^2 + y'^2)^(3/2)
                        k_num = abs(dfx * ddfy - dfy * ddfx)
                        k_den = (dfx**2 + dfy**2)**1.5
                        k = k_num / k_den
                    else:
                        k = 0
                    all_k = np.vstack([all_k, [k, t]])
                    bte = bte + k**2
            print(robot_px_mc, robot_py_mc, t)
        x_old = robot_px_mc
        y_old = robot_py_mc
        dfx_old = dfx
        dfy_old = dfy
        t_old = t

        # calculating minimum distance from obstacles with a sampling time
        '''
        if dt >= sample_time:
            obs_dist = calcMinDistFromObstacles(objectmap,robot_px_m_dist,robot_py_m_dist)
            obs_dist_path = np.vstack([obs_dist_path,[obs_dist,t]])
            t_old = t
        '''

    #min_obs_dist = np.amin(obs_dist_path[1:,0])
    #min_obs_dist_index = np.where(obs_dist_path[1:,0] == np.amin(obs_dist_path[1:,0]))
    #index_min = min_obs_dist_index + np.ones(len(min_obs_dist_index) ,np.uint8)
    #time_obs_dist = obs_dist_path[index_min,1]
    #print "Absolute minimum distance ", min_obs_dist , " at " , time_obs_dist
    print "Total distance traveled ", dist
    print "Total bending energy ", bte
    max_k = np.amax([all_k[:, 0]])
    print "Max curvature ", max_k

    with open('Estimation.txt', 'w') as file:
        file.write(
            '1 - obs_dist_path\n2 - dist\n3 - all_k\n with a sample time of ')
        file.write(str(sample_time))
        file.write('\n\n\n')
        #file.write(str(obs_dist_path[:,:]))
        #file.write('\n \n \n')
        file.write(str(dist))
        file.write('\n \n \n')
        file.write(str(all_k[:, :]))

    print "All finished. Check ./Estimation.txt file for details"
Exemplo n.º 54
# Add a table to the environment
table_file = os.path.join(objects_path, 'table.kinbody.xml')
table = env.ReadKinBodyXMLFile(table_file)
if table is None:
    print('Failed to load table kinbody')
    sys.exit()
env.AddKinBody(table)
# Rotate so table surface is horizontal
Rot_x = transformations.rotation_matrix(numpy.pi/2.0, (1,0,0))
Rot_z = transformations.rotation_matrix(numpy.pi/2.0, (0,0,1))
trans = numpy.array([[1., 0., 0., 0.0], # along base frame y-axis
                     [0., 1., 0., 0.0], # vertical in base frame
                     [0., 0., 1., 0.8], # along base frame x-axis
                     [0., 0., 0., 1.]])
table_pose = transformations.concatenate_matrices(Rot_z, Rot_x, trans)
table.SetTransform(table_pose)

# Set the Segway base to be static, so it doesn't fall over by gravity
robot.GetLink('segway').SetStatic(True)

with env:
    # Initialize the physics engine
    physics = openravepy.RaveCreatePhysicsEngine(env,'ode')
    env.SetPhysicsEngine(physics)
    physics.SetGravity(numpy.array((0,0,-9.81)))

    # Enable physics simulator
    env.StopSimulation()
    env.StartSimulation(timestep=0.001)
Exemplo n.º 55
    def callback(self, msg):
        detect_msg = BoolStamped()
        detect_msg.data = True
        self.pub_detected.publish(detect_msg)
        tag_infos = []

        # Load tag detections message
        for detection in msg.detections:

            # ------ start tag info processing

            new_info = TagInfo()
            new_info.id = int(detection.id)
            id_info = self.tags_dict[new_info.id]
            if new_info.id == 1:
                self.turnright = True
                self.turnleft = False
                self.turnaround = False
                #print "---------Tag 1---------Turn Right-----------"
            elif new_info.id == 2:
                self.turnright = False
                self.turnleft = True
                self.turnaround = False
                #print "---------Tag 2---------Turn LEFT-----------"
            elif new_info.id == 3:
                self.turnright = False
                self.turnleft = False
                self.turnaround = True
                print "---------Tag 3---------Turn around-----------"
            # Check yaml file to fill in ID-specific information
            new_info.tag_type = self.sign_types[id_info['tag_type']]
            if new_info.tag_type == self.info.S_NAME:
                new_info.street_name = id_info['street_name']
            elif new_info.tag_type == self.info.SIGN:
                new_info.traffic_sign_type = self.traffic_sign_types[
                    id_info['traffic_sign_type']]
            elif new_info.tag_type == self.info.VEHICLE:
                new_info.vehicle_name = id_info['vehicle_name']

            # TODO: Implement location more than just a float like it is now.
            # location is now 0.0 if no location is set which is probably not that smart
            if self.loc == 226:
                l = (id_info['location_226'])
                if l is not None:
                    new_info.location = l
            elif self.loc == 316:
                l = (id_info['location_316'])
                if l is not None:
                    new_info.location = l

            tag_infos.append(new_info)
            # --- end tag info processing

            # Define the transforms
            veh_t_camxout = tr.translation_matrix(
                (self.camera_x, self.camera_y, self.camera_z))
            veh_R_camxout = tr.euler_matrix(0, self.camera_theta * np.pi / 180,
                                            0, 'rxyz')
            veh_T_camxout = tr.concatenate_matrices(
                veh_t_camxout,
                veh_R_camxout)  # 4x4 Homogeneous Transform Matrix

            camxout_T_camzout = tr.euler_matrix(-np.pi / 2, 0, -np.pi / 2,
                                                'rzyx')
            veh_T_camzout = tr.concatenate_matrices(veh_T_camxout,
                                                    camxout_T_camzout)

            tagzout_T_tagxout = tr.euler_matrix(-np.pi / 2, 0, np.pi / 2,
                                                'rxyz')

            #Load translation
            trans = detection.pose.pose.position
            rot = detection.pose.pose.orientation

            camzout_t_tagzout = tr.translation_matrix(
                (trans.x * self.scale_x, trans.y * self.scale_y,
                 trans.z * self.scale_z))
            camzout_R_tagzout = tr.quaternion_matrix(
                (rot.x, rot.y, rot.z, rot.w))
            camzout_T_tagzout = tr.concatenate_matrices(
                camzout_t_tagzout, camzout_R_tagzout)

            veh_T_tagxout = tr.concatenate_matrices(veh_T_camzout,
                                                    camzout_T_tagzout,
                                                    tagzout_T_tagxout)

            # Overwrite transformed value
            (trans.x, trans.y,
             trans.z) = tr.translation_from_matrix(veh_T_tagxout)
            (rot.x, rot.y, rot.z,
             rot.w) = tr.quaternion_from_matrix(veh_T_tagxout)

            detection.pose.pose.position = trans
            detection.pose.pose.orientation = rot

        new_tag_data = AprilTagsWithInfos()
        new_tag_data.detections = msg.detections
        new_tag_data.infos = tag_infos
        # Publish Message
        self.pub_postPros.publish(new_tag_data)
Exemplo n.º 56
    def callback(self, msg):

        tag_infos = []

        new_tag_data = AprilTagsWithInfos()

        # Load tag detections message
        for detection in msg.detections:

            # ------ start tag info processing

            new_info = TagInfo()
            # Can use id[0] as long as no bundles are used
            new_info.id = int(detection.id[0])
            id_info = self.tags_dict[new_info.id]

            # Check yaml file to fill in ID-specific information
            new_info.tag_type = self.sign_types[id_info['tag_type']]
            if new_info.tag_type == self.info.S_NAME:
                new_info.street_name = id_info['street_name']
            elif new_info.tag_type == self.info.SIGN:
                new_info.traffic_sign_type = self.traffic_sign_types[
                    id_info['traffic_sign_type']]

                # publish for FSM
                # parking apriltag event
                msg_parking = BoolStamped()
                msg_parking.header.stamp = rospy.Time(0)
                if new_info.traffic_sign_type == TagInfo.PARKING:
                    msg_parking.data = True
                else:
                    msg_parking.data = False
                self.pub_postPros_parking.publish(msg_parking)

                # intersection apriltag event
                msg_intersection = BoolStamped()
                msg_intersection.header.stamp = rospy.Time(0)
                if (new_info.traffic_sign_type == TagInfo.FOUR_WAY) or (
                        new_info.traffic_sign_type == TagInfo.RIGHT_T_INTERSECT
                ) or (new_info.traffic_sign_type == TagInfo.LEFT_T_INTERSECT
                      ) or (new_info.traffic_sign_type
                            == TagInfo.T_INTERSECTION):
                    msg_intersection.data = True
                else:
                    msg_intersection.data = False
                self.pub_postPros_intersection.publish(msg_intersection)

            elif new_info.tag_type == self.info.VEHICLE:
                new_info.vehicle_name = id_info['vehicle_name']

            # TODO: Implement location more than just a float like it is now.
            # location is now 0.0 if no location is set which is probably not that smart
            if self.loc == 226:
                l = (id_info['location_226'])
                if l is not None:
                    new_info.location = l
            elif self.loc == 316:
                l = (id_info['location_316'])
                if l is not None:
                    new_info.location = l

            tag_infos.append(new_info)
            # --- end tag info processing

            # Define the transforms
            veh_t_camxout = tr.translation_matrix(
                (self.camera_x, self.camera_y, self.camera_z))
            veh_R_camxout = tr.euler_matrix(0, self.camera_theta * np.pi / 180,
                                            0, 'rxyz')
            veh_T_camxout = tr.concatenate_matrices(
                veh_t_camxout,
                veh_R_camxout)  # 4x4 Homogeneous Transform Matrix

            camxout_T_camzout = tr.euler_matrix(-np.pi / 2, 0, -np.pi / 2,
                                                'rzyx')
            veh_T_camzout = tr.concatenate_matrices(veh_T_camxout,
                                                    camxout_T_camzout)

            tagzout_T_tagxout = tr.euler_matrix(-np.pi / 2, 0, np.pi / 2,
                                                'rxyz')

            #Load translation
            trans = detection.pose.pose.pose.position
            rot = detection.pose.pose.pose.orientation
            header = detection.pose.header

            camzout_t_tagzout = tr.translation_matrix(
                (trans.x * self.scale_x, trans.y * self.scale_y,
                 trans.z * self.scale_z))
            camzout_R_tagzout = tr.quaternion_matrix(
                (rot.x, rot.y, rot.z, rot.w))
            camzout_T_tagzout = tr.concatenate_matrices(
                camzout_t_tagzout, camzout_R_tagzout)

            veh_T_tagxout = tr.concatenate_matrices(veh_T_camzout,
                                                    camzout_T_tagzout,
                                                    tagzout_T_tagxout)

            # Overwrite transformed value
            (trans.x, trans.y,
             trans.z) = tr.translation_from_matrix(veh_T_tagxout)
            (rot.x, rot.y, rot.z,
             rot.w) = tr.quaternion_from_matrix(veh_T_tagxout)

            new_tag_data.detections.append(
                AprilTagDetection(int(detection.id[0]),
                                  float(detection.size[0]),
                                  PoseStamped(header, Pose(trans, rot))))

        new_tag_data.infos = tag_infos
        # Publish Message
        self.pub_postPros.publish(new_tag_data)
Exemplo n.º 57
def homogeneous_matrix(trans, quat):
    return TR.concatenate_matrices(TR.translation_matrix(trans),
                                   TR.quaternion_matrix(quat))
Exemplo n.º 58
def homogeneous_matrix(trans, rot_ang, rot_dir):

    T = tfs.translation_matrix(trans)
    R = tfs.rotation_matrix(rot_ang, rot_dir)
    return tfs.concatenate_matrices(T, R)
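
# Usage sketch (illustrative addition; assumes numpy and tf.transformations are
# importable as tfs, matching the helper above): build a pose 0.2 m along x,
# rotated 90 degrees about z, then verify that inverting it recovers identity.
import numpy as np
import tf.transformations as tfs

T = homogeneous_matrix([0.2, 0.0, 0.0], np.pi / 2, (0, 0, 1))
assert np.allclose(np.dot(T, tfs.inverse_matrix(T)), np.eye(4))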