Example #1
    def publish_goal(self, pose, lookahead_distance=0.0, stop_at_goal=False):
        goal = Goal()
        goal.pose.header.stamp = rospy.Time.now()
        goal.pose.header.frame_id = "odom"

        lookahead = tf_conversions.Frame(
            tf_conversions.Vector(lookahead_distance, 0, 0))

        # add the offset for navigating to the goal:
        # (offset * odom).Inverse() * goal = odom.Inverse() * pose
        # goal = (offset * odom) * odom.Inverse() * pose
        # goal = offset * pose
        original_pose_frame = tf_conversions.fromMsg(pose)
        pose_frame = self.zero_odom_offset * original_pose_frame
        original_pose_frame_lookahead = original_pose_frame * lookahead
        pose_frame_lookahead = pose_frame * lookahead

        goal.pose.pose.position.x = pose_frame_lookahead.p.x()
        goal.pose.pose.position.y = pose_frame_lookahead.p.y()
        goal.pose.pose.orientation = tf_conversions.toMsg(
            pose_frame_lookahead).orientation

        goal.stop_at_goal.data = stop_at_goal
        self.goal_pub.publish(goal)
        self.goal_plus_lookahead = tf_conversions.toMsg(
            original_pose_frame_lookahead)

        if self.publish_gt_goals:
            try:
                trans = self.tfBuffer.lookup_transform('map', 'odom',
                                                       rospy.Time())
                transform = trans.transform  # lookup_transform returns a TransformStamped
                trans_frame = tf_conversions.Frame(
                    tf_conversions.Rotation.Quaternion(
                        transform.rotation.x, transform.rotation.y,
                        transform.rotation.z, transform.rotation.w),
                    tf_conversions.Vector(transform.translation.x,
                                          transform.translation.y,
                                          transform.translation.z))

                goal_pose = PoseStamped()
                goal_pose.header.stamp = rospy.Time.now()
                goal_pose.header.frame_id = "map"
                goal_pose.pose = tf_conversions.toMsg(trans_frame *
                                                      pose_frame_lookahead)
                self.goal_pose_pub.publish(goal_pose)
            except (tf2_ros.LookupException, tf2_ros.ConnectivityException,
                    tf2_ros.ExtrapolationException):
                print('Could not lookup transform from /map to /odom')
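
The frame algebra in the comments above ("goal = offset * pose") can be sanity-checked numerically with PyKDL frames. A minimal standalone sketch, with made-up values:

import math
import PyKDL
import tf_conversions

def _frame(theta, x, y):
    return tf_conversions.Frame(tf_conversions.Rotation.RotZ(theta),
                                tf_conversions.Vector(x, y, 0.0))

offset = _frame(math.radians(30), 1.0, 0.5)  # zero-odom offset
odom = _frame(math.radians(-20), 0.3, 2.0)   # arbitrary odom pose
pose = _frame(math.radians(45), 2.0, 0.0)    # recorded pose

goal = offset * pose
assert PyKDL.Equal((offset * odom).Inverse() * goal,
                   odom.Inverse() * pose, 1e-9)
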
    @classmethod
    def remove_anchor_pose(cls, anchor, data):
        # Convert the anchor pose into a PyKDL.Frame: this simplifies the
        # relative-pose arithmetic below.
        anchor_frame = tf_conversions.fromMsg(anchor.pose)
        anchor_frame.M = PyKDL.Rotation()  # drop the rotation: translation-only anchor

        def subtract_pose(point, verbose=False):
            p = copy.deepcopy(point)

            # Find the difference in poses. NOTE we do not change anything other
            # than the pose (twist & wrench stay the same).
            point_frame = tf_conversions.fromMsg(point.pose)
            delta = anchor_frame.Inverse() * point_frame
            p.pose = tf_conversions.toMsg(delta)

            if verbose:
                print('{} {} {} -> {} {} {}'.format(point.pose.position.x,
                                                    point.pose.position.y,
                                                    point.pose.position.z,
                                                    p.pose.position.x,
                                                    p.pose.position.y,
                                                    p.pose.position.z))
            return p

        parsed = [subtract_pose(x) for x in data]

        # Create an identity translation/rotation for the new anchor pose.
        anchor_new = copy.deepcopy(anchor)
        identity = tf_conversions.Frame()
        anchor_new.pose = tf_conversions.toMsg(identity)

        return (anchor_new, parsed)
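
A minimal usage sketch for remove_anchor_pose; the owning class (here called Recorder) is a hypothetical name, and anchor and data entries are assumed to be PoseStamped-like messages with a pose field:

from geometry_msgs.msg import PoseStamped

anchor = PoseStamped()
anchor.pose.position.x = 1.0
anchor.pose.orientation.w = 1.0  # identity rotation

point = PoseStamped()
point.pose.position.x = 1.5
point.pose.position.y = 0.2
point.pose.orientation.w = 1.0

new_anchor, rebased = Recorder.remove_anchor_pose(anchor, [point])
# rebased[0].pose.position.x == 0.5: points are now relative to the anchor's
# translation, and new_anchor.pose is the identity pose.
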
Example #3
def get_corrected_goal_offset(goal1, goal2, rotation_correction,
                              correction_length):
    '''apply rotation and length corrections to the offset from goal1 to goal2'''
    goal_offset = goal1.Inverse() * goal2
    goal_offset = tf_conversions.Frame(
        tf_conversions.Rotation.RotZ(rotation_correction)) * goal_offset
    goal_offset.p *= correction_length
    return goal_offset
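
Example #6 below composes this offset onto the current goal in the odom frame; in isolation the call looks like:

import math
import tf_conversions

goal1 = tf_conversions.Frame()  # previous goal (world frame)
goal2 = tf_conversions.Frame(tf_conversions.Rotation.RotZ(math.radians(10)),
                             tf_conversions.Vector(0.2, 0.0, 0.0))
offset = get_corrected_goal_offset(goal1, goal2, math.radians(2), 1.05)

current_goal_odom = tf_conversions.Frame()  # current goal in the odom frame
new_goal = current_goal_odom * offset       # as in make_new_goal (Example #6)
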
Example #4
def integrate_corrected_poses(poses, origin, theta_corrections, path_corrections):
	'''chain pose-to-pose offsets from a recorded path, applying per-step
	rotation and path-length corrections, then align the result with origin'''
	corrected_poses = [poses.frames[0]]

	for i in range(1, len(poses.frames)):
		new_pose = poses.frames[i]
		old_pose = poses.frames[i-1]
		current_goal = corrected_poses[-1]

		# offset between consecutive recorded poses, rotated into the heading
		# of the path integrated so far
		pose_offset = old_pose.Inverse() * new_pose
		pose_offset.p = tf_conversions.Rotation.RotZ(current_goal.M.GetRPY()[2]) * pose_offset.p
		pose_offset_corrected = pose_offset
		if theta_corrections is not None and len(theta_corrections) > i:
			pose_offset_corrected = tf_conversions.Frame(tf_conversions.Rotation.RotZ(theta_corrections[i])) * pose_offset_corrected
		if path_corrections is not None and len(path_corrections) > i:
			pose_offset_corrected.p *= path_corrections[i]
		corrected_poses.append(tf_conversions.Frame(pose_offset_corrected.M * current_goal.M, pose_offset_corrected.p + current_goal.p))

	origin_offset = poses.frames[0].Inverse() * origin
	corrected_poses = [pose * origin_offset for pose in corrected_poses]

	return poses_to_np(corrected_poses)
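
A minimal usage sketch, assuming a simple container exposing a frames list of PyKDL frames (poses_to_np, called above, is the module's helper for flattening frames back to (x, y, theta) rows and is not shown in this listing):

import math
import tf_conversions

class _Path(object):
    def __init__(self, frames):
        self.frames = frames  # stand-in for the recorded-path object

frames = [tf_conversions.Frame(),
          tf_conversions.Frame(tf_conversions.Vector(0.2, 0.0, 0.0)),
          tf_conversions.Frame(tf_conversions.Rotation.RotZ(math.radians(5)),
                               tf_conversions.Vector(0.4, 0.0, 0.0))]
xy_theta = integrate_corrected_poses(_Path(frames), tf_conversions.Frame(),
                                     theta_corrections=[0.0, 0.0, math.radians(1.0)],
                                     path_corrections=[1.0, 1.0, 1.05])
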
def get_offset_px(goal, pose):
	visual_feature_range = 1  # np.random.uniform(3.0, 5.0)
	visual_feature_angle = 0  # np.random.uniform(-math.radians(10), math.radians(10))

	visual_feature_offset = tf_conversions.Frame()
	visual_feature_offset.M.DoRotZ(visual_feature_angle)
	visual_feature_offset.p.x(visual_feature_range)

	visual_feature = goal * visual_feature_offset

	offset = pose.Inverse() * visual_feature

	return -localiser.rad_to_px(visual_feature_angle - math.atan2(offset.p.y(), offset.p.x()))
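
localiser.rad_to_px is not shown in this listing; a plausible reconstruction, assuming a linear angle-to-pixel mapping across the camera's horizontal field of view (the field-of-view default mirrors Example #10; the image width is an assumption):

import math

FIELD_OF_VIEW_DEG = 2 * 60.6 + 2 * 27.0  # default from Example #10
IMAGE_WIDTH = 320                         # assumed resized image width

def rad_to_px(angle_rad):
    # hypothetical: convert an angular offset to horizontal pixels
    return angle_rad / math.radians(FIELD_OF_VIEW_DEG) * IMAGE_WIDTH
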
Example #6
    def make_new_goal(self, rotation_correction=0.0, path_correction=1.0):
        old_goal_index = self.goal_index
        old_goal_frame_world = tf_conversions.fromMsg(
            self.poses[old_goal_index])
        current_goal_frame_odom = tf_conversions.fromMsg(self.goal)

        state = self.update_goal_index()

        if state == GOAL_STATE.finished:
            print('Localiser stopping. Reached final goal.')
            self.running = False
            return

        if state == GOAL_STATE.restart:
            # avoid a large offset from the end of the path back to the start
            old_goal_frame_world = tf_conversions.Frame()

        new_goal_frame_world = tf_conversions.fromMsg(
            self.poses[self.goal_index])
        turning_goal = is_turning_goal(old_goal_frame_world,
                                       new_goal_frame_world)
        goal_offset = get_corrected_goal_offset(old_goal_frame_world,
                                                new_goal_frame_world,
                                                rotation_correction,
                                                path_correction)
        new_goal = current_goal_frame_odom * goal_offset

        sum_path_correction_ratio = (
            GOAL_DISTANCE_SPACING +
            self.sum_path_correction) / GOAL_DISTANCE_SPACING

        self.update_goal(new_goal, True, turning_goal)
        self.save_data_at_goal(self.last_odom_pose, new_goal,
                               new_goal_frame_world, self.sum_theta_correction,
                               sum_path_correction_ratio)
        self.goal_number += 1
        print('[%d] theta [%f]\tpath [%f]' %
              (old_goal_index, math.degrees(
                  self.sum_theta_correction), sum_path_correction_ratio))
        if turning_goal:
            print('turning goal:')
        self.sum_theta_correction = 0
        self.sum_path_correction = 0.0
def np_to_frame(pose_tuple):
    return tf_conversions.Frame(
        tf_conversions.Rotation.RotZ(pose_tuple[2]),
        tf_conversions.Vector(pose_tuple[0], pose_tuple[1], 0))
# NOTE: the opening of this snippet is missing from the source; the x (cos)
# component below is reconstructed to match the surviving sin term, and the
# variable name is hypothetical.
lookahead_offset = np.array([
    LOOKAHEAD_DISTANCE * math.cos(goal[2]),
    LOOKAHEAD_DISTANCE * math.sin(goal[2]), 0
])
turning_goal = False

actual_goals_odom = []
actual_goals_world = []
update_locations = []
x_errors = []
continuous_offsets = []
continuous_expected_offsets = []
continuous_path_offsets = []

odom = Odometry()
odom.pose.pose.position.x = 0.0
odom.pose.pose.position.y = 0.0
q = tf_conversions.Rotation.RotZ(math.radians(0)).GetQuaternion()
odom.pose.pose.orientation.z = q[2]
odom.pose.pose.orientation.w = q[3]
current_frame_odom = tf_conversions.fromMsg(odom.pose.pose)

world = Pose()
world.position.x = odom.pose.pose.position.x
world.position.y = odom.pose.pose.position.y
world.orientation.z = odom.pose.pose.orientation.z
world.orientation.w = odom.pose.pose.orientation.w
current_frame_world = tf_conversions.fromMsg(world)

N = 10000
xs = []
ys = []
thetas = []
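
np_to_frame above maps an (x, y, theta) tuple to a planar PyKDL frame; the inverse mapping (implied by poses_to_np elsewhere in these examples, but not shown) would look like this hypothetical helper:

import math
import tf_conversions

def frame_to_np(frame):
    # hypothetical inverse of np_to_frame: recover (x, y, theta)
    return (frame.p.x(), frame.p.y(), frame.M.GetRPY()[2])

f = np_to_frame((1.0, 2.0, math.radians(90)))
print(frame_to_np(f))  # -> approximately (1.0, 2.0, 1.5708)
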
Example #9
    def start(self):
        if self.global_localisation_init:
            if self.last_odom_pose is None or self.last_image is None:
                print(
                    'Global localisation - waiting for first odom and image messages'
                )
                while self.last_odom_pose is None or self.last_image is None:
                    time.sleep(0.2)  # sleep lets subscribers be processed

            offsets, correlations = self.calculate_image_pose_offset(
                0, len(self.poses))
            best_match = np.argmax(correlations)
            if best_match == 0 or (best_match == 1
                                   and correlations[0] > correlations[2]):
                goal_pose = tf_conversions.Frame()
                odom_pose = tf_conversions.fromMsg(self.last_odom_pose)
                self.zero_odom_offset = tf_conversions.fromMsg(
                    self.last_odom_pose)

                if self.poses[0].position.x == 0 and self.poses[
                        0].position.y == 0 and tf_conversions.fromMsg(
                            self.poses[0]).M.GetRPY()[2] == 0:
                    # the first pose is at 0,0,0 so we ignore it and set the first goal to the next pose
                    print('Localiser: starting at goal 1, goal 0 = [0,0,0]')
                    self.goal_index = 1
                else:
                    self.goal_index = 0
            else:
                if best_match == len(self.poses) - 1:
                    self.goal_index = len(self.poses) - 2
                elif correlations[best_match + 1] >= correlations[best_match -
                                                                  1]:
                    self.goal_index = best_match
                else:
                    self.goal_index = best_match - 1
                # goal = offset.Inverse() * odom
                # offset.Inverse() = goal * odom.Inverse()
                # offset = (goal * odom.Inverse()).Inverse()
                goal_pose = tf_conversions.fromMsg(self.poses[self.goal_index -
                                                              1])
                odom_pose = tf_conversions.fromMsg(self.last_odom_pose)
                self.zero_odom_offset = (goal_pose *
                                         odom_pose.Inverse()).Inverse()
            self.last_odom_pose = tf_conversions.toMsg(
                self.zero_odom_offset.Inverse() *
                tf_conversions.fromMsg(self.last_odom_pose))
            print(
                'Global localisation - best match at pose %d, starting at goal %d [correlation = %f]'
                % (best_match, self.goal_index, correlations[best_match]))
            if correlations[best_match] < self.min_init_correlation:
                print(
                    'Stop, best match [%.2f] did not exceed minimum correlation ([%.2f])'
                    % (correlations[best_match], self.min_init_correlation))
                return
        else:
            if self.last_odom_pose is None:
                print('Global localisation - waiting for first odom message')
                while self.last_odom_pose is None:
                    time.sleep(0.2)  # sleep lets subscribers be processed

            goal_pose = tf_conversions.Frame()
            odom_pose = tf_conversions.fromMsg(self.last_odom_pose)
            self.zero_odom_offset = tf_conversions.fromMsg(self.last_odom_pose)

            if self.poses[0].position.x == 0 and self.poses[
                    0].position.y == 0 and tf_conversions.fromMsg(
                        self.poses[0]).M.GetRPY()[2] == 0:
                # the first pose is at 0,0,0 so we ignore it and set the first goal to the next pose
                print('Localiser: starting at goal 1, goal 0 = [0,0,0]')
                self.goal_index = 1
            else:
                self.goal_index = 0

        self.update_goal(tf_conversions.fromMsg(self.poses[self.goal_index]))

        self.running = True
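
The offset derivation in the comments of start() above (offset = (goal * odom.Inverse()).Inverse()) can be verified numerically; a short sketch with made-up poses:

import math
import PyKDL
import tf_conversions

goal = tf_conversions.Frame(tf_conversions.Rotation.RotZ(math.radians(40)),
                            tf_conversions.Vector(1.0, -0.5, 0.0))
odom = tf_conversions.Frame(tf_conversions.Rotation.RotZ(math.radians(10)),
                            tf_conversions.Vector(0.2, 0.3, 0.0))

offset = (goal * odom.Inverse()).Inverse()
# re-basing the odometry by the stored offset recovers the matched goal pose:
assert PyKDL.Equal(offset.Inverse() * odom, goal, 1e-9)
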
Example #10
    def setup_parameters(self):
        # Teach Repeat Params
        self.rotation_correction_gain = rospy.get_param(
            '~rotation_correction_gain', 0.01)
        self.path_correction_gain = rospy.get_param('~path_correction_gain',
                                                    0.01)

        # Wait for ready
        self.ready = not rospy.get_param('/wait_for_ready', False)
        self.global_localisation_init = rospy.get_param(
            '~global_localisation_init', False)
        self.min_init_correlation = rospy.get_param('~min_init_correlation',
                                                    0.0)
        self.running = False
        self.mutex = threading.Lock()

        # Odom
        self.load_dir = os.path.expanduser(
            rospy.get_param('/data_load_dir', '~/miro/data'))
        if self.load_dir[-1] != '/':
            self.load_dir += '/'
        self.sum_theta_correction = 0.0
        self.sum_path_correction = 0.0
        self.stop_at_end = rospy.get_param('~stop_at_end', True)
        self.discrete_correction = rospy.get_param('~discrete-correction',
                                                   False)

        # Load pose data
        pose_files = [
            self.load_dir + f for f in os.listdir(self.load_dir)
            if f.endswith('_pose.txt')
        ]
        pose_files.sort()
        self.poses = load_poses(pose_files)
        self.goal_index = 0
        self.goal = tf_conversions.toMsg(tf_conversions.Frame())
        self.last_goal = tf_conversions.toMsg(tf_conversions.Frame())
        self.goal_plus_lookahead = tf_conversions.toMsg(tf_conversions.Frame())
        self.last_odom_pose = None
        self.zero_odom_offset = None
        global GOAL_DISTANCE_SPACING, LOOKAHEAD_DISTANCE_RATIO, TURNING_TARGET_RANGE_DISTANCE_RATIO, GOAL_THETA_TOLERANCE
        GOAL_DISTANCE_SPACING = rospy.get_param('/goal_pose_seperation', 0.2)
        LOOKAHEAD_DISTANCE_RATIO = rospy.get_param('/lookahead_distance_ratio',
                                                   0.65)
        TURNING_TARGET_RANGE_DISTANCE_RATIO = rospy.get_param(
            '/turning_target_range_distance_ratio', 0.5)
        GOAL_THETA_TOLERANCE = rospy.get_param('/goal_theta_tolerance', 5)

        # Image
        self.resize = image_processing.make_size(
            height=rospy.get_param('/image_resize_height', None),
            width=rospy.get_param('/image_resize_width', None))
        if self.resize[1] is not None:
            global IMAGE_WIDTH
            IMAGE_WIDTH = self.resize[1]
        if self.resize[0] is None and self.resize[1] is None:
            self.resize = None
        self.last_image = None
        self.patch_size = image_processing.parse_patch_size_parameter(
            rospy.get_param('/patch_size', (9, 9)))
        global CORR_SUB_SAMPLING, FIELD_OF_VIEW_DEG, FIELD_OF_VIEW_RAD
        self.image_recognition_threshold = rospy.get_param(
            '/image_recognition_threshold', 0.1)
        CORR_SUB_SAMPLING = rospy.get_param('/image_subsampling', 1)
        FIELD_OF_VIEW_DEG = rospy.get_param('/image_field_of_view_width_deg',
                                            2 * 60.6 + 2 * 27.0)
        FIELD_OF_VIEW_RAD = math.radians(FIELD_OF_VIEW_DEG)
        self.search_range = rospy.get_param('~search-range', 1)

        # data saving
        self.save_dir = os.path.expanduser(
            rospy.get_param('/data_save_dir',
                            '~/miro/data/follow-straight_tests/5'))
        self.save_full_res_images = rospy.get_param('/save_full_res_images',
                                                    True)
        self.save_full_res_image_at_goal = rospy.get_param(
            '/save_full_res_image_at_goal', True)
        self.last_full_res_image = None
        self.save_gt_data = rospy.get_param('/save_gt_data', True)
        self.publish_gt_goals = rospy.get_param('/publish_gt_goals', True)
        if self.save_dir[-1] != '/':
            self.save_dir += '/'
        if not os.path.isdir(self.save_dir):
            os.makedirs(self.save_dir)
        for subdir in ('norm/', 'pose/', 'offset/', 'correction/'):
            if not os.path.isdir(self.save_dir + subdir):
                os.makedirs(self.save_dir + subdir)
        if self.save_full_res_images or self.save_full_res_image_at_goal:
            if not os.path.isdir(self.save_dir + 'full/'):
                os.makedirs(self.save_dir + 'full/')
        self.goal_number = 0

        self.save_params()