Example #1
    def publish(self, poses, lane_lines, points, lane_lines_points):
        path = Path()
        path.header.frame_id = self.frame_id
        path.header.stamp = rospy.Time(0)
        path.poses = poses
        self.path_pub.publish(path)

        # lane lines

        lane_line0 = Path()
        lane_line0.header.frame_id = self.frame_id
        lane_line0.header.stamp = rospy.Time(0)
        lane_line0.poses = lane_lines[0]
        self.lane_line0_pub.publish(lane_line0)

        lane_line1 = Path()
        lane_line1.header.frame_id = self.frame_id
        lane_line1.header.stamp = rospy.Time(0)
        lane_line1.poses = lane_lines[1]
        self.lane_line1_pub.publish(lane_line1)

        lane_line2 = Path()
        lane_line2.header.frame_id = self.frame_id
        lane_line2.header.stamp = rospy.Time(0)
        lane_line2.poses = lane_lines[2]
        self.lane_line2_pub.publish(lane_line2)

        lane_line3 = Path()
        lane_line3.header.frame_id = self.frame_id
        lane_line3.header.stamp = rospy.Time(0)
        lane_line3.poses = lane_lines[3]
        self.lane_line3_pub.publish(lane_line3)

        path_points = PointCloud()
        path_points.header.frame_id = self.frame_id
        path_points.header.stamp = rospy.Time(0)
        path_points.points = points
        self.path_points_pub.publish(path_points)

        # lane lines point cloud display

        lane_line0_points = PointCloud()
        lane_line0_points.header.frame_id = self.frame_id
        lane_line0_points.header.stamp = rospy.Time(0)
        lane_line0_points.points = lane_lines_points[0]
        self.lane_line0_points_pub.publish(lane_line0_points)

        lane_line1_points = PointCloud()
        lane_line1_points.header.frame_id = self.frame_id
        lane_line1_points.header.stamp = rospy.Time(0)
        lane_line1_points.points = lane_lines_points[1]
        self.lane_line1_points_pub.publish(lane_line1_points)

        lane_line2_points = PointCloud()
        lane_line2_points.header.frame_id = self.frame_id
        lane_line2_points.header.stamp = rospy.Time(0)
        lane_line2_points.points = lane_lines_points[2]
        self.lane_line2_points_pub.publish(lane_line2_points)

        lane_line3_points = PointCloud()
        lane_line3_points.header.frame_id = self.frame_id
        lane_line3_points.header.stamp = rospy.Time(0)
        lane_line3_points.points = lane_lines_points[3]
        self.lane_line3_points_pub.publish(lane_line3_points)
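The four lane-line blocks above differ only by index. A minimal refactoring sketch, assuming the node keeps its publishers in lists (self.lane_line_pubs and self.lane_line_points_pubs are invented names, not part of the original class):

    def publish_lane_lines(self, lane_lines, lane_lines_points):
        # Hedged sketch: self.lane_line_pubs / self.lane_line_points_pubs are
        # assumed lists holding the four publishers used above.
        for pub, poses in zip(self.lane_line_pubs, lane_lines):
            lane_line = Path()
            lane_line.header.frame_id = self.frame_id
            lane_line.header.stamp = rospy.Time(0)
            lane_line.poses = poses
            pub.publish(lane_line)

        for pub, points in zip(self.lane_line_points_pubs, lane_lines_points):
            cloud = PointCloud()
            cloud.header.frame_id = self.frame_id
            cloud.header.stamp = rospy.Time(0)
            cloud.points = points
            pub.publish(cloud)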
Example #2
def test_direct():
    pose_pub = rospy.Publisher(pose_topic, PoseStamped, queue_size=1)
    pose_pub_est = rospy.Publisher(pose_topic_est, PoseStamped, queue_size=1)
    path_pub = rospy.Publisher(path_topic, Path, queue_size=1)
    path_pub_est = rospy.Publisher(path_topic_est, Path, queue_size=1)
    rospy.init_node('mapping', anonymous=True)
    rospy.loginfo("Start Mapping")

    data_root = "/home/bobin/data/rgbd/tum/rgbd_dataset_freiburg1_xyz/"
    trajectory_filename = data_root + 'associate-rgb-tra.txt'
    image_path = data_root + 'associate-rgb-depth.txt'
    image_files = load_image_path(image_path)
    trajectory = load_trajectory(trajectory_filename)
    keys = None
    d = [0.2624, -0.9531, -0.0054, 0.0026, 1.1633]
    camera = pose.Frame.Camera(517.3, 516.5, 318.6, 255.3, 640, 480, d, None)
    pyr_camera = pose.Frame.PyrCamera(camera, 4)
    if len(trajectory) < len(image_files):
        keys = sorted(trajectory.keys())
    else:
        keys = sorted(image_files.keys())
    tracker = pose.track.Tracker(camera)
    path_msg = Path()
    path_msg_est = Path()
    for frame_id, key in enumerate(keys):
        if key not in image_files:
            continue
        image = cv2.imread(data_root + image_files[key][0],
                           cv2.IMREAD_GRAYSCALE)
        depth = cv2.imread(data_root + image_files[key][1],
                           cv2.IMREAD_UNCHANGED)
        if key not in trajectory:
            continue
        gtPose = trajectory[key]
        frame = pose.Frame.Frame(key, image, None, depth, pyr_camera)
        qx, qy, qz, qw = gtPose[3:]
        rot = pose.PyLie.Quaternion.quat2mat([qw, qx, qy, qz])
        gt_pose_mat = np.eye(4)
        gt_pose_mat[:3, :3] = rot
        gt_pose_mat[:3, 3] = gtPose[:3]
        frame.set_gt_pose(gt_pose_mat)
        frame.point_select_grid()
        pose_est = tracker.insert_frame(frame, frame_id)
        # H = tracker.LKTrack(frame)
        # tracker.PoseGaussianNewton()
        # How should the current frame be added to the frame array?
        # tracker.FrameArray[frame_id] = frame
        pose_msg = PoseStamped()
        pose_msg.pose.position.x = gtPose[0]
        pose_msg.pose.position.y = gtPose[1]
        pose_msg.pose.position.z = gtPose[2]
        w, x, y, z = data.pose_utils.rot2quat(rot)
        pose_msg.pose.orientation.w = w
        pose_msg.pose.orientation.x = x
        pose_msg.pose.orientation.y = y
        pose_msg.pose.orientation.z = z
        pose_msg.header = std_msgs.msg.Header()
        pose_msg.header.stamp = rospy.Time.now()
        pose_msg.header.frame_id = 'mapping'
        pose_pub.publish(pose_msg)

        pose_msg_est = PoseStamped()
        pose_msg_est.pose.position.x = pose_est[0, 3]
        pose_msg_est.pose.position.y = pose_est[1, 3]
        pose_msg_est.pose.position.z = pose_est[2, 3]
        w, x, y, z = data.pose_utils.rot2quat(pose_est[:3, :3])
        pose_msg_est.pose.orientation.w = w
        pose_msg_est.pose.orientation.x = x
        pose_msg_est.pose.orientation.y = y
        pose_msg_est.pose.orientation.z = z
        pose_msg_est.header = std_msgs.msg.Header()
        pose_msg_est.header.stamp = rospy.Time.now()
        pose_msg_est.header.frame_id = 'mapping'
        pose_pub_est.publish(pose_msg_est)

        path_msg.header = pose_msg.header
        path_msg.poses.append(pose_msg)
        path_pub.publish(path_msg)

        path_msg_est.header = pose_msg_est.header
        path_msg_est.poses.append(pose_msg_est)
        path_pub_est.publish(path_msg_est)

        rospy.sleep(0.1)
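The ground-truth and estimated poses above are converted to PoseStamped with identical code. A minimal helper sketch that factors this out, reusing the snippet's own data.pose_utils.rot2quat (w, x, y, z order, as above); the helper name is invented:

def mat_to_pose_stamped(T, frame_id='mapping'):
    # Hedged helper: T is a 4x4 homogeneous transform such as gt_pose_mat or pose_est.
    msg = PoseStamped()
    msg.header = std_msgs.msg.Header()
    msg.header.stamp = rospy.Time.now()
    msg.header.frame_id = frame_id
    msg.pose.position.x, msg.pose.position.y, msg.pose.position.z = T[:3, 3]
    w, x, y, z = data.pose_utils.rot2quat(T[:3, :3])
    msg.pose.orientation.w = w
    msg.pose.orientation.x = x
    msg.pose.orientation.y = y
    msg.pose.orientation.z = z
    return msg

With it, the loop body reduces to pose_msg = mat_to_pose_stamped(gt_pose_mat) and pose_msg_est = mat_to_pose_stamped(pose_est).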
Example #3
# coding=utf-8
import re
import json
import io
from geometry_msgs.msg import PoseStamped
from rospy_message_converter import json_message_converter
from nav_msgs.msg import Path
if __name__ == '__main__':
    f = open("guiji/guiji1.txt", "r")
    a = f.readlines()
    str = ''
    y5 = {}
    i = 0
    ite = Path()
    for line in a[0:]:
        seq = re.compile(" ")
        lst = seq.split(line.strip())
        p = PoseStamped()
        ite.poses.append(p)
        ite.poses[i].pose.position.x = float(lst[0])
        ite.poses[i].pose.position.y = float(lst[1])
        i = i + 1
    path_json_ = json_message_converter.convert_ros_message_to_json(ite)
    print(path_json_)
    # print(type(str))
    #print(a[2:])
    f.close()
    #text=json.loads(str)
    #text
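rospy_message_converter also supports the reverse direction. A minimal sketch (assuming the path_json_ string produced above) that rebuilds the Path message from JSON:

    # Hedged sketch: round-trip the JSON string back into a nav_msgs/Path.
    path_restored = json_message_converter.convert_json_to_ros_message(
        'nav_msgs/Path', path_json_)
    print(len(path_restored.poses))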
Example #4
#!/usr/bin/env python

import rospy
from sensor_msgs.msg import LaserScan
from geometry_msgs.msg import Twist
from geometry_msgs.msg import PoseStamped
from nav_msgs.msg import Odometry
from nav_msgs.msg import Path
from tf import transformations

pub_ = None
path_ = Path()
pub_path_ = None
regions_ = {'right': 0, 'fright': 0, 'front': 0, 'fleft': 0, 'left': 0}
state_ = 0
state_dict_ = {
    0: 'find the wall',
    1: 'turn left',
    2: 'follow the wall',
    3: 'fix follow the wall - left',
    4: 'fix follow the wall - right'
}


def find_wall():
    msg = Twist()
    msg.linear.x = 0.2
    msg.angular.z = 0.3

    return msg
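find_wall only produces the Twist; the state-machine helper that usually accompanies state_ and state_dict_ is not shown. A hedged sketch of such a change_state helper (assumed, not from the original):

def change_state(state):
    # Hedged sketch: switch the wall-following state and log the transition,
    # using the state_ / state_dict_ globals defined above.
    global state_
    if state != state_:
        rospy.loginfo('Wall follower - [%s] %s' % (state, state_dict_[state]))
        state_ = state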
Example #5
	def __init__(self):
		self.node_name = rospy.get_name()
		self.pub_path = rospy.Publisher("/fusion_path", Path, queue_size = 20)
		self.sub_odom = rospy.Subscriber("/p3d_odom", Odometry, self.cb_odom, queue_size = 20)
		self.path = Path()
		rospy.loginfo("[%s] Initialized ..." %(self.node_name))
Example #6
def pipeline_test():
    torch.set_grad_enabled(False)
    # Define model for embedding
    base_model = BaseModel(300, 300)
    net_vlad = NetVLAD(num_clusters=args.num_clusters,
                       dim=256,
                       alpha=1.0,
                       outdim=args.final_dim)
    model = EmbedNet(base_model, net_vlad)

    saved_model_file_spinetvlad = os.path.join(args.saved_model_path,
                                               'model-to-check-top1.pth.tar')
    model_checkpoint = torch.load(saved_model_file_spinetvlad,
                                  map_location=lambda storage, loc: storage)
    model.load_state_dict(model_checkpoint)
    print("Loaded spinetvlad checkpoints from \'{}\'.".format(
        saved_model_file_spinetvlad))

    # images_dir = os.path.join(args.dataset_dir, args.sequence)
    database_images_dir = os.path.join(args.dataset_dir, args.sequence)
    query_images_dir = os.path.join(args.dataset_dir, args.sequence)
    database_images_info = query_images_info = make_images_info(
        struct_filename=os.path.join(args.dataset_dir, 'struct_file_' +
                                     args.sequence + '.txt'))
    # database_images_info, query_images_info = train_test_split(images_info_validate, test_size=0.2,
    #                                                            random_state=10)

    if args.use_different_sequence:
        database_images_info = make_images_info(struct_filename=os.path.join(
            args.dataset_dir, 'struct_file_' + args.sequence_database +
            '.txt'))
        query_images_info = make_images_info(struct_filename=os.path.join(
            args.dataset_dir, 'struct_file_' + args.sequence_query + '.txt'))
        database_images_dir = os.path.join(args.dataset_dir,
                                           args.sequence_database)
        query_images_dir = os.path.join(args.dataset_dir, args.sequence_query)

    image_database = ImageDatabase(images_info=database_images_info,
                                   images_dir=database_images_dir,
                                   model=model,
                                   generate_database=True,
                                   transforms=input_transforms())

    config = {
        'superpoint': {
            'nms_radius': 4,
            'keypoint_threshold': 0.005,
            'max_keypoints': -1
        },
        'Superglue': {
            'weights': 'indoor',
            'sinkhorn_iterations': 100,
            'match_threshold': 0.2,
        }
    }
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    matching = Matching(config).eval().to(device)

    saved_model_file_superglue = os.path.join(
        args.saved_model_path, 'spsg-rotation-invariant.pth.tar')
    # saved_model_file_superglue = os.path.join(args.saved_model_path, 'superglue-juxin.pth.tar')

    model_checkpoint = torch.load(saved_model_file_superglue,
                                  map_location=lambda storage, loc: storage)
    matching.load_state_dict(model_checkpoint)
    print("Loaded superglue checkpoints from \'{}\'.".format(
        saved_model_file_superglue))

    translation_errors = []
    rotation_errors = []

    success_records = []
    accumulated_distance = 0
    last_T_w_source_gt = None

    true_count = 0

    rospy.init_node('global_localization', anonymous=False)
    pose_publisher = rospy.Publisher('query_spi_pose',
                                     PoseStamped,
                                     queue_size=10)
    gt_pose_publisher = rospy.Publisher('gt_query_spi_pose',
                                        PoseStamped,
                                        queue_size=10)
    position_offset = np.array([851, -332, 1204])
    T_offset_w = None

    path_estimation_publisher = rospy.Publisher('query_spi_path',
                                                Path,
                                                queue_size=10)
    path_gt_publisher = rospy.Publisher('gt_spi_path', Path, queue_size=10)
    path_estimated = Path()
    path_gt = Path()
    path_estimated.header.frame_id = 'velodyne'
    path_gt.header.frame_id = 'velodyne'

    for query_image_info in tqdm(query_images_info):
        query_results = image_database.query_image(image_filename=os.path.join(
            query_images_dir, query_image_info['image_file']),
                                                   num_results=args.top_k + 1)
        if args.use_different_sequence:
            # avoid the same image from database
            query_results = query_results[:args.top_k]
        else:
            query_results = query_results[1:args.top_k + 1]
        # print('query_result: \n{}'.format(query_results))
        best_score = -1
        T_w_source_best = None
        target_image_best = None

        min_inliers = 20
        max_inliers = 30
        # min_inliers = 0
        # max_inliers = 0
        resolution = int(100 / args.meters_per_pixel)
        source_image = Image.open(
            os.path.join(query_images_dir, query_image_info['image_file']))
        for query_result in query_results:
            target_image = Image.open(
                os.path.join(database_images_dir, query_result['image_file']))

            # source_image_displayed = cv2
            target_kpts, source_kpts, raw_target_kpts, raw_source_kpts = superglue_match(
                target_image, source_image, resolution, matching)
            target_kpts_in_meters = pts_from_pixel_to_meter(
                target_kpts, args.meters_per_pixel)
            source_kpts_in_meters = pts_from_pixel_to_meter(
                source_kpts, args.meters_per_pixel)
            # print("len of target_kpts_in_meters:", len(target_kpts_in_meters))

            T_target_source, score, matches = compute_relative_pose_with_ransac_test(
                target_kpts_in_meters,
                source_kpts_in_meters,
                output_matches=True)
            # T_target_source, score = compute_relative_pose_with_ransac(target_kpts_in_meters, source_kpts_in_meters)
            # T_target_source, score = compute_relative_pose(target_kpts_in_meters, source_kpts_in_meters), len(target_kpts)

            if score is None:
                continue
            if score > best_score:
                target_image_best = target_image
                best_target_kpts, best_source_kpts, best_raw_target_kpts, best_raw_source_kpts = target_kpts, source_kpts, raw_target_kpts, raw_source_kpts
            if score > best_score and score > min_inliers and best_score < max_inliers:
                best_score = score
                # TODO: the way we handle the se3 may be inappropriate
                T_target_source = np.array([[
                    T_target_source[0, 0], T_target_source[0, 1], 0,
                    T_target_source[0, 2]
                ],
                                            [
                                                T_target_source[1, 0],
                                                T_target_source[1, 1], 0,
                                                T_target_source[1, 2]
                                            ], [0, 0, 1, 0], [0, 0, 0, 1]])
                # T_target_source = np.array(
                #     [[1, 0, 0, 0],
                #      [0, 1, 0, 0],
                #      [0, 0, 1, 0],
                #      [0, 0, 0, 1]])
                T_w_target = np.hstack([
                    R.from_quat(query_result['orientation'][[1, 2, 3,
                                                             0]]).as_matrix(),
                    query_result['position'].reshape(3, 1)
                ])
                T_w_target = np.vstack([T_w_target, np.array([0, 0, 0, 1])])
                T_w_source_best = T_w_target @ T_target_source
            # print(T_target_source)
            INVERSE_AUGMENTATION = False
            if INVERSE_AUGMENTATION:
                # tf = superglue_input_transforms(args.meters_per_pixel, 180)
                target_image_inv = TF.rotate(target_image, 180)
                target_kpts_inv, source_kpts, _, _ = superglue_match(
                    target_image_inv, source_image, resolution, matching)
                target_kpts_in_meters_inv = pts_from_pixel_to_meter(
                    target_kpts_inv, args.meters_per_pixel)
                source_kpts_in_meters = pts_from_pixel_to_meter(
                    source_kpts, args.meters_per_pixel)
                # T_target_source, score = compute_relative_pose_with_ransac_test(target_kpts_in_meters_inv,
                #                                                                 source_kpts_in_meters)
                T_target_inv_source, score = compute_relative_pose_with_ransac(
                    target_kpts_in_meters_inv, source_kpts_in_meters)
                # T_target_inv_source, score = compute_relative_pose(target_kpts_in_meters_inv, source_kpts_in_meters), len(target_kpts)
                if score is None:
                    continue
                if score > best_score and score > min_inliers:
                    best_score = score
                    # Since the target image is rotated by 180 degrees, its pose is rotated in the same manner
                    T_target_source = np.array([[
                        -T_target_source[0, 0], -T_target_source[0, 1], 0,
                        -T_target_source[0, 2]
                    ],
                                                [
                                                    -T_target_source[1, 0],
                                                    -T_target_source[1, 1], 0,
                                                    -T_target_source[1, 2]
                                                ], [0, 0, 1, 0], [0, 0, 0, 1]])
                    # T_target_source = np.array(
                    #     [[1, 0, 0, 0],
                    #      [0, 1, 0, 0],
                    #      [0, 0, 1, 0],
                    #      [0, 0, 0, 1]])
                    T_w_target = np.hstack([
                        R.from_quat(
                            query_result['orientation'][[1, 2, 3,
                                                         0]]).as_matrix(),
                        query_result['position'].reshape(3, 1)
                    ])
                    T_w_target = np.vstack(
                        [T_w_target, np.array([0, 0, 0, 1])])
                    T_w_source_best = T_w_target @ T_target_source
            if best_score > max_inliers:
                break

        # display raw query spi
        query_image = np.array(source_image) / 255 * 10
        query_image = cv2.resize(query_image, (resolution, resolution),
                                 interpolation=cv2.INTER_NEAREST)

        # cv2.imshow("query SPI", query_image)

        # display raw candidate spi
        candidate_image = np.array(target_image_best) / 255 * 10
        candidate_image = cv2.resize(candidate_image, (resolution, resolution),
                                     interpolation=cv2.INTER_NEAREST)

        # cv2.imshow("database SPI", candidate_image)

        query_image = np.stack([query_image] * 3, -1)
        candidate_image = np.stack([candidate_image] * 3, -1)

        # display query spi with features
        query_image_with_poi = visualize_poi(query_image.copy(),
                                             best_raw_source_kpts,
                                             color=(255, 0, 0))
        # cv2.imshow("query spi with features", query_image_with_poi)

        # display candidate spi with features
        candidate_image_with_poi = visualize_poi(candidate_image.copy(),
                                                 best_raw_target_kpts,
                                                 color=(0, 255, 0))
        # cv2.imshow("candidate spi with features", candidate_image_with_poi)

        # display matching image
        match_image = visualize_matching(query_image_with_poi,
                                         candidate_image_with_poi,
                                         best_source_kpts,
                                         best_target_kpts,
                                         matches,
                                         threshold=min_inliers)
        # cv2.imshow("matching result", match_image)

        # display all images inside a window
        W, H = 480, 460
        h_margin = 10
        v_margin = 10
        window_image = np.ones((2 * H + 2 * v_margin, 2 * W + h_margin, 3))
        window_image[:H, :W] = cv2.resize(query_image, (W, H),
                                          interpolation=cv2.INTER_NEAREST)
        window_image[:H, -W:] = cv2.resize(candidate_image, (W, H),
                                           interpolation=cv2.INTER_NEAREST)
        window_image[H + v_margin:, :] = cv2.resize(
            match_image, (2 * W + h_margin, H + v_margin),
            interpolation=cv2.INTER_NEAREST)
        cv2.imshow("LiDAR global localization using SPI", window_image)

        cv2.waitKey(1)

        # ground truth pose
        T_w_source_gt = np.hstack([
            R.from_quat(query_image_info['orientation'][[1, 2, 3,
                                                         0]]).as_matrix(),
            query_image_info['position'].reshape(3, 1)
        ])
        T_w_source_gt = np.vstack([T_w_source_gt, np.array([0, 0, 0, 1])])

        if T_offset_w is None:
            T_offset_w = np.linalg.inv(T_w_source_gt)

        # record travelled distance
        if last_T_w_source_gt is not None:
            T_last_current = np.linalg.inv(last_T_w_source_gt) @ T_w_source_gt
            accumulated_distance += np.sqrt(
                T_last_current[:3, 3] @ T_last_current[:3, 3])
        last_T_w_source_gt = T_w_source_gt

        if T_w_source_best is not None:

            delta_T_w_source = np.linalg.inv(T_w_source_best) @ T_w_source_gt
            delta_translation = np.sqrt(
                delta_T_w_source[:3, 3] @ delta_T_w_source[:3, 3])
            delta_degree = np.arccos(
                min(1, 0.5 *
                    (np.trace(delta_T_w_source[:3, :3]) - 1))) / np.pi * 180
            print('Translation error: {}'.format(delta_translation))
            print('Rotation error: {}'.format(delta_degree))
            translation_errors.append(delta_translation)
            rotation_errors.append(delta_degree)
            success_records.append((accumulated_distance, True))

            # publish estimated pose and path
            msg = PoseStamped()
            msg.header.stamp = rospy.Time.now()
            msg.header.frame_id = 'velodyne'
            T_offset_source_best = T_offset_w @ T_w_source_best
            msg.pose.position.x, msg.pose.position.y, msg.pose.position.z = \
                T_offset_source_best[:3, 3]
            quaternion = R.from_matrix(T_offset_source_best[:3, :3]).as_quat()
            msg.pose.orientation.x, msg.pose.orientation.y, msg.pose.orientation.z, msg.pose.orientation.w = quaternion
            pose_publisher.publish(msg)

            path_estimated.poses.append(msg)
            path_estimated.header.stamp = rospy.Time.now()
            path_estimation_publisher.publish(path_estimated)

        else:
            print('Global localization failed.')
            success_records.append((accumulated_distance, False))
            pass

        # publish ground truth pose and path
        gt_msg = PoseStamped()
        gt_msg.header.stamp = rospy.Time.now()
        gt_msg.header.frame_id = 'velodyne'
        T_offset_source_gt = T_offset_w @ T_w_source_gt
        gt_msg.pose.position.x, gt_msg.pose.position.y, gt_msg.pose.position.z = \
            T_offset_source_gt[:3, 3]
        quaternion = R.from_matrix(T_offset_source_gt[:3, :3]).as_quat()
        gt_msg.pose.orientation.x, gt_msg.pose.orientation.y, gt_msg.pose.orientation.z, gt_msg.pose.orientation.w = quaternion
        gt_pose_publisher.publish(gt_msg)

        path_gt.poses.append(gt_msg)
        path_gt.header.stamp = rospy.Time.now()
        path_gt_publisher.publish(path_gt)

        # translation_errors.append(float('nan'))
        # print('accumulated_distance', accumulated_distance)

    translation_errors = np.array(translation_errors)
    rotation_errors = np.array(rotation_errors)
    print('Mean translation error: {}'.format(translation_errors.mean()))
    for r in [0.1, 0.2, 0.3, 0.5, 0.8, 1.0, 2, 3, 4, 5, 6, 7, 8, 9, 10]:
        print('Percentage of translation errors under {} m: {}'.format(
            r, (translation_errors < r).sum() / len(translation_errors)))
    for theta in [1.0, 2, 3, 4, 5, 6, 7, 8, 9, 10]:
        print('Percentage of rotation errors under {} degrees: {}'.format(
            theta, (rotation_errors < theta).sum() / len(rotation_errors)))

    plt.scatter(
        np.linspace(0, len(translation_errors), num=len(translation_errors)),
        np.array(translation_errors))
    plt.xlabel("SPI id")
    plt.ylabel("translation error")
    plt.show()

    travelled_distances = [
        0.2, 0.4, 0.6, 0.8, 1.0, 1.5, 2, 3, 4, 5, 6, 8, 10, 15, 20, 25, 30, 35,
        40, 45, 50
    ]
    probabilities = []
    for thres_distance in travelled_distances:
        probabilities.append(
            localization_probability(accumulated_distance,
                                     np.array(success_records),
                                     thres_distance))
    plt.plot(travelled_distances, probabilities, lw=1)
    # plt.plot([0, 1], [0, 1], '--', color=(0.6, 0.6, 0.6), label="Luck")
    plt.xlabel("travelled distance")
    plt.ylabel("probabilities")
    # plt.show()

    translation_errors = translation_errors[~np.isnan(translation_errors)]
    rotation_errors = rotation_errors[~np.isnan(rotation_errors)]

    trans_err_avg = translation_errors.mean()
    trans_err_std = translation_errors - trans_err_avg
    trans_err_std = np.sqrt((trans_err_std * trans_err_std).mean())
    print("average translation error: {}".format(trans_err_avg))
    print("standard deviation of translation error: {}".format(trans_err_std))

    rotation_err_avg = rotation_errors.mean()
    rotation_err_std = rotation_errors - rotation_err_avg
    rotation_err_std = np.sqrt((rotation_err_std * rotation_err_std).mean())
    print("average rotation_errors error: {}".format(rotation_err_avg))
    print("standard deviation of rotation_errors error: {}".format(
        rotation_err_std))

    print("recall: {}".format(
        len(translation_errors) / len(query_images_info)))

    pass
Example #7
#!/usr/bin/env python
import rospy
import time
import math
import geometry_msgs
import numpy as np
from tf.transformations import euler_from_quaternion, quaternion_from_euler
from geometry_msgs.msg import PoseStamped, Pose, Point
from nav_msgs.msg import Path
from std_msgs.msg import Header

path_progress = 0
current_pose = Pose()
iteration = 0
path_output = Path()
path_output.header.frame_id = "my_frame"
velocity = 1.5


def get_yaw(msg):  #radians
    global roll, pitch, yaw
    orientation_q = msg.orientation
    orientation_list = [
        orientation_q.x, orientation_q.y, orientation_q.z, orientation_q.w
    ]
    (roll, pitch, yaw) = euler_from_quaternion(orientation_list)
    return yaw


#def find
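get_yaw expects anything with an orientation field, such as a Pose. A hedged callback sketch (an /odom nav_msgs/Odometry subscription is assumed and not imported above) that reuses it and grows path_output:

def odom_callback(msg):
    # Hedged sketch: msg is assumed to be a nav_msgs/Odometry message.
    global current_pose
    current_pose = msg.pose.pose
    yaw = get_yaw(current_pose)
    rospy.logdebug("current yaw: %.3f rad", yaw)

    pose = PoseStamped()
    pose.header = Header(stamp=rospy.Time.now(), frame_id="my_frame")
    pose.pose = current_pose
    path_output.poses.append(pose)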
Example #8
def vp_callback(data):
    global T_wc
    global R_wc
    global sj_xyz

    vc_pose = data.data

    cx = -vc_pose[2]
    cy = -vc_pose[0]
    cz = vc_pose[1]
    flag = vc_pose[3]

    if cz < 0:
        print("cz < 0: %f", cz)
        return

    des_x_c = beta * cx + sj_xyz[0]
    des_y_c = beta * cy + sj_xyz[1]
    des_z_c = beta * cz + sj_xyz[2]

    des_xyz_c = np.asarray([des_x_c, des_y_c, des_z_c])
    des_xyz_w = np.matmul(R_wc, des_xyz_c) + T_wc
    sj_xyz_w = np.matmul(R_wc,
                         np.asarray(sj_xyz).reshape(3, 1)).reshape(1, 3) + T_wc

    #print(des_xyz_w, sj_xyz_w)
    im_waypoints = Slerp_waypoints(T_wc, des_xyz_w, sj_xyz_w)
    #im_waypoints = [np.asarray([des_x, des_y, des_z]).reshape(1,3)]

    path = Path()
    for i in range(num_wps):
        im_waypoint = im_waypoints[num_wps - i - 1]

        waypoint = PoseStamped()
        waypoint.pose.position.x = im_waypoint[0, 0]
        waypoint.pose.position.y = im_waypoint[0, 1]
        waypoint.pose.position.z = im_waypoint[0, 2]
        #print(im_waypoint)

        yaw = calc_yaw(im_waypoint, sj_xyz_w)

        waypoint.pose.orientation = geometry_msgs.msg.Quaternion(
            *tf_conversions.transformations.quaternion_from_euler(0, 0, yaw))

        #        waypoint.pose.orientation.x = 0
        #        waypoint.pose.orientation.y = 0
        #        waypoint.pose.orientation.z = 0
        #        waypoint.pose.orientation.w = 1

        path.poses.append(waypoint)

    wps = MarkerArray()

    for i in range(len(path.poses)):
        wp = path.poses[i]
        mk = Marker()
        mk.header.frame_id = "world"
        mk.header.stamp = rospy.Time.now()
        mk.type = Marker.SPHERE
        mk.action = Marker.ADD
        mk.pose.orientation.x = 0.0
        mk.pose.orientation.y = 0.0
        mk.pose.orientation.z = 0.0
        mk.pose.orientation.w = 1.0
        mk.scale.x = 0.15
        mk.scale.y = 0.15
        mk.scale.z = 0.15

        mk.ns = "wp"
        mk.color.a = 1.0
        mk.color.r = 1
        mk.color.g = 0
        mk.color.b = 0

        mk.pose.position.x = wp.pose.position.x
        mk.pose.position.y = wp.pose.position.y
        mk.pose.position.z = wp.pose.position.z
        mk.id = i
        wps.markers.append(mk)

    if flag > 0:
        wp_pub.publish(path)
    visual_wp_pub.publish(wps)
Example #9
import sys
import copy
import rospy
import actionlib
from nav_msgs.msg import Path
from geometry_msgs.msg import PoseStamped
from armf import armtakeoff
from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal

msg = Path()
current_position = PoseStamped()

rospy.init_node('points', anonymous=True)


def movebase_client():

    client = actionlib.SimpleActionClient('move_base', MoveBaseAction)
    client.wait_for_server()
    print("Enter coordinates x and y :")
    x = input()
    y = input()

    goal = MoveBaseGoal()
    goal.target_pose.header.frame_id = "map"
    goal.target_pose.header.stamp = rospy.Time.now()
    goal.target_pose.pose.position.x = float(x)
    goal.target_pose.pose.position.y = float(y)
    goal.target_pose.pose.orientation.w = 1.0

    client.send_goal(goal)
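The snippet ends at send_goal; a typical SimpleActionClient call then blocks for the outcome. A minimal hedged continuation:

    # Hedged continuation: wait for move_base to finish and report the result.
    finished = client.wait_for_result(rospy.Duration(60.0))
    if not finished:
        rospy.logwarn("move_base did not finish within the timeout")
    else:
        rospy.loginfo("move_base finished with state %d", client.get_state())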
Example #10
    def __init__(self):
        self.map = OccupancyGrid()
        self.pub_global_path = rospy.Publisher('/robot0/global_path',
                                               Path,
                                               queue_size=1)
        self.path = Path()
Example #11
    def goal_cb(self, goal):
        quaternion_world2goal = (goal.pose.orientation.x,
                                 goal.pose.orientation.y,
                                 goal.pose.orientation.z,
                                 goal.pose.orientation.w)
        euler_angle = tf.transformations.euler_from_quaternion(
            quaternion_world2goal, axes='sxyz')
        rospy.loginfo(
            "Goal set to x = {0} [m], y = {1} [m], yaw = {2} [deg])".format(
                str(goal.pose.position.x), str(goal.pose.position.y),
                str(euler_angle[2] / math.pi * 180)))

        # A star algorithm
        if is_a_star:
            sx = 0.0  # [m]
            sy = 0.0  # [m]
            gx = goal.pose.position.x  # [m]
            gy = goal.pose.position.y  # [m]

            grid_size = self.map.info.resolution  # [m]
            offset_x = self.map.info.origin.position.x
            offset_y = self.map.info.origin.position.y
            robot_size = 0.5  # [m]

            ox, oy = [], []

            for i in range(self.map.info.height):
                for j in range(self.map.info.width):
                    if self.map.data[i * self.map.info.width + j] > 0:
                        ox.append(j * grid_size + offset_x)
                        oy.append(i * grid_size + offset_y)

            if show_animation:
                plt.plot(ox, oy, ".k")
                plt.plot(sx, sy, "xr")
                plt.plot(gx, gy, "xb")
                plt.grid(True)
                plt.axis("equal")

            rx, ry = a_star.a_star_planning(sx, sy, gx, gy, ox, oy, grid_size,
                                            robot_size)
            ryaw = []
            for i in range(len(rx) - 1):
                ryaw.append(math.atan2(ry[i] - ry[i + 1], rx[i] - rx[i + 1]))
            ryaw.append(ryaw[-1])

            if show_animation:
                plt.plot(rx, ry, "-r")
                plt.show()

        if is_reeds_sheep:
            start_x = 0.0  # [m]
            start_y = 0.0  # [m]
            start_yaw = math.radians(0.0)  # [rad]

            end_x = goal.pose.position.x  # [m]
            end_y = goal.pose.position.y  # [m]
            end_yaw = euler_angle[2]  # [rad]

            curvature = 1.0
            step_size = 0.1

            rx, ry, ryaw, mode, clen = reeds_shepp.reeds_shepp_path_planning(
                start_x, start_y, start_yaw, end_x, end_y, end_yaw, curvature,
                step_size)

            if show_animation:
                plt.cla()
                plt.plot(rx, ry, label="final course " + str(mode))

                # plotting
                reeds_shepp.plot_arrow(start_x, start_y, start_yaw)
                reeds_shepp.plot_arrow(end_x, end_y, end_yaw)

                plt.legend()
                plt.grid(True)
                plt.xlim((min(start_x, end_x) - 3, max(start_x, end_x) + 3))
                plt.ylim((min(start_y, end_y) - 3, max(start_y, end_y) + 3))
                plt.show()

        for ix, iy, iyaw in zip(rx, ry, ryaw):
            quaternion_path = tf.transformations.quaternion_from_euler(
                0, 0, iyaw, axes='sxyz')
            pose = PoseStamped()
            pose.header.frame_id = "world"
            pose.pose.position.x = float(ix)
            pose.pose.position.y = float(iy)
            pose.pose.position.z = 0.0  # Debug: float(iyaw/math.pi*180)
            pose.pose.orientation.x = float(quaternion_path[0])
            pose.pose.orientation.y = float(quaternion_path[1])
            pose.pose.orientation.z = float(quaternion_path[2])
            pose.pose.orientation.w = float(quaternion_path[3])
            pose.header.seq = self.path.header.seq + 1
            self.path.header.frame_id = "world"
            self.path.header.stamp = rospy.Time.now()
            pose.header.stamp = self.path.header.stamp
            self.path.poses.append(pose)
            self.pub_global_path.publish(self.path)

        self.path = Path()
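Because pub_global_path.publish sits inside the loop above, the growing path is re-sent once per waypoint. An equivalent hedged variant of the same tail that fills the message first and publishes the complete path once:

        # Hedged alternative tail for goal_cb: publish the finished path once.
        self.path.header.frame_id = "world"
        self.path.header.stamp = rospy.Time.now()
        for ix, iy, iyaw in zip(rx, ry, ryaw):
            q = tf.transformations.quaternion_from_euler(0, 0, iyaw, axes='sxyz')
            pose = PoseStamped()
            pose.header.frame_id = "world"
            pose.header.stamp = self.path.header.stamp
            pose.pose.position.x = float(ix)
            pose.pose.position.y = float(iy)
            pose.pose.orientation.x = float(q[0])
            pose.pose.orientation.y = float(q[1])
            pose.pose.orientation.z = float(q[2])
            pose.pose.orientation.w = float(q[3])
            self.path.poses.append(pose)
        self.pub_global_path.publish(self.path)
        self.path = Path()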
Example #12
import rospy
import tf
import cv2

# numpy and scipy
import numpy as np
import cv2.aruco as aruco
from std_msgs.msg import Empty
from std_msgs.msg import String
from nav_msgs.msg import Odometry, Path
from geometry_msgs.msg import PoseWithCovarianceStamped, PoseStamped
from geometry_msgs.msg import Point, Pose, Quaternion, Twist, Vector3

drone_pose = Odometry()

path_drone = Path()

msg_aruco = "Empty"

landing = True

msg = """
Keyboard commands for Autonomous Landing of the Quadcopter
----------------------------------------------------------
TakeOff      - Press 1
Landing      - Press 2
MoveCamera   - Press 3
MoveUp       - Press 4
MoveDown     - Press 5
Auto-Landing - Press 6
----------------------------------------------------------
"""
Example #13
    def __init__(self):
        self.last_pos = [0, 0, 0]
        self.path = Path()
        self.meas_data = np.array([0, 0, 0], ndmin=2)
        self.lines = []
Example #14
    def get_velocity_plan(self, velocity_command):

        # Get the starting motion point for the time that the velocity is desired
        start_point = self._get_start_point(
            velocity_command.target_twist.header.stamp,
            velocity_command.start_position, velocity_command.start_velocity)

        self._last_stamp = velocity_command.target_twist.header.stamp

        p_start = msg_to_np(start_point.motion_point.pose.position)
        v_start = msg_to_np(start_point.motion_point.twist.linear)
        v_desired = msg_to_np(velocity_command.target_twist.twist.linear)
        v_delta = v_desired - v_start


        acceleration = self._TARGET_ACCEL if velocity_command.acceleration is None \
                                          else velocity_command.acceleration

        if acceleration > self._MAX_TARGET_ACCEL:
            acceleration = self._MAX_TARGET_ACCEL
            rospy.logerr('iarc7_motion: linear motion profile generator \
                          requested acceleration is too large. \
                          Requested: {} Limit {}'.format(
                acceleration, self._MAX_TARGET_ACCEL))

        # Assign a direction to the target acceleration
        a_target = acceleration * v_delta / np.linalg.norm(v_delta)

        # Find the time required to accelerate to the desired velocity
        acceleration_time = min(
            np.linalg.norm(v_delta) / acceleration, self._PLAN_DURATION)

        # Use the rest of the profile duration to hold the velocity
        steady_velocity_time = self._PLAN_DURATION - acceleration_time

        # Calculate the number of discrete steps spent in each state
        accel_steps = np.floor(acceleration_time / self._PROFILE_TIMESTEP)
        vel_steps = np.floor(steady_velocity_time / self._PROFILE_TIMESTEP)

        # Build up the list of planned velocities
        velocities = []

        # Generate velocities corresponding to the acceleration period
        if accel_steps > 0:
            accel_times = np.linspace(0.0,
                                      accel_steps * self._PROFILE_TIMESTEP,
                                      accel_steps + 1)
            velocities.extend([
                a_target * accel_time + v_start for accel_time in accel_times
            ])

        # Add the velocities for the steady velocity period
        velocities.extend([v_desired for i in range(0, int(vel_steps))])

        velocities = np.array(velocities)

        # Differentiate velocities to get the accelerations
        # This results in an array one element shorter than
        # the velocities array
        accelerations = np.diff(velocities, axis=0) / self._PROFILE_TIMESTEP

        for i in range(0, accelerations.shape[0]):
            if np.linalg.norm(accelerations[i]) > 1.01 * acceleration:
                rospy.logerr(
                    'Linear Motion Profile generator produced an acceleration greater than the maximum allowed'
                )
                rospy.logerr('i {} accel steps {} vel steps {}'.format(
                    i, accel_steps, vel_steps))
                rospy.logerr('accel step times {}'.format(accel_times))
                rospy.logerr('v_d {} v_s {} a_t {}'.format(
                    v_desired, v_start, a_target))
                rospy.logerr('acceleration time {} v time {}'.format(
                    acceleration_time, steady_velocity_time))
                rospy.logerr('acceleration {} velocity {} {}'.format(
                    accelerations[i], velocities[i], velocities[i + 1]))

        # Integrate the velocities to get the position deltas
        # This results in an array the same length as the velocities array,
        # but each position corresponds to the velocity one index higher
        # than the given position index
        positions = (np.cumsum(velocities, axis=0) * self._PROFILE_TIMESTEP +
                     p_start)

        plan = MotionPointStampedArray()
        pose_only_plan = Path()
        pose_only_plan.header.stamp = rospy.Time.now()
        pose_only_plan.header.frame_id = 'map'

        # Fill out the first motion point since it follows different
        # rules than the rest
        motion_point = MotionPointStamped()
        motion_point.header.stamp = start_point.header.stamp
        np_to_msg(accelerations[0], motion_point.motion_point.accel.linear)
        np_to_msg(v_start, motion_point.motion_point.twist.linear)
        np_to_msg(p_start, motion_point.motion_point.pose.position)

        plan.motion_points.append(motion_point)
        pose = PoseStamped()
        pose.header.stamp = motion_point.header.stamp
        pose.pose = motion_point.motion_point.pose
        pose_only_plan.poses.append(pose)

        # Generate the motion profile for all the remaining velocities and accelerations
        for i in range(1, velocities.shape[0] - 1):
            motion_point = MotionPointStamped()
            motion_point.header.stamp = (
                start_point.header.stamp +
                rospy.Duration(i * self._PROFILE_TIMESTEP))
            np_to_msg(accelerations[i], motion_point.motion_point.accel.linear)
            np_to_msg(velocities[i], motion_point.motion_point.twist.linear)
            np_to_msg(positions[i - 1],
                      motion_point.motion_point.pose.position)
            plan.motion_points.append(motion_point)
            pose = PoseStamped()
            pose.header.stamp = motion_point.header.stamp
            pose.pose = motion_point.motion_point.pose
            pose_only_plan.poses.append(pose)

        self._last_motion_plan = plan
        return plan, pose_only_plan
Example #15
    def image_callback(self,data):
            self.show = self.convert_image(data)
            self.time_header = data.header

            if self.show is None: return
  
            # image = Image.fromarray(frame)
            step1 = time.time()
            frame_rgb = cv2.cvtColor(self.show, cv2.COLOR_BGR2RGB)
            frame_resized = cv2.resize(frame_rgb,
                                   (darknet.network_width(netMain),
                                    darknet.network_height(netMain)),
                                   interpolation=cv2.INTER_LINEAR)

            darknet.copy_image_from_bytes(darknet_image,frame_resized.tobytes())

            detections = darknet.detect_image(netMain, metaMain, darknet_image, thresh=0.25)
            #detections is in x y w h in real pixel size already

            #boxs should receive in format similar to  [[584, 268, 160, 316]]

            if detections is None: return
##            image = cvDrawBoxes(detections, frame_resized)
##            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
##            cv2.imshow('Demo', image)
##            cv2.waitKey(1)

            
            detections = np.array(detections)
            detections = detections[ np.where( detections[:,0]==b'person' ) ]
            if detections is None: return
            #print("detections",detections)

            if len(detections)==0: return
            c = np.delete(detections,[0,1],1).squeeze() #remove id and conf, only bbox
            #print("c",c)
            #r =  #resize to coords on original image to compensate for the frame_resized
            if len(c.shape)==0:
                boxs = np.array( [ c.tolist() ] )
            else:   
                boxs = np.array([list(item) for item in c]) #formatting
            #print("boxs",boxs)

            self.fps_count += 1
            self.frame_count += 1

##            boxs = xyxy_to_xywh(transformed)#.astype(np.uint8)
##
            
            boxs[:,2] = (boxs[:,2] /yolo_filter_size) * width  #w
            boxs[:,3] = (boxs[:,3] /yolo_filter_size) * height #h

            boxs[:,0] = (boxs[:,0] /yolo_filter_size) * width   - boxs[:,2]/2#x
            boxs[:,1] = (boxs[:,1] /yolo_filter_size) * height  - boxs[:,3]/2#y
            
            print("time for inference =>"+str(time.time()-step1))
            #print(darknet.network_width(netMain),darknet.network_height(netMain)) #608 #608
            # print("box_num",len(boxs))
            features = encoder(self.show,boxs)
            
            # Detection confidence is fixed to 1.0 here.
            detections = [Detection(bbox, 1.0, feature) for bbox, feature in zip(boxs, features)]
            
            # Run non-maxima suppression.
            boxes = np.array([d.tlwh for d in detections])
            scores = np.array([d.confidence for d in detections])
            indices = preprocessing.non_max_suppression(boxes, nms_max_overlap, scores)
            detections = [detections[i] for i in indices]
            
            for track in tracker.tracks:
                if not track.is_confirmed() or track.time_since_update > 1:
                    continue
                
                self.posid_array = Path()
                
                bbox = track.to_tlbr()
                try:
                    cv2.rectangle(self.show, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])),(255,255,255), 2)
                except ValueError:
                    break
                cv2.putText(self.show, str(track.track_id),(int(bbox[0]), int(bbox[1])),0, 5e-3 * 200, (0,255,0),2)


            # Display the real-time FPS value
            if (time.time() - self.start_time) > self.fps_interval:
                # Count the frames in this interval; with a 1-second interval this is the FPS
                self.realtime_fps = self.fps_count / (time.time() - self.start_time)
                self.fps_count = 0  # reset the frame count
                self.start_time = time.time()
            fps_label = 'FPS:{0:.2f}'.format(self.realtime_fps)
            cv2.putText(self.show, fps_label, (width-160, 25), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 3)


            # Display the elapsed running time and total frame count
            if self.frame_count == 1:
                self.run_timer = time.time()
            run_time = time.time() - self.run_timer
            time_frame_label = '[Time:{0:.2f} | Frame:{1}]'.format(run_time, self.frame_count)
            cv2.putText(self.show, time_frame_label, (5, height-15), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 3)
            
            self.posid_pub.publish(Int32(data=len(boxes)))
            self.track_pub.publish(self.bridge.cv2_to_imgmsg(self.show, "bgr8"))
Example #16
    robotPath.header.frame_id = "odom"
    robotPath.header.stamp = rospy.Time.now()
    pose = PoseStamped()
    pose.header = msg.header
    pose.pose = msg.pose.pose
    robotPath.poses.append(pose)


rospy.init_node("speed_controller")

sub = rospy.Subscriber("/odom", Odometry, newOdom)
pub = rospy.Publisher("/cmd_vel", Twist, queue_size=1)
robot_path_pub = rospy.Publisher("/robot_path", Path, queue_size=10)
r = rospy.Rate(4)
speed = Twist()  #Speed
robotPath = Path()  #Robot path
goal = Point()  #Desired location

while not rospy.is_shutdown():
    #xd = odom.pose.pose.position.x

    goal.x = pathX[cnt]
    goal.y = pathY[cnt]
    print('target x:', pathX[cnt], ' robot x: ', x)
    print('target y:', pathY[cnt], ' robot y: ', y)

    inc_x = goal.x - x
    inc_y = goal.y - y

    #print('y',inc_y)
Example #17
    def image_callback(self,data):
            self.show = self.convert_image(data)
            self.time_header = data.header

            if self.show is None: return
  
            # image = Image.fromarray(frame)
            step1 = time.time()
            frame_rgb = cv2.cvtColor(self.show, cv2.COLOR_BGR2RGB)
            frame_resized = cv2.resize(frame_rgb,
                                   (darknet.network_width(netMain),
                                    darknet.network_height(netMain)),
                                   interpolation=cv2.INTER_LINEAR)

            darknet.copy_image_from_bytes(darknet_image,frame_resized.tobytes())

            detections = darknet.detect_image(netMain, metaMain, darknet_image, thresh=0.25)
            #detections is in x y w h in real pixel size already

            #boxs should receive in format similar to  [[584, 268, 160, 316]]

            if detections is None: return
##            image = cvDrawBoxes(detections, frame_resized)
##            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
##            cv2.imshow('Demo', image)
##            cv2.waitKey(1)

            
            detections = np.array(detections)
            detections = detections[ np.where( detections[:,0]==b'person' ) ]
            if detections is None: return
            #print("detections",detections)

            if len(detections)==0: return
            c = np.delete(detections,[0,1],1).squeeze() #remove id and conf, only bbox
            #print("c",c)
            #r =  #resize to coords on original image to compensate for the frame_resized
            if len(c.shape)==0:
                boxs = np.array( [ c.tolist() ] )
            else:   
                boxs = np.array([list(item) for item in c]) #formatting
            #print("boxs",boxs)

            self.fps_count += 1
            self.frame_count += 1

            #boxs = xyxy_to_xywh(boxs)#.astype(np.uint8)
            boxs = xywh_to_xyxy(boxs)
            
            print(boxs)         

            boxs[:,2] = (boxs[:,2] /yolo_filter_size) * width  #w
            boxs[:,3] = (boxs[:,3] /yolo_filter_size) * height #h

            boxs[:,0] = (boxs[:,0] /yolo_filter_size) * width   #- boxs[:,2]/2#x
            boxs[:,1] = (boxs[:,1] /yolo_filter_size) * height  #- boxs[:,3]/2#y
            
            print("time for inference =>"+str(time.time()-step1))
            #print(darknet.network_width(netMain),darknet.network_height(netMain)) #608 #608
            # print("box_num",len(boxs))
            
            for bbox in boxs:

                box_width  = bbox[2] - bbox[0]
                box_height = bbox[3] - bbox[1]

                bbox[0] = bbox[0] - int(box_width/2)
                bbox[1] = bbox[1] - int(box_height/2)
                
                self.posid_array = Path()
            
                try:
                    cv2.rectangle(self.show, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])),(255,255,255), 2)
                except ValueError:
                    break
                #cv2.putText(self.show, str(track.track_id),(int(bbox[0]), int(bbox[1])),0, 5e-3 * 200, (0,255,0),2)


            # Display the real-time FPS value
            if (time.time() - self.start_time) > self.fps_interval:
                # Count the frames in this interval; with a 1-second interval this is the FPS
                self.realtime_fps = self.fps_count / (time.time() - self.start_time)
                self.fps_count = 0  # reset the frame count
                self.start_time = time.time()
            fps_label = "FPS:"+str(self.realtime_fps)
            cv2.putText(self.show, fps_label, (width-160, 25), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 3)


            # Display the elapsed running time and total frame count
            if self.frame_count == 1:
                self.run_timer = time.time()
            run_time = time.time() - self.run_timer
            time_frame_label = "Frame:"+str(self.frame_count)
            self.posid_pub.publish(Int32(data=len(boxs)))
            self.track_pub.publish(self.bridge.cv2_to_imgmsg(self.show, "bgr8"))
Example #18
    def __init__(self, beam_num, index, num_env):
        self.index = index
        self.num_env = num_env
        node_name = 'Gazebo_Env_' + str(index)
        rospy.init_node(node_name, anonymous=None)

        self.beam_mum = beam_num
        self.laser_cb_num = 0
        self.scan = None

        # used in reset_world
        self.self_speed = [0.0, 0.0]
        self.step_goal = [0., 0.]
        self.step_r_cnt = 0.

        # used in generate goal point
        self.map_size = np.array([8., 8.], dtype=np.float32)  # 8 m x 8 m map
        self.goal_size = 0.5

        self.robot_value = 10.
        self.goal_value = 0.

        # lidar
        self.lidar_danger = 0.2

        cmd_pose_topic = 'robot_' + str(index) + '/cmd_pose'
        self.cmd_pose = rospy.Publisher(cmd_pose_topic, Pose, queue_size=10)

        # -----------Publisher and Subscriber-------------
        #cmd_vel_topic = 'robot_' + str(index) + '/cmd_vel'
        cmd_vel_topic = '/cmd_vel'
        self.cmd_vel = rospy.Publisher(cmd_vel_topic, Twist, queue_size=10)

        #object_state_topic = 'robot_' + str(index) + '/base_pose_ground_truth'

        #object_state_topic = '/amcl_pose'
        object_state_topic = '/odom'
        self.object_state_sub = rospy.Subscriber(object_state_topic, Odometry,
                                                 self.ground_truth_callback)

        #laser_topic = 'robot_' + str(index) + '/base_scan'
        laser_topic = '/scan'
        self.laser_sub = rospy.Subscriber(laser_topic, LaserScan,
                                          self.laser_scan_callback)

        odom_topic = '/odom'
        self.odom_sub = rospy.Subscriber(odom_topic, Odometry,
                                         self.odometry_callback)

        crash_topic = 'robot_' + str(index) + '/is_crashed'
        self.check_crash = rospy.Subscriber(crash_topic, Int8,
                                            self.crash_callback)

        self.sim_clock = rospy.Subscriber('clock', Clock,
                                          self.sim_clock_callback)

        # -----------Gazebo data----------------------------
        goal_topic = 'move_base_simple/goal'
        self.goal_sub = rospy.Subscriber(goal_topic, PoseStamped,
                                         self.goal_callback)

        self.set_state = rospy.ServiceProxy('/gazebo/set_model_state',
                                            SetModelState)
        # -----------rviz----------------------

        ininit_amcl_topic = 'initialpose'
        self.pub_amcl_init = rospy.Publisher(ininit_amcl_topic,
                                             PoseWithCovarianceStamped,
                                             queue_size=10)

        # -----------Service-------------------
        self.reset_stage = rospy.ServiceProxy('reset_positions', Empty)
        self.reset_gazebo = rospy.ServiceProxy('/gazebo/reset_simulation',
                                               Empty)

        self.is_sub_goal = False

        # -----------Look A head --------------

        lah_topic = '/lookAhead_point'
        self.lah_sub = rospy.Subscriber(lah_topic, PoseStamped,
                                        self.lah_callback)

        # -----------test--------------------
        waypoint_pose_topic = '/waypoint_pose'
        self.waypoint_pose = rospy.Publisher(waypoint_pose_topic,
                                             Pose,
                                             queue_size=10)

        self.tf_listener = tf.TransformListener()

        self.PathMsg = Path()

        # # Wait until the first callback
        self.speed = None
        self.state = None
        self.speed_GT = None
        self.state_GT = None
        self.is_crashed = None
        self.is_collision = 0

        self.global_goal_point = [0, 0]
        self.goal_point = [0, 0]

        self.pre_distance = 0
        self.distance = 0
        self.robot_radius = 0.4

        self.way_Path = []
        self.waypoint_gen_flag = 0
        self.way_goal_index = 0

        self.goal_model = Respawn(self.goal_point[0], self.goal_point[1],
                                  'goal')
        self.waypoint_model = Respawn(0, 0, 'waypoint')

        while self.scan is None or self.speed is None or self.state is None\
                or self.speed_GT is None or self.state_GT is None:
            pass

        rospy.sleep(1.)
Example #19
#!/usr/bin/env python
from sslib import *
from nav_msgs.msg import Path
from std_msgs.msg import Header 


path = Path()


def vicon_callback(msg):
    path.header = Header()
    path.header.frame_id = "vicon"
    path.header.stamp = rospy.Time.now()
    path.poses.append(msg)
    pub.publish(path)

if __name__ == '__main__':
    rospy.init_node('plot', anonymous=True)

    rospy.Subscriber("/viconXbee_node/mocap/pose", PoseStamped, vicon_callback)
    pub = rospy.Publisher('path_vicon', Path, queue_size=0)

    rospy.spin()
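path.poses grows without bound in vicon_callback. A hedged variant that caps the history (MAX_POSES is an invented constant, not in the original node):

MAX_POSES = 1000  # hypothetical cap on the number of stored poses

def vicon_callback(msg):
    path.header = Header()
    path.header.frame_id = "vicon"
    path.header.stamp = rospy.Time.now()
    path.poses.append(msg)
    if len(path.poses) > MAX_POSES:
        del path.poses[0]  # drop the oldest pose
    pub.publish(path)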

Example #20
import rospy
from nav_msgs.msg import Path
from nav_msgs.msg import Odometry
from geometry_msgs.msg import PoseStamped, Pose, Vector3, PoseWithCovarianceStamped
from sensor_msgs.msg import Imu

pub = rospy.Publisher('pose_current_2', PoseStamped, queue_size=1)
path_pub = rospy.Publisher('/path', Path, queue_size=1)

path_pub2 = rospy.Publisher('/path2', Path, queue_size=1)
lagging_pub = rospy.Publisher('/lagging_pose', PoseStamped, queue_size=1)
pose_stamped_cov = rospy.Publisher('/waypoints',
                                   PoseWithCovarianceStamped,
                                   queue_size=1)
imu_pub = rospy.Publisher('/imu_data', Imu, queue_size=1)
path = Path()
path2 = Path()
current_pose = Pose()
current_pose2 = Pose()
counter_a = 0  #used for showing location ten spots ago

gps_x = 0
gps_y = 0
gps_z = 0


def gps_location_callback(msg):
    global gps_x
    gps_x = msg.x
    global gps_y
    gps_y = msg.y