Example #1
import copy

import rospy
import tf2_ros

def get_tfbuf(bag, duration=8000000):
    """ given an open rosbag, load all the /tf transforms into a tf2 buffer. """
    tfbuf = tf2_ros.BufferCore(rospy.Duration(duration))
    for topic, msg, ts in bag.read_messages('/tf'):
        for tfi in msg.transforms:
            # copy so the buffer owns its own message instance
            tfbuf.set_transform(copy.deepcopy(tfi), 'default')
    return tfbuf
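
Once the buffer is filled, transforms can be queried offline with lookup_transform_core. A minimal usage sketch; the bag path and frame names below are placeholders, not from the original code:

import rosbag

with rosbag.Bag('example.bag', 'r') as bag:  # placeholder path
    tfbuf = get_tfbuf(bag)

# placeholder frames; use frame ids that actually appear in the bag's /tf
t = tfbuf.lookup_transform_core('map', 'base_link', rospy.Time(0))
print(t.transform.translation)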
Example #2
 def __init__(self, continuous=False, tag_id=None, parent_frame_name=None):
     """
     :param continuous: <bool>
         * If True, it will keep updating the position of the cameras with respect to the detected tag.
         * If False, the reading is only done once, and it keeps broadcasting the same tf.
     :param tag_id: <int> id of the tag used to calibrate
     :param parent_frame_name: <str> Name of the frame to set as parent for the detected tags. If None, it is the same as calibration_frame
     """
     self.tag_id = tag_id
     self.continuous = continuous
     self.calibration_frame_name = 'calibration_frame'
     if parent_frame_name is None:
         self.broadcast_parent_frame_name = self.calibration_frame_name
     else:
         self.broadcast_parent_frame_name = parent_frame_name
     self.lock = threading.Lock()
     rospy.init_node('camera_tf_fusion')
     self.tf_buffer = tf2_ros.BufferCore() # provides coordinate transforms between any two frames in a system
     self.transformer = tf2_ros.TransformListener(self.tf_buffer)
     self.tf_broadcaster = tf2_ros.TransformBroadcaster()
     self.detections = {} # stores the detections -- (shared result)
     self.tfs = {}
     self.subscribers = self._get_subscribers()
     self.broadcast_tfs()
     rospy.spin()
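
The continuous flag only takes effect inside broadcast_tfs, which the excerpt omits. A hypothetical sketch of such a method (everything beyond the fields defined above is an assumption), assuming self.tfs maps child frame names to TransformStamped messages:

 def broadcast_tfs(self):
     # hypothetical sketch: re-broadcast cached transforms from a timer so the
     # rospy.spin() in __init__ keeps servicing callbacks. When continuous is
     # False the cache is written once, so the same tf is re-sent forever;
     # when True the detection callbacks keep updating it.
     def _rebroadcast(event):
         with self.lock:
             for tf_msg in self.tfs.values():
                 tf_msg.header.stamp = rospy.Time.now()
                 self.tf_broadcaster.sendTransform(tf_msg)
     rospy.Timer(rospy.Duration(0.1), _rebroadcast)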
Example #3
import copy

import rosbag
import rospy
import tf2_ros

def get_tfbuf_bagfiles(bag_fnames, duration=8000000):
    """ like get_tfbuf, but accumulate /tf from several bag files into one buffer. """
    tfbuf = tf2_ros.BufferCore(rospy.Duration(duration))
    for bag_fname in bag_fnames:
        with rosbag.Bag(bag_fname, 'r') as bag:
            for topic, msg, ts in bag.read_messages('/tf'):
                for tfi in msg.transforms:
                    tfbuf.set_transform(copy.deepcopy(tfi), 'default')
    return tfbuf
Example #4
def comparePosition(leverup_pose, old_pcb_pose, new_pcb_pose):
    """Predict where the lever-up trajectory moves the PCB and return the
    2D distance between that prediction and the observed new PCB pose."""
    ### Lever Up Pose
    import tf
    global lever_up_effects
    buffer_core = tf2_ros.BufferCore(rospy.Duration(1))
    ts1 = TransformStamped()
    ts1.header.stamp = rospy.Time(0)
    ts1.header.frame_id = 'map'
    ts1.child_frame_id = 'frame1'
    ts1.transform.translation = old_pcb_pose.position

    direction_theta = tf.transformations.euler_from_quaternion([
        leverup_pose.orientation.x, leverup_pose.orientation.y,
        leverup_pose.orientation.z, leverup_pose.orientation.w
    ])[2] / np.pi * 180
    if np.abs(direction_theta + 90) <= 45:
        direction = 0
    elif direction_theta + 90 < -45:
        direction = -np.pi / 2
    elif direction_theta + 90 > 45:
        direction = np.pi / 2
    quaternion = tf.transformations.quaternion_from_euler(0, 0, direction)

    ts1.transform.rotation.x = quaternion[0]
    ts1.transform.rotation.y = quaternion[1]
    ts1.transform.rotation.z = quaternion[2]
    ts1.transform.rotation.w = quaternion[3]

    buffer_core.set_transform(ts1, "default_authority")

    pcb_bounding_values = rospy.get_param('pcb_bounding_values')
    pcb_size_x = pcb_bounding_values[0]  ## ROSPARAM
    pcb_size_y = pcb_bounding_values[1]  ## ROSPARAM

    selection_criterias = ('no-wall',
                           max(0, min(9, int((pcb_size_y - 0.03) / 0.005))))

    if np.abs(direction_theta + 90) > 45:
        selection_criterias = ('wall',
                               max(0, min(6, int(
                                   (pcb_size_x - 0.05) / 0.005))))
    effect = lever_up_effects[selection_criterias]  ### Trajectory

    ts2 = TransformStamped()
    ts2.header.stamp = rospy.Time(0)
    ts2.header.frame_id = 'frame1'
    ts2.child_frame_id = 'frame2'

    ts2.transform.translation.y = (effect[-1, 0] - effect[0, 0])
    ts2.transform.translation.x = (effect[-1, 1] - effect[0, 1])
    ts2.transform.rotation.w = 1.0
    buffer_core.set_transform(ts2, "default_authority")

    transformation = buffer_core.lookup_transform_core('map', 'frame2',
                                                       rospy.Time(0))
    return np.linalg.norm([
        new_pcb_pose.position.x - transformation.transform.translation.x,
        new_pcb_pose.position.y - transformation.transform.translation.y
    ])
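
The returned distance works because BufferCore composes ts1 and ts2, so a single map to frame2 lookup already combines old_pcb_pose's position, the snapped yaw, and the lever-up displacement. A self-contained sketch of that chaining with arbitrary values (Example #7 below shows the same pattern):

import rospy
import tf2_ros
from geometry_msgs.msg import TransformStamped

bc = tf2_ros.BufferCore(rospy.Duration(1))
for parent, child, x, y in [('map', 'frame1', 1.0, 0.0),
                            ('frame1', 'frame2', 0.0, 2.0)]:
    ts = TransformStamped()
    ts.header.stamp = rospy.Time(0)
    ts.header.frame_id = parent
    ts.child_frame_id = child
    ts.transform.translation.x = x
    ts.transform.translation.y = y
    ts.transform.rotation.w = 1.0
    bc.set_transform(ts, 'default_authority')

# with identity rotations the chained map->frame2 translation is (1.0, 2.0, 0.0)
print(bc.lookup_transform_core('map', 'frame2', rospy.Time(0)))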
Example #5
def main(bag_path, out_path):
    """Converts rosbag files into KITTI format files.

    :param bag_path: directory, where bag files are stored
    :param out_path: exported data will be in directory: out_path/kitti
    :return: nothing
    """

    # BufferCore documentation in C++
    # https://docs.ros.org/en/jade/api/tf2/html/classtf2_1_1BufferCore.html#a8f3900f749ab24dd824b14c0e453d1a6
    tf_buffer = tf2_ros.BufferCore(rospy.Duration.from_sec(3600.0))

    clouds_per_bag_filename = 'clouds_per_bag.txt'
    clouds_per_bag_file = open(clouds_per_bag_filename, 'w+')
    clouds_per_bag_file.write(
        'number of bag, last scanned pointcloud in this bag\n')

    i_cloud = 0
    i_image = 0
    i_bag = 0
    robot_name = 'X1'
    camera_frames = utils.get_camera_frames()
    cam_info_0_saved = False
    cam_info_1_saved = False
    cam_info_2_saved = False
    cam_info_3_saved = False
    p0 = None
    # guard against bags that lack some of the CameraInfo topics
    K1 = K2 = K3 = None

    # go through bag files in given directory one-by-one
    for bag_file in sorted(os.listdir(bag_path)):
        bag_file = os.path.join(bag_path, bag_file)

        logging.info("processing file: " + bag_file)

        with rosbag.Bag(bag_file, 'r') as bag:
            info = bag.get_type_and_topic_info()

            # Get tf_buffer
            for topic, msg, t in bag.read_messages():
                # Use rosbag info to determine msg type as
                # type(msg) gives a rosbag-specific helper type.
                dtype = info[1][topic][0]

                if dtype == 'tf2_msgs/TFMessage':
                    logging.info('Got transform at %s' % topic)
                    msg = TFMessage(*slots(msg))
                    for tf in msg.transforms:
                        # Robot pose has to be stored as tf not tf_static
                        if tf.child_frame_id == robot_name:
                            robot_parent_name = tf.header.frame_id
                            tf_buffer.set_transform(tf, 'default_authority')
                            # logging.info('%s -> %s set' % (tf.header.frame_id, tf.child_frame_id))
                        elif topic == '/tf':
                            tf_buffer.set_transform(tf, 'default_authority')
                            # logging.info('%s -> %s set' % (tf.header.frame_id, tf.child_frame_id))
                        elif topic == '/tf_static':
                            tf_buffer.set_transform_static(
                                tf, 'default_authority')
                            # logging.info('static %s -> %s set' % (tf.header.frame_id, tf.child_frame_id))
                elif dtype == 'sensor_msgs/CameraInfo':
                    msg = CameraInfo(*slots(msg))
                    if msg.header.frame_id == camera_frames[
                            0] and not cam_info_0_saved:
                        p0 = msg.P
                        R0 = msg.R
                        cam_info_0_saved = True
                    elif (msg.header.frame_id
                          == camera_frames[1]) and not cam_info_1_saved:
                        K1 = msg.K
                        cam_info_1_saved = True
                    elif (msg.header.frame_id
                          == camera_frames[2]) and not cam_info_2_saved:
                        K2 = msg.K
                        cam_info_2_saved = True
                    elif (msg.header.frame_id
                          == camera_frames[3]) and not cam_info_3_saved:
                        K3 = msg.K
                        cam_info_3_saved = True
            calibration_matrices = [K1, K2, K3]

        with rosbag.Bag(bag_file, 'r') as bag:
            info = bag.get_type_and_topic_info()

            # Get Image and PointCloud2 data
            for topic, msg, t in bag.read_messages():
                dtype = info[1][topic][0]
                if dtype == 'sensor_msgs/PointCloud2':
                    logging.info('Got cloud at %s' % topic)
                    # Create proper ROS msg type for ros_numpy.
                    msg = PointCloud2(*slots(msg))
                    # Convert to structured numpy array.
                    cloud = numpify(msg)
                    # Convert to 3xN array.
                    pts = np.stack([cloud[f] for f in ('x', 'y', 'z')])
                    # Add some default reflectance (assuming it is not provided, otherwise use it).
                    refl = np.zeros((1, cloud.shape[0], cloud.shape[1]),
                                    pts.dtype)
                    pts = np.concatenate((pts, refl))
                    # Save transform
                    transforms = utils.lookup_transforms_to_artifacts(
                        msg, tf_buffer)
                    if len(transforms) != 0:
                        # Save transforms
                        # Save bounding boxes
                        bboxs = utils.artifacts_in_pointcloud(pts, transforms)
                        label_filename = os.path.join(out_path, 'KITTI',
                                                      'label_2',
                                                      '%06i.txt' % i_cloud)
                        utils.save_bbox_data(bboxs, label_filename)
                        # Save calibration file
                        calib_filename = os.path.join(out_path, 'KITTI',
                                                      'calib',
                                                      '%06i.txt' % i_cloud)
                        utils.save_calib_file(calib_filename,
                                              calibration_matrices, p0,
                                              tf_buffer, msg.header.stamp)
                        # Save pointcloud
                        cloud_filename = os.path.join(out_path, 'KITTI',
                                                      'velodyne',
                                                      '%06i.bin' % i_cloud)
                        with open(cloud_filename, 'wb') as file:
                            pts.T.tofile(file)
                        i_cloud += 1
                elif dtype == 'sensor_msgs/Image':
                    logging.info('Got image at %s' % topic)
                    # Create proper ROS msg type for ros_numpy.
                    msg = Image(*slots(msg))
                    if (msg.header.frame_id != "X1/camera_0/camera_0_optical"):
                        continue
                    # Convert to structured numpy array.
                    img = numpify(msg)
                    transforms = utils.lookup_transforms_to_artifacts(
                        msg, tf_buffer)
                    if len(transforms) != 0:
                        # Save image
                        image_filename = os.path.join(out_path, 'KITTI',
                                                      'image_2',
                                                      '%06i.png' % i_image)
                        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
                        cv2.imwrite(image_filename, img)
                        i_image += 1

            tf_buffer.clear()
            i_bag += 1
            clouds_per_bag_file.write(bag_file + ", " + str(i_cloud) + '\n')

    clouds_per_bag_file.close()
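
Example #5 rebuilds typed messages with TFMessage(*slots(msg)) and similar calls, but the slots helper itself is not shown. A common definition, assumed here rather than taken from this project:

def slots(msg):
    """Return a ROS message's field values in declaration order."""
    return [getattr(msg, slot) for slot in msg.__slots__]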
Example #6
 def __init__(self, parent_frame_name=None):
     self.tf_buffer = tf2_ros.BufferCore()
     self.tf_broadcaster = tf2_ros.TransformBroadcaster()
     self.subscribers = self._get_subscribers()
     self.pointcloud = {}
Example #7
#!/usr/bin/env python
import rospy
import tf2_ros
from geometry_msgs.msg import TransformStamped

buffer_core = tf2_ros.BufferCore(rospy.Duration(10.0))
ts1 = TransformStamped()
ts1.header.stamp = rospy.Time(0)
ts1.header.frame_id = 'map'
ts1.child_frame_id = 'frame1'
ts1.transform.translation.x = 2.71828183
ts1.transform.rotation.w = 1.0
# TODO(lucasw) does the authority matter at all?  Could it be set to anything?
buffer_core.set_transform(ts1, "default_authority")

# print(dir(buffer_core))
# BufferCore exposes lookup_transform_core() rather than lookup_transform().
a = buffer_core.lookup_transform_core('map', 'frame1', rospy.Time(0))
print(a)
# translation: x = 2.71828183, rotation: identity
b = buffer_core.lookup_transform_core('frame1', 'map', rospy.Time(0))
print(b)
# translation: x = -2.71828183 (the inverse transform)

ts2 = TransformStamped()
ts2.header.stamp = rospy.Time(0)
ts2.header.frame_id = 'frame1'
ts2.child_frame_id = 'frame2'
ts2.transform.translation.x = 0.0
ts2.transform.translation.y = 0.5
# TODO(lucasw) example rotation using transform3d/transformations.py
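
A possible continuation for the TODO above, sketched under the assumption that tf.transformations is available: give frame2 a yaw rotation, register it, and look up the chained map to frame2 transform.

import math
from tf.transformations import quaternion_from_euler

q = quaternion_from_euler(0.0, 0.0, math.pi / 2)  # 90-degree yaw about Z
ts2.transform.rotation.x = q[0]
ts2.transform.rotation.y = q[1]
ts2.transform.rotation.z = q[2]
ts2.transform.rotation.w = q[3]
buffer_core.set_transform(ts2, "default_authority")

c = buffer_core.lookup_transform_core('map', 'frame2', rospy.Time(0))
print(c)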
Example #8
#!/usr/bin/env python
import rospy

# Because of transformations
import tf_conversions
from tf.transformations import euler_from_quaternion
import tf2_ros
import geometry_msgs.msg
import math

if __name__ == '__main__':
    rospy.init_node('Bot_Position', anonymous=True)
    t = tf2_ros.BufferCore(rospy.Duration(1))
    tfBuffer = tf2_ros.Buffer()
    listener = tf2_ros.TransformListener(tfBuffer)

    pospub = rospy.Publisher('Bot_Pos0', geometry_msgs.msg.Vector3, queue_size=1)
    rate = rospy.Rate(10)

    while not rospy.is_shutdown():
        try:
            trans_front = tfBuffer.lookup_transform('camera', 'front', rospy.Time(), rospy.Duration(1))
        except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException):
            rate.sleep()
            continue
        timer = trans_front.header.stamp.secs

        if (rospy.get_time() - timer) < 1.5:
            bot_pos = geometry_msgs.msg.Vector3()

            quat = [trans_front.transform.rotation.x, trans_front.transform.rotation.y,
                    trans_front.transform.rotation.z, trans_front.transform.rotation.w]
            # The original snippet ends here; the lines below are a plausible
            # completion that publishes planar position plus yaw as a Vector3.
            yaw = euler_from_quaternion(quat)[2]
            bot_pos.x = trans_front.transform.translation.x
            bot_pos.y = trans_front.transform.translation.y
            bot_pos.z = yaw
            pospub.publish(bot_pos)
        rate.sleep()
Example #9
def main(bagfile,
         out='maps/{i:04d}_{tag}.png',
         step=1,
         use_tf=False,
         trueyaml='mapfinal_01.yaml',
         truemap='mapfinal_01.pgm',
         erode_frac_min=0.01,
         erode_frac_max=0.04,
         diff_map_lag=1,
         min_diff_mask=0.02,
         out_shape=[256, 256]):
    if use_tf:
        bag = rosbag.Bag(bagfile)
        tf_buffer = tf2_ros.BufferCore(rospy.Duration(10000))
        for i, (topic, msg, t) in enumerate(bag.read_messages(topics=['/tf'])):
            for tfm in msg.transforms:
                tf_buffer.set_transform(tfm, 'bag')

    bag = rosbag.Bag(bagfile)
    i = 0
    queue = deque(maxlen=diff_map_lag)
    last_sim_odom = None
    for topic, msg, t in bag.read_messages(topics=[
            '/robot0/gmapping_map', '/robot0/sim_odom', '/robot0/odom'
    ]):
        if topic == '/robot0/sim_odom':
            last_sim_odom, last_sim_odom_time = msg, t
            continue
        elif topic == '/robot0/odom':
            last_odom, last_odom_time = msg, t
            continue
        elif last_sim_odom is None:
            continue
        else:
            i = i + 1

        # print("Odom Time diff: {}".format(np.abs((t - last_odom_time).secs)))
        # print("Sim Odom Time diff: {}".format(np.abs((t - last_sim_odom_time).secs)))
        # print("Odom Time diff (nsec): {}".format(np.abs((t - last_odom_time).nsecs) / 1e9))
        # print("Sim Odom Time diff (nsec): {}".format(np.abs((t - last_sim_odom_time).nsecs) / 1e9))

        if use_tf:
            try:
                base_T_simmap = tf_buffer.lookup_transform_core(
                    'sim_map', 'robot0/sim_base', t)
                base_T_realmap = tf_buffer.lookup_transform_core(
                    'robot0/map', 'robot0/base', t)
            except tf2_ros.ExtrapolationException as e:
                print(e)
                continue

        if use_tf:
            base_R_simmap = transform_rotation_np_rotation(
                base_T_simmap.transform.rotation)
            base_R_realmap = transform_rotation_np_rotation(
                base_T_realmap.transform.rotation)
        else:
            base_R_simmap = transform_rotation_np_rotation(
                last_sim_odom.pose.pose.orientation)
            base_R_realmap = transform_rotation_np_rotation(
                last_odom.pose.pose.orientation)

        sim_R_real = base_R_simmap.T.dot(base_R_realmap)

        if use_tf:
            base_t_sim = translation_transform_to_numpy(
                base_T_simmap.transform.translation)
            base_t_real = translation_transform_to_numpy(
                base_T_realmap.transform.translation)
        else:
            base_t_sim = translation_transform_to_numpy(
                last_sim_odom.pose.pose.position)
            base_t_real = translation_transform_to_numpy(
                last_odom.pose.pose.position)
        sim_t_real = base_R_simmap.T.dot(base_t_real - base_t_sim)

        if i % step != 0:
            continue

        npmsg = np.array(msg.data)
        Dx, Dy, resolution = msg.info.width, msg.info.height, msg.info.resolution
        # OccupancyGrid data is row-major: height (Dy) rows of width (Dx) columns.
        occgrid = npmsg.reshape(Dy, Dx)
        #xindices, yindices = np.mgrid[0:Dy, 0:Dy]
        #xyindx = np.concatenate((xindices[..., np.newaxis], yindices[..., np.newaxis]), axis=-1)
        #sim_xyidx = sim_R_real[:2, :2].dot(xyindx.T.reshape(2, -1)).T.reshape(Dx, Dx, 2) + sim_t_real[:2]

        # Reducing the amount of prediction needed. If we have too much to
        # predict then the inpainting algorithms fail
        mask = occgrid == -1
        if len(queue) > 0:
            # During the test scenarios, old mask represents what is known
            # and the current map represents future map to be predicted. Thus
            # diff_mask = (mask - old_mask) represents the area which is to be
            # predicted and is unknown. However, mask will not be known exactly,
            # so diff_mask >= (mask - old_mask). We accomplish this by
            # diff_mask = (expand_mask - old_mask). We cannot diminish old_mask
            # because that is already known. We can only be more ambitious and
            # ask the inpainting model to predict expanded masks.
            old_mask = queue[0].astype(bool)  # boolean mask
            if old_mask.shape != occgrid.shape:
                LOG.warning(
                    "the occupancygrid shape has changed {} <=> {}".format(
                        old_mask.shape, occgrid.shape))
                queue.append(mask)
                continue
            else:
                erode_frac = erode_frac_min + np.random.rand() * (
                    erode_frac_max - erode_frac_min)
                kernel = np.ones((int(erode_frac * Dx), int(erode_frac * Dy)),
                                 np.uint8)
                expand_mask = cv2.erode(mask.astype(np.uint8), kernel)
                diff_mask = old_mask & (~expand_mask)
        else:
            diff_mask = ~mask
        if np.sum(diff_mask) < min_diff_mask * (Dx * Dy):
            LOG.info("""Diff mask too small. Try increasing
            diff_map_lag={diff_map_lag} or min_diff_mask={min_diff_mask}.
            Diff fraction={diff_fraction}""".format(
                diff_map_lag=diff_map_lag,
                min_diff_mask=min_diff_mask,
                diff_fraction=np.sum(diff_mask) / (Dx * Dy)))
            queue.append(mask)
            continue

        occgrid[mask] = occgrid.max() / 2
        occgrid = occgrid * 255 / occgrid.max()
        occgrid = occgrid.astype(np.uint8)
        sim_occgrid = occgrid.copy()
        sim_occgrid_cropped = shift_image_to_robot_as_center(
            Image.fromarray(sim_occgrid[::-1, :]),
            R=base_R_realmap,
            t=(int(-base_t_real[0] / resolution),
               int(base_t_real[1] / resolution)))

        sim_occgrid_cropped_bgr = cv2.cvtColor(np.asarray(sim_occgrid_cropped),
                                               cv2.COLOR_GRAY2BGR)
        imsize = sim_occgrid_cropped_bgr.shape[:2]
        #cv2.arrowedLine(sim_occgrid_cropped_bgr,
        #                (-10 + imsize[1]//2, imsize[0]//2),
        #                (10+imsize[1]//2, imsize[0]//2),
        #                (255, 0, 0), thickness=3)
        estimated_outfile = out.format(i=i, tag='estimated')
        outdir = os.path.dirname(estimated_outfile)
        if not os.path.exists(outdir):
            os.makedirs(outdir)
        LOG.info("Writing file {}".format(estimated_outfile))
        sim_occgrid_cropped_bgr = cv2.resize(sim_occgrid_cropped_bgr,
                                             tuple(out_shape))
        cv2.imwrite(estimated_outfile, sim_occgrid_cropped_bgr)

        diff_mask = (diff_mask * 255).astype(np.uint8)
        mask_cropped = shift_image_to_robot_as_center(
            Image.fromarray(diff_mask[::-1, :]),
            R=base_R_realmap,
            t=(int(-base_t_real[0] / resolution),
               int(base_t_real[1] / resolution)))
        mask_cropped = cv2.cvtColor(np.asarray(mask_cropped),
                                    cv2.COLOR_GRAY2BGR)
        mask_cropped = cv2.resize(mask_cropped, tuple(out_shape))
        cv2.imwrite(out.format(i=i, tag='mask'), mask_cropped)

        with open(trueyaml) as f:
            true_params = yaml.safe_load(f)
        true_occgrid_img = Image.open(truemap)
        true_occgrid = np.array(true_occgrid_img)
        true_occgrid_img_scaled = true_occgrid_img.resize(
            (int(true_occgrid.shape[1] * true_params['resolution'] /
                 resolution),
             int(true_occgrid.shape[0] * true_params['resolution'] /
                 resolution)))
        true_occgrid_img_sim_sized = np.zeros_like(sim_occgrid)
        true_origin_in_sim_pixels = np.array(
            true_params['origin']) / resolution
        true_map_image = Image.fromarray(true_occgrid_img_sim_sized)
        true_map_image.paste(true_occgrid_img_scaled,
                             box=(int(sim_occgrid.shape[0] / 2 +
                                      true_origin_in_sim_pixels[0]),
                                  int(sim_occgrid.shape[1] / 2 -
                                      true_occgrid_img_scaled.size[1] -
                                      true_origin_in_sim_pixels[1])))

        true_map_image_cropped = shift_image_to_robot_as_center(
            true_map_image,
            R=base_R_realmap,
            t=(int(-base_t_real[0] / resolution),
               int(base_t_real[1] / resolution)))

        #true_map_image = np.asarray(true_map_image) * 0.8 + np.asarray(sim_occgrid) * 0.2
        true_map_image_bgr = cv2.cvtColor(np.asarray(true_map_image_cropped),
                                          cv2.COLOR_GRAY2BGR)
        imsize = true_map_image_bgr.shape[:2]
        #cv2.arrowedLine(true_map_image_bgr,
        #                (-10 + imsize[1]//2, imsize[0]//2),
        #                (10+imsize[1]//2, imsize[0]//2),
        #                (255, 0, 0), thickness=3)

        true_map_image_bgr = cv2.resize(true_map_image_bgr, tuple(out_shape))
        cv2.imwrite(out.format(i=i, tag='true'), true_map_image_bgr)
        #sim_occgrid[
        #    truncate(sim_xyidx[..., 1].astype(np.int32), 0, Dy-1),
        #    truncate(sim_xyidx[..., 0].astype(np.int32), 0, Dx-1)] = occgrid

        #Image.fromarray(sim_occgrid[::-1, :]).save(out.format(i=i, tag='shifted'))

        #Image.fromarray(occgrid[::-1, :]).save(out.format(i=i, tag='map'))
        queue.append(mask)
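
As the try/except around lookup_transform_core above shows, queries at bag timestamps fail once t falls outside the buffered range. A small helper sketch for that pattern (the function name is illustrative, not from the original):

def lookup_at(tf_buffer, target_frame, source_frame, stamp):
    """Query a BufferCore at a bag timestamp; return None when the buffer
    cannot interpolate to that time."""
    try:
        return tf_buffer.lookup_transform_core(target_frame, source_frame, stamp)
    except (tf2_ros.LookupException, tf2_ros.ConnectivityException,
            tf2_ros.ExtrapolationException):
        return None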