Example #1
 def __init__(self, R=np.eye(3), t=np.zeros(3)):
     """
     Pose is defined as p_cw (pose of the world wrt camera)
     """
     p = RigidTransform.from_Rt(R, t)
     RigidTransform.__init__(self, xyzw=p.quat.to_xyzw(), tvec=p.tvec)
     self.__cached_inverse = None
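The docstring pins down the convention: the stored transform is p_cw, mapping world coordinates into the camera frame. A minimal sketch (illustrative values, assuming numpy is imported as np) of obtaining p_cw from a camera pose given in the world frame:

# Sketch: p_wc is the camera's pose in the world frame; inverting it
# yields p_cw, the convention the constructor above expects.
p_wc = RigidTransform.from_Rt(np.eye(3), np.array([0., 0., 1.]))
p_cw = p_wc.inverse()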
Example #2
def tsukuba_load_poses(fn):
    """
    Retrieve poses from the Tsukuba ground-truth file.
    Converts X Y Z R P Y -> X -Y -Z R -P -Y (positions cm -> m)
    by conjugating each pose with a 180-degree roll.
    """
    P = np.loadtxt(os.path.expanduser(fn), dtype=np.float64, delimiter=',')
    return [RigidTransform.from_rpyxyz(np.pi, 0, 0, 0, 0, 0) *
            RigidTransform.from_rpyxyz(
                np.deg2rad(p[3]), np.deg2rad(p[4]), np.deg2rad(p[5]),
                p[0] * .01, p[1] * .01, p[2] * .01, axes='sxyz') *
            RigidTransform.from_rpyxyz(np.pi, 0, 0, 0, 0, 0) for p in P]
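Sandwiching each pose between two 180-degree rolls implements the sign flip from the docstring: conjugation by a rotation of pi about x negates the y/z translation and the pitch/yaw angles while leaving x and roll unchanged. A hedged sketch of the same conjugation on a single pose (values are illustrative):

# Sketch: conjugation by a 180-degree roll, as in tsukuba_load_poses.
flip = RigidTransform.from_rpyxyz(np.pi, 0, 0, 0, 0, 0)
p = RigidTransform.from_rpyxyz(0.1, 0.2, 0.3, 1.0, 2.0, 3.0, axes='sxyz')
p_flipped = flip * p * flip  # y, z, pitch and yaw change sign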
Example #3
    def __init__(self):
        self._channel_uid = dict()
        self._sensor_pose = dict()

        self.reset_visualization()
        self.publish_sensor_frame('camera',
                                  pose=VisualizationMsgsPub.CAMERA_POSE)
        self.publish_sensor_frame('origin', pose=RigidTransform.identity())
Example #4
 def set_pose(self, left_pose):
     """
     Set the left-camera extrinsics; the right camera is placed
     one baseline along the left camera's +x axis.
     """
     super(StereoCamera, self).set_pose(left_pose)
     self.right.set_pose(
         self.left.oplus(
             RigidTransform.from_rpyxyz(0, 0, 0, self.baseline, 0, 0)))
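Since oplus composes in the left camera's own frame, the right camera always sits self.baseline metres along the left camera's local +x axis, whatever the rig's orientation in the world. A short sketch (poses and baseline are illustrative assumptions):

# Sketch: the right camera is offset along the left camera's local +x.
left_pose = RigidTransform.from_rpyxyz(0, 0, np.pi / 2, 1.0, 0, 0)
baseline = 0.12  # metres, illustrative
right_pose = left_pose.oplus(
    RigidTransform.from_rpyxyz(0, 0, 0, baseline, 0, 0))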
Example #5
 def from_left_with_baseline(cls, lcamera, baseline):
     """ Left extrinsics is assumed to be identity """
     rcamera = Camera.from_intrinsics_extrinsics(
         lcamera.intrinsics,
         CameraExtrinsic.from_rigid_transform(
             lcamera.oplus(
                 RigidTransform.from_rpyxyz(0, 0, 0, baseline, 0, 0))))
     return cls(lcamera, rcamera, baseline=baseline)
Example #6
 def load_poses(fn):
     """
     poses: image_id tx ty tz qx qy qz qw
     """
     X = (np.fromfile(fn, dtype=np.float64, sep=' ')).reshape(-1, 8)
     abs_poses = [RigidTransform(xyzw=p[4:], tvec=p[1:4]) for p in X]
     # Rebase against the first pose so the trajectory starts at identity
     rel_poses = [abs_poses[0].inverse() * p for p in abs_poses]
     return rel_poses
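Rebasing against the first pose makes the returned trajectory start at the identity, since rel_poses[0] is abs_poses[0].inverse() * abs_poses[0]. A quick sanity-check sketch (the file name is hypothetical):

# Sketch: the rebased trajectory starts at the identity.
rel_poses = load_poses('poses.txt')  # hypothetical pose file
assert np.allclose(rel_poses[0].tvec, np.zeros(3))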
Example #7
 def predict(self, im, return_rigid_transform=True):
     self.forward(im)
     predicted_q = self.extract(layer='cls3_fc_wpqr')
     predicted_x = self.extract(layer='cls3_fc_xyz')
     
     predicted_q_norm = predicted_q / np.linalg.norm(predicted_q)
     if return_rigid_transform: 
         return RigidTransform(Quaternion.from_wxyz(predicted_q_norm), tvec=predicted_x)
     return (predicted_q_norm, predicted_x)
Example #8
    def read_pose(gt):
        # Build a right-handed camera basis from the viewing direction and
        # up vector; x is normalised in case cam_up is not a unit vector
        # orthogonal to cam_dir.
        cam_dir, cam_up = gt.cam_dir, gt.cam_up
        z = cam_dir / np.linalg.norm(cam_dir)
        x = np.cross(cam_up, z)
        x /= np.linalg.norm(x)
        y = np.cross(z, x)

        R = np.vstack([x, y, z]).T
        t = gt.cam_pos / 1000.0  # mm -> m
        return RigidTransform.from_Rt(R, t)
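Stacking [x, y, z] as columns yields a right-handed rotation matrix whenever cam_up is not parallel to cam_dir. A self-contained sketch of the same basis construction, checked for orthonormality (inputs are illustrative):

# Sketch: build the camera basis and verify R^T R = I.
cam_dir = np.array([0., 0., 1.])
cam_up = np.array([0., 1., 0.])
z = cam_dir / np.linalg.norm(cam_dir)
x = np.cross(cam_up, z)
x /= np.linalg.norm(x)
y = np.cross(z, x)
R = np.vstack([x, y, z]).T
assert np.allclose(R.T.dot(R), np.eye(3))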
Example #9
 def drawParticles2d(self,
                     V,
                     frame_id='origin',
                     name='particles',
                     lname='yaw',
                     c=['b'],
                     lc=['r'],
                     ll=0.05):
     LD = []
     if len(V) < 1:
         print('drawParticles2d -- no data, skipping')
         return
     d, N = V[0].shape
     l = len(V)
     for v in V:
         Dv = np.vstack((v[0:2, :], np.zeros((1, N)))).T
         LD.append(Dv)
     publish_cloud(name, LD, c=c, frame_id=frame_id)
     if d > 2:
         count = -1
         DV = np.zeros((d, l * N))
         DT = np.zeros((d, l * N))
         for v in V:
             tt = np.zeros((d, N))
             for i in range(0, N):
                 t = Pose.from_rigid_transform(
                     0,
                     RigidTransform(xyzw=Quaternion.from_rpy(
                         0, 0, v[2, i]).to_xyzw(),
                                    tvec=[v[0, i], v[1, i], 0]))
                 pert = Pose.from_rigid_transform(
                     0, RigidTransform(tvec=[ll, 0, 0]))
                 tv = (t.oplus(pert)).tvec
                 tt[0:2, i] = tv[0:2]
             count += 1
             DT[0:d, (count * N):((count + 1) * N)] = tt
             Dv = np.vstack((v[0:2, :], np.zeros((1, N))))
             DV[0:d, (count * N):((count + 1) * N)] = Dv
         publish_line_segments(lname,
                               DV.T,
                               DT.T,
                               c=lc[0],
                               frame_id=frame_id)
Example #10
        def load_poses(fn, version):
            """ Retrieve poses for each scene """

            if version == 'v1':
                P = np.loadtxt(os.path.expanduser(fn),
                               usecols=(2, 3, 4, 5, 6, 7, 8),
                               dtype=np.float64)
                return [RigidTransform(Quaternion.from_wxyz(p[:4]), p[4:])
                        for p in P]
            elif version == 'v2':
                P = np.loadtxt(os.path.expanduser(fn), dtype=np.float64)
                return [RigidTransform(Quaternion.from_wxyz(p[:4]), p[4:])
                        for p in P]
            else:
                raise ValueError(
                    'Version %s not supported. Check the dataset and '
                    'choose either the v1 or v2 scene dataset' % version)
Example #11
    def decode(self, msg):
        if self.__gazebo_index is None:
            for j, name in enumerate(msg.name):
                if name == 'mobile_base':
                    self.__gazebo_index = j
                    break

        pose = msg.pose[self.__gazebo_index]
        tvec, ori = pose.position, pose.orientation
        return RigidTransform(xyzw=[ori.x, ori.y, ori.z, ori.w],
                              tvec=[tvec.x, tvec.y, tvec.z])
Example #12
    def on_tags_detection_cb(self, tag_array, tf_dict=None):
        if self.gotkf == 0:
            return
        elif self.gotkf == 1:
            self.gotkf -= 1
        else:
            raise ValueError("Tags and keyframes out of sync")
        #print "DETECTING TAGS"
        detections = tag_array.detections
        if len(detections) == 0:
            return # don't do anything
        tag_ids = [d.id for d in detections]
        frame_ct = tag_array.detections[0].pose.header.frame_id # frame of the tag in the camera ref
        rospy.logerr('Tag detections of {}'.format(tag_ids))
        poses_ct = [RigidTransform(xyzw=[d.pose.pose.orientation.x,
                                         d.pose.pose.orientation.y,
                                         d.pose.pose.orientation.z,
                                         d.pose.pose.orientation.w],
                                   tvec=[d.pose.pose.position.x,
                                         d.pose.pose.position.y,
                                         d.pose.pose.position.z])
                    for d in detections]
        if not tf_dict:
            if not (self.transformer.frameExists('/base_link')
                    and self.transformer.frameExists(frame_ct)):
                return
            t = self.transformer.getLatestCommonTime(frame_ct, '/base_link')
            try:
                (trans, rot) = self.transformer.lookupTransform('/base_link', frame_ct, t)
                pose_bc = RigidTransform(xyzw=rot, tvec=trans)  # pose of the camera in the body ref frame
            except Exception:
                print(sys.exc_info())
                rospy.logerr('could not get camera_rgb to base_link transform')
                return
        else:
            # tf_dict[from, to] = (trans, rot) in the form of a RigidTransform
            e = tf_dict[0]
            pose_bc = RigidTransform(xyzw=e.quat, tvec=e.tvec).inverse()  # pose of camera in body frame
        poses = [pose_bc * pose_ct for pose_ct in poses_ct]  # rigid transforms for tag_ids, in body frame

        for i in range(len(tag_ids)):
            x = poses[i].t[0]
            y = poses[i].t[1]
            self.neo4j_iface.add_measurement(bearingRangeFactorPose2Point2(tag_ids[i], math.atan2(y,x), math.sqrt(x*x + y*y), bearingRangeNoise))
Example #13
 def on_odom_cb(self, data):
     r = RigidTransform(xyzw=[data.pose.pose.orientation.x,
                              data.pose.pose.orientation.y,
                              data.pose.pose.orientation.z,
                              data.pose.pose.orientation.w],
                        tvec=[data.pose.pose.position.x,
                              data.pose.pose.position.y,
                              data.pose.pose.position.z]) # current odom
     if not self.old_odom:
         self.old_odom = r
         # no odom diff set
     else:
         self.odom_diff = self.old_odom.inverse() * r
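The increment old_odom.inverse() * r expresses the motion in the previous body frame, so it can be replayed onto any other estimate by right-composition. A sketch with illustrative poses:

# Sketch: replaying an odometry increment onto a different estimate.
old = RigidTransform.from_rpyxyz(0, 0, 0.1, 1.0, 0, 0)
new = RigidTransform.from_rpyxyz(0, 0, 0.2, 1.5, 0, 0)
odom_diff = old.inverse() * new
estimate = RigidTransform.from_rpyxyz(0, 0, np.pi, 5.0, 0, 0)
estimate = estimate * odom_diff  # advance by the same local motion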
Example #14
    def get_noise():
        if len(noise) == 6:
            xyz = np.random.normal(0, noise[:3])
            rpy = np.random.normal(0, noise[3:])
        elif len(noise) == 2:
            xyz = np.random.normal(0, noise[0], size=3) \
                  if noise[0] > 0 else np.zeros(3)
            rpy = np.random.normal(0, noise[1], size=3) \
                  if noise[1] > 0 else np.zeros(3)
        else:
            raise ValueError('Unknown noise length; expected 2 or 6')

        return RigidTransform.from_rpyxyz(rpy[0], rpy[1], rpy[2], xyz[0],
                                          xyz[1], xyz[2])
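One plausible use, not shown in the snippet, is right-composing the sampled perturbation onto a clean pose to simulate noisy measurements; the composition side is an assumption:

# Sketch: perturb a pose with sampled noise. get_noise closes over
# noise, so this assumes both are in scope as in the enclosing function.
noise = [0.01, 0.001]  # [xyz sigma, rpy sigma], matching the len == 2 branch
pose = RigidTransform.from_rpyxyz(0, 0, 0, 1.0, 2.0, 0)
noisy_pose = pose * get_noise()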
Example #15
        def visualize_ground_truth(self):
            import pybot.externals.lcm.draw_utils as draw_utils

            # Publish ground truth poses, and aligned point clouds
            draw_utils.publish_pose_list('ground_truth_poses', self.poses)

            # carr = get_color_by_label(scene.map_info.labels)
            draw_utils.publish_cloud(
                'aligned_cloud', [obj.points for obj in self.map_info.objects],
                c=[obj.colors for obj in self.map_info.objects])
            draw_utils.publish_pose_list(
                'aligned_poses', [
                    RigidTransform(tvec=obj.center)
                    for obj in self.map_info.objects
                ],
                texts=[str(obj.label) for obj in self.map_info.objects])
Example #16
def draw_laser_frustum(pose, zmin=0.0, zmax=10, fov=np.deg2rad(60)):

    N = 30
    curve = np.vstack([(RigidTransform.from_rpyxyz(0, 0, rad, 0, 0, 0) *
                        np.array([[zmax, 0, 0]]))
                       for rad in np.linspace(-fov / 2, fov / 2, N)])

    curve_w = pose * curve

    faces, edges = [], []
    for cpt1, cpt2 in zip(curve_w[:-1], curve_w[1:]):
        faces.extend([pose.translation, cpt1, cpt2])
        edges.extend([cpt1, cpt2])

    # Connect the last point in the curve with the current pose,
    # then connect the first point in the curve with the current pose
    edges.extend([edges[-1], pose.translation])
    edges.extend([edges[0], pose.translation])

    faces = np.vstack(faces)
    edges = np.vstack(edges)
    return (faces, edges)
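A minimal call sketch; the identity pose anchors the frustum at the origin (parameter values are illustrative):

# Sketch: a 5 m frustum spanning +/-30 degrees, anchored at the origin.
faces, edges = draw_laser_frustum(RigidTransform.identity(),
                                  zmax=5.0, fov=np.deg2rad(60))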
Example #17
    def oxts_process_cb(self, fn):
        X = np.fromfile(fn, dtype=np.float64, sep=' ')
        packet = AttrDict({fmt: x
                           for (fmt, x) in zip(self.oxts_formats, X)})

        # compute scale from first lat value
        if self.scale_ is None:
            self.scale_ = np.cos(packet.lat * np.pi / 180.)

        # Use a Mercator projection to get the translation vector
        tx = self.scale_ * packet.lon * np.pi * EARTH_RADIUS / 180.
        ty = self.scale_ * EARTH_RADIUS * \
             np.log(np.tan((90. + packet.lat) * np.pi / 360.))
        tz = packet.alt
        t = np.array([tx, ty, tz])

        # Use the Euler angles to get the rotation matrix
        rx, ry, rz = packet.roll, packet.pitch, packet.yaw
        Rx = np.float32([1, 0, 0,
                         0, np.cos(rx), -np.sin(rx),
                         0, np.sin(rx), np.cos(rx)]).reshape(3, 3)
        Ry = np.float32([np.cos(ry), 0, np.sin(ry),
                         0, 1, 0,
                         -np.sin(ry), 0, np.cos(ry)]).reshape(3, 3)
        Rz = np.float32([np.cos(rz), -np.sin(rz), 0,
                         np.sin(rz), np.cos(rz), 0,
                         0, 0, 1]).reshape(3, 3)
        R = np.dot(Rz, Ry.dot(Rx))
        pose = RigidTransform.from_Rt(R, t)

        # We want the initial position to be the origin, but keep the ENU
        # coordinate system
        if self.p_init_ is None:
            self.p_init_ = pose.inverse()

        return AttrDict(packet=packet, pose=self.p_init_ * pose)
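The translation above is the standard Mercator projection scaled by cos(lat0) so distances are approximately metric near the first packet's latitude. A standalone sketch of the mapping (EARTH_RADIUS is not defined in the snippet; 6378137 m, the WGS84 equatorial radius, is an assumption):

# Sketch of the Mercator mapping used above (lat/lon in degrees).
EARTH_RADIUS = 6378137.0  # metres, assumed WGS84 equatorial radius

def mercator_xy(lat, lon, scale):
    tx = scale * lon * np.pi * EARTH_RADIUS / 180.
    ty = scale * EARTH_RADIUS * np.log(np.tan((90. + lat) * np.pi / 360.))
    return tx, ty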
Example #18
def kitti_load_poses(fn):
    X = (np.fromfile(fn, dtype=np.float64, sep=' ')).reshape(-1,12)
    return [RigidTransform.from_Rt(p[:3,:3], p[:3,3])
            for p in [x.reshape(3,4) for x in X]]
Example #19
    def on_tags_detection_cb(self, tag_array, tf_dict=None):
        #print "DETECTING TAGS"
        detections = tag_array.detections
        if len(detections) == 0:
            return  # don't do anything
        tag_ids = [d.id for d in detections]
        # frame of the tag in the camera reference
        frame_ct = tag_array.detections[0].pose.header.frame_id
        rospy.logerr('Tag detections of {}'.format(tag_ids))
        poses_ct = [
            RigidTransform(
                xyzw=[d.pose.pose.orientation.x, d.pose.pose.orientation.y,
                      d.pose.pose.orientation.z, d.pose.pose.orientation.w],
                tvec=[d.pose.pose.position.x, d.pose.pose.position.y,
                      d.pose.pose.position.z]) for d in detections
        ]
        if not tf_dict:
            if not (self.transformer.frameExists('/base_link')
                    and self.transformer.frameExists(frame_ct)):
                return
            t = self.transformer.getLatestCommonTime(frame_ct, '/base_link')
            try:
                (trans, rot) = self.transformer.lookupTransform(
                    '/base_link', frame_ct, t)
                pose_bc = RigidTransform(
                    xyzw=rot,
                    tvec=trans)  # pose of the camera in the body ref frame
            except Exception:
                print(sys.exc_info())
                rospy.logerr(
                    'could not get camera_rgb to base_link transform')
                return
        else:
            # tf_dict[from, to] = (trans, rot) in the form of a RigidTransform
            e = tf_dict[0]
            pose_bc = RigidTransform(
                xyzw=e.quat,
                tvec=e.tvec).inverse()  # pose of camera in body frame
        # rigid transforms for tag_ids, in the body frame
        poses = [pose_bc * pose_ct for pose_ct in poses_ct]

        for i in range(len(tag_ids)):
            x = poses[i].t[0]
            y = poses[i].t[1]
            #rospy.logerr('\n\n tag is {}'.format(tag_ids[i]))
            self.session.run(
                "MERGE (l1:NEWDATA:LAND:SESSTURTLE {frtend: {land_info}}) "
                "MERGE (o1:POSE:NEWDATA:SESSTURTLE {frtend: {var_info}}) "
                "MERGE (f:FACTOR:NEWDATA:SESSTURTLE {frtend: {fac_info}}) "  # odom-landmark factor
                "MERGE (o1)-[:DEPENDENCE]->(f) "
                "MERGE (f)-[:DEPENDENCE]->(l1) "
                "RETURN id(o1)",
                {
                    "land_info": json.dumps({
                        "t": "L",
                        "tag_id": tag_ids[i],
                        "userready": 0
                    }),
                    "var_info": json.dumps({
                        "t": "P",
                        "uid": self.idx_,
                        "userready": 0
                    }),
                    "fac_info": json.dumps({
                        "t": "F",
                        "lklh": "BR G 2",
                        "meas": (str(math.atan2(y, x)) + " " +
                                 str(math.sqrt(x * x + y * y)) +
                                 " 1e-3 0 1e-2"),
                        "userready": 0,
                        "btwn": str(self.idx_) + " " + str(tag_ids[i])
                    })
                })
Example #20
 def odom_decode(data):
     tvec, ori = data.pose.position, data.pose.orientation
     return RigidTransform(xyzw=[ori.x, ori.y, ori.z, ori.w],
                           tvec=[tvec.x, tvec.y, tvec.z])
Example #21
 def __repr__(self):
     return 'CameraExtrinsic =======>\npose = {:}'.format(
         RigidTransform.__repr__(self))
Example #22
    def establish_tfs(self, relations):
        """
        Perform a one-time look up of all the requested
        *static* relations between frames (available via /tf)
        """

        # Init node and tf listener
        rospy.init_node(self.__class__.__name__, disable_signals=True)
        tf_listener = tf.TransformListener()

        # Create tf decoder
        tf_dec = TfDecoderAndPublisher(channel='/tf')

        # Establish tf relations
        print('{} :: Establishing tfs from ROSBag'.format(
            self.__class__.__name__))
        for self.idx, (channel, msg, t) in enumerate(
                self.log.read_messages(topics='/tf')):
            tf_dec.decode(msg)
            for (from_tf, to_tf) in relations:

                # If the relations have been added already, skip
                if (from_tf, to_tf) in self.relations_map_:
                    continue

                # Check if the frames exist yet
                if (not tf_listener.frameExists(from_tf)
                        or not tf_listener.frameExists(to_tf)):
                    continue

                # Retrieve the transform with common time
                try:
                    tcommon = tf_listener.getLatestCommonTime(from_tf, to_tf)
                    (trans, rot) = tf_listener.lookupTransform(
                        from_tf, to_tf, tcommon)
                    self.relations_map_[(from_tf, to_tf)] = \
                        RigidTransform(tvec=trans, xyzw=rot)
                    # print('\tSuccessfully received transform: {:} => {:} {:}'
                    #       .format(from_tf, to_tf, self.relations_map_[(from_tf,to_tf)]))
                except (tf.LookupException, tf.ConnectivityException,
                        tf.ExtrapolationException):
                    pass

            # Finish up once we've established all the requested tfs
            if len(self.relations_map_) == len(relations):
                break

        try:
            tfs = [
                self.relations_map_[(from_tf, to_tf)]
                for (from_tf, to_tf) in relations
            ]
            for (from_tf, to_tf) in relations:
                print('\tSuccessfully received transform:\n\t\t {:} => {:} {:}'
                      .format(from_tf, to_tf,
                              self.relations_map_[(from_tf, to_tf)]))

        except KeyError:
            raise RuntimeError('Error concerning tf lookup')
        print('{} :: Established {:} relations\n'.format(
            self.__class__.__name__, len(tfs)))

        return tfs
Example #23
class VisualizationMsgsPub:
    """
    Visualization publisher class
    """
    CAMERA_POSE = RigidTransform.from_rpyxyz(
        -np.pi / 2, 0, -np.pi / 2, 0, 0, 2, axes='sxyz')
    XZ_POSE = RigidTransform.from_rpyxyz(np.pi / 2, 0, 0, 0, 0, 0, axes='sxyz')

    def __init__(self):
        self._channel_uid = dict()
        self._sensor_pose = dict()

        self.reset_visualization()
        self.publish_sensor_frame('camera',
                                  pose=VisualizationMsgsPub.CAMERA_POSE)
        self.publish_sensor_frame('origin', pose=RigidTransform.identity())
        # self.publish_sensor_frame('origin_xz', pose=VisualizationMsgsPub.XZ_POSE)

    def publish(self, channel, data):
        publish(channel, data)

    def list_frames(self):
        return self._sensor_pose

    def channel_uid(self, channel):
        uid = self._channel_uid.setdefault(channel, len(self._channel_uid))
        return uid + 1000

    # def sensor_uid(self, channel):
    #     uid = self._sensor_uid.setdefault(channel, len(self._sensor_uid))
    #     return uid
    def has_sensor_pose(self, channel):
        return channel in self._sensor_pose

    def get_sensor_pose(self, channel):
        return self._sensor_pose[channel].copy()

    def set_sensor_pose(self, channel, pose):
        self._sensor_pose[channel] = pose

    def reset_visualization(self):
        print('{} :: Resetting visualizations'.format(self.__class__.__name__))
        msg = vs.reset_collections_t()
        self.publish("RESET_COLLECTIONS", serialize(msg))

    def publish_sensor_frame(self, channel, pose=None):
        """
        Publish sensor frame in which the point clouds
        are drawn with reference to. sensor_frame_msg.id is hashed
        by its channel (may be collisions since its right shifted by 32)
        """
        # Sensor frames msg
        msg = vs.obj_collection_t()
        msg.id = self.channel_uid(channel)
        msg.name = 'BOTFRAME_' + channel
        msg.type = vs.obj_collection_t.AXIS3D
        msg.reset = True

        # Send sensor pose
        pose_msg = vs.obj_t()
        roll, pitch, yaw, x, y, z = pose.to_rpyxyz(axes='sxyz')
        pose_msg.id = 0
        pose_msg.x, pose_msg.y, pose_msg.z, \
            pose_msg.roll, pose_msg.pitch, pose_msg.yaw = x, y, z, roll, pitch, yaw

        # Save pose
        self.set_sensor_pose(channel, pose)

        # msg.objs = [pose_msg]
        msg.objs.extend([pose_msg])
        msg.nobjs = len(msg.objs)
        self.publish("OBJ_COLLECTION", serialize(msg))
Example #24
def rt_from_geom_pose(msg):
    ori, pos = msg.orientation, msg.position
    return RigidTransform(tvec=np.array([pos.x, pos.y, pos.z]),
                          xyzw=[ori.x, ori.y, ori.z, ori.w])
Example #25
import time
import numpy as np

from pybot.geometry.rigid_transform import RigidTransform, Pose
import pybot.externals.draw_utils as draw_utils

if __name__ == "__main__":

    print('Reset')
    time.sleep(1)

    print('Create new sensor frame')
    draw_utils.publish_sensor_frame('new_frame', RigidTransform(tvec=[0,0,1]))

    poses = [RigidTransform.from_rpyxyz(np.pi/180 * 10*j, 0, 0, j * 1.0, 0, 0)
             for j in range(10)]
    draw_utils.publish_pose_list('poses', poses, frame_id='origin')
    draw_utils.publish_pose_list('poses_new_frame', poses, frame_id='new_frame')

    print('Create point cloud with colors')
    X = np.random.rand(1000, 3)
    C = np.hstack((np.random.rand(1000, 1), np.zeros((1000,2))))
    draw_utils.publish_cloud('cloud', X, c=C,
                             frame_id='new_frame', reset=True)
    draw_utils.publish_line_segments('lines', X[:-1], X[1:],
                                     c='y', frame_id='new_frame')
    time.sleep(2)

    print('Overwriting point cloud')
    X = np.random.rand(1000, 3) + np.float32([5, 0, 0])
    C = np.hstack((np.random.rand(1000, 1), np.zeros((1000, 2))))
    draw_utils.publish_cloud('cloud', X, c=C, frame_id='new_frame')
Example #26
def kitti_load_poses(fn):
    X = (np.fromfile(fn, dtype=np.float64, sep=' ')).reshape(-1, 12)
    return [RigidTransform.from_Rt(p[:3, :3], p[:3, 3])
            for p in [x.reshape(3, 4) for x in X]]