def tsukuba_load_poses(fn):
    """Load ground-truth poses from a NewTsukuba-style CSV file.

    Each row of the file is (X, Y, Z, R, P, Y) with translations in
    centimeters and angles in degrees.  Each pose is conjugated by a
    180-degree rotation about the x-axis (pre- and post-multiplied),
    which maps the dataset convention
    (X, Y, Z, R, P, Y) -> (X, -Y, -Z, R, -P, -Y).

    Parameters
    ----------
    fn : str
        Path to the comma-separated pose file ('~' is expanded).

    Returns
    -------
    list of RigidTransform
        One transform per row of the file.
    """
    # Hoisted out of the comprehension: the flip transform is
    # loop-invariant, no need to rebuild it twice per row.
    flip = RigidTransform.from_rpyxyz(np.pi, 0, 0, 0, 0, 0)
    P = np.loadtxt(os.path.expanduser(fn), dtype=np.float64, delimiter=',')
    return [flip *
            RigidTransform.from_rpyxyz(
                np.deg2rad(p[3]), np.deg2rad(p[4]), np.deg2rad(p[5]),
                p[0] * .01, p[1] * .01, p[2] * .01,  # cm -> m
                axes='sxyz') *
            flip
            for p in P]
def set_pose(self, left_pose):
    """Set the left-camera extrinsics; the right camera is placed one
    baseline along the left camera's x-axis."""
    super(StereoCamera, self).set_pose(left_pose)
    baseline_offset = RigidTransform.from_rpyxyz(0, 0, 0, self.baseline, 0, 0)
    self.right.set_pose(self.left.oplus(baseline_offset))
def from_left_with_baseline(cls, lcamera, baseline):
    """Construct a stereo pair from the left camera alone.

    The left camera's extrinsics are assumed to be identity; the right
    camera is offset by `baseline` along the left camera's x-axis.
    """
    shift = RigidTransform.from_rpyxyz(0, 0, 0, baseline, 0, 0)
    right_extrinsics = CameraExtrinsic.from_rigid_transform(lcamera.oplus(shift))
    rcamera = Camera.from_intrinsics_extrinsics(lcamera.intrinsics, right_extrinsics)
    return cls(lcamera, rcamera, baseline=baseline)
def get_noise():
    """Sample a random rigid-transform perturbation from the enclosing
    `noise` spec.

    `noise` is either 6 values (per-axis stds: xyz then rpy) or 2 values
    (one isotropic std for xyz, one for rpy; a non-positive std yields
    zeros for that group).
    """
    if len(noise) == 6:
        xyz = np.random.normal(0, noise[:3])
        rpy = np.random.normal(0, noise[3:])
    elif len(noise) == 2:
        xyz = np.zeros(3) if noise[0] <= 0 else np.random.normal(0, noise[0], size=3)
        rpy = np.zeros(3) if noise[1] <= 0 else np.random.normal(0, noise[1], size=3)
    else:
        raise ValueError('Unknown noise length, either 2, or 6')
    return RigidTransform.from_rpyxyz(rpy[0], rpy[1], rpy[2],
                                      xyz[0], xyz[1], xyz[2])
def draw_laser_frustum(pose, zmin=0.0, zmax=10, fov=np.deg2rad(60)):
    """Build face and edge vertex arrays for visualizing a laser frustum.

    Sweeps an arc of radius `zmax` across `fov` radians in front of
    `pose`, then fans triangles from the pose origin to the arc.
    Returns a (faces, edges) tuple of stacked vertex arrays.

    NOTE(review): `zmin` is accepted but currently unused.
    """
    n_samples = 30
    angles = np.linspace(-fov / 2, fov / 2, n_samples)
    curve = np.vstack([(RigidTransform.from_rpyxyz(0, 0, rad, 0, 0, 0) *
                        np.array([[zmax, 0, 0]]))
                       for rad in angles])
    curve_w = pose * curve

    faces = []
    edges = []
    origin = pose.translation
    for cpt1, cpt2 in zip(curve_w[:-1], curve_w[1:]):
        faces += [origin, cpt1, cpt2]
        edges += [cpt1, cpt2]

    # Close the fan: connect the last and the first curve points back
    # to the current pose origin.
    edges += [edges[-1], origin]
    edges += [edges[0], origin]

    return (np.vstack(faces), np.vstack(edges))
import time
import numpy as np
from pybot.geometry.rigid_transform import RigidTransform, Pose
import pybot.externals.draw_utils as draw_utils

# Demo script: exercises the draw_utils visualization API (sensor frames,
# pose lists, point clouds, line segments).
if __name__ == "__main__":
    print('Reset')
    time.sleep(1)

    # Register a sensor frame 1 m above the origin; subsequent draws can
    # reference it by name via frame_id.
    print('Create new sensor frame')
    draw_utils.publish_sensor_frame('new_frame', RigidTransform(tvec=[0,0,1]))

    # Ten poses spaced 1 m apart along x, each rolled a further 10 degrees.
    poses = [RigidTransform.from_rpyxyz(np.pi/180 * 10*j, 0, 0, j * 1.0, 0, 0)
             for j in range(10)]
    draw_utils.publish_pose_list('poses', poses, frame_id='origin')
    # Same poses, but drawn relative to the frame registered above.
    draw_utils.publish_pose_list('poses_new_frame', poses, frame_id='new_frame')

    print('Create point cloud with colors')
    X = np.random.rand(1000, 3)
    # NOTE(review): C is built (random red channel) but not passed to
    # publish_cloud below, which uses c='b' instead — confirm intent.
    C = np.hstack((np.random.rand(1000, 1), np.zeros((1000,2))))
    draw_utils.publish_cloud('cloud', X, c='b', frame_id='new_frame', reset=True)
    draw_utils.publish_line_segments('lines', X[:-1], X[1:], c='y', frame_id='new_frame')
    time.sleep(2)

    # Second cloud, shifted 5 m along x; this section may continue beyond
    # the visible chunk (no publish call follows here).
    print('Overwriting point cloud')
    X = np.random.rand(1000, 3) + np.float32([5, 0, 0])
    C = np.hstack((np.random.rand(1000, 1), np.zeros((1000,2))))
class VisualizationMsgsPub:
    """Visualization publisher.

    Maintains named sensor frames and per-channel unique ids, and
    publishes collections messages (via `publish`/`serialize`) to the
    viewer.  Registers the 'camera' and 'origin' frames on construction.
    """
    # Default camera viewpoint: rolled and yawed -90 degrees, 2 m up
    # (static 'sxyz' axes).
    CAMERA_POSE = RigidTransform.from_rpyxyz(-np.pi / 2, 0, -np.pi / 2,
                                             0, 0, 2, axes='sxyz')
    # Pose rotated +90 degrees about x (maps drawing into the x-z plane).
    XZ_POSE = RigidTransform.from_rpyxyz(np.pi / 2, 0, 0, 0, 0, 0, axes='sxyz')

    def __init__(self):
        self._channel_uid = dict()   # channel name -> small integer uid
        self._sensor_pose = dict()   # channel name -> RigidTransform
        self.reset_visualization()
        self.publish_sensor_frame('camera', pose=VisualizationMsgsPub.CAMERA_POSE)
        self.publish_sensor_frame('origin', pose=RigidTransform.identity())

    def publish(self, channel, data):
        """Forward serialized data to the module-level publish()."""
        publish(channel, data)

    def list_frames(self):
        """Return the dict of registered sensor poses, keyed by channel."""
        return self._sensor_pose

    def channel_uid(self, channel):
        """Return a stable unique id for `channel`, assigned on first use.

        Offset by 1000 to keep channel ids clear of other id ranges.
        """
        uid = self._channel_uid.setdefault(channel, len(self._channel_uid))
        return uid + 1000

    def has_sensor_pose(self, channel):
        return channel in self._sensor_pose

    def get_sensor_pose(self, channel):
        # Copy so callers cannot mutate the stored pose in place.
        return self._sensor_pose[channel].copy()

    def set_sensor_pose(self, channel, pose):
        self._sensor_pose[channel] = pose

    def reset_visualization(self):
        """Clear all existing collections in the viewer."""
        # Fixed typo in log message: 'Reseting' -> 'Resetting'.
        print('{} :: Resetting Visualizations'.format(self.__class__.__name__))
        msg = vs.reset_collections_t()
        self.publish("RESET_COLLECTIONS", serialize(msg))

    def publish_sensor_frame(self, channel, pose=None):
        """Publish the sensor frame that point clouds are drawn relative to.

        sensor_frame_msg.id is hashed by its channel (may be collisions
        since its right shifted by 32).

        NOTE(review): the pose=None default will raise below at
        pose.to_rpyxyz() — callers must always pass a pose.
        """
        # Sensor-frame collection message, keyed by the channel's uid.
        msg = vs.obj_collection_t()
        msg.id = self.channel_uid(channel)
        msg.name = 'BOTFRAME_' + channel
        msg.type = vs.obj_collection_t.AXIS3D
        msg.reset = True

        # Single object carrying the frame's pose (static 'sxyz' axes).
        pose_msg = vs.obj_t()
        roll, pitch, yaw, x, y, z = pose.to_rpyxyz(axes='sxyz')
        pose_msg.id = 0
        pose_msg.x, pose_msg.y, pose_msg.z, \
            pose_msg.roll, pose_msg.pitch, pose_msg.yaw = x, y, z, roll, pitch, yaw

        # Remember the pose so get_sensor_pose() can serve it later.
        self.set_sensor_pose(channel, pose)

        msg.objs.extend([pose_msg])
        msg.nobjs = len(msg.objs)
        self.publish("OBJ_COLLECTION", serialize(msg))