def get_T_cam_world(self, from_frame, to_frame, config_path='./'):
    """ Get the transformation from the camera frame to the world frame. """
    # Use a cached transform if one has already been saved to disk.
    transform_filename = os.path.join(config_path,
                                      '{0}_{1}.tf'.format(from_frame, to_frame))
    if os.path.exists(transform_filename):
        return RigidTransform.load(transform_filename)

    # Otherwise query the ROS tf tree until the transform becomes available.
    trans = None
    qat = None
    self.tl = tf.TransformListener()
    while not rospy.is_shutdown():
        try:
            time = self.tl.getLatestCommonTime(to_frame, from_frame)
            (trans, qat) = self.tl.lookupTransform(to_frame, from_frame, time)
            break
        except (tf.Exception, tf.LookupException,
                tf.ConnectivityException, tf.ExtrapolationException):
            rospy.loginfo('Transform not available yet, retrying...')
            continue

    # ROS returns quaternions as (x, y, z, w); RigidTransform expects (w, x, y, z).
    qat_wxyz = [qat[-1], qat[0], qat[1], qat[2]]
    ans = RigidTransform(translation=trans, rotation=qat_wxyz,
                         from_frame=from_frame, to_frame=to_frame)

    # Cache the transform so later calls can load it from disk.
    ans.save(transform_filename)
    return ans
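
# Usage sketch (illustrative only): how the lookup above might be called from a
# registration script. ``CameraRegistration`` is a hypothetical name for the
# class this method belongs to, and the frame names are placeholders for
# whatever the tf tree actually publishes.
def _example_get_T_cam_world():
    import rospy
    rospy.init_node('camera_registration_example', anonymous=True)
    reg = CameraRegistration()  # hypothetical owning class of get_T_cam_world
    T_cam_world = reg.get_T_cam_world('camera_link', 'world',
                                      config_path='cfg/calib')
    # T_cam_world.translation -> 3-vector camera position in the world frame
    # T_cam_world.quaternion  -> (w, x, y, z) camera orientation
    return T_cam_world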
k += 1

# (best_R_cb_world and mean_true_robot_point are computed earlier in the
# script, outside this excerpt.)
# Apply the best-fit rotation correction to the chessboard pose.
R_cb_world = best_R_cb_world
T_corrected_cb_world = RigidTransform(rotation=R_cb_world,
                                      from_frame='world',
                                      to_frame='world')
R_cb_world = R_cb_world.dot(T_cb_world.rotation)
T_cb_world = RigidTransform(rotation=R_cb_world,
                            translation=T_cb_world.translation,
                            from_frame=T_cb_world.from_frame,
                            to_frame=T_cb_world.to_frame)

# Chain the chessboard-to-world and camera-to-chessboard transforms to get the
# camera pose in the world frame, and cache the chessboard transform for
# later runs.
T_camera_world = T_cb_world * reg_result.T_camera_cb
T_cb_world.save(config['chessboard_tf'])

# Visualize the registered point clouds if requested.
if config['vis_points']:
    _, depth_im, _ = sensor.frames()
    points_world = T_camera_world * ir_intrinsics.deproject(depth_im)
    true_robot_points_world = PointCloud(
        np.array([T.translation for T in robot_poses]).T,
        frame=ir_intrinsics.frame)
    est_robot_points_world = T_camera_world * PointCloud(
        np.array(robot_points_camera).T, frame=ir_intrinsics.frame)

    # Re-center the estimated robot points on the mean of the measured robot
    # points, then apply the rotation correction and re-center again.
    mean_est_robot_point = np.mean(est_robot_points_world.data,
                                   axis=1).reshape(3, 1)
    est_robot_points_world._data = (est_robot_points_world._data
                                    - mean_est_robot_point
                                    + mean_true_robot_point)
    fixed_robot_points_world = T_corrected_cb_world * est_robot_points_world
    mean_fixed_robot_point = np.mean(fixed_robot_points_world.data,
                                     axis=1).reshape(3, 1)
    fixed_robot_points_world._data = (fixed_robot_points_world._data
                                      - mean_fixed_robot_point
                                      + mean_true_robot_point)

    vis3d.figure()
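
# Illustrative sketch (made-up numbers, not calibration output): the chain
# ``T_cb_world * reg_result.T_camera_cb`` above relies on the frame convention
# of autolab_core's RigidTransform (assumed here to be the source of these
# classes), where ``T_a_b`` maps points from frame ``a`` to frame ``b`` and
# multiplication is only valid when the inner frames agree.
def _example_frame_chaining():
    import numpy as np
    from autolab_core import Point, RigidTransform

    # Chessboard pose in the robot world frame (identity rotation for clarity).
    T_cb_world = RigidTransform(rotation=np.eye(3),
                                translation=np.array([0.5, 0.0, 0.1]),
                                from_frame='cb', to_frame='world')

    # Camera pose relative to the chessboard.
    T_camera_cb = RigidTransform(rotation=np.eye(3),
                                 translation=np.array([0.0, 0.0, 0.7]),
                                 from_frame='camera', to_frame='cb')

    # Inner frames ('cb') match, so the product maps camera points to world.
    T_camera_world = T_cb_world * T_camera_cb

    p_camera = Point(np.array([0.0, 0.0, 1.0]), frame='camera')
    p_world = T_camera_world * p_camera  # p_world.data -> [0.5, 0.0, 1.8]
    return p_world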
    return LA.norm(m_p - base_p)


if __name__ == "__main__":
    # Run to register a camera; make sure the chessboard is visible in the image.
    logging.getLogger().setLevel(logging.INFO)

    cfg_filename = 'cfg/tools/register_webcam.yaml'
    cfg = YamlConfig(cfg_filename)

    for sensor_frame, sensor_config in cfg['sensors'].iteritems():
        if sensor_config['use']:
            logging.info("Registering {0}".format(sensor_frame))
            cid = sensor_config['device_num']
            reg = RegWC(cfg, cid)

            # Register the chessboard and convert the homogeneous matrix into a
            # RigidTransform from the sensor frame to the world frame.
            T_mat = reg.register_chessboard()
            T = RigidTransform(rotation=T_mat[:3, :3],
                               translation=T_mat[:3, 3],
                               from_frame=sensor_frame,
                               to_frame='world')

            # Save the transform to the calibration directory.
            output_dir = os.path.join(cfg['calib_dir'], sensor_frame)
            if not os.path.exists(output_dir):
                os.makedirs(output_dir)
            output_filename = os.path.join(
                output_dir, '{0}_to_world.tf'.format(sensor_frame))
            logging.info("Saving to {0}".format(output_filename))
            T.save(output_filename)
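
# Downstream usage sketch: once a ``<frame>_to_world.tf`` file has been saved
# by the loop above, other scripts can load it to move sensor measurements into
# the world frame. The path and the 'webcam_0' frame name below are placeholder
# assumptions standing in for whatever register_webcam.yaml actually configures.
def _example_load_saved_transform():
    import os
    import numpy as np
    from autolab_core import Point, RigidTransform

    calib_dir = 'calib'          # corresponds to cfg['calib_dir']
    sensor_frame = 'webcam_0'    # one of the frames under cfg['sensors']
    tf_path = os.path.join(calib_dir, sensor_frame,
                           '{0}_to_world.tf'.format(sensor_frame))
    T_webcam_world = RigidTransform.load(tf_path)

    # Transform a point observed in the webcam frame into the world frame.
    p_webcam = Point(np.array([0.02, -0.01, 0.55]), frame=sensor_frame)
    p_world = T_webcam_world * p_webcam
    return p_world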