Example #1
0
 def _fill_ros_marker_msg_with_pose(self, marker_msg, pose):
     """Copy location and orientation from `pose` into a ROS marker message.

     The pose's roll/pitch/yaw orientation is converted to a quaternion
     before being written to the message.
     """
     quat = math_utils.convert_rpy_to_quat(pose.orientation_rpy())
     marker_msg.pose.position = ros_utils.point_numpy_to_ros(pose.location())
     marker_msg.pose.orientation = ros_utils.quaternion_numpy_to_ros(quat)
 def perform_insert_point_cloud_rpy(self,
                                    location,
                                    orientation_rpy,
                                    point_cloud,
                                    simulate=False):
     """Insert a point cloud, taking the orientation as roll/pitch/yaw.

     Converts the rpy angles to a quaternion and delegates to
     perform_insert_point_cloud_quat().
     """
     quat = math_utils.convert_rpy_to_quat(orientation_rpy)
     return self.perform_insert_point_cloud_quat(
         location, quat, point_cloud, simulate)
Example #3
0
 def get_view_direction_world(self):
     """Return camera viewing direction in world frame.
     Assuming x-axis points forward, y-axis left and z-axis up.
     """
     quat = math_utils.convert_rpy_to_quat(self.get_orientation_rpy())
     # Rotate the camera's forward axis (+x) into the world frame.
     forward_axis = [1, 0, 0]
     return math_utils.rotate_vector_with_quaternion(quat, forward_axis)
Example #4
0
 def _pose_to_ros_pose_msg(self, pose, time=None):
     """Convert a pose into a ROS PoseStamped message in the world frame.

     Args:
         pose: object providing location() and orientation_rpy().
         time: optional ROS timestamp for the header; defaults to the
             current ROS time when omitted.

     Returns:
         A PoseStamped with frame_id, stamp, position and orientation set.
     """
     pose_msg = PoseStamped()
     pose_msg.header.frame_id = self._world_frame
     if time is None:
         time = rospy.Time.now()
     # BUGFIX: a caller-supplied `time` was previously ignored and the
     # header stamp was left unset in that case; always write it now.
     pose_msg.header.stamp = time
     pose_msg.pose.position = ros_utils.point_numpy_to_ros(pose.location())
     orientation_quat = math_utils.convert_rpy_to_quat(
         pose.orientation_rpy())
     pose_msg.pose.orientation = ros_utils.quaternion_numpy_to_ros(
         orientation_quat)
     return pose_msg
 def perform_query_subvolume_rpy(self,
                                 center,
                                 orientation_rpy,
                                 level,
                                 size_x,
                                 size_y,
                                 size_z,
                                 axis_mode=0):
     """Query a subvolume, taking the orientation as roll/pitch/yaw.

     Converts the rpy angles to a quaternion and delegates to
     perform_query_subvolume_quat().
     """
     quat = math_utils.convert_rpy_to_quat(orientation_rpy)
     return self.perform_query_subvolume_quat(
         center, quat, level, size_x, size_y, size_z, axis_mode)
Example #6
0
 def get_depth_point_cloud_world_rpy(self,
                                     location,
                                     orientation_rpy,
                                     filter=True):
     """Return point cloud (from depth image) in world coordinate frame"""
     cam_points = self.get_depth_point_cloud_rpy(location,
                                                 orientation_rpy,
                                                 filter=filter)
     quat = math_utils.convert_rpy_to_quat(orientation_rpy)
     rot_mat = transformations.quaternion_matrix(quat)[:3, :3]
     # Rotate camera-frame points into the world frame, then translate
     # by the camera location.
     return cam_points.dot(rot_mat.T) + location
 def perform_raycast_camera_rpy(self,
                                location,
                                orientation_rpy,
                                width,
                                height,
                                focal_length,
                                ignore_unknown_voxels=False,
                                max_range=-1):
     """Perform a camera raycast, taking the orientation as roll/pitch/yaw.

     Converts the rpy angles to a quaternion and delegates to
     perform_raycast_camera_quat().
     """
     quat = math_utils.convert_rpy_to_quat(orientation_rpy)
     return self.perform_raycast_camera_quat(
         location, quat, width, height, focal_length,
         ignore_unknown_voxels, max_range)
 def perform_insert_depth_map_rpy(self,
                                  location,
                                  orientation_rpy,
                                  depth_image,
                                  intrinsics,
                                  downsample_to_grid=False,
                                  simulate=False,
                                  max_depth=-1):
     """Insert a depth map, taking the orientation as roll/pitch/yaw.

     Converts the rpy angles to a quaternion and delegates to
     perform_insert_depth_map_quat().
     """
     quat = math_utils.convert_rpy_to_quat(orientation_rpy)
     return self.perform_insert_depth_map_quat(
         location, quat, depth_image, intrinsics,
         downsample_to_grid, simulate, max_depth)
Example #9
0
 def get_ray_direction_image_world(self,
                                   width,
                                   height,
                                   focal_length,
                                   orientation_rpy=None):
     """Return camera ray for given width, height and focal length in world frame.
     Assuming x-axis points forward, y-axis left and z-axis up.
     """
     cam_rays = self.get_ray_direction_image(width, height, focal_length)
     # Fall back to the camera's current orientation when none is given.
     if orientation_rpy is None:
         orientation_rpy = self.get_orientation_rpy()
     quat = math_utils.convert_rpy_to_quat(orientation_rpy)
     rot_mat = transformations.quaternion_matrix(quat)[:3, :3]
     # Rotate the camera-frame ray directions into the world frame.
     return cam_rays.dot(rot_mat.T)
Example #10
0
    def get_depth_image(self):
        """Compute a depth image via stereo matching of two rendered views.

        Captures two grayscale renders from the base engine -- one at the
        current pose and one offset sideways along the stereo baseline --
        computes a disparity image between them, converts it to depth, and
        resizes the result to (self._width, self._height).

        Side effects: moves the engine pose (restored to the original pose
        before returning) and writes several debug images (disparity, depth,
        ground-truth depth, both grayscale inputs) to the working directory.
        """
        location, orientation_rpy = self._base_engine.get_pose_rpy()
        quat = math_utils.convert_rpy_to_quat(orientation_rpy)
        # Baseline offset along the camera's -y axis, rotated into the
        # world frame so the second view is shifted sideways.
        stereo_offset = np.array([0, -self._stereo_baseline, 0])
        stereo_offset = math_utils.rotate_vector_with_quaternion(
            quat, stereo_offset)
        # print(stereo_offset)
        pose1 = (location, orientation_rpy)
        pose2 = (location + stereo_offset, orientation_rpy)
        self._base_engine.set_pose_rpy(pose1, wait_until_set=True)
        rgb_image1 = self._base_engine.get_rgb_image()
        rgb_image1 = cv2.cvtColor(rgb_image1, cv2.COLOR_RGB2GRAY)
        # NOTE(review): assumes get_rgb_image() returns floats in [0, 1];
        # scaled to uint8 for the stereo matcher -- TODO confirm.
        rgb_image1 = (255 * rgb_image1).astype(np.uint8)
        self._base_engine.set_pose_rpy(pose2, wait_until_set=True)
        rgb_image2 = self._base_engine.get_rgb_image()
        # rgb_image2 = environment.base.get_engine().get_normal_image()
        # NOTE(review): here scaling happens BEFORE the RGB->gray conversion,
        # the opposite order to rgb_image1 above -- verify this is intended.
        rgb_image2 = (255 * rgb_image2).astype(np.uint8)
        rgb_image2 = cv2.cvtColor(rgb_image2, cv2.COLOR_RGB2GRAY)

        disp_img = self._compute_disparity_image(rgb_image1, rgb_image2)
        print("Minimum disparity: {}".format(np.min(disp_img)))
        print("Maximum disparity: {}".format(np.max(disp_img)))
        # disp_img[:50, :] = 0.5
        depth_img = self._compute_depth_image(disp_img)

        # disparity_img = self._stereo_matcher.compute(rgb_image1, rgb_image2)
        # # OpenCV disparity image is represented as a fixed point disparity map with 4 fractional bits
        # disparity_img = disparity_img.astype(np.float32) / 16.0
        # print("Minimum disparity: {}".format(np.min(disparity_img)))
        # print("Maximum disparity: {}".format(np.max(disparity_img)))
        # # print("focal_length: {}".format(self._focal_length))
        # depth_img = self._stereo_baseline * self._focal_length / disparity_img
        # depth_img[:, :self._num_disparities] = 0
        # depth_img[disparity_img <= 0] = np.finfo(np.float32).max
        # # depth_img[disparity_img == 0] = 10.0
        # Restore the original pose before grabbing the ground-truth depth.
        self._base_engine.set_pose_rpy(pose1, wait_until_set=True)
        gt_depth_img = self._base_engine.get_depth_image()
        # depth_img[depth_img <= self._min_depth] = -1
        # depth_img[:, :self._num_disparities] = -1\

        # # Overwrite depth for far away pixels (problem with sky and block matching)
        # mask = gt_depth_img > 100
        # depth_img[mask] = gt_depth_img[mask]
        # depth_img[:, :self._num_disparities] = 0

        # depth_img[depth_img > 10] = 10.0
        # gt_depth_img[gt_depth_img > 10] = 10.0
        # error_img = np.abs(depth_img - gt_depth_img)
        # print(np.max(error_img))

        # Debug output: clamp depth to 10m so the written PNGs use the
        # full 0..255 range; the returned depth image is NOT clamped.
        depth_img_copy = depth_img.copy()
        depth_img_copy[depth_img_copy > 10] = 10.0
        gt_depth_img_copy = gt_depth_img.copy()
        gt_depth_img_copy[gt_depth_img_copy > 10] = 10.0
        cv2.imwrite("disp_img_{}.png".format(self._stereo_method),
                    disp_img.astype(np.uint8))
        cv2.imwrite("depth_img_{}.png".format(self._stereo_method),
                    255 * depth_img_copy / 10.)
        cv2.imwrite("gt_depth_img_{}.png".format(self._stereo_method),
                    255 * gt_depth_img_copy / 10.)
        # cv2.imwrite("gt_depth_img.png", 255 * gt_depth_img / 10.)
        # cv2.imwrite("error_img.png", 255 * error_img / 10.)
        cv2.imwrite("rgb_image1_{}.png".format(self._stereo_method),
                    rgb_image1)
        cv2.imwrite("rgb_image2_{}.png".format(self._stereo_method),
                    rgb_image2)
        # from matplotlib import pyplot as plt
        # plt.figure()
        # plt.hist(disparity_img.flatten())
        # plt.figure()
        # plt.hist(depth_img.flatten())
        # plt.show()
        # return None
        # dsize = (64, 64)
        # Resize to the configured output resolution; nearest-neighbor
        # avoids interpolating across depth discontinuities.
        dsize = (self._width, self._height)
        # depth_img = gt_depth_img
        depth_img = cv2.resize(depth_img,
                               dsize=dsize,
                               interpolation=cv2.INTER_NEAREST)
        return depth_img
 def get_vector_from_pose(self, pose):
     """Return the pose as a flat vector: location followed by quaternion.

     The pose's roll/pitch/yaw orientation is converted to a quaternion
     before concatenation.
     """
     quat = math_utils.convert_rpy_to_quat(pose.orientation_rpy())
     return np.concatenate([pose.location(), quat])
 def _get_transform_msg_rpy(self, location, orientation_rpy):
     """Build a transform message from a location and roll/pitch/yaw angles.

     Converts the rpy angles to a quaternion and delegates to
     _get_transform_msg_quat().
     """
     orientation_quat = math_utils.convert_rpy_to_quat(orientation_rpy)
     return self._get_transform_msg_quat(location, orientation_quat)