Example #1
def get_train_scene_list(config):
  """Function to get the list of scenes.

  Args:
    config: experiment config.

  Returns:
    scene_path_list: list of scenes.
  """

  if config.dataset.name == "ff_epipolar":
    corrupted_and_test_list = [
        "howardzhou_010_internal_drawing_vase", "howardzhou_059_narcissus",
        "howardzhou_087_yellow_chain_links",
        "howardzhou_089_whilte_bmw_x3_front", "howardzhou_085_sweet_onions",
        "qq18", "qq33", "data2_fernvlsb", "data2_hugetrike", "data2_trexsanta",
        "data3_orchid", "data5_leafscene", "data5_lotr", "data5_redflower"
    ]
    scene_path_list = file_utils.listdir(config.dataset.ff_base_dir)
    scene_path_list = list(set(scene_path_list) - set(corrupted_and_test_list))

  elif config.dataset.name == "dtu":
    with file_utils.open_file(
        os.path.join(config.dataset.dtu_base_dir, "configs", "lists",
                     "dtu_train_all.txt")) as f:
      scene_path_list = [
          line.rstrip().decode("utf-8") for line in f.readlines()
      ]
  elif config.dataset.name == "blender_rot":
    scene_path_list = ["lego"]
  else:
    raise ValueError("Unknown dataset: {}".format(config.dataset.name))

  return scene_path_list
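

# Usage sketch (illustrative, not part of the original code): a hypothetical
# ml_collections config exercising only the "blender_rot" branch, which needs
# no filesystem access.
import ml_collections

demo_config = ml_collections.ConfigDict({"dataset": {"name": "blender_rot"}})
assert get_train_scene_list(demo_config) == ["lego"]
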
    def _load_images(self, imgdir, w, h):
        """Function to load all images.

    Args:
      imgdir: Location of images.
      w: image width.
      h: image height.

    Returns:
      images: Loaded images.
    """
        def imread(fs):
            if fs.endswith("png"):
                with file_utils.open_file(fs) as f:
                    return imageio.imread(f, ignoregamma=True)
            else:
                with file_utils.open_file(fs) as f:
                    return imageio.imread(f)

        def load_single_image(f):
            return cv2.resize(imread(f)[Ellipsis, :3], dsize=(w, h))

        if not file_utils.file_exists(imgdir):
            raise ValueError("Image folder {} doesn't exist.".format(imgdir))

        imgfiles = [
            path.join(imgdir, f) for f in sorted(file_utils.listdir(imgdir))
            if f.endswith("JPG") or f.endswith("jpg") or f.endswith("png")
        ]

        images = [load_single_image(f) for f in imgfiles]
        images = np.stack(images, axis=-1)
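        # Stacking on the last axis gives shape (h, w, 3, num_images);
        # _load_renderings moves that axis to the front later.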
        return images
    def _load_renderings(self, args):
        """Load images and camera information."""

        self.cam_transform = np.array([[1, 0, 0, 0], [0, -1, 0, 0],
                                       [0, 0, -1, 0], [0, 0, 0, 1]])
        #-------------------------------------------
        # Load images.
        #-------------------------------------------
        basedir = path.join(args.dataset.eval_ff_dir, self.scene)
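        # Read the first image only to determine the native resolution.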
        img0 = [
            os.path.join(basedir, "images", f) for f in sorted(
                file_utils.listdir(os.path.join(basedir, "images")))
            if f.endswith("JPG") or f.endswith("jpg") or f.endswith("png")
        ][0]
        with file_utils.open_file(img0) as f:
            sh = imageio.imread(f).shape
        expected_ratio = (args.dataset.eval_ff_image_height /
                          args.dataset.eval_ff_image_width)
        if sh[0] / sh[1] != expected_ratio:
            raise ValueError("Unexpected image height/width ratio.")

        factor = sh[0] / args.dataset.eval_ff_image_height

        # Prefer the "images_4" (4x-downsampled) directory if it exists;
        # otherwise fall back to the full-resolution "images" directory.
        sfx = "_4"
        imgdir = os.path.join(basedir, "images" + sfx)
        if not file_utils.file_exists(imgdir):
            imgdir = os.path.join(basedir, "images")
            if not file_utils.file_exists(imgdir):
                raise ValueError("{} does not exist".format(imgdir))

        images = self._load_images(imgdir, args.dataset.eval_ff_image_width,
                                   args.dataset.eval_ff_image_height)

        #-------------------------------------------
        # Load poses and bds.
        #-------------------------------------------
        with file_utils.open_file(path.join(basedir, "poses_bounds.npy"),
                                  "rb") as fp:
            poses_arr = np.load(fp)

        # Get the intrinsic matrix
        with file_utils.open_file(path.join(basedir, "hwf_cxcy.npy"),
                                  "rb") as fp:
            self.intrinsic_arr = np.load(fp)

        # Update the intrinsic matrix to account for resizing.
        self.intrinsic_arr = self.intrinsic_arr * 1. / factor

        # poses_arr holds, for each image, a flattened 3x4 pose matrix [R t]
        # followed by 2 depth bounds, so it has shape (num_images, 14)
        # {3x4 + 2}.
        poses = poses_arr[:, :-2].reshape([-1, 3, 4]).transpose([1, 2, 0])
        bds = poses_arr[:, -2:].transpose([1, 0])
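        # poses: (3, 4, num_images); bds: (2, num_images).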

        # Convert R matrix from the form [down right back] to [right up back]
        poses = np.concatenate(
            [poses[:, 1:2, :], -poses[:, 0:1, :], poses[:, 2:, :]], 1)
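        # i.e. columns reorder from (down, right, back, t) to
        # (right, -down, back, t), where -down == up.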

        # Transpose such that the first dimension is number of images
        images = np.moveaxis(images, -1, 0)
        poses = np.moveaxis(poses, -1, 0).astype(np.float32)
        bds = np.moveaxis(bds, -1, 0).astype(np.float32)

        # Rescale scene depths: either normalize by the far bound, or scale by
        # 1 / (0.75 * near bound).
        if args.dataset.normalize:
            scale = 1. / bds.max()
        else:
            scale = 1. / (bds.min() * .75)

        poses[:, :3, 3] *= scale
        bds *= scale
        # Keep a recentered copy without the camera-axis transform; it is used
        # below to generate the spiral render path for the test split.
        poses_copy = pose_utils.recenter_poses(poses, None)
        poses = pose_utils.recenter_poses(poses, self.cam_transform)

        # Get the min and max depth of the scene
        self.min_depth = np.array([bds.min()])
        self.max_depth = np.array([bds.max()])
        # Use this to set the near and far plane
        args.model.near = self.min_depth.item()
        args.model.far = self.max_depth.item()

        if self.split == "test":
            self.render_poses = pose_utils.generate_spiral_poses(
                poses_copy, bds, self.cam_transform)

        # Select the split.
        if args.eval.mvsn_style:
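            # Load the precomputed per-scene train/test image ids from pairs.npz.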
            with file_utils.open_file(
                    os.path.join(os.path.dirname(basedir), "pairs.npz")) as f:
                img_ids = np.load(f)
                i_test = img_ids["{}_{}".format(os.path.basename(basedir),
                                                "test")]
                i_train = img_ids["{}_{}".format(os.path.basename(basedir),
                                                 "train")]
        else:
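            # Hold out every llffhold-th image for testing; train on the rest.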
            i_test = np.arange(images.shape[0])[::args.dataset.llffhold]
            i_train = np.array([
                i for i in np.arange(int(images.shape[0])) if i not in i_test
            ])

        if self.split == "train":
            indices = i_train
        else:
            indices = i_test
        images = images[indices]
        poses = poses[indices]

        self.images = images
        self.camtoworlds = poses[:, :3, :4]

        # intrinsic_arr stores H, W, fx, fy, cx, cy, one row each.
        self.focal = self.intrinsic_arr[2][0]
        self.h, self.w = images.shape[1:3]
        self.resolution = self.h * self.w

        if args.dataset.render_path and self.split == "test":
            self.n_examples = self.render_poses.shape[0]
        else:
            self.n_examples = images.shape[0]

        _, _, fx, fy, cx, cy = self.intrinsic_arr[:, 0]
        self.intrinsic_matrix = np.array([
            [fx, 0, cx, 0],
            [0, fy, cy, 0],
            [0, 0, 1, 0],
        ]).astype(np.float32)
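

# Projection sketch (illustrative, not part of the original code): how a 3x4
# intrinsic matrix of the form built above maps a camera-space point to pixel
# coordinates. The focal lengths and principal point are made-up values.
import numpy as np

fx, fy, cx, cy = 500.0, 500.0, 200.0, 150.0
intrinsic = np.array([
    [fx, 0, cx, 0],
    [0, fy, cy, 0],
    [0, 0, 1, 0],
], dtype=np.float32)

point_cam = np.array([0.2, -0.1, 2.0, 1.0])  # homogeneous camera-space point
u, v, w = intrinsic @ point_cam
pixel_xy = (u / w, v / w)  # projected pixel coordinates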