Example #1
    def _get_model_points(self, dataset_name):
        """convert to label based keys."""
        if dataset_name in self.model_points:
            return self.model_points[dataset_name]

        dset_meta = MetadataCatalog.get(dataset_name)
        ref_key = dset_meta.ref_key
        data_ref = ref.__dict__[ref_key]
        objs = dset_meta.objs
        cfg = self.cfg

        cur_model_points = {}
        num = np.inf
        for i, obj_name in enumerate(objs):
            obj_id = data_ref.obj2id[obj_name]
            model_path = osp.join(data_ref.model_dir, f"obj_{obj_id:06d}.ply")
            model = inout.load_ply(model_path,
                                   vertex_scale=data_ref.vertex_scale)
            cur_model_points[i] = pts = model["pts"]
            if pts.shape[0] < num:
                num = pts.shape[0]

        num = min(num, cfg.MODEL.CDPN.PNP_NET.NUM_PM_POINTS)
        for i in range(len(cur_model_points)):
            keep_idx = np.arange(num)
            np.random.shuffle(keep_idx)  # NOTE: keeps the first `num` vertices in shuffled order, not a random subset of all vertices
            cur_model_points[i] = cur_model_points[i][keep_idx, :]

        self.model_points[dataset_name] = cur_model_points
        return self.model_points[dataset_name]
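If a uniform random subset over all vertices is what is actually intended (rather than the first `num` vertices in shuffled order), a minimal sketch of the fix; `sample_points` is a hypothetical helper, not part of GDR-Net:

import numpy as np

def sample_points(pts, num):
    # draw `num` distinct indices from ALL vertices, not just the first `num`
    keep_idx = np.random.choice(pts.shape[0], size=min(num, pts.shape[0]), replace=False)
    return pts[keep_idx]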
Example #2
def load_meshes_sixd(obj_files, vertex_tmp_store_folder, recalculate_normals=False):
    """Load SIXD/BOP PLY meshes, caching the per-mesh vertex attributes in an
    .npy file named after an MD5 hash of the input paths and options."""
    hashed_file_name = (
        hashlib.md5(("".join(obj_files) + "load_meshes_sixd" + str(recalculate_normals)).encode("utf-8")).hexdigest()
        + ".npy"
    )

    out_file = os.path.join(vertex_tmp_store_folder, hashed_file_name)
    if os.path.exists(out_file):
        return np.load(out_file, allow_pickle=True)
    else:

        attributes = []
        for model_path in tqdm(obj_files):
            model = inout.load_ply(model_path)
            vertices = np.array(model["pts"]).astype(np.float32)
            if recalculate_normals:
                normals = calc_normals(vertices)
            else:
                normals = np.array(model["normals"]).astype(np.float32)
            faces = np.array(model["faces"]).astype(np.uint32)
            if "colors" in model:
                colors = np.array(model["colors"]).astype(np.uint32)
                attributes.append((vertices, normals, colors, faces))
            else:
                attributes.append((vertices, normals, faces))
        os.makedirs(vertex_tmp_store_folder, exist_ok=True)
        np.save(out_file, attributes)  # ragged list of tuples -> pickled object array (hence allow_pickle=True on load)
        return attributes
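A minimal usage sketch of the cached loader above; the paths are illustrative placeholders:

obj_files = [f"models/obj_{i:06d}.ply" for i in range(1, 4)]  # hypothetical paths
attrs = load_meshes_sixd(obj_files, vertex_tmp_store_folder=".cache/meshes")
vertices, normals = attrs[0][0], attrs[0][1]
faces = attrs[0][-1]  # a colors array sits between normals and faces when the PLY has colors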
Example #3
    def models(self):
        """Load models into a list."""
        cache_path = osp.join(self.cache_dir,
                              "models_{}.pkl".format("_".join(self.objs)))
        if osp.exists(cache_path) and self.use_cache:
            # dprint("{}: load cached object models from {}".format(self.name, cache_path))
            return mmcv.load(cache_path)

        models = []
        for obj_name in self.objs:
            model = inout.load_ply(
                osp.join(self.models_root,
                         f"obj_{ref.lm_full.obj2id[obj_name]:06d}.ply"),
                vertex_scale=self.scale_to_meter,
            )
            # NOTE: bbox3d_and_center is computed from the raw (non-centered)
            # vertices; for BOP models this is fine since they are already centered
            model["bbox3d_and_center"] = misc.get_bbox3d_and_center(
                model["pts"])

            models.append(model)
        logger.info("cache models to {}".format(cache_path))
        mmcv.mkdir_or_exist(osp.dirname(cache_path))
        mmcv.dump(models, cache_path, protocol=4)
        return models
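The cache above round-trips through mmcv.dump/mmcv.load, which infer the file format from the extension; a quick sketch (the path is illustrative):

import mmcv

mmcv.dump({"a": 1}, "/tmp/example_cache.pkl", protocol=4)  # .pkl -> pickle handler
assert mmcv.load("/tmp/example_cache.pkl") == {"a": 1}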
Example #4
    def _get_extents(self, dataset_name):
        """label based keys."""
        if dataset_name in self.extents:
            return self.extents[dataset_name]

        dset_meta = MetadataCatalog.get(dataset_name)
        try:
            ref_key = dset_meta.ref_key
        except AttributeError:
            # FIXME: for some reason, in distributed training, this need to be re-registered
            register_datasets([dataset_name])
            dset_meta = MetadataCatalog.get(dataset_name)
            ref_key = dset_meta.ref_key

        data_ref = ref.__dict__[ref_key]
        objs = dset_meta.objs
        cfg = self.cfg

        cur_extents = {}
        for i, obj_name in enumerate(objs):
            obj_id = data_ref.obj2id[obj_name]
            model_path = osp.join(data_ref.model_dir, f"obj_{obj_id:06d}.ply")
            model = inout.load_ply(model_path, vertex_scale=data_ref.vertex_scale)
            pts = model["pts"]
            xmin, xmax = np.amin(pts[:, 0]), np.amax(pts[:, 0])
            ymin, ymax = np.amin(pts[:, 1]), np.amax(pts[:, 1])
            zmin, zmax = np.amin(pts[:, 2]), np.amax(pts[:, 2])
            size_x = xmax - xmin
            size_y = ymax - ymin
            size_z = zmax - zmin
            cur_extents[i] = np.array([size_x, size_y, size_z], dtype="float32")

        self.extents[dataset_name] = cur_extents
        return self.extents[dataset_name]
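The per-axis min/max arithmetic above collapses to NumPy's peak-to-peak helper; an equivalent sketch with a stand-in point cloud:

import numpy as np

pts = np.random.rand(1000, 3).astype(np.float32)  # stand-in point cloud
extents = np.ptp(pts, axis=0).astype("float32")   # per-axis max - min, i.e. (size_x, size_y, size_z)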
Example #5
def calc_texture_uv_emb_proj(uv_model_path_or_model,
                             R,
                             T,
                             K,
                             height=480,
                             width=640):
    """calculate uv map emb via projection
        it seems to be better not to use expand
        the models are generated by blender, where texture_u, texture_v are provided
    """
    if isinstance(uv_model_path_or_model, str):
        model = load_ply(uv_model_path_or_model)
    else:
        model = uv_model_path_or_model

    points = model["pts"]
    uv_gb = model["texture_uv"]
    points_2d, z = points_to_2D(points, R, T, K)
    image_points = np.round(points_2d).astype(np.int32)
    # image_points = (points_2d + 0.5).astype(np.int32)
    uv_ProjEmb = np.zeros((height, width, 2)).astype(np.float32)
    depth = np.zeros((height, width, 1)).astype(np.float32)
    # z-buffer: for each pixel, keep the UV of the closest projected point
    for i, (x, y) in enumerate(image_points):
        if x >= width or y >= height or x < 0 or y < 0:
            continue
        if depth[y, x, 0] == 0 or z[i] < depth[y, x, 0]:
            depth[y, x, 0] = z[i]
            uv_ProjEmb[y, x] = uv_gb[i]
    # print("uv_ProjEmb: min {} max {}".format(uv_ProjEmb.min(), uv_ProjEmb.max()))
    return uv_ProjEmb
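A minimal usage sketch for the helper above; the intrinsics and pose are illustrative placeholders, obj_000001.ply is a hypothetical path, and the PLY must carry texture_u/texture_v:

import numpy as np

K = np.array([[572.4, 0, 325.3], [0, 573.6, 242.0], [0, 0, 1]])  # toy pinhole intrinsics
R = np.eye(3)
T = np.array([0.0, 0.0, 0.5])  # half a meter in front of the camera

uv_map = calc_texture_uv_emb_proj("obj_000001.ply", R, T, K, height=480, width=640)
print(uv_map.shape)  # (480, 640, 2)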
Example #6
def test_draw_3d_bbox():
    cur_dir = os.path.abspath(os.path.dirname(__file__))
    K = np.array([[572.4114, 0, 325.2611], [0, 573.57043, 242.04899],
                  [0, 0, 1]])  # LM6d
    model_dir = os.path.join(cur_dir, "../../datasets/BOP_DATASETS/lm/models")
    class_name = "ape"
    cls_idx = 1
    model_path = os.path.join(model_dir, "obj_{:06d}.ply".format(cls_idx))
    pts_3d = load_ply(model_path, vertex_scale=0.001)['pts']
    corners_3d = get_3D_corners(pts_3d)
    image_path = os.path.join(
        cur_dir, "../../datasets/BOP_DATASETS/lm/test/000001/rgb/000011.png")
    image = cv2.imread(image_path, cv2.IMREAD_COLOR)

    gt_dict = mmcv.load(
        os.path.join(
            cur_dir,
            '../../datasets/BOP_DATASETS/lm/test/000001/scene_gt.json'))
    R = np.array(gt_dict['11'][0]['cam_R_m2c']).reshape(3, 3)
    t = np.array(gt_dict['11'][0]['cam_t_m2c']) / 1000.0

    corners_2d, _ = points_to_2D(corners_3d, R, t, K)
    # print(corners_2d.shape)
    image_3dbb = draw_projected_box3d(image, corners_2d, thickness=1)
    cv2.imshow("image with 3d bbox", image_3dbb)
    cv2.waitKey()
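cv2.imshow needs a display; in a headless run, writing the image out is a drop-in alternative (the filename is arbitrary):

cv2.imwrite("image_with_3d_bbox.png", image_3dbb)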
Example #7
File: misc.py  Project: hz-ants/GDR-Net
def calc_uv_emb_proj(uv_model_path_or_model, R, T, K, height=480, width=640, expand=False):
    """calculate uv map emb via projection it seems to be better not to use
    expand."""
    if isinstance(uv_model_path_or_model, str):
        model = load_ply(uv_model_path_or_model)
        if expand:
            model = ply_vtx_color_expand(model)
    else:
        model = uv_model_path_or_model
    if expand:
        points = model["pts_expand"]
        uv_gb = model["colors_expand"][:, [1, 2]]  # u, v are stored in the G, B color channels
    else:
        points = model["pts"]
        uv_gb = model["colors"][:, [1, 2]]  # u, v are stored in the G, B color channels
    points_2d, z = points_to_2D(points, R, T, K)
    image_points = np.round(points_2d).astype(np.int32)
    # image_points = (points_2d + 0.5).astype(np.int32)
    uv_ProjEmb = np.zeros((height, width, 2)).astype(np.float32)
    depth = np.zeros((height, width, 1)).astype(np.float32)
    # z-buffer: for each pixel, keep the UV of the closest projected point
    for i, (x, y) in enumerate(image_points):
        if x >= width or y >= height or x < 0 or y < 0:
            continue
        if depth[y, x, 0] == 0 or z[i] < depth[y, x, 0]:
            depth[y, x, 0] = z[i]
            uv_ProjEmb[y, x] = uv_gb[i]
    # print("uv_ProjEmb: min {} max {}".format(uv_ProjEmb.min(), uv_ProjEmb.max()))
    return uv_ProjEmb
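For reference, a vectorized sketch of the same z-buffering; it sorts points far-to-near and relies on NumPy's last-write-wins behavior for repeated fancy-index assignment, so the nearest point per pixel ends up in the map. Variable names match the function above:

order = np.argsort(-z)                      # farthest first, nearest last
xs, ys = image_points[order].T
ok = (xs >= 0) & (xs < width) & (ys >= 0) & (ys < height)
uv_vec = np.zeros((height, width, 2), np.float32)
uv_vec[ys[ok], xs[ok]] = uv_gb[order][ok]   # nearest point overwrites farther ones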
Example #8
def main():
    vertex_scale = 0.001
    fps_dict = {}
    for obj_id in tqdm(id2obj):
        print(obj_id)
        model_path = osp.join(model_dir, f"obj_{obj_id:06d}.ply")
        model = inout.load_ply(model_path, vertex_scale=vertex_scale)
        fps_dict[str(obj_id)] = {}
        # farthest point sampling at several sizes, each with the center prepended
        for num_fps in [4, 8, 12, 16, 20, 32, 64, 128, 256]:
            fps_dict[str(obj_id)][f"fps{num_fps}_and_center"] = get_fps_and_center(
                model["pts"], num_fps=num_fps, init_center=True)

    save_path = osp.join(model_dir, "fps_points.pkl")
    mmcv.dump(fps_dict, save_path)
    print(f"saved to {save_path}")
Example #9
    def __init__(self, cfg, dataset_name, distributed, output_dir, train_objs=None):
        self.cfg = cfg
        self._distributed = distributed
        self._output_dir = output_dir

        self._cpu_device = torch.device("cpu")
        self._logger = logging.getLogger(__name__)

        # train_objs is set when the test objects are only a subset of the train objects
        self.train_objs = train_objs

        self._metadata = MetadataCatalog.get(dataset_name)
        self.data_ref = ref.__dict__[self._metadata.ref_key]
        self.obj_names = self._metadata.objs
        self.obj_ids = [self.data_ref.obj2id[obj_name] for obj_name in self.obj_names]
        # with contextlib.redirect_stdout(io.StringIO()):
        #     self._coco_api = COCO(self._metadata.json_file)
        self.model_paths = [
            osp.join(self.data_ref.model_dir, "obj_{:06d}.ply".format(obj_id)) for obj_id in self.obj_ids
        ]
        self.models_3d = [
            inout.load_ply(model_path, vertex_scale=self.data_ref.vertex_scale) for model_path in self.model_paths
        ]

        # eval cached
        if cfg.VAL.EVAL_CACHED or cfg.VAL.EVAL_PRINT_ONLY:
            eval_cached_results(self.cfg, self._output_dir, obj_ids=self.obj_ids)
Example #10
    def __init__(self,
                 cfg,
                 dataset_name,
                 distributed,
                 output_dir,
                 train_objs=None):
        self.cfg = cfg
        self._distributed = distributed
        self._output_dir = output_dir

        self._cpu_device = torch.device("cpu")
        self._logger = logging.getLogger(__name__)

        # train_objs is set when the test objects are only a subset of the train objects
        self.train_objs = train_objs

        self._metadata = MetadataCatalog.get(dataset_name)
        self.data_ref = ref.__dict__[self._metadata.ref_key]
        self.obj_names = self._metadata.objs
        self.obj_ids = [
            self.data_ref.obj2id[obj_name] for obj_name in self.obj_names
        ]
        # with contextlib.redirect_stdout(io.StringIO()):
        #     self._coco_api = COCO(self._metadata.json_file)
        self.model_paths = [
            osp.join(self.data_ref.model_dir, "obj_{:06d}.ply".format(obj_id))
            for obj_id in self.obj_ids
        ]
        self.models_3d = [
            inout.load_ply(model_path, vertex_scale=self.data_ref.vertex_scale)
            for model_path in self.model_paths
        ]
        if cfg.DEBUG:
            from lib.render_vispy.model3d import load_models
            from lib.render_vispy.renderer import Renderer

            self.ren = Renderer(size=(self.data_ref.width,
                                      self.data_ref.height),
                                cam=self.data_ref.camera_matrix)
            self.ren_models = load_models(
                model_paths=self.data_ref.model_paths,
                scale_to_meter=0.001,
                cache_dir=".cache",
                texture_paths=self.data_ref.texture_paths,
                center=False,
                use_cache=True,
            )

        # eval cached
        if cfg.VAL.EVAL_CACHED or cfg.VAL.EVAL_PRINT_ONLY:
            eval_cached_results(self.cfg,
                                self._output_dir,
                                obj_ids=self.obj_ids)
Example #11
    def __init__(self,
                 cfg,
                 dataset_name,
                 distributed,
                 output_dir,
                 train_objs=None):
        self.cfg = cfg
        self._distributed = distributed
        self._output_dir = output_dir
        mmcv.mkdir_or_exist(output_dir)

        self._cpu_device = torch.device("cpu")
        self._logger = logging.getLogger(__name__)

        # train_objs is set when the test objects are only a subset of the train objects
        self.train_objs = train_objs

        self.dataset_name = dataset_name
        self._metadata = MetadataCatalog.get(dataset_name)
        self.data_ref = ref.__dict__[self._metadata.ref_key]
        self.obj_names = self._metadata.objs
        self.obj_ids = [
            self.data_ref.obj2id[obj_name] for obj_name in self.obj_names
        ]
        # with contextlib.redirect_stdout(io.StringIO()):
        #     self._coco_api = COCO(self._metadata.json_file)
        self.model_paths = [
            osp.join(self.data_ref.model_eval_dir,
                     "obj_{:06d}.ply".format(obj_id))
            for obj_id in self.obj_ids
        ]
        self.diameters = [
            self.data_ref.diameters[self.data_ref.objects.index(obj_name)]
            for obj_name in self.obj_names
        ]
        self.models_3d = [
            inout.load_ply(model_path, vertex_scale=self.data_ref.vertex_scale)
            for model_path in self.model_paths
        ]

        self.eval_precision = cfg.VAL.get("EVAL_PRECISION", False)
        self._logger.info(f"eval precision: {self.eval_precision}")
        # eval cached
        self.use_cache = False
        if cfg.VAL.EVAL_CACHED or cfg.VAL.EVAL_PRINT_ONLY:
            self.use_cache = True
            if self.eval_precision:
                self._eval_predictions_precision()
            else:
                self._eval_predictions()  # recall
            exit(0)
Example #12
def test_qrot_points():
    from lib.pysixd.inout import load_ply

    data_root = osp.normpath(osp.join(cur_dir, "../../datasets/BOP_DATASETS/lm/"))
    models_cad_files = [osp.join(data_root, "models/obj_{:06d}.ply".format(i)) for i in range(1, 15 + 1)]
    obj_id = 0
    points = load_ply(models_cad_files[obj_id])["pts"]
    axis = np.array([1, 2, 0])
    rot = axangle2mat(axis, -pi / 3)
    quat = mat2quat(rot)
    points_q = qrot_points_th(torch.from_numpy(quat), torch.from_numpy(points))
    # N = points.shape[0]
    # points_q = qrot_torch(torch.from_numpy(quat).expand(N, 4), torch.from_numpy(points))
    points_r = rot.dot(points.T).T
    assert np.allclose(points_q.numpy(), points_r)
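The test above compares quaternion rotation against the matrix form; assuming transforms3d (the library providing axangle2mat/mat2quat here), a quick round-trip sanity check:

import numpy as np
from transforms3d.quaternions import mat2quat, quat2mat

q = mat2quat(rot)
assert np.allclose(quat2mat(q), rot)  # quat -> mat recovers the original rotation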
Example #13
    def __init__(self,
                 cfg,
                 dataset_name,
                 distributed,
                 output_dir,
                 train_objs=None):
        self.cfg = cfg
        self._distributed = distributed
        self._output_dir = output_dir
        mmcv.mkdir_or_exist(output_dir)

        self._cpu_device = torch.device("cpu")
        self._logger = logging.getLogger(__name__)

        # train_objs is set when the test objects are only a subset of the train objects
        self.train_objs = train_objs

        self.dataset_name = dataset_name
        self._metadata = MetadataCatalog.get(dataset_name)
        self.data_ref = ref.__dict__[self._metadata.ref_key]
        self.obj_names = self._metadata.objs
        self.obj_ids = [
            self.data_ref.obj2id[obj_name] for obj_name in self.obj_names
        ]
        # with contextlib.redirect_stdout(io.StringIO()):
        #     self._coco_api = COCO(self._metadata.json_file)
        self.model_paths = [
            osp.join(self.data_ref.model_eval_dir,
                     "obj_{:06d}.ply".format(obj_id))
            for obj_id in self.obj_ids
        ]
        self.diameters = [
            self.data_ref.diameters[self.data_ref.objects.index(obj_name)]
            for obj_name in self.obj_names
        ]
        self.models_3d = [
            inout.load_ply(model_path, vertex_scale=self.data_ref.vertex_scale)
            for model_path in self.model_paths
        ]

        if cfg.DEBUG:
            from lib.render_vispy.model3d import load_models
            from lib.render_vispy.renderer import Renderer

            self.get_gts()

            self.kpts_3d = [
                misc.get_bbox3d_and_center(m["pts"]) for m in self.models_3d
            ]
            self.kpts_axis_3d = [
                misc.get_axis3d_and_center(m["pts"], scale=0.5)
                for m in self.models_3d
            ]

            self.ren = Renderer(size=(self.data_ref.width,
                                      self.data_ref.height),
                                cam=self.data_ref.camera_matrix)
            self.ren_models = load_models(
                model_paths=self.data_ref.model_paths,
                scale_to_meter=0.001,
                cache_dir=".cache",
                texture_paths=self.data_ref.texture_paths,
                center=False,
                use_cache=True,
            )

        self.eval_precision = cfg.VAL.get("EVAL_PRECISION", False)
        self._logger.info(f"eval precision: {self.eval_precision}")
        # eval cached
        self.use_cache = False
        if cfg.VAL.EVAL_CACHED or cfg.VAL.EVAL_PRINT_ONLY:
            self.use_cache = True
            if self.eval_precision:
                self._eval_predictions_precision()
            else:
                self._eval_predictions()  # recall
            exit(0)
Example #14
    def add_object(self, obj_id, model_path, **kwargs):
        """See base class."""
        # Color of the object model (the original color saved with the object model
        # will be used if None).
        surf_color = None
        if "surf_color" in kwargs:
            surf_color = kwargs["surf_color"]

        # Load the object model.
        model = inout.load_ply(model_path)
        self.models[obj_id] = model

        # Calculate the 3D bounding box of the model (will be used to set the near
        # and far clipping plane).
        bb = misc.calc_3d_bbox(model["pts"][:, 0], model["pts"][:, 1],
                               model["pts"][:, 2])
        self.model_bbox_corners[obj_id] = np.array([
            [bb[0], bb[1], bb[2]],
            [bb[0], bb[1], bb[2] + bb[5]],
            [bb[0], bb[1] + bb[4], bb[2]],
            [bb[0], bb[1] + bb[4], bb[2] + bb[5]],
            [bb[0] + bb[3], bb[1], bb[2]],
            [bb[0] + bb[3], bb[1], bb[2] + bb[5]],
            [bb[0] + bb[3], bb[1] + bb[4], bb[2]],
            [bb[0] + bb[3], bb[1] + bb[4], bb[2] + bb[5]],
        ])

        # Set texture/color of vertices.
        self.model_textures[obj_id] = None

        # Use the specified uniform surface color.
        if surf_color is not None:
            colors = np.tile(
                list(surf_color) + [1.0], [model["pts"].shape[0], 1])

            # Set UV texture coordinates to dummy values.
            texture_uv = np.zeros((model["pts"].shape[0], 2), np.float32)

        # Use the model texture.
        elif "texture_file" in self.models[obj_id].keys():
            model_texture_path = os.path.join(
                os.path.dirname(model_path),
                self.models[obj_id]["texture_file"])
            model_texture = inout.load_im(model_texture_path)

            # Normalize the texture image.
            if model_texture.max() > 1.0:
                model_texture = model_texture.astype(np.float32) / 255.0
            model_texture = np.flipud(model_texture)
            self.model_textures[obj_id] = model_texture

            # UV texture coordinates.
            texture_uv = model["texture_uv"]

            # Set the per-vertex color to dummy values.
            colors = np.zeros((model["pts"].shape[0], 3), np.float32)

        # Use the original model color.
        elif "colors" in model.keys():
            assert model["pts"].shape[0] == model["colors"].shape[0]
            colors = model["colors"]
            if colors.max() > 1.0:
                colors /= 255.0  # Color values are expected in range [0, 1].

            # Set UV texture coordinates to dummy values.
            texture_uv = np.zeros((model["pts"].shape[0], 2), np.float32)

        # Set the model color to gray.
        else:
            colors = np.ones((model["pts"].shape[0], 3), np.float32) * 0.5

            # Set UV texture coordinates to dummy values.
            texture_uv = np.zeros((model["pts"].shape[0], 2), np.float32)

        # Set the vertex data.
        if self.mode == "depth":
            vertices_type = [("a_position", np.float32, 3),
                             ("a_color", np.float32, colors.shape[1])]
            vertices = np.array(list(zip(model["pts"], colors)), vertices_type)
        else:
            if self.shading == "flat":
                vertices_type = [
                    ("a_position", np.float32, 3),
                    ("a_color", np.float32, colors.shape[1]),
                    ("a_texcoord", np.float32, 2),
                ]
                vertices = np.array(
                    list(zip(model["pts"], colors, texture_uv)), vertices_type)
            elif self.shading == "phong":
                vertices_type = [
                    ("a_position", np.float32, 3),
                    ("a_normal", np.float32, 3),
                    ("a_color", np.float32, colors.shape[1]),
                    ("a_texcoord", np.float32, 2),
                ]
                vertices = np.array(
                    list(
                        zip(model["pts"], model["normals"], colors,
                            texture_uv)), vertices_type)
            else:
                raise ValueError("Unknown shading type.")

        # Create vertex and index buffer for the loaded object model.
        self.vertex_buffers[obj_id] = vertices.view(gloo.VertexBuffer)
        self.index_buffers[obj_id] = model["faces"].flatten().astype(
            np.uint32).view(gloo.IndexBuffer)

        # Set shader for the selected shading.
        if self.shading == "flat":
            rgb_fragment_code = _rgb_fragment_flat_code
        elif self.shading == "phong":
            rgb_fragment_code = _rgb_fragment_phong_code
        else:
            raise ValueError("Unknown shading type.")

        # Prepare the RGB OpenGL program.
        rgb_program = gloo.Program(_rgb_vertex_code, rgb_fragment_code)
        rgb_program.bind(self.vertex_buffers[obj_id])
        if self.model_textures[obj_id] is not None:
            rgb_program["u_use_texture"] = int(True)
            rgb_program["u_texture"] = self.model_textures[obj_id]
        else:
            rgb_program["u_use_texture"] = int(False)
            rgb_program["u_texture"] = np.zeros((1, 1, 4), np.float32)
        self.rgb_programs[obj_id] = rgb_program

        # Prepare the depth OpenGL program.
        depth_program = gloo.Program(_depth_vertex_code, _depth_fragment_code)
        depth_program.bind(self.vertex_buffers[obj_id])
        self.depth_programs[obj_id] = depth_program
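From the corner construction above, calc_3d_bbox evidently returns (x_min, y_min, z_min, size_x, size_y, size_z); the eight corners are all min/min+size combinations, as this equivalent sketch with toy values shows:

import itertools
import numpy as np

bb = (0.0, 0.0, 0.0, 1.0, 2.0, 3.0)  # toy (x, y, z, size_x, size_y, size_z)
corners = np.array([
    [bb[0] + dx * bb[3], bb[1] + dy * bb[4], bb[2] + dz * bb[5]]
    for dx, dy, dz in itertools.product([0, 1], repeat=3)
])  # same corner ordering as model_bbox_corners above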
Example #15
        "add",
        "adi",
        "mssd",
        "mspd",
        "proj",
        "projS",
        "ABSadd",
        "ABSadi",
        "ABSad",
        "AUCadd",
        "AUCadi",
        "AUCad",
    ]:
        misc.log("Loading object models...")
        for obj_id in dp_model["obj_ids"]:
            models[obj_id] = inout.load_ply(dp_model["model_tpath"].format(obj_id=obj_id))

    # Load models info.
    models_info = None
    if p["error_type"] in ["ad", "add", "adi", "vsd", "mssd", "mspd", "cus", "reteS", "reS", "teS", "projS"]:
        models_info = inout.load_json(dp_model["models_info_path"], keys_to_int=True)

    # Get sets of symmetry transformations for the object models.
    models_sym = None
    if p["error_type"] in ["mssd", "mspd", "reteS", "reS", "teS", "projS"]:
        models_sym = {}
        for obj_id in dp_model["obj_ids"]:
            models_sym[obj_id] = misc.get_symmetry_transformations(models_info[obj_id], p["max_sym_disc_step"])

    # Initialize a renderer.
    ren = None