""" Visual tests of modelrenderer """ from deeptracking.data.modelrenderer import ModelRenderer, InitOpenGL from deeptracking.utils.uniform_sphere_sampler import UniformSphereSampler from deeptracking.utils.camera import Camera import cv2 import numpy as np if __name__ == '__main__': camera = Camera.load_from_json("../data/camera.json") window = InitOpenGL(camera.width, camera.height) renderers = [] sampler = UniformSphereSampler(0.5, 0.7) dict = {} id = 0 renderer = ModelRenderer("../data/test.ply", "../../data/shaders", camera, window) renderer.load_ambiant_occlusion_map("../data/test_ao.ply") for sample in sampler: rgb, depth = renderer.render(sample.transpose()) cv2.imshow("rgb", rgb[:, :, ::-1]) cv2.imshow("depth", (depth / np.max(depth) * 255).astype(np.uint8)) cv2.waitKey()
# --- Continuation of a configuration block parsed from `data` (defined earlier in the file) ---
SPHERE_MAX_RADIUS = float(data["sphere_max_radius"])
IMAGE_SIZE = (int(data["image_size"]), int(data["image_size"]))  # square output images
PRELOAD = data["preload"] == "True"  # config stores booleans as strings
SATURATION_THRESHOLD = int(data["saturation_threshold"])

# Create the output directory if needed (parent must already exist: mkdir, not makedirs).
if not os.path.exists(OUTPUT_PATH):
    os.mkdir(OUTPUT_PATH)

# Load the real dataset and reuse its camera calibration for the output dataset.
real_dataset = Dataset(REAL_PATH)
real_dataset.load()
camera = Camera.load_from_json(real_dataset.path)
real_dataset.camera = camera
output_dataset = Dataset(OUTPUT_PATH, frame_class=data["save_type"])
output_dataset.camera = camera

# OpenGL context and renderer sized to the real camera's resolution.
window_size = (real_dataset.camera.width, real_dataset.camera.height)
window = InitOpenGL(*window_size)
model = MODELS[0]  # NOTE(review): only the first model is rendered here — confirm intended
vpRender = ModelRenderer(model["model_path"], SHADER_PATH, real_dataset.camera, window, window_size)
vpRender.load_ambiant_occlusion_map(model["ambiant_occlusion_model"])
OBJECT_WIDTH = int(model["object_width"])

# Metadata describing the generated dataset; all values are stored as strings.
metadata = {}
metadata["translation_range"] = str(TRANSLATION_RANGE)
metadata["rotation_range"] = str(ROTATION_RANGE)
metadata["image_size"] = str(IMAGE_SIZE[0])
metadata["save_type"] = data["save_type"]
metadata["object_width"] = {}
# Record the object width of every configured model (rebinds `model`).
for model in MODELS:
    metadata["object_width"][model["name"]] = str(model["object_width"])
def setup_renderer(self, model_3d_path, model_3d_ao_path, shader_path):
    """Create the OpenGL window and model renderer for this instance.

    The ambient occlusion map is optional: pass None for
    *model_3d_ao_path* to use the renderer without one.
    """
    gl_window = InitOpenGL(*self.image_size)
    self.renderer = ModelRenderer(model_3d_path, shader_path, self.camera,
                                  gl_window, self.image_size)
    if model_3d_ao_path is None:
        return
    self.renderer.load_ambiant_occlusion_map(model_3d_ao_path)
def setup_renderer(self, model_3d_path, model_3d_ao_path, shader_path):
    """Create the OpenGL window and model renderer for this instance.

    Parameters
    ----------
    model_3d_path : path to the 3D model file.
    model_3d_ao_path : path to the precomputed ambient occlusion model,
        or None to skip loading one.
    shader_path : directory containing the renderer's shaders.
    """
    window = InitOpenGL(self.camera.width, self.camera.height)
    self.renderer = ModelRenderer(model_3d_path, shader_path, self.camera, window)
    # Guard against a missing AO map instead of crashing on None — consistent
    # with the image_size-based setup_renderer variant elsewhere in the project.
    if model_3d_ao_path is not None:
        self.renderer.load_ambiant_occlusion_map(model_3d_ao_path)
# --- Continuation of a configuration block parsed from `data` (defined earlier in the file) ---
SPHERE_MIN_RADIUS = float(data["sphere_min_radius"])
SPHERE_MAX_RADIUS = float(data["sphere_max_radius"])
IMAGE_SIZE = (int(data["image_size"]), int(data["image_size"]))  # square output images
PRELOAD = data["preload"] == "True"  # config stores booleans as strings
SATURATION_THRESHOLD = int(data["saturation_threshold"])

# Create the output directory if needed (parent must already exist: mkdir, not makedirs).
if not os.path.exists(OUTPUT_PATH):
    os.mkdir(OUTPUT_PATH)

# Load the real dataset and reuse its camera calibration for the output dataset.
real_dataset = Dataset(REAL_PATH)
real_dataset.load()
camera = Camera.load_from_json(real_dataset.path)
real_dataset.camera = camera
output_dataset = Dataset(OUTPUT_PATH, frame_class=data["save_type"])
output_dataset.camera = camera

# OpenGL context and renderer sized to the real camera's resolution.
window = InitOpenGL(real_dataset.camera.width, real_dataset.camera.height)
model = MODELS[0]  # NOTE(review): only the first model is rendered here — confirm intended
vpRender = ModelRenderer(model["model_path"], SHADER_PATH, real_dataset.camera, window)
vpRender.load_ambiant_occlusion_map(model["ambiant_occlusion_model"])
OBJECT_WIDTH = int(model["object_width"])

# Metadata describing the generated dataset; all values are stored as strings.
metadata = {}
metadata["translation_range"] = str(TRANSLATION_RANGE)
metadata["rotation_range"] = str(ROTATION_RANGE)
metadata["image_size"] = str(IMAGE_SIZE[0])
metadata["save_type"] = data["save_type"]
metadata["object_width"] = {}
# Record the object width of every configured model (rebinds `model`).
for model in MODELS:
    metadata["object_width"][model["name"]] = str(model["object_width"])
metadata["min_radius"] = str(SPHERE_MIN_RADIUS)