def load_scene(config_path):
    """Load scene data described by *config_path* and build a configured NNScene.

    Returns:
        A ``(scene, scene_data)`` tuple: the ready-to-render scene object and
        the raw scene-data dict it was built from.
    """
    data = load_scene_data(config_path)
    scene = NNScene()
    setup_scene(scene, data)
    return scene, data
def load(self):
    """Build a fresh NNScene from ``self.scene_data`` and configure it.

    If point perturbation is enabled and no random source exists yet, create
    a FastRand that produces per-point 2D jitter offsets.
    """
    self.scene = NNScene()
    setup_scene(self.scene, self.scene_data, use_mesh=self.use_mesh)

    if self.perturb_points and self.fastrand is None:
        print(f'SETTING PERTURB POINTS: {self.perturb_points}')

        # map uniform [0, 1) samples to a centered range scaled by perturb_points
        def centered_jitter(samples):
            return self.perturb_points * (samples - 0.5)

        num_points = self.scene_data['pointcloud']['xyz'].shape[0]
        self.fastrand = FastRand((num_points, 2), centered_jitter, 10)
def __init__(self, args):
    """Set up the interactive viewer.

    Loads the scene config, rebuilds the projection matrix for the requested
    viewport, picks the initial camera pose, creates the GL window (and the
    GL context), instantiates the offscreen renderer and the optional neural
    rendering model, then wires up window event handlers and UI state.

    Args:
        args: parsed CLI namespace; fields used include config, viewport,
            keep_fov, init_view, origin_view, rmode, use_mesh, use_texture,
            checkpoint, clear_color, supersampling, temp_avg, pca,
            light_position, replay_camera.
    """
    with open(args.config) as f:
        # FIX: bare yaml.load(f) is deprecated and unsafe on untrusted input.
        # FullLoader keeps the legacy behavior for ordinary config files.
        _config = yaml.load(f, Loader=yaml.FullLoader)

    # support two types of configs
    # 1 type - config with scene data
    # 2 type - config with model checkpoints and path to scene data config
    if 'scene' in _config:  # 1 type
        self.scene_data = load_scene_data(_config['scene'])
        net_ckpt = _config.get('net_ckpt')
        texture_ckpt = _config.get('texture_ckpt')
    else:  # 2 type
        self.scene_data = load_scene_data(args.config)
        net_ckpt = self.scene_data['config'].get('net_ckpt')
        texture_ckpt = self.scene_data['config'].get('texture_ckpt')

    self.viewport_size = args.viewport if args.viewport else self.scene_data['config']['viewport_size']
    self.viewport_size = fix_viewport_size(self.viewport_size)
    print('new viewport size ', self.viewport_size)

    # crop/resize viewport: rebuild the projection matrix for the new size,
    # preferring camera intrinsics when available
    if self.scene_data['intrinsic_matrix'] is not None:
        K_src = self.scene_data['intrinsic_matrix']
        old_size = self.scene_data['config']['viewport_size']
        sx = self.viewport_size[0] / old_size[0]
        sy = self.viewport_size[1] / old_size[1]
        K_crop = rescale_K(K_src, sx, sy, keep_fov=args.keep_fov)
        self.scene_data['proj_matrix'] = get_proj_matrix(K_crop, self.viewport_size)
    elif self.scene_data['proj_matrix'] is not None:
        new_proj_matrix = crop_proj_matrix(
            self.scene_data['proj_matrix'],
            *self.scene_data['config']['viewport_size'],
            *self.viewport_size)
        self.scene_data['proj_matrix'] = new_proj_matrix
    else:
        raise Exception('no intrinsics are provided')

    # initial camera pose: by camera label, from a matrix file on disk,
    # or the first recorded view as a fallback
    init_view = None
    if args.init_view:
        # FIX: the label must be looked up in camera_labels; the membership
        # test used to be against view_matrix, which can never match a label
        if args.init_view in self.scene_data['camera_labels']:
            idx = self.scene_data['camera_labels'].index(args.init_view)
            init_view = self.scene_data['view_matrix'][idx]
        elif os.path.exists(args.init_view):
            init_view = np.loadtxt(args.init_view)
    if init_view is None:
        # FIX: an unmatched --init_view previously left init_view unbound,
        # raising NameError at the Trackball call below
        init_view = self.scene_data['view_matrix'][0]

    if args.origin_view:
        # look straight down from above and re-center the model on its mass center
        top_view = np.eye(4)
        top_view[2, 3] = 20.
        init_view = top_view

        if np.allclose(self.scene_data['model3d_origin'], np.eye(4)):
            print('Setting origin as mass center')
            origin = np.eye(4)
            # 90th percentile is used as a robust "mass center" estimate
            origin[:3, 3] = -np.percentile(self.scene_data['pointcloud']['xyz'], 90, 0)
            self.scene_data['model3d_origin'] = origin
    else:
        # force identity origin
        self.scene_data['model3d_origin'] = np.eye(4)

    self.trackball = Trackball(init_view, self.viewport_size, 1, rotation_mode=args.rmode)

    args.use_mesh = args.use_mesh or _config.get('use_mesh') or args.use_texture

    # this also creates GL context necessary for setting up shaders
    self.window = app.Window(width=self.viewport_size[0], height=self.viewport_size[1],
                             visible=True, fullscreen=False)
    self.window.set_size(*self.viewport_size)

    if args.checkpoint:
        # derive the net checkpoint path from the texture checkpoint filename
        assert 'Texture' in args.checkpoint, 'Set path to descriptors checkpoint'
        match = re.search('epoch_[0-9]+', args.checkpoint)
        if match is None:
            # FIX: fail with a clear message instead of AttributeError on .group()
            raise ValueError(f'cannot infer epoch from checkpoint path {args.checkpoint}')
        ep = match.group().split('_')[-1]
        net_name = f'UNet_stage_0_epoch_{ep}_net.pth'
        net_ckpt = os.path.join(*args.checkpoint.split('/')[:-1], net_name)
        texture_ckpt = args.checkpoint

    need_neural_render = net_ckpt is not None

    # neural rendering draws into a torch tensor; plain OpenGL otherwise
    self.out_buffer_location = 'torch' if need_neural_render else 'opengl'

    # setup screen image plane
    self.off_render = OffscreenRender(viewport_size=self.viewport_size,
                                      out_buffer_location=self.out_buffer_location,
                                      clear_color=args.clear_color)
    if self.out_buffer_location == 'torch':
        # CUDA/GL shared texture so the torch output can be blitted to screen
        screen_tex, self.screen_tex_cuda = create_shared_texture(
            np.zeros((self.viewport_size[1], self.viewport_size[0], 4), np.float32))
    else:
        screen_tex, self.screen_tex_cuda = self.off_render.color_buf, None
    self.screen_program = get_screen_program(screen_tex)

    self.scene = NNScene()

    if need_neural_render:
        print(f'Net checkpoint: {net_ckpt}')
        print(f'Texture checkpoint: {texture_ckpt}')
        self.model = OGL(self.scene, self.scene_data, self.viewport_size,
                         net_ckpt, texture_ckpt,
                         out_buffer_location=self.out_buffer_location,
                         supersampling=args.supersampling,
                         temporal_average=args.temp_avg)
    else:
        self.model = None

    if args.pca:
        # recolor the point cloud with a PCA projection of the learned texture
        assert texture_ckpt
        tex = torch.load(texture_ckpt, map_location='cpu')['state_dict']['texture_']
        print('PCA...')
        pca = pca_color(tex)
        # robust [0, 1] normalization via the 10th/90th percentiles
        pca = (pca - np.percentile(pca, 10)) / (np.percentile(pca, 90) - np.percentile(pca, 10))
        pca = np.clip(pca, 0, 1)
        self.scene_data['pointcloud']['rgb'] = np.clip(pca, 0, 1)

    setup_scene(self.scene, self.scene_data, args.use_mesh, args.use_texture)
    if args.light_position is not None:
        self.scene.set_light_position(args.light_position)

    if args.replay_camera:
        self.camera_trajectory = load_camera_trajectory(args.replay_camera)
    else:
        self.camera_trajectory = None

    # wire up window event handlers
    self.window.attach(self.screen_program['transform'])
    self.window.push_handlers(on_init=self.on_init)
    self.window.push_handlers(on_close=self.on_close)
    self.window.push_handlers(on_draw=self.on_draw)
    self.window.push_handlers(on_resize=self.on_resize)
    self.window.push_handlers(on_key_press=self.on_key_press)
    self.window.push_handlers(on_mouse_press=self.on_mouse_press)
    self.window.push_handlers(on_mouse_drag=self.on_mouse_drag)
    self.window.push_handlers(on_mouse_release=self.on_mouse_release)
    self.window.push_handlers(on_mouse_scroll=self.on_mouse_scroll)

    # render/UI state
    self.mode0 = NNScene.MODE_COLOR
    self.mode1 = 0
    self.point_size = 1
    self.point_mode = False
    self.draw_points = not args.use_mesh
    self.flat_color = True
    self.neural_render = need_neural_render
    self.show_pca = False

    self.n_frame = 0
    self.t_elapsed = 0
    self.last_frame = None
    self.last_view_matrix = None
    self.last_gt_image = None

    self.mouse_pressed = False

    # global stop flag polled by the render loop
    app.stopped = threading.Event()

    self.args = args