def set_animation(scene: bpy.types.Scene, fps: int = 24, frame_start: int = 1,
                  frame_end: int = 48, frame_current: int = 1) -> None:
    """Set the given scene's frame rate and animation frame range."""
    scene.render.fps = fps
    scene.frame_start = frame_start
    scene.frame_end = frame_end
    scene.frame_current = frame_current
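# A minimal usage sketch (not part of the original module), assuming `bpy` and a
# module-level `logger` are imported/configured elsewhere in this file and the call
# is made from Blender's Python console; the 30 fps / 120-frame values are
# illustrative only:
#
#   set_animation(bpy.context.scene, fps=30, frame_start=1, frame_end=120, frame_current=1)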
def init_scene(scene: bpy.types.Scene) -> None:
    """Initialize the given scene with default values.

    Arguments:
        scene {bpy.types.Scene} -- current scene
    """
    logger.info("Initializing scene: %s", scene.name)
    scene.render.engine = 'CYCLES'           # switch to path tracing render engine
    scene.unit_settings.system = 'METRIC'    # switch to metric units
    #
    # --- Render options
    if bpy.context.preferences.addons['cycles'].preferences.compute_device_type != 'NONE':
        # CUDA or OpenCL device available ('NONE' means no GPU compute backend)
        scene.cycles.device = 'GPU'
    else:
        # CPU only
        scene.cycles.device = 'CPU'
    # image size and aspect ratio
    scene.render.pixel_aspect_x = 1.0
    scene.render.pixel_aspect_y = 1.0
    scene.render.resolution_x = 1920          # width
    scene.render.resolution_y = 1080          # height
    scene.render.resolution_percentage = 100  # rendering scale
    scene.render.use_border = False
    scene.render.use_crop_to_border = False
    # image metadata
    scene.render.use_stamp_time = True
    scene.render.use_stamp_date = True
    scene.render.use_stamp_render_time = True
    scene.render.use_stamp_frame = True
    scene.render.use_stamp_scene = True
    scene.render.use_stamp_memory = True
    scene.render.use_stamp_camera = True
    scene.render.use_stamp_lens = True
    scene.render.use_stamp_filename = True
    # image format
    scene.render.image_settings.color_mode = 'RGB'
    scene.render.image_settings.file_format = 'JPEG'
    scene.render.use_file_extension = True
    scene.render.use_overwrite = True         # force overwrite, be careful!
    scene.render.image_settings.quality = 90  # image compression
    # post processing
    scene.render.use_compositing = True
    scene.render.use_sequencer = False
    # sampling
    scene.cycles.progressive = 'BRANCHED_PATH'
    scene.cycles.seed = 0
    scene.cycles.sample_clamp_direct = 0
    scene.cycles.sample_clamp_indirect = 0
    scene.cycles.light_sampling_threshold = 0.01
    scene.cycles.aa_samples = 32
    scene.cycles.preview_aa_samples = 4
    scene.cycles.sample_all_lights_direct = True
    scene.cycles.sample_all_lights_indirect = True
    scene.cycles.diffuse_samples = 3
    scene.cycles.glossy_samples = 2
    scene.cycles.transmission_samples = 2
    scene.cycles.ao_samples = 1
    scene.cycles.mesh_light_samples = 2
    scene.cycles.subsurface_samples = 2
    scene.cycles.volume_samples = 2
    scene.cycles.sampling_pattern = 'SOBOL'
    scene.cycles.use_layer_samples = 'USE'
    # light paths
    scene.cycles.transparent_max_bounces = 8
    scene.cycles.transparent_min_bounces = 8
    scene.cycles.use_transparent_shadows = True
    scene.cycles.max_bounces = 8
    scene.cycles.min_bounces = 3
    scene.cycles.diffuse_bounces = 2
    scene.cycles.glossy_bounces = 4
    scene.cycles.transmission_bounces = 8
    scene.cycles.volume_bounces = 2
    scene.cycles.caustics_reflective = False
    scene.cycles.caustics_refractive = False
    scene.cycles.blur_glossy = 0.00
    # performance
    scene.render.threads_mode = 'AUTO'
    scene.cycles.debug_bvh_type = 'DYNAMIC_BVH'
    scene.cycles.preview_start_resolution = 64
    scene.cycles.tile_order = 'HILBERT_SPIRAL'
    scene.render.tile_x = 64
    scene.render.tile_y = 64
    scene.cycles.use_progressive_refine = False
    scene.render.use_save_buffers = False
    scene.render.use_persistent_data = False
    scene.cycles.debug_use_spatial_splits = False
    scene.cycles.debug_use_hair_bvh = True
    scene.cycles.debug_bvh_time_steps = 0
    #
    # -- Color Management
    scene.view_settings.view_transform = "Standard"
    #
    # -- Animation options
    scene.frame_start = 1
    scene.frame_end = 1
    scene.frame_step = 1
    scene.frame_current = 1
    #
    # -- World options
    world = scene.world
    if world is None:
        world = bpy.data.worlds.new("World")
        world.use_sky_paper = True
        scene.world = world
    # noise reduction
    world.cycles.sample_as_light = True
    world.cycles.sample_map_resolution = 2048
    world.cycles.samples = 1
    world.cycles.max_bounces = 1024
    world.cycles.volume_sampling = 'EQUIANGULAR'
    world.cycles.volume_interpolation = 'LINEAR'
    world.cycles.homogeneous_volume = False
    logger.info("Scene initialized")
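# A minimal usage sketch (not part of the original module): initialize the active
# scene with the defaults above, then set its playback range with set_animation();
# the values below are illustrative only.
#
#   scene = bpy.context.scene
#   init_scene(scene)
#   set_animation(scene, fps=24, frame_start=1, frame_end=48)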
def evaluate(self, scene: bpy.types.Scene, target_pc_kdtree: KDTree,
             use_filtered_cloud: bool = True) -> Tuple[Dict, Dict]:
    """Evaluate the reconstructed 3D model.

    Run both the point cloud evaluation and the camera pose evaluation.

    Arguments:
        scene {bpy.types.Scene} -- ground truth scene
        target_pc_kdtree {KDTree} -- target/reference point cloud KDTree
        use_filtered_cloud {bool} -- if {True} the filtered point cloud is used for
                                     evaluation, the whole cloud otherwise

    Returns:
        dict -- point cloud evaluation results, see PointCloud.evaluate()
        dict -- camera pose evaluation results dictionary:
                'pos_mean' {float}: mean position difference
                'pos_std' {float}: position difference standard deviation
                'pos_min' {float}: minimum position difference
                'pos_max' {float}: maximum position difference
                'lookat_mean' {float}: mean camera look-at orientation difference
                'lookat_std' {float}: camera look-at orientation difference standard deviation
                'lookat_min' {float}: minimum camera look-at orientation difference
                'lookat_max' {float}: maximum camera look-at orientation difference
                'rot_mean' {float}: mean camera orientation difference
                'rot_std' {float}: camera orientation difference standard deviation
                'rot_min' {float}: minimum camera orientation difference
                'rot_max' {float}: maximum camera orientation difference
                'camera_count' {float}: ground truth camera count
                'reconstructed_camera_count' {float}: reconstructed and evaluated camera count
                'reconstructed_camera_percent' {float}: percentage of reconstructed cameras
    """
    # point cloud evaluation
    pc_result = self.point_cloud.evaluate(target_pc_kdtree, use_filtered_cloud)
    #
    # camera pose evaluation
    current_frame = scene.frame_current
    cam_results = [c.evaluate(scene) for c in self.cameras]
    scene.frame_current = current_frame   # FIXME this is awful ¯\_(ツ)_/¯
    cam_pos_dists = list(map(itemgetter('position_distance'), cam_results))
    cam_lookat_diffs = list(map(itemgetter('lookat_difference_deg'), cam_results))
    cam_rot_diffs = list(map(itemgetter('rotation_difference_deg'), cam_results))
    #
    gt_camera_count = (scene.frame_end - scene.frame_start + 1) // scene.frame_step
    pos_mean = mean(cam_pos_dists)
    lookat_mean = mean(cam_lookat_diffs)
    rot_mean = mean(cam_rot_diffs)
    cam_result = {
        "pos_mean": pos_mean,
        "pos_std": stdev(cam_pos_dists, pos_mean) if len(cam_pos_dists) > 1 else 0.,
        "pos_min": min(cam_pos_dists),
        "pos_max": max(cam_pos_dists),
        "lookat_mean": lookat_mean,
        "lookat_std": stdev(cam_lookat_diffs, lookat_mean) if len(cam_lookat_diffs) > 1 else 0.,
        "lookat_min": min(cam_lookat_diffs),
        "lookat_max": max(cam_lookat_diffs),
        "rot_mean": rot_mean,
        "rot_std": stdev(cam_rot_diffs, rot_mean) if len(cam_rot_diffs) > 1 else 0.,
        "rot_min": min(cam_rot_diffs),
        "rot_max": max(cam_rot_diffs),
        "camera_count": gt_camera_count,
        "reconstructed_camera_count": len(self.cameras),
        "reconstructed_camera_percent": len(self.cameras) / gt_camera_count
    }
    #
    return pc_result, cam_result
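# A hedged usage sketch (not part of the original class): `model` stands for an instance
# of the class that owns evaluate(), point_cloud and cameras, and `gt_kdtree` for a
# mathutils.kdtree.KDTree built over the ground-truth point cloud; both names are
# hypothetical placeholders.
#
#   pc_result, cam_result = model.evaluate(bpy.context.scene, gt_kdtree,
#                                          use_filtered_cloud=True)
#   logger.info("Mean camera position error: %.3f", cam_result["pos_mean"])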