def set_animation(scene: bpy.types.Scene, fps: int = 24, frame_start: int = 1, frame_end: int = 48, frame_current: int = 1) -> None:
    """Configure the scene's frame rate and its animation frame range.

    Arguments:
        scene {bpy.types.Scene} -- scene to configure
        fps {int} -- render frame rate (default: {24})
        frame_start {int} -- first frame of the animation (default: {1})
        frame_end {int} -- last frame of the animation (default: {48})
        frame_current {int} -- frame to make current (default: {1})
    """
    scene.render.fps = fps
    scene.frame_start, scene.frame_end = frame_start, frame_end
    scene.frame_current = frame_current
def evaluate(self, scene: bpy.types.Scene) -> Dict:
    """Given a scene evaluate the camera pose w.r.t. the ground truth.

    Arguments:
        scene {scene} -- scene, includes the render camera that will be used as ground truth

    Returns:
        Dict -- evaluation result dictionary containing:
                'position_distance' {float}: position distance (measure unit depends on the scene's unit)
                'lookat_difference_rad' {float}: non-oriented angle between lookAt vectors, in radians
                'lookat_difference_deg' {float}: non-oriented angle between lookAt vectors, in degrees
                'rotation_difference_rad' {float}: angle to align reconstructed camera to gt, in radians
                'rotation_difference_deg' {float}: angle to align reconstructed camera to gt, in degrees
    """
    # get ground truth
    scene.frame_set(self.frame_number)
    gt_matrix_world = scene.camera.matrix_world
    gt_pos = gt_matrix_world.to_translation()
    gt_rotation = gt_matrix_world.to_quaternion()
    gt_lookat = get_camera_lookat(scene.camera)
    #
    # --- position evaluation
    pos_distance = euclidean_distance(gt_pos, self.position)
    logger.debug("Camera position distance: %f (GT=%s, recon=%s)", pos_distance, gt_pos, self.position)
    #
    # --- look-at evaluation
    # compute the non-oriented angle between look-at vectors (gt and reconstructed)
    cos_theta = (gt_lookat @ self.look_at) / (gt_lookat.length * self.look_at.length)
    # FIX: guard acos() against rounding drift just outside [-1, 1] on BOTH sides
    # (previously only the positive side was clamped, so a cosine of -1.0000001
    # raised ValueError). Values far outside the tolerance still raise, which
    # signals a genuine data error rather than floating-point rounding.
    if 1.0 < cos_theta < 1.1:
        cos_theta = 1.0
    elif -1.1 < cos_theta < -1.0:
        cos_theta = -1.0
    theta_rad = acos(cos_theta)
    theta_deg = degrees(theta_rad)
    logger.debug("Camera look-at: %f deg, %f rad. (GT=%s, recon=%s)",
                 theta_deg, theta_rad, gt_lookat, self.look_at)
    #
    # --- rotation evaluation
    # compute rotation angle to align reconstructed camera to gt
    rot_diff = self.rotation.conjugated() @ gt_rotation
    rot_diff_rad = rot_diff.angle
    rot_diff_deg = degrees(rot_diff_rad)
    if rot_diff_deg > 180.0:   # angle in range 0-360, equal to +0-180 or -0-180
        rot_diff_deg = 360.0 - rot_diff_deg
    # NOTE(review): rot_diff_rad is NOT folded to [0, pi] like rot_diff_deg,
    # so the two returned values can disagree for angles > 180deg — confirm
    # whether callers rely on the raw radians before changing it.
    logger.debug("Camera rotation difference: %f deg (GT=%s, recon=%s)",
                 rot_diff_deg, gt_rotation, self.rotation)
    #
    results = {
        "position_distance": pos_distance,
        "lookat_difference_rad": theta_rad,
        "lookat_difference_deg": theta_deg,
        "rotation_difference_rad": rot_diff_rad,
        "rotation_difference_deg": rot_diff_deg
    }
    return results
def __init__(self, scene: bpy.types.Scene):
    """Snapshot the scene's master collection and make sure the scene carries a Mixer UUID.

    Arguments:
        scene {bpy.types.Scene} -- scene to snapshot
    """
    root = scene.collection
    self.children = [child.name_full for child in root.children]
    self.objects = [obj.name_full for obj in root.objects]
    if not scene.mixer_uuid:
        # lazily assign a UUID the first time this scene is seen
        scene.mixer_uuid = str(uuid4())
    self.mixer_uuid = scene.mixer_uuid
def camera_detect_nearest_intersection(view_layer: bpy.types.ViewLayer,
                                       camera: bpy.types.Object,
                                       scene: bpy.types.Scene) -> Vector:
    """Detect the nearest intersection point in the camera look-at direction.

    Arguments:
        view_layer {bpy.types.ViewLayer} -- view layer
        camera {bpy.types.Object} -- camera object (FIX: was annotated bpy.types.Camera,
                                     but the code reads `camera.location`/`camera.name`,
                                     which belong to the object, not the camera data-block)
        scene {bpy.types.Scene} -- render scene

    Returns:
        Vector -- point of intersection between camera look-at and scene objects.
                  If no intersection found returns camera location. TODO better return infinite?
    """
    camera_lookat = get_camera_lookat(camera)
    if bpy.app.version >= BlenderVersion.V2_91:
        # ray_cast takes a depsgraph instead of a view layer since 2.91,
        # see https://wiki.blender.org/wiki/Reference/Release_Notes/2.91/Python_API
        view_layer = view_layer.depsgraph
    result, location, *_ = scene.ray_cast(view_layer, camera.location, camera_lookat)
    logger.debug(
        "Nearest intersection for camera %s (location=%s, look_at=%s): found=%s, position=%s",
        camera.name, camera.location, camera_lookat, result, location)
    if result:
        return location
    else:
        return camera.location
def set_render_outlines(scene: bpy.types.Scene, line_thickness: float) -> None:
    """set up a scene for rendering outlines using freestyle"""
    scene.use_nodes = True
    scene.render.use_freestyle = True
    scene.render.line_thickness = line_thickness
    # NOTE(review): assumes the default English view-layer name — confirm for renamed layers
    scene.view_layers['View Layer'].freestyle_settings.as_render_pass = True
    nodes = scene.node_tree.nodes
    # route the freestyle pass straight into the composite output
    blender.add_link(scene,
                     (nodes['Render Layers'], 'Freestyle'),
                     (nodes['Composite'], 'Image'))
def set_render_settings(scene: bpy.types.Scene):
    """Apply the project's default render settings to *scene*.

    Sets a 256x256 transparent PNG output, Cycles with light denoised sampling,
    Filmic color management and the default world.

    Arguments:
        scene {bpy.types.Scene} -- scene to configure
    """
    scene.render.resolution_x = 256
    scene.render.resolution_y = 256
    scene.render.film_transparent = True
    scene.render.image_settings.file_format = 'PNG'
    scene.render.image_settings.color_mode = 'RGBA'
    scene.render.engine = 'CYCLES'
    scene.cycles.samples = 10
    scene.cycles.use_square_samples = False
    scene.cycles.use_denoising = True
    try:
        scene.cycles.denoiser = 'OPENIMAGEDENOISE'
    except (AttributeError, TypeError):
        # FIX: narrowed from a bare `except:`. Older Blender builds either lack
        # the `denoiser` property (AttributeError) or reject the enum value
        # (TypeError); anything else should surface, not be swallowed.
        pass
    scene.use_nodes = False
    scene.view_settings.exposure = -3.7
    scene.view_settings.view_transform = 'Filmic'
    scene.view_settings.look = 'Medium High Contrast'
    scene.world = get_world()
def set_cycles_renderer(scene: bpy.types.Scene, camera_object: bpy.types.Object, num_samples: int, use_denoising: bool = True, use_motion_blur: bool = False, use_transparent_bg: bool = False) -> None:
    """Switch *scene* to the Cycles engine with PNG output.

    Arguments:
        scene {bpy.types.Scene} -- scene to configure
        camera_object {bpy.types.Object} -- camera used for rendering
        num_samples {int} -- Cycles path-tracing sample count
        use_denoising {bool} -- enable denoising on the first view layer (default: {True})
        use_motion_blur {bool} -- enable motion blur (default: {False})
        use_transparent_bg {bool} -- render with a transparent film (default: {False})
    """
    scene.camera = camera_object
    render = scene.render
    render.image_settings.file_format = 'PNG'
    render.engine = 'CYCLES'
    render.use_motion_blur = use_motion_blur
    render.film_transparent = use_transparent_bg
    scene.view_layers[0].cycles.use_denoising = use_denoising
    scene.cycles.samples = num_samples
def set_cycles_renderer(scene: bpy.types.Scene, camera_object: bpy.types.Object, num_samples: int, use_denoising: bool = True, use_motion_blur: bool = False, use_transparent_bg: bool = False, prefer_cuda_use: bool = True, use_adaptive_sampling: bool = False) -> None:
    """Switch *scene* to the Cycles engine with PNG output, optionally enabling CUDA GPUs.

    Arguments:
        scene {bpy.types.Scene} -- scene to configure
        camera_object {bpy.types.Object} -- camera used for rendering
        num_samples {int} -- Cycles path-tracing sample count
        use_denoising {bool} -- enable denoising on the first view layer (default: {True})
        use_motion_blur {bool} -- enable motion blur (default: {False})
        use_transparent_bg {bool} -- render with a transparent film (default: {False})
        prefer_cuda_use {bool} -- enable CUDA and all compute devices (default: {True})
        use_adaptive_sampling {bool} -- enable Cycles adaptive sampling (default: {False})
    """
    scene.camera = camera_object
    scene.render.image_settings.file_format = 'PNG'
    scene.render.engine = 'CYCLES'
    scene.render.use_motion_blur = use_motion_blur
    scene.render.film_transparent = use_transparent_bg
    scene.view_layers[0].cycles.use_denoising = use_denoising
    scene.cycles.use_adaptive_sampling = use_adaptive_sampling
    scene.cycles.samples = num_samples

    if prefer_cuda_use:
        # Enable GPU acceleration
        # Source - https://blender.stackexchange.com/a/196702
        bpy.context.scene.cycles.device = "GPU"
        cycles_prefs = bpy.context.preferences.addons["cycles"].preferences
        # Change the preference setting
        cycles_prefs.compute_device_type = "CUDA"
        # Call get_devices() to let Blender detects GPU device (if any)
        cycles_prefs.get_devices()
        # Let Blender use all available devices, include GPU and CPU
        for device in cycles_prefs.devices:
            device["use"] = 1
        # Display the devices to be used for rendering
        print("----")
        print("The following devices will be used for path tracing:")
        for device in cycles_prefs.devices:
            print("- {}".format(device["name"]))
        print("----")
def build_scene_composition(scene: bpy.types.Scene) -> None:
    """Build the compositor graph for *scene*:
    render layers -> vignette -> lens distortion -> color correction -> glare -> composite.
    """
    scene.use_nodes = True
    tree = scene.node_tree
    utils.clean_nodes(tree.nodes)

    render_layer_node = tree.nodes.new(type="CompositorNodeRLayers")

    vignette_node = utils.create_vignette_node(tree)
    vignette_node.inputs["Amount"].default_value = 0.70

    lens_distortion_node = tree.nodes.new(type="CompositorNodeLensdist")
    lens_distortion_node.inputs["Distort"].default_value = -0.050
    lens_distortion_node.inputs["Dispersion"].default_value = 0.080

    color_correction_node = tree.nodes.new(type="CompositorNodeColorCorrection")
    color_correction_node.master_saturation = 1.10
    color_correction_node.master_gain = 1.40

    glare_node = tree.nodes.new(type="CompositorNodeGlare")
    glare_node.glare_type = 'GHOSTS'
    glare_node.iterations = 2
    glare_node.quality = 'HIGH'

    composite_node = tree.nodes.new(type="CompositorNodeComposite")

    # connect the nodes' Image sockets in processing order
    chain = [render_layer_node, vignette_node, lens_distortion_node,
             color_correction_node, glare_node, composite_node]
    for upstream, downstream in zip(chain, chain[1:]):
        tree.links.new(upstream.outputs['Image'], downstream.inputs['Image'])

    utils.arrange_nodes(tree)
def build_scene_composition(scene: bpy.types.Scene, vignette: float = 0.20, dispersion: float = 0.050, gain: float = 1.10, saturation: float = 1.10) -> None:
    """Build the compositor graph for *scene*:
    render layers -> vignette -> lens distortion -> color correction -> split tone -> glare -> composite.

    Arguments:
        scene {bpy.types.Scene} -- scene to configure
        vignette {float} -- vignette amount (default: {0.20})
        dispersion {float} -- lens dispersion; distortion is derived as -0.40x this (default: {0.050})
        gain {float} -- color correction master gain (default: {1.10})
        saturation {float} -- color correction master saturation (default: {1.10})
    """
    scene.use_nodes = True
    tree = scene.node_tree
    clean_nodes(tree.nodes)

    render_layer_node = tree.nodes.new(type="CompositorNodeRLayers")

    vignette_node = create_vignette_node(tree)
    vignette_node.inputs["Amount"].default_value = vignette

    lens_distortion_node = tree.nodes.new(type="CompositorNodeLensdist")
    lens_distortion_node.inputs["Distort"].default_value = -dispersion * 0.40
    lens_distortion_node.inputs["Dispersion"].default_value = dispersion

    color_correction_node = tree.nodes.new(type="CompositorNodeColorCorrection")
    color_correction_node.master_saturation = saturation
    color_correction_node.master_gain = gain

    split_tone_node = create_split_tone_node(tree)

    glare_node = tree.nodes.new(type="CompositorNodeGlare")
    glare_node.glare_type = 'FOG_GLOW'
    glare_node.quality = 'HIGH'

    composite_node = tree.nodes.new(type="CompositorNodeComposite")

    # connect the nodes' Image sockets in processing order
    chain = [render_layer_node, vignette_node, lens_distortion_node,
             color_correction_node, split_tone_node, glare_node, composite_node]
    for upstream, downstream in zip(chain, chain[1:]):
        tree.links.new(upstream.outputs['Image'], downstream.inputs['Image'])

    arrange_nodes(tree)
def set_cycles_renderer(scene: bpy.types.Scene, resolution_percentage: int, output_file_path: str, camera_object: bpy.types.Object, num_samples: int, use_denoising: bool = True, use_motion_blur: bool = False, use_transparent_bg: bool = False) -> None:
    """Switch *scene* to the Cycles engine with PNG output, handling pre-2.80 API names.

    Arguments:
        scene {bpy.types.Scene} -- scene to configure
        resolution_percentage {int} -- rendering scale percentage
        output_file_path {str} -- render output file path
        camera_object {bpy.types.Object} -- camera used for rendering
        num_samples {int} -- Cycles path-tracing sample count
        use_denoising {bool} -- enable denoising on the first layer (default: {True})
        use_motion_blur {bool} -- enable motion blur (default: {False})
        use_transparent_bg {bool} -- render with a transparent film (default: {False})
    """
    scene.camera = camera_object
    render = scene.render
    render.image_settings.file_format = 'PNG'
    render.resolution_percentage = resolution_percentage
    render.engine = 'CYCLES'
    render.filepath = output_file_path
    render.use_motion_blur = use_motion_blur
    # transparency and denoising moved between 2.7x and 2.80
    if bpy.app.version >= (2, 80, 0):
        render.film_transparent = use_transparent_bg
        scene.view_layers[0].cycles.use_denoising = use_denoising
    else:
        scene.cycles.film_transparent = use_transparent_bg
        render.layers[0].cycles.use_denoising = use_denoising
    scene.cycles.samples = num_samples
def init_scene(scene: bpy.types.Scene) -> None:
    """Initialize the given scene with default values.

    Configures the render engine (Cycles, GPU when available), metric units,
    image size/format/metadata, sampling and light-path options, performance
    settings, color management, the animation frame range and the world.

    Arguments:
        scene {scene} -- current scene
    """
    logger.info("Initializing scene: %s", scene.name)
    scene.render.engine = 'CYCLES'  # switch to path tracing render engine
    scene.unit_settings.system = 'METRIC'  # switch to metric units
    # --- Render option
    # render on GPU when a compute device is configured in the Cycles add-on preferences
    if bpy.context.preferences.addons['cycles'].preferences.compute_device_type is not None:  # CUDA or OpenCL
        scene.cycles.device = 'GPU'
    else:  # CPU only
        scene.cycles.device = 'CPU'
    # images size and aspect ratio
    scene.render.pixel_aspect_x = 1.0
    scene.render.pixel_aspect_y = 1.0
    scene.render.resolution_x = 1920  # width
    scene.render.resolution_y = 1080  # height
    scene.render.resolution_percentage = 100  # rendering scale
    scene.render.use_border = False
    scene.render.use_crop_to_border = False
    # images metadata (stamped into the rendered files)
    scene.render.use_stamp_time = True
    scene.render.use_stamp_date = True
    scene.render.use_stamp_render_time = True
    scene.render.use_stamp_frame = True
    scene.render.use_stamp_scene = True
    scene.render.use_stamp_memory = True
    scene.render.use_stamp_camera = True
    scene.render.use_stamp_lens = True
    scene.render.use_stamp_filename = True
    # image format
    scene.render.image_settings.color_mode = 'RGB'
    scene.render.image_settings.file_format = 'JPEG'
    scene.render.use_file_extension = True
    scene.render.use_overwrite = True  # force overwrite, be careful!
    scene.render.image_settings.quality = 90  # image compression
    # post processing
    scene.render.use_compositing = True
    scene.render.use_sequencer = False
    # sampling
    scene.cycles.progressive = 'BRANCHED_PATH'
    scene.cycles.seed = 0
    scene.cycles.sample_clamp_direct = 0
    scene.cycles.sample_clamp_indirect = 0
    scene.cycles.light_sampling_threshold = 0.01
    scene.cycles.aa_samples = 32
    scene.cycles.preview_aa_samples = 4
    scene.cycles.sample_all_lights_direct = True
    scene.cycles.sample_all_lights_indirect = True
    scene.cycles.diffuse_samples = 3
    scene.cycles.glossy_samples = 2
    scene.cycles.transmission_samples = 2
    scene.cycles.ao_samples = 1
    scene.cycles.mesh_light_samples = 2
    scene.cycles.subsurface_samples = 2
    scene.cycles.volume_samples = 2
    scene.cycles.sampling_pattern = 'SOBOL'
    scene.cycles.use_layer_samples = 'USE'
    # light paths
    scene.cycles.transparent_max_bounces = 8
    scene.cycles.transparent_min_bounces = 8
    scene.cycles.use_transparent_shadows = True
    scene.cycles.max_bounces = 8
    scene.cycles.min_bounces = 3
    scene.cycles.diffuse_bounces = 2
    scene.cycles.glossy_bounces = 4
    scene.cycles.transmission_bounces = 8
    scene.cycles.volume_bounces = 2
    scene.cycles.caustics_reflective = False
    scene.cycles.caustics_refractive = False
    scene.cycles.blur_glossy = 0.00
    # performances
    scene.render.threads_mode = 'AUTO'
    scene.cycles.debug_bvh_type = 'DYNAMIC_BVH'
    scene.cycles.preview_start_resolution = 64
    scene.cycles.tile_order = 'HILBERT_SPIRAL'
    scene.render.tile_x = 64
    scene.render.tile_y = 64
    scene.cycles.use_progressive_refine = False
    scene.render.use_save_buffers = False
    scene.render.use_persistent_data = False
    scene.cycles.debug_use_spatial_splits = False
    scene.cycles.debug_use_hair_bvh = True
    scene.cycles.debug_bvh_time_steps = 0
    # -- Color Management
    scene.view_settings.view_transform = "Standard"
    # -- Animation options
    scene.frame_start = 1
    scene.frame_end = 1
    scene.frame_step = 1
    scene.frame_current = 1
    # -- World options
    world = scene.world
    if world is None:
        # create a default world data-block when the scene has none
        world = bpy.data.worlds.new("World")
    # NOTE(review): use_sky_paper is a legacy (pre-2.8 Blender Internal) world
    # property — confirm it still exists on the targeted Blender version
    world.use_sky_paper = True
    scene.world = world
    # noise reduction
    world.cycles.sample_as_light = True
    world.cycles.sample_map_resolution = 2048
    world.cycles.samples = 1
    world.cycles.max_bounces = 1024
    world.cycles.volume_sampling = 'EQUIANGULAR'
    world.cycles.volume_interpolation = 'LINEAR'
    world.cycles.homogeneous_volume = False
    logger.info("Scene initialized")
def render_complete_callback(scene: bpy.types.Scene) -> None:
    """Callback on frame rendered and saved to file.

    Updates the EXIF metadata of the rendered image (when the output format
    supports it) using ExifTool, then saves the camera pose ground truth entry.

    Arguments:
        scene {bpy.types.Scene} -- scene being rendered

    Raises:
        RuntimeError: if something goes wrong with ExifTool
    """
    logger.info("Rendering of frame %s completed.", scene.frame_current)
    scene.frame_set(scene.frame_current)  # update current frame to the rendered one
    #
    # --- update EXIF metadata
    ff = scene.render.image_settings.file_format
    if ff in SFMFLOW_OT_render_images._files_with_exif:
        logger.debug("Updating EXIF metadata")
        filepath = scene.render.frame_path(frame=scene.frame_current)
        user_preferences = bpy.context.preferences
        addon_user_preferences_name = (__name__)[:__name__.index('.')]
        addon_prefs = user_preferences.addons[addon_user_preferences_name].preferences  # type: AddonPreferences
        exiftool_path = addon_prefs.exiftool_path
        camera_data = scene.camera.data
        # compute 35mm focal length (crop factor = 43.27mm / sensor diagonal)
        fl = camera_data.lens
        fl35 = 43.27 / sqrt(camera_data.sensor_width**2 + camera_data.sensor_height**2) * fl
        res_percent = scene.render.resolution_percentage / 100.
        # build exiftool command
        exiftool_cmd = [
            exiftool_path,
            "-exif:FocalLength={} mm".format(fl),
            "-exif:FocalLengthIn35mmFormat={}".format(int(fl35)),
            "-exif:Model=blender{}".format(int(camera_data.sensor_width)),
            "-exif:FocalPlaneXResolution={}".format(camera_data.sensor_width),
            "-exif:FocalPlaneYResolution={}".format(camera_data.sensor_height),
            "-exif:FocalPlaneResolutionUnit#=4",  # millimeters
            "-exif:ExifImageWidth={}".format(floor(scene.render.resolution_x * res_percent)),
            "-exif:ExifImageHeight={}".format(floor(scene.render.resolution_y * res_percent)),
            "-exif:ExifVersion=0230",  # some pipelines do not work with newer versions
            "-overwrite_original",
            filepath
        ]
        logger.info("Running ExifTool: %s", ' '.join(exiftool_cmd))
        # run exiftool
        # FIX: initialize exit_code so the `finally` block never hits an
        # UnboundLocalError when run() raises an exception other than
        # TimeoutExpired (the generic handler previously left it unset).
        exit_code = -1
        try:
            exit_code = run(exiftool_cmd, timeout=5, check=False).returncode
        except TimeoutExpired:
            exit_code = -1
            logger.error("Timeout expired for EXIF metadata update!")
        except Exception as e:  # pylint: disable=broad-except
            logger.error("Exiftool execution exception: %s", e)  # FIX: removed stray ')' from message
        finally:
            if exit_code != 0:
                msg = "Failed to set EXIF metadata for rendered frame '{}'".format(filepath)
                logger.error(msg)
                raise RuntimeError(msg)
            else:
                logger.info("Metadata correctly set for frame '%s'", filepath)
    else:
        logger.debug("Skipping EXIF metadata update, not supported by %s format", ff)
    #
    # --- save camera pose ground truth
    SFMFLOW_OT_render_images._gt_writer.save_entry_for_current_frame()
    if scene.frame_current == scene.frame_end:
        SFMFLOW_OT_render_images._gt_writer.close()
def evaluate(self, scene: bpy.types.Scene, target_pc_kdtree: KDTree, use_filtered_cloud: bool = True) -> Tuple[Dict, Dict]:
    """Evaluate the reconstructed 3D model.
    Run both point cloud evaluation and camera poses evaluation.

    Arguments:
        scene {bpy.types.Scene} -- ground truth scene
        target_pc_kdtree {KDTree} -- target/reference point cloud KDTree
        use_filtered_cloud {bool} -- if {True} the filtered point cloud is used for evaluation,
                                     the whole cloud otherwise

    Returns:
        dict -- point cloud evaluation results, see PointCloud.evaluate()
        dict -- camera poses evaluation results dictionary with mean/std/min/max of the
                position ('pos_*'), look-at ('lookat_*') and rotation ('rot_*') differences,
                plus 'camera_count', 'reconstructed_camera_count' and
                'reconstructed_camera_percent'
    """
    # point cloud evaluation
    pc_result = self.point_cloud.evaluate(target_pc_kdtree, use_filtered_cloud)
    #
    # camera poses evaluation
    saved_frame = scene.frame_current
    per_camera = [camera.evaluate(scene) for camera in self.cameras]
    scene.frame_current = saved_frame  # FIXME this is awful ¯\_(ツ)_/¯
    pos_dists = [r['position_distance'] for r in per_camera]
    lookat_diffs = [r['lookat_difference_deg'] for r in per_camera]
    rot_diffs = [r['rotation_difference_deg'] for r in per_camera]
    #
    gt_camera_count = (scene.frame_end - scene.frame_start + 1) // scene.frame_step

    def _stats(values, prefix):
        # mean/std/min/max summary for one metric series
        avg = mean(values)
        return {
            prefix + "_mean": avg,
            prefix + "_std": stdev(values, avg) if len(values) > 1 else 0.,
            prefix + "_min": min(values),
            prefix + "_max": max(values),
        }

    cam_result = {}
    cam_result.update(_stats(pos_dists, "pos"))
    cam_result.update(_stats(lookat_diffs, "lookat"))
    cam_result.update(_stats(rot_diffs, "rot"))
    cam_result["camera_count"] = gt_camera_count
    cam_result["reconstructed_camera_count"] = len(self.cameras)
    cam_result["reconstructed_camera_percent"] = len(self.cameras) / gt_camera_count
    #
    return pc_result, cam_result