def sync_collection_objects(self, depsgraph, object_keys_to_export, material_override):
    """Export the depsgraph objects whose keys appear in *object_keys_to_export*.

    Objects already present in the RPR context only get their materials
    reassigned; unseen objects are fully exported.

    :param depsgraph: evaluated dependency graph to walk
    :param object_keys_to_export: collection of object keys that should be (re)exported
    :param material_override: view layer material override (may be None)
    :return: True if at least one object was exported or updated
    """
    exported_any = False
    frame = depsgraph.scene.frame_current

    for obj in self.depsgraph_objects(depsgraph):
        key = object.key(obj)
        if key not in object_keys_to_export:
            continue

        existing = self.rpr_context.objects.get(key, None)
        if existing:
            # already in the RPR scene - only refresh its materials
            assign_materials(self.rpr_context, existing, obj, material_override)
        else:
            # collection visibility info lives on the original (non-evaluated) object
            indirect_only = obj.original.indirect_only_get(view_layer=depsgraph.view_layer)
            object.sync(self.rpr_context, obj,
                        indirect_only=indirect_only,
                        material_override=material_override,
                        frame_current=frame)
        exported_any = True

    return exported_any
def sync_collection_objects(self, depsgraph, object_keys_to_export, material_override):
    """Export the depsgraph objects whose keys appear in *object_keys_to_export*.

    An object already known to the RPR context is made visible again and has
    its materials reassigned (clearing the material first when no override is
    active); an unknown object gets a full export.

    :param depsgraph: evaluated dependency graph to walk
    :param object_keys_to_export: collection of object keys that should be (re)exported
    :param material_override: view layer material override (may be None)
    :return: True if at least one object was exported or updated
    """
    synced_any = False

    for obj in self.depsgraph_objects(depsgraph):
        key = object.key(obj)
        if key not in object_keys_to_export:
            continue

        rpr_obj = self.rpr_context.objects.get(key, None)
        if not rpr_obj:
            # not exported yet - do a full sync;
            # visibility info is stored on the original object
            indirect_only = obj.original.indirect_only_get(view_layer=depsgraph.view_layer)
            object.sync(self.rpr_context, obj,
                        indirect_only=indirect_only,
                        material_override=material_override)
        else:
            rpr_obj.set_visibility(True)
            if not material_override:
                # drop the previously assigned override material
                rpr_obj.set_material(None)
            assign_materials(self.rpr_context, rpr_obj, obj, material_override)
        synced_any = True

    return synced_any
def update_material_on_scene_objects(self, mat, depsgraph):
    """Reapply *mat* on every mesh that uses it.

    When the view layer material override is the changed material, every
    depsgraph object is affected; otherwise only objects having *mat* in
    their material slots are updated.

    :param mat: the Blender material that changed
    :param depsgraph: evaluated dependency graph
    :return: True if any object was (re)synced
    """
    material_override = depsgraph.view_layer.material_override
    frame_current = depsgraph.scene.frame_current
    use_contour = depsgraph.scene.rpr.is_contour_used(is_final_engine=False)

    override_changed = material_override and material_override.name == mat.name
    if override_changed:
        active_mat = material_override
        objects = self.depsgraph_objects(depsgraph)
    else:
        active_mat = mat
        objects = tuple(obj for obj in self.depsgraph_objects(depsgraph)
                        if mat.name in obj.material_slots.keys())

    changed = False
    for obj in objects:
        surface = material.sync_update(self.rpr_context, active_mat, obj=obj)
        volume = material.sync_update(self.rpr_context, active_mat, 'Volume', obj=obj)
        displacement = material.sync_update(self.rpr_context, active_mat, 'Displacement', obj=obj)
        if not (surface or volume or displacement):
            continue

        # visibility info is stored on the original (non-evaluated) object
        indirect_only = obj.original.indirect_only_get(view_layer=depsgraph.view_layer)

        if object.key(obj) in self.rpr_context.objects:
            changed |= object.sync_update(self.rpr_context, obj, False, False,
                                          indirect_only=indirect_only,
                                          material_override=material_override,
                                          frame_current=frame_current,
                                          use_contour=use_contour)
        else:
            # object is not exported yet - full sync instead of an update
            object.sync(self.rpr_context, obj, indirect_only=indirect_only,
                        frame_current=frame_current, use_contour=use_contour)
            changed = True

    return changed
def update_material_on_scene_objects(self, mat, depsgraph):
    """Reapply *mat* on every mesh that uses it.

    When the view layer material override is the changed material, every
    depsgraph object is affected; otherwise only objects having *mat* in
    their material slots are updated.

    :param mat: the Blender material that changed
    :param depsgraph: evaluated dependency graph
    :return: True if any object was (re)synced
    """
    material_override = depsgraph.view_layer.material_override

    override_changed = material_override and material_override.name == mat.name
    if override_changed:
        active_mat = material_override
        objects = self.depsgraph_objects(depsgraph)
    else:
        active_mat = mat
        objects = tuple(obj for obj in self.depsgraph_objects(depsgraph)
                        if mat.name in obj.material_slots.keys())

    has_uv_map = material.has_uv_map_node(active_mat)

    changed = False
    for obj in objects:
        rpr_material, rpr_volume, rpr_displacement = \
            self.get_object_rpr_materials(obj, active_mat, has_uv_map)
        if not (rpr_material or rpr_volume or rpr_displacement):
            continue

        # visibility info is stored on the original (non-evaluated) object
        indirect_only = obj.original.indirect_only_get(view_layer=depsgraph.view_layer)

        if object.key(obj) in self.rpr_context.objects:
            changed |= object.sync_update(self.rpr_context, obj, False, False,
                                          indirect_only=indirect_only,
                                          material_override=material_override)
        else:
            # object is not exported yet - full sync instead of an update
            object.sync(self.rpr_context, obj, indirect_only=indirect_only)
            changed = True

    return changed
def sync_objects_collection(self, depsgraph) -> bool:
    """
    Removes objects which are not present in depsgraph anymore.
    Adds objects which are not present in rpr_context but existed in depsgraph.

    Also re-applies the view layer material override to unchanged objects
    when the view layer settings differ from the cached ones.

    :param depsgraph: evaluated dependency graph of the current scene
    :return: True if anything in the RPR scene was changed
    """
    res = False
    view_layer_data = ViewLayerSettings(depsgraph.view_layer)
    material_override = view_layer_data.material_override

    # set of depsgraph object keys (plain objects plus instances)
    depsgraph_keys = set.union(
        set(object.key(obj) for obj in self.depsgraph_objects(depsgraph)),
        set(instance.key(obj) for obj in self.depsgraph_instances(depsgraph)))

    # set of visible rpr object keys; non-Shape entries (e.g. lights) are
    # included unconditionally, shapes only when visible
    rpr_object_keys = set(key for key, obj in self.rpr_context.objects.items()
                          if not isinstance(obj, pyrpr.Shape) or obj.is_visible)

    # sets of objects keys to remove from rpr
    object_keys_to_remove = rpr_object_keys - depsgraph_keys

    # sets of objects keys to export into rpr
    object_keys_to_export = depsgraph_keys - rpr_object_keys

    if object_keys_to_remove:
        log("Object keys to remove", object_keys_to_remove)
        for obj_key in object_keys_to_remove:
            if obj_key in self.rpr_context.objects:
                self.rpr_context.remove_object(obj_key)
                res = True

    if object_keys_to_export:
        log("Object keys to add", object_keys_to_export)
        res |= self.sync_collection_objects(depsgraph, object_keys_to_export,
                                            material_override)
        res |= self.sync_collection_instances(depsgraph, object_keys_to_export,
                                              material_override)

    # update/remove material override on rest of scene objects
    if view_layer_data != self.view_layer_data:
        # view layer settings changed: re-export materials on all other
        # (unchanged) objects, then cache the new settings
        self.view_layer_data = view_layer_data
        res = True

        # meshes that stayed in the scene and need the override re-applied
        rpr_mesh_keys = set(key for key, obj in self.rpr_context.objects.items()
                            if isinstance(obj, pyrpr.Mesh) and obj.is_visible)
        unchanged_meshes_keys = tuple(e for e in depsgraph_keys if e in rpr_mesh_keys)
        log("Object keys to update material override", unchanged_meshes_keys)
        self.sync_collection_objects(depsgraph, unchanged_meshes_keys,
                                     material_override)

        # same treatment for instanced objects
        rpr_instance_keys = set(key for key, obj in self.rpr_context.objects.items()
                                if isinstance(obj, pyrpr.Instance) and obj.is_visible)
        unchanged_instances_keys = tuple(e for e in depsgraph_keys if e in rpr_instance_keys)
        log("Instance keys to update material override", unchanged_instances_keys)
        self.sync_collection_instances(depsgraph, unchanged_instances_keys,
                                       material_override)

    return res
def sync(self, context):
    """ Prepare scene for export

    Exports world, objects, instances and the active camera into
    ``self.rpr_context``, configures motion blur and context parameters.
    NOTE(review): assumes scene.camera is set — confirm callers guarantee it.

    :param context: Blender context providing the evaluated depsgraph
    """
    log('Start sync')

    depsgraph = context.evaluated_depsgraph_get()
    self.rpr_context.blender_data['depsgraph'] = depsgraph
    scene = depsgraph.scene

    scene.rpr.init_rpr_context(self.rpr_context)
    self.rpr_context.scene.set_name(scene.name)
    # render size honors the resolution percentage setting
    self.rpr_context.width = int(scene.render.resolution_x * scene.render.resolution_percentage / 100)
    self.rpr_context.height = int(scene.render.resolution_y * scene.render.resolution_percentage / 100)

    world.sync(self.rpr_context, scene.world)

    # cache blur data; motion blur is disabled when camera exposure is ~0
    self.rpr_context.do_motion_blur = scene.render.use_motion_blur and \
        not math.isclose(scene.camera.data.rpr.motion_blur_exposure, 0.0)

    if self.rpr_context.do_motion_blur:
        self.cache_blur_data(depsgraph)
        self.set_motion_blur_mode(scene)

    # camera, objects, particles
    for obj in self.depsgraph_objects(depsgraph, with_camera=True):
        # the correct collection visibility info is stored in the original object
        indirect_only = obj.original.indirect_only_get(view_layer=depsgraph.view_layer)
        object.sync(self.rpr_context, obj, indirect_only=indirect_only,
                    frame_current=scene.frame_current)

    # instances
    for inst in self.depsgraph_instances(depsgraph):
        indirect_only = inst.parent.original.indirect_only_get(view_layer=depsgraph.view_layer)
        instance.sync(self.rpr_context, inst, indirect_only=indirect_only,
                      frame_current=scene.frame_current)

    # rpr_context parameters
    self.rpr_context.set_parameter(pyrpr.CONTEXT_PREVIEW, False)
    scene.rpr.export_ray_depth(self.rpr_context)
    self.rpr_context.texture_compression = scene.rpr.texture_compression

    # EXPORT CAMERA
    camera_key = object.key(scene.camera)   # current camera key
    rpr_camera = self.rpr_context.create_camera(camera_key)
    self.rpr_context.scene.set_camera(rpr_camera)

    # camera object should come from the depsgraph; fall back to scene.camera
    camera_obj = depsgraph.objects.get(camera_key, None)
    if not camera_obj:
        camera_obj = scene.camera

    camera_data = camera.CameraData.init_from_camera(
        camera_obj.data, camera_obj.matrix_world,
        self.rpr_context.width / self.rpr_context.height)
    camera_data.export(rpr_camera)

    if self.rpr_context.do_motion_blur:
        rpr_camera.set_exposure(scene.camera.data.rpr.motion_blur_exposure)
        object.export_motion_blur(self.rpr_context, camera_key,
                                  object.get_transform(camera_obj))

    # adaptive subdivision will be limited to the current scene render size
    self.rpr_context.enable_aov(pyrpr.AOV_COLOR)
    self.rpr_context.sync_auto_adapt_subdivision()

    self.rpr_context.sync_portal_lights()

    # Exported scene will be rendered vertically flipped, flip it back
    self.rpr_context.set_parameter(pyrpr.CONTEXT_Y_FLIP, True)

    log('Finish sync')
def sync_motion_blur(self, depsgraph: bpy.types.Depsgraph):
    """Collect previous-frame transforms and export motion blur data.

    Records current-frame world matrices of motion-blur-enabled objects and
    instances, temporarily rewinds the scene one frame, and feeds each RPR
    object the (previous, current) matrix pair.  The scene frame is always
    restored, even if exporting fails.
    """

    def set_motion_blur(rpr_object, prev_matrix, cur_matrix):
        # Export motion for one RPR object from its previous/current matrices.
        if hasattr(rpr_object, 'set_motion_transform'):
            # object supports a full motion transform: pass the previous-frame
            # matrix as a 4x4 float32 array
            rpr_object.set_motion_transform(
                np.array(prev_matrix, dtype=np.float32).reshape(4, 4))
        else:
            # decompose the frame-to-frame difference into linear,
            # angular and scale motion components
            velocity = (prev_matrix - cur_matrix).to_translation()
            rpr_object.set_linear_motion(*velocity)

            mul_diff = prev_matrix @ cur_matrix.inverted()

            quaternion = mul_diff.to_quaternion()
            if quaternion.axis.length > 0.5:
                rpr_object.set_angular_motion(*quaternion.axis, quaternion.angle)
            else:
                # degenerate rotation axis - export a zero rotation
                rpr_object.set_angular_motion(1.0, 0.0, 0.0, 0.0)

            if not isinstance(rpr_object, pyrpr.Camera):
                scale_motion = mul_diff.to_scale() - mathutils.Vector((1, 1, 1))
                rpr_object.set_scale_motion(*scale_motion)

    cur_matrices = {}

    # getting current frame matrices (only for exported, motion-blur-enabled objects)
    for obj in self.depsgraph_objects(depsgraph, with_camera=True):
        if not obj.rpr.motion_blur:
            continue

        key = object.key(obj)
        rpr_object = self.rpr_context.objects.get(key, None)
        if not rpr_object or not isinstance(rpr_object, (pyrpr.Shape, pyrpr.AreaLight, pyrpr.Camera)):
            continue

        cur_matrices[key] = obj.matrix_world.copy()

    for inst in self.depsgraph_instances(depsgraph):
        if not inst.parent.rpr.motion_blur:
            continue

        key = instance.key(inst)
        rpr_object = self.rpr_context.objects.get(key, None)
        if not rpr_object or not isinstance(rpr_object, (pyrpr.Shape, pyrpr.AreaLight)):
            continue

        cur_matrices[key] = inst.matrix_world.copy()

    if not cur_matrices:
        # nothing to blur - avoid the costly frame change below
        return

    cur_frame = depsgraph.scene.frame_current
    prev_frame = cur_frame - 1

    # set to previous frame and calculate motion blur data
    self._set_scene_frame(depsgraph.scene, prev_frame, 0.0)
    try:
        for obj in self.depsgraph_objects(depsgraph, with_camera=True):
            key = object.key(obj)
            cur_matrix = cur_matrices.get(key, None)
            if cur_matrix is None:
                continue
            # obj.matrix_world is now the previous-frame matrix
            set_motion_blur(self.rpr_context.objects[key], obj.matrix_world, cur_matrix)

        for inst in self.depsgraph_instances(depsgraph):
            key = instance.key(inst)
            cur_matrix = cur_matrices.get(key, None)
            if cur_matrix is None:
                continue
            set_motion_blur(self.rpr_context.objects[key], inst.matrix_world, cur_matrix)
    finally:
        # restore current frame
        self._set_scene_frame(depsgraph.scene, cur_frame, 0.0)
def sync(self, depsgraph):
    """Export the whole scene for final rendering.

    Performs the complete export pipeline: context init, render border and
    resolution, objects, instances, camera (with optional tiling), world,
    motion blur, particles, AOVs, denoiser/shadow-catcher filters and render
    limits.  May return early if the user cancels via ``rpr_engine.test_break()``.
    Sets ``self.is_synced`` only on full completion.

    :param depsgraph: evaluated dependency graph of the scene to render
    """
    log('Start syncing')

    # Preparations for syncing
    self.is_synced = False
    self.sync_time = time.perf_counter()

    scene = depsgraph.scene
    view_layer = depsgraph.view_layer
    material_override = view_layer.material_override

    self.render_layer_name = view_layer.name
    self.status_title = f"{scene.name}: {self.render_layer_name}"
    self.notify_status(0, "Start syncing")

    self.use_contour = scene.rpr.is_contour_used()
    self._init_rpr_context(scene)

    # border is ((min_x, min_y), (width_frac, height_frac)) in 0..1 space
    border = ((0, 0), (1, 1)) if not scene.render.use_border else \
        ((scene.render.border_min_x, scene.render.border_min_y),
         (scene.render.border_max_x - scene.render.border_min_x,
          scene.render.border_max_y - scene.render.border_min_y))

    screen_width = int(scene.render.resolution_x * scene.render.resolution_percentage / 100)
    screen_height = int(scene.render.resolution_y * scene.render.resolution_percentage / 100)

    # effective render size is the border fraction of the full screen size
    self.width = int(screen_width * border[1][0])
    self.height = int(screen_height * border[1][1])

    self.rpr_context.resize(self.width, self.height)

    if self.use_contour:
        scene.rpr.export_contour_mode(self.rpr_context)

    self.rpr_context.blender_data['depsgraph'] = depsgraph

    # EXPORT OBJECTS
    objects_len = len(depsgraph.objects)
    for i, obj in enumerate(self.depsgraph_objects(depsgraph)):
        self.notify_status(0, "Syncing object (%d/%d): %s" % (i, objects_len, obj.name))

        # the correct collection visibility info is stored in original object
        indirect_only = obj.original.indirect_only_get(view_layer=view_layer)
        object.sync(self.rpr_context, obj, indirect_only=indirect_only,
                    material_override=material_override,
                    frame_current=scene.frame_current, use_contour=self.use_contour)

        if self.rpr_engine.test_break():
            log.warn("Syncing stopped by user termination")
            return

    # EXPORT INSTANCES
    instances_len = len(depsgraph.object_instances)
    last_instances_percent = 0
    self.notify_status(0, "Syncing instances 0%")

    for i, inst in enumerate(self.depsgraph_instances(depsgraph)):
        # report progress only when the integer percentage advances
        instances_percent = (i * 100) // instances_len
        if instances_percent > last_instances_percent:
            self.notify_status(0, f"Syncing instances {instances_percent}%")
            last_instances_percent = instances_percent

        indirect_only = inst.parent.original.indirect_only_get(view_layer=view_layer)
        instance.sync(self.rpr_context, inst, indirect_only=indirect_only,
                      material_override=material_override,
                      frame_current=scene.frame_current, use_contour=self.use_contour)

        if self.rpr_engine.test_break():
            log.warn("Syncing stopped by user termination")
            return

    self.notify_status(0, "Syncing instances 100%")

    # EXPORT CAMERA
    camera_key = object.key(scene.camera)   # current camera key
    rpr_camera = self.rpr_context.create_camera(camera_key)
    self.rpr_context.scene.set_camera(rpr_camera)

    # Camera object should be taken from depsgraph objects.
    # Use bpy.scene.camera if none found
    camera_obj = depsgraph.objects.get(camera_key, None)
    if not camera_obj:
        camera_obj = scene.camera

    self.camera_data = camera.CameraData.init_from_camera(
        camera_obj.data, camera_obj.matrix_world, screen_width / screen_height, border)

    if scene.rpr.is_tile_render_available:
        if scene.camera.data.type == 'PANO':
            log.warn("Tiles rendering is not supported for Panoramic camera")
        else:
            # create adaptive subdivision camera to use total render area for calculations
            subdivision_camera_key = camera_key + ".RPR_ADAPTIVE_SUBDIVISION_CAMERA"
            subdivision_camera = self.rpr_context.create_camera(subdivision_camera_key)
            self.camera_data.export(subdivision_camera)
            self.rpr_context.scene.set_subdivision_camera(subdivision_camera)

            # apply tiles settings; the context is resized to one tile
            self.tile_size = (min(self.width, scene.rpr.tile_x),
                              min(self.height, scene.rpr.tile_y))
            self.tile_order = scene.rpr.tile_order
            self.rpr_context.resize(*self.tile_size)
    else:
        self.camera_data.export(rpr_camera)

    # Environment is synced once per frame
    if scene.world.is_evaluated:  # for some reason World data can come in unevaluated
        world_data = scene.world
    else:
        world_data = scene.world.evaluated_get(depsgraph)
    world_settings = world.sync(self.rpr_context, world_data)
    self.world_backplate = world_settings.backplate

    # SYNC MOTION BLUR; disabled when camera exposure is ~0
    self.rpr_context.do_motion_blur = scene.render.use_motion_blur and \
        not math.isclose(scene.camera.data.rpr.motion_blur_exposure, 0.0)

    if self.rpr_context.do_motion_blur:
        self.sync_motion_blur(depsgraph)
        rpr_camera.set_exposure(scene.camera.data.rpr.motion_blur_exposure)
        self.set_motion_blur_mode(scene)

    # EXPORT PARTICLES
    # Note: particles should be exported after motion blur,
    # otherwise prev_location of particle will be (0, 0, 0)
    self.notify_status(0, "Syncing particles")
    for obj in self.depsgraph_objects(depsgraph):
        particle.sync(self.rpr_context, obj)

    # objects linked to scene as a collection are instanced, so walk thru them for particles
    for entry in self.depsgraph_instances(depsgraph):
        particle.sync(self.rpr_context, entry.instance_object)

    # EXPORT: AOVS, adaptive sampling, shadow catcher, denoiser
    enable_adaptive = scene.rpr.limits.noise_threshold > 0.0
    view_layer.rpr.export_aovs(view_layer, self.rpr_context, self.rpr_engine,
                               enable_adaptive, self.cryptomatte_allowed)

    if enable_adaptive:
        # if adaptive is enabled turn on aov and settings
        self.rpr_context.enable_aov(pyrpr.AOV_VARIANCE)
        scene.rpr.limits.set_adaptive_params(self.rpr_context)

    # Image filter
    image_filter_settings = view_layer.rpr.denoiser.get_settings(scene)
    image_filter_settings['resolution'] = (self.width, self.height)
    self.setup_image_filter(image_filter_settings)

    # Shadow catcher
    if scene.rpr.render_quality != 'FULL':
        self.rpr_context.sync_catchers(False)
        background_filter_settings = {
            'enable': scene.render.film_transparent,
            'resolution': (self.width, self.height),
        }
        self.setup_background_filter(background_filter_settings)
    else:
        self.rpr_context.sync_catchers(scene.render.film_transparent)

    # SET rpr_context parameters
    self.rpr_context.set_parameter(pyrpr.CONTEXT_PREVIEW, False)
    scene.rpr.export_ray_depth(self.rpr_context)
    scene.rpr.export_pixel_filter(self.rpr_context)

    self.render_samples, self.render_time = (scene.rpr.limits.max_samples,
                                             scene.rpr.limits.seconds)

    if scene.rpr.render_quality == 'FULL2':
        if self.use_contour:
            # contour rendering requires per-sample updates
            self.render_update_samples = 1
        else:
            self.render_update_samples = scene.rpr.limits.update_samples_rpr2
    else:
        self.render_update_samples = scene.rpr.limits.update_samples

    if scene.rpr.use_render_stamp:
        self.render_stamp_text = self.prepare_scene_stamp_text(scene)

    self.sync_time = time.perf_counter() - self.sync_time

    self.is_synced = True
    self.notify_status(0, "Finish syncing")
    log('Finish sync')