def sync(self, context):
    """Export the Blender scene into the RPR context ahead of a final render."""
    log('Start sync')

    depsgraph = context.evaluated_depsgraph_get()
    self.rpr_context.blender_data['depsgraph'] = depsgraph
    scene = depsgraph.scene
    view_layer = depsgraph.view_layer

    scene.rpr.init_rpr_context(self.rpr_context)
    self.rpr_context.scene.set_name(scene.name)

    # Render size scaled by the resolution percentage
    self.rpr_context.width = int(scene.render.resolution_x * scene.render.resolution_percentage / 100)
    self.rpr_context.height = int(scene.render.resolution_y * scene.render.resolution_percentage / 100)

    world.sync(self.rpr_context, scene.world)

    # Camera, objects and particles
    for export_obj in self.depsgraph_objects(depsgraph, with_camera=True):
        # collection-visibility info is stored on the original datablock
        object.sync(self.rpr_context, export_obj,
                    indirect_only=export_obj.original.indirect_only_get(view_layer=view_layer))

    # Instanced objects
    for export_inst in self.depsgraph_instances(depsgraph):
        instance.sync(self.rpr_context, export_inst,
                      indirect_only=export_inst.parent.original.indirect_only_get(view_layer=view_layer))

    # Context-wide parameters
    self.rpr_context.set_parameter(pyrpr.CONTEXT_PREVIEW, False)
    scene.rpr.export_ray_depth(self.rpr_context)

    # adaptive subdivision will be limited to the current scene render size
    self.rpr_context.enable_aov(pyrpr.AOV_COLOR)
    self.rpr_context.sync_auto_adapt_subdivision(self.rpr_context.width, self.rpr_context.height)

    self.rpr_context.sync_portal_lights()

    # The exported scene renders vertically flipped — flip it back
    self.rpr_context.set_parameter(pyrpr.CONTEXT_Y_FLIP, True)

    log('Finish sync')
def sync(self, depsgraph):
    """Export the material-preview scene into the RPR context.

    Syncs visible objects, the preview camera and (optionally) the world,
    then applies preview-quality render settings from the user's scene.
    ``self.is_synced`` becomes True only when the whole export completes.
    """
    log('Start syncing')
    self.is_synced = False

    scene = depsgraph.scene
    # User-facing render settings come from the interactive scene,
    # not from the internal preview scene being rendered.
    settings_scene = bpy.context.scene

    self._init_rpr_context(scene)
    self.rpr_context.resize(scene.render.resolution_x, scene.render.resolution_y)
    self.rpr_context.blender_data['depsgraph'] = depsgraph

    # export visible objects
    for obj in self.depsgraph_objects(depsgraph):
        object.sync(self.rpr_context, obj)

    # export camera
    camera.sync(self.rpr_context, depsgraph.objects[depsgraph.scene.camera.name])

    # export world only if active_material.use_preview_world is enabled.
    # The preview scene is expected to contain an object named 'preview_*'
    # carrying the previewed material; guard against it being absent
    # instead of crashing with an unhandled StopIteration.
    preview_obj = next((obj for obj in self.depsgraph_objects(depsgraph)
                        if obj.name.startswith('preview_')), None)
    if preview_obj and preview_obj.active_material and preview_obj.active_material.use_preview_world:
        world.sync(self.rpr_context, settings_scene.world)

    self.rpr_context.enable_aov(pyrpr.AOV_COLOR)
    self.rpr_context.enable_aov(pyrpr.AOV_DEPTH)
    self.rpr_context.set_parameter(pyrpr.CONTEXT_PREVIEW, True)

    settings_scene.rpr.export_ray_depth(self.rpr_context)
    settings_scene.rpr.export_pixel_filter(self.rpr_context)
    self.rpr_context.texture_compression = settings_scene.rpr.texture_compression

    # preview sampling limits
    self.render_samples = settings_scene.rpr.viewport_limits.preview_samples
    self.render_update_samples = settings_scene.rpr.viewport_limits.preview_update_samples

    self.is_synced = True
    log('Finish sync')
def sync(self, context):
    """ Prepare scene for export

    Evaluates the depsgraph, configures the RPR context size, exports the
    world, all objects/instances, the active camera (with optional motion
    blur), and context-wide render parameters.
    """
    log('Start sync')

    depsgraph = context.evaluated_depsgraph_get()
    self.rpr_context.blender_data['depsgraph'] = depsgraph
    scene = depsgraph.scene

    scene.rpr.init_rpr_context(self.rpr_context)
    self.rpr_context.scene.set_name(scene.name)
    # Render size scaled by the resolution percentage
    self.rpr_context.width = int(scene.render.resolution_x * scene.render.resolution_percentage / 100)
    self.rpr_context.height = int(scene.render.resolution_y * scene.render.resolution_percentage / 100)

    world.sync(self.rpr_context, scene.world)

    # cache blur data — must happen BEFORE object/instance sync so that
    # exported objects can pick up their motion transforms
    self.rpr_context.do_motion_blur = scene.render.use_motion_blur and \
        not math.isclose(scene.camera.data.rpr.motion_blur_exposure, 0.0)
    if self.rpr_context.do_motion_blur:
        self.cache_blur_data(depsgraph)
        self.set_motion_blur_mode(scene)

    # camera, objects, particles
    for obj in self.depsgraph_objects(depsgraph, with_camera=True):
        # the correct collection-visibility info is stored on the original object
        indirect_only = obj.original.indirect_only_get(view_layer=depsgraph.view_layer)
        object.sync(self.rpr_context, obj, indirect_only=indirect_only,
                    frame_current=scene.frame_current)

    # instances
    for inst in self.depsgraph_instances(depsgraph):
        indirect_only = inst.parent.original.indirect_only_get(view_layer=depsgraph.view_layer)
        instance.sync(self.rpr_context, inst, indirect_only=indirect_only,
                      frame_current=scene.frame_current)

    # rpr_context parameters
    self.rpr_context.set_parameter(pyrpr.CONTEXT_PREVIEW, False)
    scene.rpr.export_ray_depth(self.rpr_context)
    self.rpr_context.texture_compression = scene.rpr.texture_compression

    # EXPORT CAMERA
    camera_key = object.key(scene.camera)   # current camera key
    rpr_camera = self.rpr_context.create_camera(camera_key)
    self.rpr_context.scene.set_camera(rpr_camera)
    # Prefer the evaluated camera from the depsgraph; fall back to the
    # scene camera when it is not present there.
    camera_obj = depsgraph.objects.get(camera_key, None)
    if not camera_obj:
        camera_obj = scene.camera
    camera_data = camera.CameraData.init_from_camera(
        camera_obj.data, camera_obj.matrix_world,
        self.rpr_context.width / self.rpr_context.height)
    camera_data.export(rpr_camera)

    if self.rpr_context.do_motion_blur:
        rpr_camera.set_exposure(scene.camera.data.rpr.motion_blur_exposure)
        object.export_motion_blur(self.rpr_context, camera_key,
                                  object.get_transform(camera_obj))

    # adaptive subdivision will be limited to the current scene render size
    self.rpr_context.enable_aov(pyrpr.AOV_COLOR)
    self.rpr_context.sync_auto_adapt_subdivision()

    self.rpr_context.sync_portal_lights()

    # Exported scene will be rendered vertically flipped, flip it back
    self.rpr_context.set_parameter(pyrpr.CONTEXT_Y_FLIP, True)

    log('Finish sync')
def sync(self, depsgraph):
    """Export the full scene for a production render.

    Handles render border, contour mode, objects/instances with progress
    reporting and user-abort checks, camera (including tiled rendering),
    world, motion blur, particles, AOVs, adaptive sampling, denoiser and
    shadow-catcher filters, and the sampling/time limits. Sets
    ``self.is_synced`` on success; returns early if the user aborts.
    """
    log('Start syncing')

    # Preparations for syncing
    self.is_synced = False
    self.sync_time = time.perf_counter()

    scene = depsgraph.scene
    view_layer = depsgraph.view_layer
    material_override = view_layer.material_override

    self.render_layer_name = view_layer.name
    self.status_title = f"{scene.name}: {self.render_layer_name}"
    self.notify_status(0, "Start syncing")

    self.use_contour = scene.rpr.is_contour_used()

    self._init_rpr_context(scene)

    # Border as ((min_x, min_y), (width, height)) in normalized [0..1] units;
    # full frame when the border is disabled.
    border = ((0, 0), (1, 1)) if not scene.render.use_border else \
        ((scene.render.border_min_x, scene.render.border_min_y),
         (scene.render.border_max_x - scene.render.border_min_x,
          scene.render.border_max_y - scene.render.border_min_y))

    screen_width = int(scene.render.resolution_x * scene.render.resolution_percentage / 100)
    screen_height = int(scene.render.resolution_y * scene.render.resolution_percentage / 100)

    # Actual output size is the bordered fraction of the full screen size
    self.width = int(screen_width * border[1][0])
    self.height = int(screen_height * border[1][1])

    self.rpr_context.resize(self.width, self.height)

    if self.use_contour:
        scene.rpr.export_contour_mode(self.rpr_context)

    self.rpr_context.blender_data['depsgraph'] = depsgraph

    # EXPORT OBJECTS
    objects_len = len(depsgraph.objects)
    for i, obj in enumerate(self.depsgraph_objects(depsgraph)):
        # NOTE(review): progress starts at "0/N" since i is 0-based — confirm intended
        self.notify_status(0, "Syncing object (%d/%d): %s" % (i, objects_len, obj.name))

        # the correct collection visibility info is stored in original object
        indirect_only = obj.original.indirect_only_get(view_layer=view_layer)
        object.sync(self.rpr_context, obj, indirect_only=indirect_only,
                    material_override=material_override,
                    frame_current=scene.frame_current,
                    use_contour=self.use_contour)

        if self.rpr_engine.test_break():
            log.warn("Syncing stopped by user termination")
            return

    # EXPORT INSTANCES
    instances_len = len(depsgraph.object_instances)
    last_instances_percent = 0
    self.notify_status(0, "Syncing instances 0%")

    for i, inst in enumerate(self.depsgraph_instances(depsgraph)):
        # Only report progress when the whole-percent value advances
        instances_percent = (i * 100) // instances_len
        if instances_percent > last_instances_percent:
            self.notify_status(0, f"Syncing instances {instances_percent}%")
            last_instances_percent = instances_percent

        indirect_only = inst.parent.original.indirect_only_get(view_layer=view_layer)
        instance.sync(self.rpr_context, inst, indirect_only=indirect_only,
                      material_override=material_override,
                      frame_current=scene.frame_current,
                      use_contour=self.use_contour)

        if self.rpr_engine.test_break():
            log.warn("Syncing stopped by user termination")
            return

    self.notify_status(0, "Syncing instances 100%")

    # EXPORT CAMERA
    camera_key = object.key(scene.camera)   # current camera key
    rpr_camera = self.rpr_context.create_camera(camera_key)
    self.rpr_context.scene.set_camera(rpr_camera)

    # Camera object should be taken from depsgrapgh objects.
    # Use bpy.scene.camera if none found
    camera_obj = depsgraph.objects.get(camera_key, None)
    if not camera_obj:
        camera_obj = scene.camera

    self.camera_data = camera.CameraData.init_from_camera(
        camera_obj.data, camera_obj.matrix_world,
        screen_width / screen_height, border)

    if scene.rpr.is_tile_render_available:
        if scene.camera.data.type == 'PANO':
            # NOTE(review): in this branch neither rpr_camera nor the tile
            # settings are set up here — presumably handled elsewhere; confirm.
            log.warn("Tiles rendering is not supported for Panoramic camera")
        else:
            # create adaptive subdivision camera to use total render area for calculations
            subdivision_camera_key = camera_key + ".RPR_ADAPTIVE_SUBDIVISION_CAMERA"
            subdivision_camera = self.rpr_context.create_camera(subdivision_camera_key)
            self.camera_data.export(subdivision_camera)
            self.rpr_context.scene.set_subdivision_camera(subdivision_camera)

            # apply tiles settings; context is resized to a single tile
            self.tile_size = (min(self.width, scene.rpr.tile_x),
                              min(self.height, scene.rpr.tile_y))
            self.tile_order = scene.rpr.tile_order
            self.rpr_context.resize(*self.tile_size)
    else:
        self.camera_data.export(rpr_camera)

    # Environment is synced once per frame
    if scene.world.is_evaluated:  # for some reason World data can came in unevaluated
        world_data = scene.world
    else:
        world_data = scene.world.evaluated_get(depsgraph)
    world_settings = world.sync(self.rpr_context, world_data)
    self.world_backplate = world_settings.backplate

    # SYNC MOTION BLUR
    self.rpr_context.do_motion_blur = scene.render.use_motion_blur and \
        not math.isclose(scene.camera.data.rpr.motion_blur_exposure, 0.0)

    if self.rpr_context.do_motion_blur:
        self.sync_motion_blur(depsgraph)
        rpr_camera.set_exposure(scene.camera.data.rpr.motion_blur_exposure)
        self.set_motion_blur_mode(scene)

    # EXPORT PARTICLES
    # Note: particles should be exported after motion blur,
    # otherwise prev_location of particle will be (0, 0, 0)
    self.notify_status(0, "Syncing particles")
    for obj in self.depsgraph_objects(depsgraph):
        particle.sync(self.rpr_context, obj)

    # objects linked to scene as a collection are instanced, so walk thru them for particles
    for entry in self.depsgraph_instances(depsgraph):
        particle.sync(self.rpr_context, entry.instance_object)

    # EXPORT: AOVS, adaptive sampling, shadow catcher, denoiser
    enable_adaptive = scene.rpr.limits.noise_threshold > 0.0
    view_layer.rpr.export_aovs(view_layer, self.rpr_context, self.rpr_engine,
                               enable_adaptive, self.cryptomatte_allowed)

    if enable_adaptive:
        # if adaptive sampling is enabled, turn on the variance AOV and its settings
        self.rpr_context.enable_aov(pyrpr.AOV_VARIANCE)
        scene.rpr.limits.set_adaptive_params(self.rpr_context)

    # Image filter (denoiser)
    image_filter_settings = view_layer.rpr.denoiser.get_settings(scene)
    image_filter_settings['resolution'] = (self.width, self.height)
    self.setup_image_filter(image_filter_settings)

    # Shadow catcher: non-FULL quality modes use the background filter instead
    if scene.rpr.render_quality != 'FULL':
        self.rpr_context.sync_catchers(False)
        background_filter_settings = {
            'enable': scene.render.film_transparent,
            'resolution': (self.width, self.height),
        }
        self.setup_background_filter(background_filter_settings)
    else:
        self.rpr_context.sync_catchers(scene.render.film_transparent)

    # SET rpr_context parameters
    self.rpr_context.set_parameter(pyrpr.CONTEXT_PREVIEW, False)
    scene.rpr.export_ray_depth(self.rpr_context)
    scene.rpr.export_pixel_filter(self.rpr_context)

    # Sampling / time limits; RPR2 ('FULL2') uses its own update-sample count,
    # and contour rendering forces one sample per update
    self.render_samples, self.render_time = (scene.rpr.limits.max_samples,
                                             scene.rpr.limits.seconds)

    if scene.rpr.render_quality == 'FULL2':
        if self.use_contour:
            self.render_update_samples = 1
        else:
            self.render_update_samples = scene.rpr.limits.update_samples_rpr2
    else:
        self.render_update_samples = scene.rpr.limits.update_samples

    if scene.rpr.use_render_stamp:
        self.render_stamp_text = self.prepare_scene_stamp_text(scene)

    self.sync_time = time.perf_counter() - self.sync_time

    self.is_synced = True
    self.notify_status(0, "Finish syncing")
    log('Finish sync')