def sync_collection_objects(self, depsgraph, object_keys_to_export, material_override):
    """ Export collections objects """
    exported_any = False

    for blender_obj in self.depsgraph_objects(depsgraph):
        key = object.key(blender_obj)
        if key not in object_keys_to_export:
            continue

        existing = self.rpr_context.objects.get(key, None)
        if existing is None:
            # object is not in the RPR context yet — export it from scratch;
            # the correct collection visibility info lives on the original object
            indirect = blender_obj.original.indirect_only_get(
                view_layer=depsgraph.view_layer)
            object.sync(self.rpr_context, blender_obj,
                        indirect_only=indirect,
                        material_override=material_override)
        else:
            # already exported — make it visible and reapply its materials
            existing.set_visibility(True)
            if not material_override:
                existing.set_material(None)
            assign_materials(self.rpr_context, existing, blender_obj, material_override)

        exported_any = True

    return exported_any
def sync_collection_objects(self, depsgraph, object_keys_to_export, material_override):
    """ Export collections objects """
    exported_any = False
    frame = depsgraph.scene.frame_current

    for blender_obj in self.depsgraph_objects(depsgraph):
        key = object.key(blender_obj)
        if key not in object_keys_to_export:
            continue

        existing = self.rpr_context.objects.get(key, None)
        if existing:
            # already exported — only reapply materials
            assign_materials(self.rpr_context, existing, blender_obj, material_override)
        else:
            # not in the RPR context yet — export from scratch;
            # collection visibility info lives on the original object
            indirect = blender_obj.original.indirect_only_get(
                view_layer=depsgraph.view_layer)
            object.sync(self.rpr_context, blender_obj,
                        indirect_only=indirect,
                        material_override=material_override,
                        frame_current=frame)

        exported_any = True

    return exported_any
def _do_sync(self, depsgraph):
    """
    Export all depsgraph objects and instances into the RPR context,
    then sync shadow/reflection catchers.

    Raises FinishRenderException as soon as self.is_finished is set
    (from another thread), so the export can be aborted early.
    Progress is reported through self.notify_status.
    """
    # SYNCING OBJECTS AND INSTANCES
    self.notify_status("Starting...", "Sync")
    time_begin = time.perf_counter()
    self.use_contour = depsgraph.scene.rpr.is_contour_used(is_final_engine=False)

    # exporting objects
    frame_current = depsgraph.scene.frame_current
    material_override = depsgraph.view_layer.material_override
    objects_len = len(depsgraph.objects)
    for i, obj in enumerate(self.depsgraph_objects(depsgraph)):
        if self.is_finished:
            raise FinishRenderException

        time_sync = time.perf_counter() - time_begin
        self.notify_status(
            f"Time {time_sync:.1f} | Object ({i}/{objects_len}): {obj.name}", "Sync")

        # collection visibility info is stored on the original object
        indirect_only = obj.original.indirect_only_get(view_layer=depsgraph.view_layer)
        object.sync(self.rpr_context, obj, indirect_only=indirect_only,
                    material_override=material_override,
                    frame_current=frame_current,
                    use_contour=self.use_contour)

    # exporting instances
    # NOTE(review): instances_len counts depsgraph.object_instances while the
    # loop iterates self.depsgraph_instances — assumes the latter is a subset,
    # so instances_len is never 0 while the loop body runs; confirm.
    instances_len = len(depsgraph.object_instances)
    last_instances_percent = 0
    for i, inst in enumerate(self.depsgraph_instances(depsgraph)):
        if self.is_finished:
            raise FinishRenderException

        # report progress only when the integer percentage advances
        instances_percent = (i * 100) // instances_len
        if instances_percent > last_instances_percent:
            time_sync = time.perf_counter() - time_begin
            self.notify_status(
                f"Time {time_sync:.1f} | Instances {instances_percent}%", "Sync")
            last_instances_percent = instances_percent

        indirect_only = inst.parent.original.indirect_only_get(
            view_layer=depsgraph.view_layer)
        instance.sync(self.rpr_context, inst, indirect_only=indirect_only,
                      material_override=material_override,
                      frame_current=frame_current,
                      use_contour=self.use_contour)

    # shadow catcher
    self.rpr_context.sync_catchers(depsgraph.scene.render.film_transparent)

    self.is_synced = True
def update_material_on_scene_objects(self, mat, depsgraph):
    """ Find all mesh material users and reapply material """
    override = depsgraph.view_layer.material_override
    frame = depsgraph.scene.frame_current
    contour = depsgraph.scene.rpr.is_contour_used(is_final_engine=False)

    if override and override.name == mat.name:
        # the updated material IS the override — every object uses it
        export_mat = override
        users = self.depsgraph_objects(depsgraph)
    else:
        export_mat = mat
        users = tuple(o for o in self.depsgraph_objects(depsgraph)
                      if mat.name in o.material_slots.keys())

    changed = False
    for o in users:
        synced = (material.sync_update(self.rpr_context, export_mat, obj=o),
                  material.sync_update(self.rpr_context, export_mat, 'Volume', obj=o),
                  material.sync_update(self.rpr_context, export_mat, 'Displacement', obj=o))
        if not any(synced):
            continue

        indirect = o.original.indirect_only_get(view_layer=depsgraph.view_layer)
        if object.key(o) in self.rpr_context.objects:
            changed |= object.sync_update(self.rpr_context, o, False, False,
                                          indirect_only=indirect,
                                          material_override=override,
                                          frame_current=frame,
                                          use_contour=contour)
        else:
            # material user is not exported yet — export it from scratch
            object.sync(self.rpr_context, o, indirect_only=indirect,
                        frame_current=frame, use_contour=contour)
            changed = True

    return changed
def sync(self, context):
    """ Prepare scene for export """
    log('Start sync')

    depsgraph = context.evaluated_depsgraph_get()
    ctx = self.rpr_context
    ctx.blender_data['depsgraph'] = depsgraph
    scene = depsgraph.scene

    scene.rpr.init_rpr_context(ctx)
    ctx.scene.set_name(scene.name)

    # render resolution scaled by the user's percentage setting
    ctx.width = int(scene.render.resolution_x * scene.render.resolution_percentage / 100)
    ctx.height = int(scene.render.resolution_y * scene.render.resolution_percentage / 100)

    world.sync(ctx, scene.world)

    # camera, objects, particles
    # (collection visibility info lives on the original object)
    for obj in self.depsgraph_objects(depsgraph, with_camera=True):
        object.sync(
            ctx, obj,
            indirect_only=obj.original.indirect_only_get(view_layer=depsgraph.view_layer))

    # instances
    for inst in self.depsgraph_instances(depsgraph):
        instance.sync(
            ctx, inst,
            indirect_only=inst.parent.original.indirect_only_get(
                view_layer=depsgraph.view_layer))

    # rpr_context parameters
    ctx.set_parameter(pyrpr.CONTEXT_PREVIEW, False)
    scene.rpr.export_ray_depth(ctx)

    # adaptive subdivision will be limited to the current scene render size
    ctx.enable_aov(pyrpr.AOV_COLOR)
    ctx.sync_auto_adapt_subdivision(ctx.width, ctx.height)
    ctx.sync_portal_lights()

    # Exported scene will be rendered vertically flipped, flip it back
    ctx.set_parameter(pyrpr.CONTEXT_Y_FLIP, True)

    log('Finish sync')
def sync(self, depsgraph):
    """
    Prepare the material-preview scene for rendering.

    Exports visible objects, the camera, and (if the previewed material
    requests it) the world, then configures AOVs and preview context
    parameters. Sets self.is_synced on success.
    """
    log('Start syncing')
    self.is_synced = False

    scene = depsgraph.scene
    settings_scene = bpy.context.scene

    self._init_rpr_context(scene)
    self.rpr_context.resize(scene.render.resolution_x, scene.render.resolution_y)
    self.rpr_context.blender_data['depsgraph'] = depsgraph

    # export visible objects
    for obj in self.depsgraph_objects(depsgraph):
        object.sync(self.rpr_context, obj)

    # export camera
    camera.sync(self.rpr_context, depsgraph.objects[depsgraph.scene.camera.name])

    # export world only if active_material.use_preview_world is enabled.
    # FIX: use next() with a default — a bare next() raises StopIteration
    # and aborts the whole sync when no 'preview_*' object is present.
    preview_obj = next((obj for obj in self.depsgraph_objects(depsgraph)
                        if obj.name.startswith('preview_')), None)
    if preview_obj and preview_obj.active_material \
            and preview_obj.active_material.use_preview_world:
        world.sync(self.rpr_context, settings_scene.world)

    self.rpr_context.enable_aov(pyrpr.AOV_COLOR)
    self.rpr_context.enable_aov(pyrpr.AOV_DEPTH)

    self.rpr_context.set_parameter(pyrpr.CONTEXT_PREVIEW, True)
    settings_scene.rpr.export_ray_depth(self.rpr_context)
    settings_scene.rpr.export_pixel_filter(self.rpr_context)

    self.rpr_context.texture_compression = settings_scene.rpr.texture_compression

    # sampling limits come from the viewport preview settings
    self.render_samples = settings_scene.rpr.viewport_limits.preview_samples
    self.render_update_samples = settings_scene.rpr.viewport_limits.preview_update_samples

    self.is_synced = True
    log('Finish sync')
def update_material_on_scene_objects(self, mat, depsgraph):
    """ Find all mesh material users and reapply material """
    override = depsgraph.view_layer.material_override

    if override and override.name == mat.name:
        # the updated material IS the override — every object uses it
        export_mat = override
        users = self.depsgraph_objects(depsgraph)
    else:
        export_mat = mat
        users = tuple(o for o in self.depsgraph_objects(depsgraph)
                      if mat.name in o.material_slots.keys())

    has_uv_map = material.has_uv_map_node(export_mat)

    changed = False
    for o in users:
        # (surface, volume, displacement) RPR materials for this object
        mats = self.get_object_rpr_materials(o, export_mat, has_uv_map)
        if not any(mats):
            continue

        indirect = o.original.indirect_only_get(view_layer=depsgraph.view_layer)
        if object.key(o) in self.rpr_context.objects:
            changed |= object.sync_update(self.rpr_context, o, False, False,
                                          indirect_only=indirect,
                                          material_override=override)
        else:
            # material user is not exported yet — export it from scratch
            object.sync(self.rpr_context, o, indirect_only=indirect)
            changed = True

    return changed
def _do_sync(self, depsgraph):
    """
    Export all depsgraph objects and instances into the RPR context,
    then configure shadow/reflection catchers.

    Raises FinishRenderException as soon as self.is_finished is set
    (from another thread), so the export can be aborted early.
    Progress is reported through self.notify_status.
    """
    # SYNCING OBJECTS AND INSTANCES
    self.notify_status("Starting...", "Sync")
    time_begin = time.perf_counter()

    # exporting objects
    frame_current = depsgraph.scene.frame_current
    material_override = depsgraph.view_layer.material_override
    objects_len = len(depsgraph.objects)
    for i, obj in enumerate(self.depsgraph_objects(depsgraph)):
        if self.is_finished:
            raise FinishRenderException

        time_sync = time.perf_counter() - time_begin
        self.notify_status(
            f"Time {time_sync:.1f} | Object ({i}/{objects_len}): {obj.name}", "Sync")

        # collection visibility info is stored on the original object
        indirect_only = obj.original.indirect_only_get(view_layer=depsgraph.view_layer)
        object.sync(self.rpr_context, obj, indirect_only=indirect_only,
                    material_override=material_override, frame_current=frame_current)

    # exporting instances
    # NOTE(review): instances_len counts depsgraph.object_instances while the
    # loop iterates self.depsgraph_instances — assumes the latter is a subset,
    # so instances_len is never 0 while the loop body runs; confirm.
    instances_len = len(depsgraph.object_instances)
    last_instances_percent = 0
    for i, inst in enumerate(self.depsgraph_instances(depsgraph)):
        if self.is_finished:
            raise FinishRenderException

        # report progress only when the integer percentage advances
        instances_percent = (i * 100) // instances_len
        if instances_percent > last_instances_percent:
            time_sync = time.perf_counter() - time_begin
            self.notify_status(
                f"Time {time_sync:.1f} | Instances {instances_percent}%", "Sync")
            last_instances_percent = instances_percent

        indirect_only = inst.parent.original.indirect_only_get(
            view_layer=depsgraph.view_layer)
        instance.sync(self.rpr_context, inst, indirect_only=indirect_only,
                      material_override=material_override, frame_current=frame_current)

    # shadow catcher
    if depsgraph.scene.rpr.render_quality != 'FULL':  # non-Legacy modes
        # catcher compositing is done by a background filter instead of the core
        self.rpr_context.sync_catchers(False)
        bg_filter_enabled = self.rpr_context.use_reflection_catcher or self.rpr_context.use_shadow_catcher
        background_filter_settings = {
            'enable': bg_filter_enabled,
            'use_background': depsgraph.scene.render.film_transparent,
            'use_shadow': self.rpr_context.use_shadow_catcher,
            'use_reflection': self.rpr_context.use_reflection_catcher,
            'resolution': (self.width, self.height),
        }
        self.setup_background_filter(background_filter_settings)
    else:
        self.rpr_context.sync_catchers(depsgraph.scene.render.film_transparent)

    self.is_synced = True
def sync(self, context):
    """ Prepare scene for export """
    log('Start sync')

    depsgraph = context.evaluated_depsgraph_get()
    self.rpr_context.blender_data['depsgraph'] = depsgraph
    scene = depsgraph.scene

    scene.rpr.init_rpr_context(self.rpr_context)
    self.rpr_context.scene.set_name(scene.name)
    # render resolution scaled by the user's percentage setting
    self.rpr_context.width = int(scene.render.resolution_x * scene.render.resolution_percentage / 100)
    self.rpr_context.height = int(scene.render.resolution_y * scene.render.resolution_percentage / 100)

    world.sync(self.rpr_context, scene.world)

    # cache blur data
    # motion blur is skipped entirely when camera exposure is (close to) zero
    self.rpr_context.do_motion_blur = scene.render.use_motion_blur and \
        not math.isclose(scene.camera.data.rpr.motion_blur_exposure, 0.0)
    if self.rpr_context.do_motion_blur:
        self.cache_blur_data(depsgraph)
        self.set_motion_blur_mode(scene)

    # camera, objects, particles
    for obj in self.depsgraph_objects(depsgraph, with_camera=True):
        # collection visibility info is stored on the original object
        indirect_only = obj.original.indirect_only_get(view_layer=depsgraph.view_layer)
        object.sync(self.rpr_context, obj, indirect_only=indirect_only,
                    frame_current=scene.frame_current)

    # instances
    for inst in self.depsgraph_instances(depsgraph):
        indirect_only = inst.parent.original.indirect_only_get(view_layer=depsgraph.view_layer)
        instance.sync(self.rpr_context, inst, indirect_only=indirect_only,
                      frame_current=scene.frame_current)

    # rpr_context parameters
    self.rpr_context.set_parameter(pyrpr.CONTEXT_PREVIEW, False)
    scene.rpr.export_ray_depth(self.rpr_context)
    self.rpr_context.texture_compression = scene.rpr.texture_compression

    # EXPORT CAMERA
    camera_key = object.key(scene.camera)   # current camera key
    rpr_camera = self.rpr_context.create_camera(camera_key)
    self.rpr_context.scene.set_camera(rpr_camera)
    # camera object should come from depsgraph; fall back to scene.camera
    camera_obj = depsgraph.objects.get(camera_key, None)
    if not camera_obj:
        camera_obj = scene.camera

    camera_data = camera.CameraData.init_from_camera(
        camera_obj.data, camera_obj.matrix_world,
        self.rpr_context.width / self.rpr_context.height)
    camera_data.export(rpr_camera)

    if self.rpr_context.do_motion_blur:
        rpr_camera.set_exposure(scene.camera.data.rpr.motion_blur_exposure)
        object.export_motion_blur(self.rpr_context, camera_key,
                                  object.get_transform(camera_obj))

    # adaptive subdivision will be limited to the current scene render size
    self.rpr_context.enable_aov(pyrpr.AOV_COLOR)
    self.rpr_context.sync_auto_adapt_subdivision()
    self.rpr_context.sync_portal_lights()

    # Exported scene will be rendered vertically flipped, flip it back
    self.rpr_context.set_parameter(pyrpr.CONTEXT_Y_FLIP, True)

    log('Finish sync')
def sync(self, depsgraph):
    """
    Prepare the final render: export objects, instances, camera, world,
    motion blur, particles, AOVs and render settings into the RPR context.

    Progress is reported through self.notify_status. The export returns
    early (self.is_synced stays False) if the user cancels the render
    (self.rpr_engine.test_break()).
    """
    log('Start syncing')

    # Preparations for syncing
    self.is_synced = False
    self.sync_time = time.perf_counter()

    scene = depsgraph.scene
    view_layer = depsgraph.view_layer
    material_override = view_layer.material_override

    self.render_layer_name = view_layer.name
    self.status_title = f"{scene.name}: {self.render_layer_name}"
    self.notify_status(0, "Start syncing")

    self.use_contour = scene.rpr.is_contour_used()

    self._init_rpr_context(scene)

    # render border as normalized ((min_x, min_y), (width, height));
    # full frame when border rendering is disabled
    border = ((0, 0), (1, 1)) if not scene.render.use_border else \
        ((scene.render.border_min_x, scene.render.border_min_y),
         (scene.render.border_max_x - scene.render.border_min_x,
          scene.render.border_max_y - scene.render.border_min_y))
    screen_width = int(scene.render.resolution_x * scene.render.resolution_percentage / 100)
    screen_height = int(scene.render.resolution_y * scene.render.resolution_percentage / 100)

    self.width = int(screen_width * border[1][0])
    self.height = int(screen_height * border[1][1])

    self.rpr_context.resize(self.width, self.height)

    if self.use_contour:
        scene.rpr.export_contour_mode(self.rpr_context)

    self.rpr_context.blender_data['depsgraph'] = depsgraph

    # EXPORT OBJECTS
    objects_len = len(depsgraph.objects)
    for i, obj in enumerate(self.depsgraph_objects(depsgraph)):
        self.notify_status(0, "Syncing object (%d/%d): %s" % (i, objects_len, obj.name))

        # the correct collection visibility info is stored in original object
        indirect_only = obj.original.indirect_only_get(view_layer=view_layer)
        object.sync(self.rpr_context, obj, indirect_only=indirect_only,
                    material_override=material_override,
                    frame_current=scene.frame_current,
                    use_contour=self.use_contour)

        if self.rpr_engine.test_break():
            log.warn("Syncing stopped by user termination")
            return

    # EXPORT INSTANCES
    instances_len = len(depsgraph.object_instances)
    last_instances_percent = 0
    self.notify_status(0, "Syncing instances 0%")

    for i, inst in enumerate(self.depsgraph_instances(depsgraph)):
        # report progress only when the integer percentage advances
        instances_percent = (i * 100) // instances_len
        if instances_percent > last_instances_percent:
            self.notify_status(0, f"Syncing instances {instances_percent}%")
            last_instances_percent = instances_percent

        indirect_only = inst.parent.original.indirect_only_get(view_layer=view_layer)
        instance.sync(self.rpr_context, inst, indirect_only=indirect_only,
                      material_override=material_override,
                      frame_current=scene.frame_current,
                      use_contour=self.use_contour)

        if self.rpr_engine.test_break():
            log.warn("Syncing stopped by user termination")
            return

    self.notify_status(0, "Syncing instances 100%")

    # EXPORT CAMERA
    camera_key = object.key(scene.camera)   # current camera key
    rpr_camera = self.rpr_context.create_camera(camera_key)
    self.rpr_context.scene.set_camera(rpr_camera)

    # Camera object should be taken from depsgrapgh objects.
    # Use bpy.scene.camera if none found
    camera_obj = depsgraph.objects.get(camera_key, None)
    if not camera_obj:
        camera_obj = scene.camera

    self.camera_data = camera.CameraData.init_from_camera(
        camera_obj.data, camera_obj.matrix_world, screen_width / screen_height, border)

    if scene.rpr.is_tile_render_available:
        if scene.camera.data.type == 'PANO':
            log.warn("Tiles rendering is not supported for Panoramic camera")
        else:
            # create adaptive subdivision camera to use total render area for calculations
            subdivision_camera_key = camera_key + ".RPR_ADAPTIVE_SUBDIVISION_CAMERA"
            subdivision_camera = self.rpr_context.create_camera(subdivision_camera_key)
            self.camera_data.export(subdivision_camera)
            self.rpr_context.scene.set_subdivision_camera(subdivision_camera)

            # apply tiles settings
            self.tile_size = (min(self.width, scene.rpr.tile_x),
                              min(self.height, scene.rpr.tile_y))
            self.tile_order = scene.rpr.tile_order
            self.rpr_context.resize(*self.tile_size)
    else:
        self.camera_data.export(rpr_camera)

    # Environment is synced once per frame
    if scene.world.is_evaluated:  # for some reason World data can came in unevaluated
        world_data = scene.world
    else:
        world_data = scene.world.evaluated_get(depsgraph)
    world_settings = world.sync(self.rpr_context, world_data)
    self.world_backplate = world_settings.backplate

    # SYNC MOTION BLUR
    # motion blur is skipped entirely when camera exposure is (close to) zero
    self.rpr_context.do_motion_blur = scene.render.use_motion_blur and \
        not math.isclose(scene.camera.data.rpr.motion_blur_exposure, 0.0)

    if self.rpr_context.do_motion_blur:
        self.sync_motion_blur(depsgraph)
        rpr_camera.set_exposure(scene.camera.data.rpr.motion_blur_exposure)
        self.set_motion_blur_mode(scene)

    # EXPORT PARTICLES
    # Note: particles should be exported after motion blur,
    # otherwise prev_location of particle will be (0, 0, 0)
    self.notify_status(0, "Syncing particles")
    for obj in self.depsgraph_objects(depsgraph):
        particle.sync(self.rpr_context, obj)

    # objects linked to scene as a collection are instanced, so walk thru them for particles
    for entry in self.depsgraph_instances(depsgraph):
        particle.sync(self.rpr_context, entry.instance_object)

    # EXPORT: AOVS, adaptive sampling, shadow catcher, denoiser
    enable_adaptive = scene.rpr.limits.noise_threshold > 0.0
    view_layer.rpr.export_aovs(view_layer, self.rpr_context, self.rpr_engine,
                               enable_adaptive, self.cryptomatte_allowed)

    if enable_adaptive:
        # if adaptive is enable turn on aov and settings
        self.rpr_context.enable_aov(pyrpr.AOV_VARIANCE)
        scene.rpr.limits.set_adaptive_params(self.rpr_context)

    # Image filter
    image_filter_settings = view_layer.rpr.denoiser.get_settings(scene)
    image_filter_settings['resolution'] = (self.width, self.height)
    self.setup_image_filter(image_filter_settings)

    # Shadow catcher
    if scene.rpr.render_quality != 'FULL':
        # catcher compositing is done by a background filter instead of the core
        self.rpr_context.sync_catchers(False)
        background_filter_settings = {
            'enable': scene.render.film_transparent,
            'resolution': (self.width, self.height),
        }
        self.setup_background_filter(background_filter_settings)
    else:
        self.rpr_context.sync_catchers(scene.render.film_transparent)

    # SET rpr_context parameters
    self.rpr_context.set_parameter(pyrpr.CONTEXT_PREVIEW, False)
    scene.rpr.export_ray_depth(self.rpr_context)
    scene.rpr.export_pixel_filter(self.rpr_context)

    self.render_samples, self.render_time = (scene.rpr.limits.max_samples,
                                             scene.rpr.limits.seconds)

    if scene.rpr.render_quality == 'FULL2':
        if self.use_contour:
            # NOTE(review): contour mode forces an update step of one sample —
            # presumably required by contour rendering; confirm
            self.render_update_samples = 1
        else:
            self.render_update_samples = scene.rpr.limits.update_samples_rpr2
    else:
        self.render_update_samples = scene.rpr.limits.update_samples

    if scene.rpr.use_render_stamp:
        self.render_stamp_text = self.prepare_scene_stamp_text(scene)

    self.sync_time = time.perf_counter() - self.sync_time

    self.is_synced = True
    self.notify_status(0, "Finish syncing")
    log('Finish sync')
def _do_sync_render(self, depsgraph):
    """
    Thread function for self.sync_render_thread. It always run during viewport render.
    If it doesn't render it waits for self.restart_render_event
    """

    def notify_status(info, status):
        """ Display export progress status """
        wrap_info = textwrap.fill(info, 120)
        self.rpr_engine.update_stats(status, wrap_info)
        log(status, wrap_info)

        # requesting blender to call draw()
        self.rpr_engine.tag_redraw()

    # local exception type used to unwind out of the nested render loops
    class FinishRender(Exception):
        pass

    try:
        # SYNCING OBJECTS AND INSTANCES
        notify_status("Starting...", "Sync")
        time_begin = time.perf_counter()

        # exporting objects
        frame_current = depsgraph.scene.frame_current
        material_override = depsgraph.view_layer.material_override
        objects_len = len(depsgraph.objects)
        for i, obj in enumerate(self.depsgraph_objects(depsgraph)):
            if self.is_finished:
                raise FinishRender

            time_sync = time.perf_counter() - time_begin
            notify_status(
                f"Time {time_sync:.1f} | Object ({i}/{objects_len}): {obj.name}", "Sync")

            # collection visibility info is stored on the original object
            indirect_only = obj.original.indirect_only_get(view_layer=depsgraph.view_layer)
            object.sync(self.rpr_context, obj, indirect_only=indirect_only,
                        material_override=material_override, frame_current=frame_current)

        # exporting instances
        instances_len = len(depsgraph.object_instances)
        last_instances_percent = 0
        for i, inst in enumerate(self.depsgraph_instances(depsgraph)):
            if self.is_finished:
                raise FinishRender

            # report progress only when the integer percentage advances
            instances_percent = (i * 100) // instances_len
            if instances_percent > last_instances_percent:
                time_sync = time.perf_counter() - time_begin
                notify_status(
                    f"Time {time_sync:.1f} | Instances {instances_percent}%", "Sync")
                last_instances_percent = instances_percent

            indirect_only = inst.parent.original.indirect_only_get(
                view_layer=depsgraph.view_layer)
            instance.sync(self.rpr_context, inst, indirect_only=indirect_only,
                          material_override=material_override, frame_current=frame_current)

        # shadow catcher
        self.rpr_context.sync_catchers(depsgraph.scene.render.film_transparent)

        self.is_synced = True

        # RENDERING
        notify_status("Starting...", "Render")

        is_adaptive = self.rpr_context.is_aov_enabled(pyrpr.AOV_VARIANCE)

        # Infinite cycle, which starts when scene has to be re-rendered.
        # It waits for restart_render_event be enabled.
        # Exit from this cycle is implemented through raising FinishRender
        # when self.is_finished be enabled from main thread.
        while True:
            self.restart_render_event.wait()

            if self.is_finished:
                raise FinishRender

            # preparations to start rendering
            iteration = 0
            time_begin = 0.0
            time_render = 0.0
            if is_adaptive:
                all_pixels = active_pixels = self.rpr_context.width * self.rpr_context.height
            is_last_iteration = False

            # this cycle renders each iteration
            while True:
                if self.is_finished:
                    raise FinishRender

                # adaptive sampling only kicks in after the minimum sample count
                is_adaptive_active = is_adaptive and iteration >= self.rpr_context.get_parameter(
                    pyrpr.CONTEXT_ADAPTIVE_SAMPLING_MIN_SPP)

                if self.restart_render_event.is_set():
                    # clears restart_render_event, prepares to start rendering
                    self.restart_render_event.clear()
                    iteration = 0

                    if self.is_resized:
                        if not self.rpr_context.gl_interop:
                            # When gl_interop is not enabled, than resize is better to do in
                            # this thread. This is important for hybrid.
                            with self.render_lock:
                                self.rpr_context.resize(self.width, self.height)
                        self.is_resized = False

                    self.rpr_context.sync_auto_adapt_subdivision()
                    self.rpr_context.sync_portal_lights()
                    time_begin = time.perf_counter()
                    log(f"Restart render [{self.width}, {self.height}]")

                # rendering
                with self.render_lock:
                    # a restart may have been requested while waiting for the lock
                    if self.restart_render_event.is_set():
                        break

                    self.rpr_context.set_parameter(pyrpr.CONTEXT_FRAMECOUNT, iteration)
                    self.rpr_context.render(restart=(iteration == 0))

                # resolving
                with self.resolve_lock:
                    self._resolve()

                self.is_rendered = True
                self.is_denoised = False
                iteration += 1

                # checking for last iteration
                # preparing information to show in viewport
                time_render_prev = time_render
                time_render = time.perf_counter() - time_begin
                iteration_time = time_render - time_render_prev

                # measure the second iteration to request a viewport resolution change
                if self.user_settings.adapt_viewport_resolution \
                        and not self.is_resolution_adapted \
                        and iteration == 2:
                    target_time = 1.0 / self.user_settings.viewport_samples_per_sec
                    self.requested_adapt_ratio = target_time / iteration_time

                if self.render_iterations > 0:
                    info_str = f"Time: {time_render:.1f} sec" \
                               f" | Iteration: {iteration}/{self.render_iterations}"
                else:
                    info_str = f"Time: {time_render:.1f}/{self.render_time} sec" \
                               f" | Iteration: {iteration}"

                if is_adaptive_active:
                    active_pixels = self.rpr_context.get_info(
                        pyrpr.CONTEXT_ACTIVE_PIXEL_COUNT, int)
                    adaptive_progress = max((all_pixels - active_pixels) / all_pixels, 0.0)
                    info_str += f" | Adaptive Sampling: {math.floor(adaptive_progress * 100)}%"

                # stop on iteration limit, time limit, or adaptive convergence
                if self.render_iterations > 0:
                    if iteration >= self.render_iterations:
                        is_last_iteration = True
                else:
                    if time_render >= self.render_time:
                        is_last_iteration = True
                if is_adaptive and active_pixels == 0:
                    is_last_iteration = True

                if is_last_iteration:
                    break

                notify_status(info_str, "Render")

            # notifying viewport that rendering is finished
            if is_last_iteration:
                time_render = time.perf_counter() - time_begin

                if self.image_filter:
                    notify_status(f"Time: {time_render:.1f} sec | Iteration: {iteration}"
                                  f" | Denoising...", "Render")

                    # applying denoising
                    with self.resolve_lock:
                        # re-check under the lock: the filter may have been removed
                        if self.image_filter:
                            self.update_image_filter_inputs()
                            self.image_filter.run()
                            self.is_denoised = True

                    time_render = time.perf_counter() - time_begin
                    notify_status(f"Time: {time_render:.1f} sec | Iteration: {iteration}"
                                  f" | Denoised", "Rendering Done")
                else:
                    notify_status(f"Time: {time_render:.1f} sec | Iteration: {iteration}",
                                  "Rendering Done")

    except FinishRender:
        log("Finish by user")

    except Exception as e:
        log.error(e, 'EXCEPTION:', traceback.format_exc())
        self.is_finished = True

        # notifying viewport about error
        notify_status(f"{e}.\nPlease see logs for more details.", "ERROR")

    log("Finish _do_sync_render")