def draw(self, context):
    settings = get_user_settings()
    devices = settings.viewport_devices

    layout = self.layout
    layout.use_property_split = True
    layout.use_property_decorate = False
    self.layout.enabled = settings.separate_viewport_devices

    if not pyrpr.Context.gpu_devices:
        col = layout.column(align=True)
        row = col.row()
        row.enabled = False
        row.prop(devices, 'cpu_state', text=pyrpr.Context.cpu_device['name'])
        col.prop(devices, 'cpu_threads')

    else:
        if pyrpr.Context.cpu_device:
            col = layout.column(align=True)
            col.enabled = context.scene.rpr.render_quality in ('FULL', 'FULL2')
            col.prop(devices, 'cpu_state', text=pyrpr.Context.cpu_device['name'])
            row = col.row()
            row.enabled = devices.cpu_state
            row.prop(devices, 'cpu_threads')

            layout.separator()

        col = layout.column(align=True)
        for i, gpu_device in enumerate(pyrpr.Context.gpu_devices):
            col.prop(devices, 'gpu_states', index=i, text=gpu_device['name'])

def update_render(self, scene: bpy.types.Scene, view_layer: bpy.types.ViewLayer):
    ''' update settings if changed while live
        returns True if restart needed '''
    restart = scene.rpr.export_render_mode(self.rpr_context)
    restart |= scene.rpr.export_ray_depth(self.rpr_context)
    restart |= scene.rpr.export_pixel_filter(self.rpr_context)

    render_iterations, render_time = (scene.rpr.viewport_limits.max_samples, 0)
    if self.render_iterations != render_iterations or self.render_time != render_time:
        self.render_iterations = render_iterations
        self.render_time = render_time
        restart = True

    restart |= scene.rpr.viewport_limits.set_adaptive_params(self.rpr_context)

    # image filter
    if self.setup_image_filter(self._get_image_filter_settings()):
        self.denoised_image = None
        restart = True

    restart |= self.setup_upscale_filter({
        'enable': get_user_settings().viewport_denoiser_upscale,
        'resolution': (self.width, self.height),
    })

    return restart

def draw(self, context):
    self.layout.use_property_split = True
    self.layout.use_property_decorate = False

    limits = context.scene.rpr.viewport_limits
    settings = get_user_settings()

    col = self.layout.column(align=True)
    row = col.row()
    row.prop(limits, 'min_samples')
    col.prop(limits, 'max_samples')
    row = col.row()
    row.prop(limits, 'noise_threshold', slider=True)
    if context.scene.rpr.render_quality == 'FULL2':
        row.enabled = False

    adapt_resolution = context.scene.rpr.render_quality != 'FULL2'
    col1 = col.column()
    col1.enabled = adapt_resolution
    col1.prop(settings, 'adapt_viewport_resolution')
    col1 = col.column(align=True)
    col1.enabled = settings.adapt_viewport_resolution and adapt_resolution
    col1.prop(settings, 'viewport_samples_per_sec', slider=True)
    col1.prop(settings, 'min_viewport_resolution_scale', slider=True)
    col.prop(settings, 'use_gl_interop')

    col.separator()
    col.prop(limits, 'preview_samples')
    col.prop(limits, 'preview_update_samples')

def execute(self, context):
    # iterate over all objects and find unsupported nodes
    baked_materials = []
    selected_object = context.active_object
    for obj in context.scene.objects:
        if obj.type != 'MESH':
            continue

        for material_slot in obj.material_slots:
            if material_slot.material.name in baked_materials:
                continue

            nt = material_slot.material.node_tree
            if nt is None:
                continue

            nodes_to_bake = []
            for node in nt.nodes:
                if not get_node_parser_class(node.bl_idname):
                    nodes_to_bake.append(node)

            settings = get_user_settings()
            resolution = settings.bake_resolution

            old_selection = obj.select_get()
            obj.select_set(True)
            bake_nodes(nt, nodes_to_bake, material_slot.material, int(resolution), obj)
            obj.select_set(old_selection)

            baked_materials.append(material_slot.material.name)

    selected_object.select_set(True)
    return {'FINISHED'}

def update_render_quality(self, context):
    if self.render_quality in ('FULL', 'FULL2'):
        return

    settings = get_user_settings()
    settings.final_devices.cpu_state = False
    settings.viewport_devices.cpu_state = False

def __init__(self, rpr_engine):
    super().__init__(rpr_engine)

    self.gl_texture = gl.GLTexture()
    self.viewport_settings: ViewportSettings = None
    self.world_settings: world.WorldData = None
    self.shading_data: ShadingData = None
    self.view_layer_data: ViewLayerSettings = None

    self.sync_render_thread: threading.Thread = None
    self.restart_render_event = threading.Event()
    self.render_lock = threading.Lock()

    self.is_finished = False
    self.is_synced = False
    self.is_rendered = False
    self.is_resized = False

    self.denoised_image = None
    self.upscaled_image = None

    self.requested_adapt_ratio = None
    self.is_resolution_adapted = False

    self.width = 1
    self.height = 1

    self.render_iterations = 0
    self.render_time = 0

    self.view_mode = None
    self.space_data = None
    self.selected_objects = None

    self.user_settings = get_user_settings()

def draw(self, context):
    self.layout.use_property_split = True
    self.layout.use_property_decorate = False

    settings = get_user_settings()
    self.layout.prop(settings, 'bake_resolution')
    self.layout.operator('rpr.bake_all_nodes')

def _get_image_filter_settings(self):
    return {
        'enable': get_user_settings().viewport_denoiser_upscale,
        'resolution': (self.width, self.height),
        'filter_type': 'ML',
        'ml_color_only': False,
        'ml_use_fp16_compute_type': True,
    }

def athena_send(self, data: dict):
    if not (utils.IS_WIN or utils.IS_MAC):
        return

    settings = get_user_settings()
    if not settings.collect_stat:
        return

    from rprblender.utils import athena
    if athena.is_disabled():
        return

    devices = settings.final_devices
    data['CPU Enabled'] = devices.cpu_state
    for i, gpu_state in enumerate(devices.available_gpu_states):
        data[f'GPU{i} Enabled'] = gpu_state

    data['Resolution'] = (self.width, self.height)

    data['Number Lights'] = sum(1 for o in self.rpr_context.scene.objects
                                if isinstance(o, pyrpr.Light))
    data['AOVs Enabled'] = tuple(
        f'RPR_{v}' for v in dir(pyrpr)
        if v.startswith('AOV_') and getattr(pyrpr, v) in self.rpr_context.frame_buffers_aovs
    )

    data['Ray Depth'] = self.rpr_context.get_parameter(pyrpr.CONTEXT_MAX_RECURSION)
    data['Shadow Ray Depth'] = self.rpr_context.get_parameter(pyrpr.CONTEXT_MAX_DEPTH_SHADOW)
    data['Reflection Ray Depth'] = \
        self.rpr_context.get_parameter(pyrpr.CONTEXT_MAX_DEPTH_DIFFUSE, 0) + \
        self.rpr_context.get_parameter(pyrpr.CONTEXT_MAX_DEPTH_GLOSSY, 0)
    data['Refraction Ray Depth'] = \
        self.rpr_context.get_parameter(pyrpr.CONTEXT_MAX_DEPTH_REFRACTION, 0) + \
        self.rpr_context.get_parameter(pyrpr.CONTEXT_MAX_DEPTH_GLOSSY_REFRACTION, 0)

    data['Num Polygons'] = sum(
        (o.mesh.poly_count if isinstance(o, pyrpr.Instance) else o.poly_count)
        for o in self.rpr_context.objects.values() if isinstance(o, pyrpr.Shape)
    )
    data['Num Textures'] = len(self.rpr_context.images)

    # temporarily skip getting texture sizes with hybrid,
    # until it is fixed on the hybrid core side
    from .context_hybrid import RPRContext as RPRContextHybrid
    if not isinstance(self.rpr_context, RPRContextHybrid):
        data['Textures Size'] = sum(im.size_byte for im in self.rpr_context.images.values()) \
                                // (1024 * 1024)  # in MB

    data['RIF Type'] = self.image_filter.settings['filter_type'] if self.image_filter else None

    self._update_athena_data(data)

    # sending data
    athena.send_data(data)

def athena_send(self, data: dict):
    if not (utils.IS_WIN or utils.IS_MAC):
        return

    settings = get_user_settings()
    if not settings.collect_stat:
        return

    devices = settings.final_devices
    data['CPU Enabled'] = devices.cpu_state
    for i, gpu_state in enumerate(devices.available_gpu_states):
        data[f'GPU{i} Enabled'] = gpu_state

    data['Resolution'] = (self.width, self.height)

    quality = -1 if utils.IS_MAC else \
        self.rpr_context.get_parameter(pyrpr.CONTEXT_RENDER_QUALITY, -1)
    data['Quality'] = "full" if utils.IS_MAC else {
        -1: "full",
        pyrpr.RENDER_QUALITY_HIGH: "high",
        pyrpr.RENDER_QUALITY_MEDIUM: "medium",
        pyrpr.RENDER_QUALITY_LOW: "low"
    }[quality]

    data['Number Lights'] = sum(1 for o in self.rpr_context.scene.objects
                                if isinstance(o, pyrpr.Light))
    data['AOVs Enabled'] = tuple(
        f'RPR_{v}' for v in dir(pyrpr)
        if v.startswith('AOV_') and getattr(pyrpr, v) in self.rpr_context.frame_buffers_aovs
    )

    data['Ray Depth'] = self.rpr_context.get_parameter(pyrpr.CONTEXT_MAX_RECURSION)
    data['Shadow Ray Depth'] = self.rpr_context.get_parameter(pyrpr.CONTEXT_MAX_DEPTH_SHADOW)
    data['Reflection Ray Depth'] = \
        self.rpr_context.get_parameter(pyrpr.CONTEXT_MAX_DEPTH_DIFFUSE, 0) + \
        self.rpr_context.get_parameter(pyrpr.CONTEXT_MAX_DEPTH_GLOSSY, 0)
    data['Refraction Ray Depth'] = \
        self.rpr_context.get_parameter(pyrpr.CONTEXT_MAX_DEPTH_REFRACTION, 0) + \
        self.rpr_context.get_parameter(pyrpr.CONTEXT_MAX_DEPTH_GLOSSY_REFRACTION, 0)

    data['Num Polygons'] = sum(
        (o.mesh.poly_count if isinstance(o, pyrpr.Instance) else o.poly_count)
        for o in self.rpr_context.objects.values() if isinstance(o, pyrpr.Shape)
    )
    data['Num Textures'] = len(self.rpr_context.images)
    data['Textures Size'] = sum(im.size_byte for im in self.rpr_context.images.values()) \
                            // (1024 * 1024)  # in MB
    data['RIF Type'] = self.image_filter.settings['filter_type'] if self.image_filter else None

    # sending data
    from rprblender.utils import athena
    athena.send_data(data)

def prepare_scene_stamp_text(self, scene):
    """ Fill stamp with static scene and render devices info that user can ask for """
    text = str(scene.rpr.render_stamp)
    text = text.replace("%i", socket.gethostname())

    lights_count = len([
        e for e in self.rpr_context.objects.values()
        if isinstance(e, pyrpr.Light)
    ])
    text = text.replace("%sl", str(lights_count))

    objects_count = len([
        e for e in self.rpr_context.objects.values()
        if isinstance(e, (pyrpr.Curve, pyrpr.Shape, pyrpr.HeteroVolume,))
           and hasattr(e, 'is_visible') and e.is_visible
    ])
    text = text.replace("%so", str(objects_count))

    cpu_name = pyrpr.Context.cpu_device['name']
    text = text.replace("%c", cpu_name)

    selected_gpu_names = ''
    settings = get_user_settings()
    devices = settings.final_devices
    for i, gpu_state in enumerate(devices.available_gpu_states):
        if gpu_state:
            name = pyrpr.Context.gpu_devices[i]['name']
            if selected_gpu_names:
                selected_gpu_names += f" + {name}"
            else:
                selected_gpu_names += name

    hardware = ''
    render_mode = ''
    if selected_gpu_names:
        hardware = selected_gpu_names
        render_mode = "GPU"
        if devices.cpu_state:
            hardware += " / "
            render_mode += " + "
    if devices.cpu_state:
        hardware += cpu_name
        render_mode = render_mode + "CPU"

    text = text.replace("%g", selected_gpu_names)
    text = text.replace("%r", render_mode)
    text = text.replace("%h", hardware)

    ver = bl_info['version']
    text = text.replace("%b", f"v{ver[0]}.{ver[1]}.{ver[2]}")

    return text

def execute(self, context):
    space = context.space_data
    nt = space.node_tree
    nodes_selected = context.selected_nodes

    settings = get_user_settings()
    resolution = settings.bake_resolution

    bake_nodes(nt, nodes_selected, context.material, int(resolution),
               bpy.context.active_object)

    return {'FINISHED'}

def set_render_device(render_mode):
    render_device_settings = get_user_settings().final_devices
    if render_mode == 'dual':
        render_device_settings.gpu_states[0] = True
        set_value(render_device_settings, 'cpu_state', True)
    elif render_mode == 'cpu':
        set_value(render_device_settings, 'cpu_state', True)
        render_device_settings.gpu_states[0] = False
    elif render_mode == 'gpu':
        set_value(render_device_settings, 'cpu_state', False)
        render_device_settings.gpu_states[0] = True

    device_name = pyrpr.Context.gpu_devices[0]['name']
    return device_name

def draw(self, context):
    settings = get_user_settings()
    rpr = context.scene.rpr

    layout = self.layout
    layout.use_property_split = True
    layout.use_property_decorate = False

    layout.prop(rpr, 'log_min_level')

    if utils.IS_WIN or utils.IS_MAC:
        layout.prop(settings, 'collect_stat')

    col = layout.column(align=True)
    col.prop(rpr, 'trace_dump')
    row = col.row()
    row.enabled = rpr.trace_dump
    row.use_property_split = False
    row.prop(rpr, 'trace_dump_folder', text="")

def draw(self, context):
    self.layout.use_property_split = True
    self.layout.use_property_decorate = False

    limits = context.scene.rpr.viewport_limits
    settings = get_user_settings()

    col = self.layout.column(align=True)
    row = col.row()
    row.prop(limits, 'min_samples')
    col.prop(limits, 'max_samples')
    row = col.row()
    row.prop(limits, 'noise_threshold', slider=True)
    col.prop(limits, 'limit_viewport_resolution')
    col.prop(settings, 'use_gl_interop')

    col.separator()
    col.prop(limits, 'preview_samples')
    col.prop(limits, 'preview_update_samples')

def draw_header(self, context):
    settings = get_user_settings()
    self.layout.prop(settings, "separate_viewport_devices", text="")
    self.layout.active = settings.separate_viewport_devices

def get_devices(self, is_final_engine=True):
    """ Get render devices settings for current mode """
    devices_settings = get_user_settings()
    return devices_settings.final_devices

def save_json(self, filepath, scene, view_layer):
    ''' save scene settings to json at filepath '''
    output_base = os.path.splitext(filepath)[0]

    devices = get_user_settings().final_devices
    use_contour = scene.rpr.is_contour_used and not devices.cpu_state

    data = {
        'width': int(scene.render.resolution_x * scene.render.resolution_percentage / 100),
        'height': int(scene.render.resolution_y * scene.render.resolution_percentage / 100),
        'iterations': scene.rpr.limits.max_samples,
        'batchsize': scene.rpr.limits.update_samples,
        'output': output_base + '.png',
        'output.json': output_base + 'output.json'
    }

    # map of aov key to string
    aov_map = {
        pyrpr.AOV_AO: 'ao',
        pyrpr.AOV_BACKGROUND: 'background',
        pyrpr.AOV_COLOR: 'color',
        pyrpr.AOV_DEPTH: 'depth',
        pyrpr.AOV_DIFFUSE_ALBEDO: 'albedo.diffuse',
        pyrpr.AOV_DIRECT_DIFFUSE: 'direct.diffuse',
        pyrpr.AOV_DIRECT_ILLUMINATION: 'direct.illumination',
        pyrpr.AOV_DIRECT_REFLECT: 'direct.reflect',
        pyrpr.AOV_EMISSION: 'emission',
        pyrpr.AOV_GEOMETRIC_NORMAL: 'normal.geom',
        pyrpr.AOV_INDIRECT_DIFFUSE: 'indirect.diffuse',
        pyrpr.AOV_INDIRECT_ILLUMINATION: 'indirect.illumination',
        pyrpr.AOV_INDIRECT_REFLECT: 'indirect.reflect',
        pyrpr.AOV_LIGHT_GROUP0: 'light.group0',
        pyrpr.AOV_LIGHT_GROUP1: 'light.group1',
        pyrpr.AOV_LIGHT_GROUP2: 'light.group2',
        pyrpr.AOV_LIGHT_GROUP3: 'light.group3',
        pyrpr.AOV_MATERIAL_ID: 'material.id',
        pyrpr.AOV_OBJECT_GROUP_ID: 'group.id',
        pyrpr.AOV_OBJECT_ID: 'object.id',
        pyrpr.AOV_OPACITY: 'opacity',
        pyrpr.AOV_REFRACT: 'refract',
        pyrpr.AOV_SHADING_NORMAL: 'normal',
        pyrpr.AOV_SHADOW_CATCHER: 'shadow.catcher',
        pyrpr.AOV_REFLECTION_CATCHER: 'reflection.catcher',
        pyrpr.AOV_UV: 'uv',
        pyrpr.AOV_VELOCITY: 'velocity',
        pyrpr.AOV_VARIANCE: 'variance',
        pyrpr.AOV_VOLUME: 'volume',
        pyrpr.AOV_WORLD_COORDINATE: 'world.coordinate'
    }

    aovs = {}
    for i, enable_aov in enumerate(view_layer.rpr.enable_aovs):
        aov = view_layer.rpr.aovs_info[i]
        aov_type = aov['rpr']
        if enable_aov or (use_contour and aov_type in CONTOUR_AOVS):
            aov_name = aov_map[aov_type]
            aovs[aov_name] = output_base + '.' + aov_name + '.png'
    data['aovs'] = aovs

    # set devices based on final render
    device_settings = {}
    device_settings['cpu'] = int(devices.cpu_state)
    device_settings['threads'] = devices.cpu_threads
    for i, gpu_state in enumerate(devices.available_gpu_states):
        device_settings[f'gpu{i}'] = int(gpu_state)

    if use_contour:
        data['contour'] = {
            "object.id": int(scene.rpr.contour_use_object_id),
            "material.id": int(scene.rpr.contour_use_material_id),
            "normal": int(scene.rpr.contour_use_shading_normal),
            "threshold.normal": scene.rpr.contour_normal_threshold,
            "linewidth.objid": scene.rpr.contour_object_id_line_width,
            "linewidth.matid": scene.rpr.contour_material_id_line_width,
            "linewidth.normal": scene.rpr.contour_shading_normal_line_width,
            "antialiasing": scene.rpr.contour_antialiasing,
            "debug": int(scene.rpr.contour_use_shading_normal)
        }

    data['context'] = device_settings

    with open(filepath, 'w') as outfile:
        json.dump(data, outfile)

def sync(self, context, depsgraph):
    log('Start sync')

    scene = depsgraph.scene
    viewport_limits = scene.rpr.viewport_limits
    view_layer = depsgraph.view_layer
    settings = get_user_settings()
    use_gl_interop = settings.use_gl_interop and not scene.render.film_transparent

    scene.rpr.init_rpr_context(self.rpr_context, is_final_engine=False,
                               use_gl_interop=use_gl_interop)
    self.rpr_context.blender_data['depsgraph'] = depsgraph

    self.shading_data = ShadingData(context)
    self.view_layer_data = ViewLayerSettings(view_layer)

    # set the initial render resolution to (1, 1) just for AOV creation;
    # it will be resized to the correct resolution in the draw() function
    self.rpr_context.resize(1, 1)

    self.rpr_context.enable_aov(pyrpr.AOV_COLOR)

    if viewport_limits.noise_threshold > 0.0:
        # if adaptive sampling is enabled, turn on the AOV and settings
        self.rpr_context.enable_aov(pyrpr.AOV_VARIANCE)
        viewport_limits.set_adaptive_params(self.rpr_context)

    self.rpr_context.scene.set_name(scene.name)

    self.world_settings = self._get_world_settings(depsgraph)
    self.world_settings.export(self.rpr_context)

    rpr_camera = self.rpr_context.create_camera()
    rpr_camera.set_name("Camera")
    self.rpr_context.scene.set_camera(rpr_camera)

    # image filter
    self.setup_image_filter(self._get_image_filter_settings(scene))

    # upscale filter
    self.setup_upscale_filter({
        'enable': scene.rpr.viewport_upscale,
        'resolution': (self.width, self.height),
    })

    # other context settings
    self.rpr_context.set_parameter(pyrpr.CONTEXT_PREVIEW, True)
    self.rpr_context.set_parameter(pyrpr.CONTEXT_ITERATIONS, 1)
    scene.rpr.export_render_mode(self.rpr_context)
    scene.rpr.export_ray_depth(self.rpr_context)
    self.rpr_context.texture_compression = scene.rpr.texture_compression
    scene.rpr.export_pixel_filter(self.rpr_context)

    self.render_iterations, self.render_time = (viewport_limits.max_samples, 0)

    self.is_finished = False
    self.restart_render_event.clear()

    self.view_mode = context.mode
    self.space_data = context.space_data
    self.selected_objects = context.selected_objects

    self.sync_render_thread = threading.Thread(target=self._do_sync_render,
                                               args=(depsgraph,))
    self.sync_render_thread.start()

    log('Finish sync')

def get_devices(self, is_final_engine=True):
    """ Get render devices settings for current mode """
    devices_settings = get_user_settings()
    if is_final_engine or not devices_settings.separate_viewport_devices:
        return devices_settings.final_devices

    return devices_settings.viewport_devices

def save_json(self, filepath, scene, view_layer):
    ''' save scene settings to json at filepath '''
    output_base = os.path.splitext(filepath)[0]

    data = {
        'width': int(scene.render.resolution_x * scene.render.resolution_percentage / 100),
        'height': int(scene.render.resolution_y * scene.render.resolution_percentage / 100),
        'iterations': scene.rpr.limits.max_samples,
        'batchsize': scene.rpr.limits.update_samples,
        'output': output_base + '.png',
        'output.json': output_base + 'output.json'
    }

    # map of aov key to string
    aov_map = {
        pyrpr.AOV_AO: 'ao',
        pyrpr.AOV_BACKGROUND: 'background',
        pyrpr.AOV_COLOR: 'color',
        pyrpr.AOV_DEPTH: 'depth',
        pyrpr.AOV_DIFFUSE_ALBEDO: 'diffuse_albedo',
        pyrpr.AOV_DIRECT_DIFFUSE: 'direct_diffuse',
        pyrpr.AOV_DIRECT_ILLUMINATION: 'direct_illumination',
        pyrpr.AOV_DIRECT_REFLECT: 'direct_reflect',
        pyrpr.AOV_EMISSION: 'emission',
        pyrpr.AOV_GEOMETRIC_NORMAL: 'geometric_normal',
        pyrpr.AOV_INDIRECT_DIFFUSE: 'indirect_diffuse',
        pyrpr.AOV_INDIRECT_ILLUMINATION: 'indirect_illumination',
        pyrpr.AOV_INDIRECT_REFLECT: 'indirect_reflect',
        pyrpr.AOV_LIGHT_GROUP0: 'light_group0',
        pyrpr.AOV_LIGHT_GROUP1: 'light_group1',
        pyrpr.AOV_LIGHT_GROUP2: 'light_group2',
        pyrpr.AOV_LIGHT_GROUP3: 'light_group3',
        pyrpr.AOV_MATERIAL_IDX: 'material_idx',
        pyrpr.AOV_OBJECT_GROUP_ID: 'object_group_id',
        pyrpr.AOV_OBJECT_ID: 'object_id',
        pyrpr.AOV_OPACITY: 'opacity',
        pyrpr.AOV_REFRACT: 'refract',
        pyrpr.AOV_SHADING_NORMAL: 'shading_normal',
        pyrpr.AOV_SHADOW_CATCHER: 'shadow_catcher',
        pyrpr.AOV_UV: 'uv',
        pyrpr.AOV_VELOCITY: 'velocity',
        pyrpr.AOV_VOLUME: 'volume',
        pyrpr.AOV_WORLD_COORDINATE: 'world_coordinate'
    }

    aovs = {}
    for i, enable_aov in enumerate(view_layer.rpr.enable_aovs):
        if enable_aov:
            aov = view_layer.rpr.aovs_info[i]
            aov_name = aov_map[aov['rpr']]
            aovs[aov_name] = output_base + '.' + aov_name + '.png'
    data['aovs'] = aovs

    # set devices based on final render
    device_settings = {}
    devices = get_user_settings().final_devices
    device_settings['cpu'] = int(devices.cpu_state)
    device_settings['threads'] = devices.cpu_threads
    for i, gpu_state in enumerate(devices.available_gpu_states):
        device_settings[f'gpu{i}'] = int(gpu_state)

    data['context'] = device_settings

    with open(filepath, 'w') as outfile:
        json.dump(data, outfile)

def sync(self, context, depsgraph):
    log('Start sync')

    scene = depsgraph.scene
    viewport_limits = scene.rpr.viewport_limits
    view_layer = depsgraph.view_layer
    settings = get_user_settings()
    use_gl_interop = settings.use_gl_interop and not scene.render.film_transparent

    scene.rpr.init_rpr_context(self.rpr_context, is_final_engine=False,
                               use_gl_interop=use_gl_interop)
    self.rpr_context.blender_data['depsgraph'] = depsgraph

    self.shading_data = ShadingData(context)
    self.view_layer_data = ViewLayerSettings(view_layer)

    # getting initial render resolution
    viewport_settings = ViewportSettings(context)
    width, height = viewport_settings.width, viewport_settings.height
    if width * height == 0:
        # if width or height is 0, set the size to (1, 1) to be able to set AOVs
        width, height = 1, 1

    self.rpr_context.resize(width, height)
    if not self.rpr_context.gl_interop:
        self.gl_texture = gl.GLTexture(width, height)

    self.rpr_context.enable_aov(pyrpr.AOV_COLOR)

    if viewport_limits.noise_threshold > 0.0:
        # if adaptive sampling is enabled, turn on the AOV and settings
        self.rpr_context.enable_aov(pyrpr.AOV_VARIANCE)
        viewport_limits.set_adaptive_params(self.rpr_context)

    self.rpr_context.scene.set_name(scene.name)

    self.world_settings = self._get_world_settings(depsgraph)
    self.world_settings.export(self.rpr_context)

    rpr_camera = self.rpr_context.create_camera()
    rpr_camera.set_name("Camera")
    self.rpr_context.scene.set_camera(rpr_camera)

    # image filter
    image_filter_settings = view_layer.rpr.denoiser.get_settings(scene, False)
    image_filter_settings['resolution'] = (self.rpr_context.width, self.rpr_context.height)
    self.setup_image_filter(image_filter_settings)

    # other context settings
    self.rpr_context.set_parameter(pyrpr.CONTEXT_PREVIEW, True)
    self.rpr_context.set_parameter(pyrpr.CONTEXT_ITERATIONS, 1)
    scene.rpr.export_render_mode(self.rpr_context)
    scene.rpr.export_ray_depth(self.rpr_context)
    scene.rpr.export_pixel_filter(self.rpr_context)

    self.render_iterations, self.render_time = (viewport_limits.max_samples, 0)

    self.is_finished = False
    self.restart_render_event.clear()

    self.sync_render_thread = threading.Thread(target=self._do_sync_render,
                                               args=(depsgraph,))
    self.sync_render_thread.start()

    log('Finish sync')