    def _render_tiles(self):
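        """Render the scene tile by tile, updating the Blender render result after each tile."""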
        athena_data = {}

        tile_iterator = utils.tile_iterator(self.tile_order, self.width,
                                            self.height, *self.tile_size)
        tiles_number = tile_iterator.len
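        # adaptive sampling is used when the variance AOV is enabled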
        is_adaptive = self.rpr_context.is_aov_enabled(pyrpr.AOV_VARIANCE)

        rpr_camera = self.rpr_context.scene.camera

        time_begin = time.perf_counter()
        athena_data['Start Time'] = datetime.datetime.utcnow().strftime(
            "%Y-%m-%d %H:%M:%S.%f")
        athena_data['End Status'] = "successful"
        progress = 0.0

        for tile_index, (tile_pos, tile_size) in enumerate(tile_iterator()):
            if self.rpr_engine.test_break():
                athena_data['End Status'] = "cancelled"
                break

            log(f"Render tile {tile_index} / {tiles_number}: [{tile_pos}, {tile_size}]"
                )

            tile = ((tile_pos[0] / self.width, tile_pos[1] / self.height),
                    (tile_size[0] / self.width, tile_size[1] / self.height))
            # set camera for tile
            self.camera_data.export(rpr_camera, tile=tile)
            self.rpr_context.resize(*tile_size)

            # export backplate section for tile if backplate present
            if self.world_backplate:
                self.world_backplate.export(self.rpr_context,
                                            (self.width, self.height), tile)

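            # per-tile sample counter; for adaptive sampling also track how many pixels are still active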
            sample = 0
            if is_adaptive:
                all_pixels = active_pixels = self.rpr_context.width * self.rpr_context.height

            render_iteration = 0
            while True:
                if self.rpr_engine.test_break():
                    break

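                # if less than update_samples left, use the remainder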
                update_samples = min(self.render_update_samples,
                                     self.render_samples - sample)
                self.current_render_time = time.perf_counter() - time_begin
                progress = (tile_index +
                            sample / self.render_samples) / tiles_number
                info_str = f"Render Time: {self.current_render_time:.1f} sec"\
                           f" | Tile: {tile_index}/{tiles_number}"\
                           f" | Samples: {sample}/{self.render_samples}"
                log_str = f"  samples: {sample} +{update_samples} / {self.render_samples}"\
                    f", progress: {progress * 100:.1f}%, time: {self.current_render_time:.2f}"

                is_adaptive_active = is_adaptive and sample >= \
                                     self.rpr_context.get_parameter(pyrpr.CONTEXT_ADAPTIVE_SAMPLING_MIN_SPP)
                if is_adaptive_active:
                    adaptive_progress = max(
                        (all_pixels - active_pixels) / all_pixels, 0.0)
                    progress = max(progress, (tile_index + adaptive_progress) /
                                   tiles_number)
                    info_str += f" | Adaptive Sampling: {adaptive_progress * 100:.0f}%"
                    log_str += f", active_pixels: {active_pixels}"

                self.notify_status(progress, info_str)
                log(log_str)

                self.rpr_context.set_parameter(pyrpr.CONTEXT_ITERATIONS,
                                               update_samples)
                self.rpr_context.set_parameter(pyrpr.CONTEXT_FRAMECOUNT,
                                               render_iteration)
                self.rpr_context.render(restart=(sample == 0))

                sample += update_samples

                self.rpr_context.resolve()
                self.update_render_result(tile_pos,
                                          tile_size,
                                          layer_name=self.render_layer_name)

                # store maximum actual number of used samples for render stamp info
                self.current_sample = max(self.current_sample, sample)

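                # with adaptive sampling, finish the tile once no active pixels remain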
                if is_adaptive_active:
                    active_pixels = self.rpr_context.get_info(
                        pyrpr.CONTEXT_ACTIVE_PIXEL_COUNT, int)
                    if active_pixels == 0:
                        break

                if sample == self.render_samples:
                    break

                render_iteration += 1
                if render_iteration > 1 and self.render_update_samples < MAX_RENDER_ITERATIONS and not self.use_contour:
                    # progressively increase update samples (doubling) while below MAX_RENDER_ITERATIONS
                    self.render_update_samples *= 2

            if self.image_filter and not self.rpr_engine.test_break():
                self.update_image_filter_inputs(tile_pos)

        if self.image_filter and not self.rpr_engine.test_break():
            self.notify_status(1.0, "Denoising final image")

            # getting already rendered images for every render pass
            result = self.rpr_engine.get_result()
            render_passes = result.layers[self.render_layer_name].passes
            length = sum((len(p.rect) * p.channels for p in render_passes))
            images = np.empty(length, dtype=np.float32)
            render_passes.foreach_get('rect', images)

            # begin a new render result and write the denoised pass data back into it
            result = self.rpr_engine.begin_result(0,
                                                  0,
                                                  self.width,
                                                  self.height,
                                                  layer=self.render_layer_name)

            render_passes = result.layers[0].passes
            pos = 0
            for p in render_passes:
                length = len(p.rect) * p.channels

                # update only the Combined pass
                if p.name == "Combined":
                    self.image_filter.run()
                    image = self.image_filter.get_data()
                    images[pos:pos + length] = image.flatten()
                    break

                pos += length

            render_passes.foreach_set('rect', images)

            self.rpr_engine.end_result(result)

        if not self.rpr_engine.test_break():
            self.apply_render_stamp_to_image()

        athena_data['Stop Time'] = datetime.datetime.utcnow().strftime(
            "%Y-%m-%d %H:%M:%S.%f")
        athena_data['Samples'] = round(self.render_samples * progress)

        log.info(f"Scene synchronization time:",
                 perfcounter_to_str(self.sync_time))
        log.info(f"Render time:", perfcounter_to_str(self.current_render_time))

        self.athena_send(athena_data)

    def _render(self):
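        """Progressively render the whole frame, updating the Blender render result after each iteration."""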
        athena_data = {}

        time_begin = time.perf_counter()
        athena_data['Start Time'] = datetime.datetime.utcnow().strftime(
            "%Y-%m-%d %H:%M:%S.%f")
        athena_data['End Status'] = "successful"

        self.current_sample = 0
        is_adaptive = self.rpr_context.is_aov_enabled(pyrpr.AOV_VARIANCE)
        if is_adaptive:
            all_pixels = active_pixels = self.rpr_context.width * self.rpr_context.height

        while True:
            if self.rpr_engine.test_break():
                athena_data['End Status'] = "cancelled"
                break

            self.current_render_time = time.perf_counter() - time_begin
            is_adaptive_active = is_adaptive and self.current_sample >= \
                                 self.rpr_context.get_parameter(pyrpr.CONTEXT_ADAPTIVE_SAMPLING_MIN_SPP)

            # if less than update_samples left, use the remainder
            update_samples = min(self.render_update_samples,
                                 self.render_samples - self.current_sample)

            # report elapsed time against the time limit, if one is set
            time_str = f"{self.current_render_time:.1f}/{self.render_time}" if self.render_time \
                       else f"{self.current_render_time:.1f}"

            # progress is the greater of the sample fraction and the time fraction (if a time limit is set)
            progress = max(
                self.current_sample / self.render_samples,
                self.current_render_time /
                self.render_time if self.render_time else 0)
            info_str = f"Render Time: {time_str} sec | "\
                       f"Samples: {self.current_sample}/{self.render_samples}"
            log_str = f"  samples: {self.current_sample} +{update_samples} / {self.render_samples}"\
                      f", progress: {progress * 100:.1f}%, time: {self.current_render_time:.2f}"
            if is_adaptive_active:
                adaptive_progress = max(
                    (all_pixels - active_pixels) / all_pixels, 0.0)

                progress = max(progress, adaptive_progress)
                info_str += f" | Adaptive Sampling: {math.floor(adaptive_progress * 100)}%"
                log_str += f", active_pixels: {active_pixels}"

            self.notify_status(progress, info_str)

            log(log_str)

            self.rpr_context.set_parameter(pyrpr.CONTEXT_ITERATIONS,
                                           update_samples)
            self.rpr_context.set_parameter(pyrpr.CONTEXT_FRAMECOUNT,
                                           self.render_iteration)
            self.rpr_context.render(restart=(self.current_sample == 0))

            self.current_sample += update_samples

            self.rpr_context.resolve()
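            # run the background filter, if configured, before updating the render result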
            if self.background_filter:
                self.update_background_filter_inputs()
                self.background_filter.run()
            self.update_render_result((0, 0), (self.width, self.height),
                                      layer_name=self.render_layer_name)

            # stop at whichever comes first:
            # max samples or max time if enabled or active_pixels == 0
            if is_adaptive_active:
                active_pixels = self.rpr_context.get_info(
                    pyrpr.CONTEXT_ACTIVE_PIXEL_COUNT, int)
                if active_pixels == 0:
                    break

            if self.current_sample == self.render_samples:
                break

            if self.render_time and self.current_render_time >= self.render_time:
                break

            self.render_iteration += 1
            if self.render_iteration > 1 and self.render_update_samples < MAX_RENDER_ITERATIONS and not self.use_contour:
                # progressively increase update samples (doubling) while below MAX_RENDER_ITERATIONS
                self.render_update_samples *= 2

        if self.image_filter:
            self.notify_status(1.0, "Denoising final image")
            self.update_image_filter_inputs()
            self.image_filter.run()
            self.update_render_result((0, 0), (self.width, self.height),
                                      layer_name=self.render_layer_name,
                                      apply_image_filter=True)

        self.apply_render_stamp_to_image()

        athena_data['Stop Time'] = datetime.datetime.utcnow().strftime(
            "%Y-%m-%d %H:%M:%S.%f")
        athena_data['Samples'] = self.current_sample

        log.info(f"Scene synchronization time:",
                 perfcounter_to_str(self.sync_time))
        log.info(f"Render time:", perfcounter_to_str(self.current_render_time))
        self.athena_send(athena_data)

    def _render_contour(self):
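        """Render the contour (outline) pass using the GPU contour integrator."""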
        log(f"Doing Outline Pass")

        # set contour settings
        self.rpr_context.set_parameter(pyrpr.CONTEXT_GPUINTEGRATOR,
                                       "gpucontour")

        # enable contour aovs
        self.rpr_context.disable_aovs()
        self.rpr_context.resize(self.width, self.height)

        self.rpr_context.enable_aov(pyrpr.AOV_COLOR)
        self.rpr_context.enable_aov(pyrpr.AOV_OBJECT_ID)
        self.rpr_context.enable_aov(pyrpr.AOV_MATERIAL_ID)
        self.rpr_context.enable_aov(pyrpr.AOV_SHADING_NORMAL)

        # setting camera
        self.camera_data.export(self.rpr_context.scene.camera)

        athena_data = {}

        time_begin = time.perf_counter()
        athena_data['Start Time'] = datetime.datetime.utcnow().strftime(
            "%Y-%m-%d %H:%M:%S.%f")
        athena_data['End Status'] = "successful"

        self.current_sample = 0

        while True:
            if self.rpr_engine.test_break():
                athena_data['End Status'] = "cancelled"
                break

            self.current_render_time = time.perf_counter() - time_begin

            # the contour pass renders one sample per iteration
            update_samples = 1

            # report elapsed time against the time limit, if one is set
            time_str = f"{self.current_render_time:.1f}/{self.render_time}" if self.render_time \
                       else f"{self.current_render_time:.1f}"

            # progress is the greater of the sample fraction and the time fraction (if a time limit is set)
            progress = max(
                self.current_sample / self.contour_pass_samples,
                self.current_render_time /
                self.render_time if self.render_time else 0)
            info_str = f"Outline Pass | Render Time: {time_str} sec | "\
                       f"Samples: {self.current_sample}/{self.contour_pass_samples}"
            log_str = f"  samples: {self.current_sample} +{update_samples} / {self.contour_pass_samples}"\
                      f", progress: {progress * 100:.1f}%, time: {self.current_render_time:.2f}"

            self.notify_status(progress, info_str)

            log(log_str)

            self.rpr_context.set_parameter(pyrpr.CONTEXT_ITERATIONS,
                                           update_samples)
            self.rpr_context.set_parameter(pyrpr.CONTEXT_FRAMECOUNT,
                                           self.render_iteration)
            self.rpr_context.render(restart=(self.current_sample == 0))

            self.current_sample += update_samples

            self.rpr_context.resolve()
            self._update_render_result_contour(
                (0, 0), (self.width, self.height),
                layer_name=self.render_layer_name)

            if self.current_sample == self.contour_pass_samples:
                break

            if self.render_time and self.current_render_time >= self.render_time:
                break

            self.render_iteration += 1

        athena_data['Stop Time'] = datetime.datetime.utcnow().strftime(
            "%Y-%m-%d %H:%M:%S.%f")
        athena_data['Samples'] = self.current_sample

        log.info(f"Scene synchronization time:",
                 perfcounter_to_str(self.sync_time))
        log.info(f"Render time:", perfcounter_to_str(self.current_render_time))
        self.athena_send(athena_data)