示例#1
0
        def superres_func(face_bgr):
            """Upscale a BGR face image via a lazily-created FaceEnhancer."""
            enhancer = self.face_enhancer
            if enhancer is None:
                # First call: build the enhancer once and cache it on self.
                enhancer = FaceEnhancer(place_model_on_cpu=True)
                self.face_enhancer = enhancer
            return enhancer.enhance(face_bgr, is_tanh=True, preserve_size=False)
示例#2
0
    class Cli(Subprocessor.Cli):
        """Subprocess worker: enhances one dfl face image per task."""

        #override
        def on_initialize(self, client_dict):
            """Pick the compute device, initialize nn, and load the FaceEnhancer."""
            device_idx = client_dict['device_idx']
            cpu_only = client_dict['device_type'] == 'CPU'
            self.output_dirpath = client_dict['output_dirpath']
            nn_initialize_mp_lock = client_dict['nn_initialize_mp_lock']

            if cpu_only:
                device_config = nn.DeviceConfig.CPU()
                device_vram = 99  # sentinel: never triggers the low-VRAM path below
            else:
                device_config = nn.DeviceConfig.GPUIndexes([device_idx])
                device_vram = device_config.devices[0].total_mem_gb

            nn.initialize(device_config)

            intro_str = 'Running on %s.' % (client_dict['device_name'])

            self.log_info(intro_str)

            from facelib import FaceEnhancer
            # Keep the model on CPU when VRAM is scarce (<= 2 GB) or no GPU is used.
            self.fe = FaceEnhancer(place_model_on_cpu=(device_vram <= 2
                                                       or cpu_only),
                                   run_on_cpu=cpu_only)

        #override
        def process_data(self, filepath):
            """Enhance the image at `filepath`, preserving its dfl metadata.

            Returns (1, filepath, output_filepath) on success,
            (0, filepath, None) on failure.
            """
            try:
                dflimg = DFLIMG.load(filepath)
                if dflimg is None or not dflimg.has_data():
                    self.log_err(f"{filepath.name} is not a dfl image file")
                else:
                    dfl_dict = dflimg.get_dict()

                    img = cv2_imread(filepath).astype(np.float32) / 255.0
                    img = self.fe.enhance(img)
                    img = np.clip(img * 255, 0, 255).astype(np.uint8)

                    output_filepath = self.output_dirpath / filepath.name

                    cv2_imwrite(str(output_filepath), img,
                                [int(cv2.IMWRITE_JPEG_QUALITY), 100])

                    # Re-load the written file and re-attach the original metadata.
                    dflimg = DFLIMG.load(output_filepath)
                    dflimg.set_dict(dfl_dict)
                    dflimg.save()

                    return (1, filepath, output_filepath)
            except Exception:
                # Was a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit; narrowed to Exception.
                self.log_err(
                    f"Exception occured while processing file {filepath}. Error: {traceback.format_exc()}"
                )

            return (0, filepath, None)
示例#3
0
        def on_initialize(self, client_dict):
            """Initialize nnlib on the assigned device and load the enhancer."""
            gpu_idx = client_dict['device_idx']
            on_cpu = client_dict['device_type'] == 'CPU'
            self.output_dirpath = client_dict['output_dirpath']

            cfg = nnlib.DeviceConfig(cpu_only=on_cpu,
                                     force_gpu_idx=gpu_idx,
                                     allow_growth=True)
            nnlib.import_all(cfg)

            vram_gb = cfg.gpu_vram_gb[0]

            msg = 'Running on %s.' % (client_dict['device_name'])
            # Warn when running on a GPU with very little memory.
            low_vram_gpu = (not on_cpu) and vram_gb <= 2
            if low_vram_gpu:
                msg = msg + " Recommended to close all programs using this device."
            self.log_info(msg)

            from facelib import FaceEnhancer
            self.fe = FaceEnhancer()
        def on_initialize(self, client_dict):
            """Set up the nn device config and create the FaceEnhancer."""
            idx = client_dict['device_idx']
            is_cpu = client_dict['device_type'] == 'CPU'
            self.output_dirpath = client_dict['output_dirpath']
            nn_initialize_mp_lock = client_dict['nn_initialize_mp_lock']

            if is_cpu:
                cfg = nn.DeviceConfig.CPU()
                vram_gb = 99  # sentinel: CPU path never hits the low-VRAM branch
            else:
                cfg = nn.DeviceConfig.GPUIndexes([idx])
                vram_gb = cfg.devices[0].total_mem_gb

            nn.initialize(cfg)

            self.log_info('Running on %s.' % (client_dict['device_name']))

            from facelib import FaceEnhancer
            # Park the model on CPU when the GPU has 2 GB or less.
            self.fe = FaceEnhancer(place_model_on_cpu=(vram_gb <= 2))
示例#5
0
    class Cli(Subprocessor.Cli):
        """Subprocess worker: enhances one dfl image per task (legacy nnlib path)."""

        #override
        def on_initialize(self, client_dict):
            """Import nnlib with the requested device and build a FaceEnhancer."""
            device_idx = client_dict['device_idx']
            cpu_only = client_dict['device_type'] == 'CPU'
            self.output_dirpath = client_dict['output_dirpath']

            device_config = nnlib.DeviceConfig(cpu_only=cpu_only,
                                               force_gpu_idx=device_idx,
                                               allow_growth=True)
            nnlib.import_all(device_config)

            device_vram = device_config.gpu_vram_gb[0]

            intro_str = 'Running on %s.' % (client_dict['device_name'])
            if not cpu_only and device_vram <= 2:
                intro_str += " Recommended to close all programs using this device."

            self.log_info(intro_str)

            from facelib import FaceEnhancer
            self.fe = FaceEnhancer()

        #override
        def process_data(self, filepath):
            """Enhance the image at `filepath` and embed the original metadata.

            Returns (1, filepath, output_filepath) on success,
            (0, filepath, None) on failure.
            """
            try:
                dflimg = DFLIMG.load(filepath)
                if dflimg is None:
                    self.log_err("%s is not a dfl image file" %
                                 (filepath.name))
                else:
                    img = cv2_imread(filepath).astype(np.float32) / 255.0

                    img = self.fe.enhance(img)

                    img = np.clip(img * 255, 0, 255).astype(np.uint8)

                    output_filepath = self.output_dirpath / filepath.name

                    cv2_imwrite(str(output_filepath), img,
                                [int(cv2.IMWRITE_JPEG_QUALITY), 100])
                    dflimg.embed_and_set(str(output_filepath))
                    return (1, filepath, output_filepath)
            except Exception:
                # Was a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit; narrowed to Exception.
                self.log_err(
                    f"Exception occured while processing file {filepath}. Error: {traceback.format_exc()}"
                )

            return (0, filepath, None)
示例#6
0
class MergeSubprocessor(Subprocessor):
    class Frame(object):
        """Mutable per-frame record tracked by the merger's interactive loop."""

        def __init__(self,
                     prev_temporal_frame_infos=None,
                     frame_info=None,
                     next_temporal_frame_infos=None):
            # Temporal context around this frame (used for avatar-style merging).
            self.frame_info = frame_info
            self.prev_temporal_frame_infos = prev_temporal_frame_infos
            self.next_temporal_frame_infos = next_temporal_frame_infos

            # Filled in later by the subprocessor.
            self.output_filepath = None
            self.idx = None
            self.cfg = None
            self.image = None

            # Scheduling / display flags.
            self.is_done = False
            self.is_processing = False
            self.is_shown = False

    class ProcessingFrame(object):
        """Plain data packet describing one frame's merge job for a worker."""

        def __init__(self,
                     idx=None,
                     cfg=None,
                     prev_temporal_frame_infos=None,
                     frame_info=None,
                     next_temporal_frame_infos=None,
                     output_filepath=None,
                     need_return_image=False):
            self.idx = idx
            self.cfg = cfg
            self.frame_info = frame_info
            self.prev_temporal_frame_infos = prev_temporal_frame_infos
            self.next_temporal_frame_infos = next_temporal_frame_infos
            self.output_filepath = output_filepath
            self.need_return_image = need_return_image
            if need_return_image:
                # Reserve the slot the worker will fill with the merged image.
                self.image = None

    class Cli(Subprocessor.Cli):
        """Worker-side merger client.

        Receives a ProcessingFrame, merges the predicted face into the
        source frame (or copies the frame when no faces were found) and
        returns the ProcessingFrame, optionally with the merged image
        attached.
        """

        #override
        def on_initialize(self, client_dict):
            # Cross-process callables and settings handed over by the host.
            self.log_info('Running on %s.' % (client_dict['device_name']))
            self.device_idx = client_dict['device_idx']
            self.device_name = client_dict['device_name']
            self.predictor_func = client_dict['predictor_func']
            self.predictor_input_shape = client_dict['predictor_input_shape']
            self.superres_func = client_dict['superres_func']
            self.fanseg_input_size = client_dict['fanseg_input_size']
            self.fanseg_extract_func = client_dict['fanseg_extract_func']

            #transfer and set stdin in order to work code.interact in debug subprocess
            stdin_fd = client_dict['stdin_fd']
            if stdin_fd is not None:
                sys.stdin = os.fdopen(stdin_fd)

            def blursharpen_func(img,
                                 sharpen_mode=0,
                                 kernel_size=3,
                                 amount=100):
                """Sharpen (amount > 0) or blur (amount < 0) `img`.

                sharpen_mode 1 applies an unsharp mask built from a box
                filter; mode 2 uses a Gaussian unsharp mask. Any other
                mode with amount > 0 returns the image unchanged.
                """
                # cv2 filters below require an odd kernel size.
                if kernel_size % 2 == 0:
                    kernel_size += 1
                if amount > 0:
                    if sharpen_mode == 1:  #box
                        kernel = np.zeros((kernel_size, kernel_size),
                                          dtype=np.float32)
                        kernel[kernel_size // 2, kernel_size // 2] = 1.0
                        box_filter = np.ones(
                            (kernel_size, kernel_size),
                            dtype=np.float32) / (kernel_size**2)
                        # identity + amount * (identity - box) == unsharp mask
                        kernel = kernel + (kernel - box_filter) * amount
                        return cv2.filter2D(img, -1, kernel)
                    elif sharpen_mode == 2:  #gaussian
                        blur = cv2.GaussianBlur(img,
                                                (kernel_size, kernel_size), 0)
                        img = cv2.addWeighted(img, 1.0 + (0.5 * amount), blur,
                                              -(0.5 * amount), 0)
                        return img
                elif amount < 0:
                    # Negative amount: apply |amount| in units of 10 as full
                    # median-blur passes; the final partial pass is blended
                    # in proportionally (n % 10) / 10.
                    n = -amount
                    while n > 0:

                        img_blur = cv2.medianBlur(img, 5)
                        if int(n / 10) != 0:
                            img = img_blur
                        else:
                            pass_power = (n % 10) / 10.0
                            img = img * (1.0 -
                                         pass_power) + img_blur * pass_power
                        n = max(n - 10, 0)

                    return img
                return img

            self.blursharpen_func = blursharpen_func

            return None

        #override
        def process_data(self, pf):  #pf=ProcessingFrame
            # Work on a copy of the config; attach the worker-local helpers.
            cfg = pf.cfg.copy()
            cfg.blursharpen_func = self.blursharpen_func
            cfg.superres_func = self.superres_func

            frame_info = pf.frame_info

            filepath = frame_info.filepath
            landmarks_list = frame_info.landmarks_list

            output_filepath = pf.output_filepath
            need_return_image = pf.need_return_image

            if len(landmarks_list) == 0:
                # No faces: pass the source frame through unchanged.
                self.log_info('no faces found for %s, copying without faces' %
                              (filepath.name))

                if cfg.export_mask_alpha:
                    # Still need a 4-channel output: append an empty alpha plane.
                    img_bgr = cv2_imread(filepath)
                    h, w, c = img_bgr.shape
                    if c == 1:
                        img_bgr = np.repeat(img_bgr, 3, -1)
                    if c == 3:
                        img_bgr = np.concatenate([
                            img_bgr,
                            np.zeros((h, w, 1), dtype=img_bgr.dtype)
                        ],
                                                 axis=-1)

                    cv2_imwrite(output_filepath, img_bgr)
                else:
                    if filepath.suffix == '.png':
                        # Already a png: a raw file copy avoids re-encoding.
                        shutil.copy(str(filepath), str(output_filepath))
                    else:
                        img_bgr = cv2_imread(filepath)
                        cv2_imwrite(output_filepath, img_bgr)

                if need_return_image:
                    img_bgr = cv2_imread(filepath)
                    pf.image = img_bgr
            else:
                if cfg.type == MergerConfig.TYPE_MASKED:
                    cfg.fanseg_input_size = self.fanseg_input_size
                    cfg.fanseg_extract_func = self.fanseg_extract_func

                    try:
                        final_img = MergeMasked(self.predictor_func,
                                                self.predictor_input_shape,
                                                cfg, frame_info)
                    except Exception as e:
                        e_str = traceback.format_exc()
                        if 'MemoryError' in e_str:
                            # Out-of-memory is retried silently by the host.
                            raise Subprocessor.SilenceException
                        else:
                            raise Exception(
                                f'Error while merging file [{filepath}]: {e_str}'
                            )

                elif cfg.type == MergerConfig.TYPE_FACE_AVATAR:
                    final_img = MergeFaceAvatar(self.predictor_func,
                                                self.predictor_input_shape,
                                                cfg,
                                                pf.prev_temporal_frame_infos,
                                                pf.frame_info,
                                                pf.next_temporal_frame_infos)

                if output_filepath is not None and final_img is not None:
                    cv2_imwrite(output_filepath, final_img)

                if need_return_image:
                    pf.image = final_img

            return pf

        #overridable
        def get_data_name(self, pf):
            #return string identificator of your data
            return pf.frame_info.filepath

    #override
    def __init__(self, is_interactive, merger_session_filepath, predictor_func,
                 predictor_input_shape, merger_config, frames,
                 frames_root_path, output_path, model_iter):
        """Set up the merger: cross-process helpers, session restore, frame list.

        Raises ValueError when `frames` is empty.
        """
        if len(frames) == 0:
            raise ValueError("len (frames) == 0")

        # Large timeout in debug mode so breakpoints don't kill the worker.
        super().__init__('Merger',
                         MergeSubprocessor.Cli,
                         86400 if MERGER_DEBUG else 60,
                         io_loop_sleep_time=0.001)

        self.is_interactive = is_interactive
        self.merger_session_filepath = Path(merger_session_filepath)
        self.merger_config = merger_config

        # make_pair() returns (host, client) ends; the client end is shipped
        # to worker processes, the host end is pumped in on_tick().
        self.predictor_func_host, self.predictor_func = SubprocessFunctionCaller.make_pair(
            predictor_func)
        self.predictor_input_shape = predictor_input_shape

        # FaceEnhancer is created lazily on first superres request (mode 1).
        self.face_enhancer = None

        def superres_func(mode, face_bgr):
            # Returns None for any mode other than 1.
            if mode == 1:
                if self.face_enhancer is None:
                    self.face_enhancer = FaceEnhancer(place_model_on_cpu=True)

                return self.face_enhancer.enhance(face_bgr,
                                                  is_tanh=True,
                                                  preserve_size=False)

        self.superres_host, self.superres_func = SubprocessFunctionCaller.make_pair(
            superres_func)

        # One TernausNet segmenter per face type, built on demand.
        self.fanseg_by_face_type = {}
        self.fanseg_input_size = 256

        def fanseg_extract_func(face_type, *args, **kwargs):
            fanseg = self.fanseg_by_face_type.get(face_type, None)
            if self.fanseg_by_face_type.get(face_type, None) is None:
                cpu_only = len(nn.getCurrentDeviceConfig().devices) == 0

                with nn.tf.device('/CPU:0' if cpu_only else '/GPU:0'):
                    fanseg = TernausNet("FANSeg",
                                        self.fanseg_input_size,
                                        FaceType.toString(face_type),
                                        place_model_on_cpu=True)

                self.fanseg_by_face_type[face_type] = fanseg
            return fanseg.extract(*args, **kwargs)

        self.fanseg_host, self.fanseg_extract_func = SubprocessFunctionCaller.make_pair(
            fanseg_extract_func)

        self.frames_root_path = frames_root_path
        self.output_path = output_path
        self.model_iter = model_iter

        self.prefetch_frame_count = self.process_count = min(
            6, multiprocessing.cpu_count())

        session_data = None
        if self.is_interactive and self.merger_session_filepath.exists():
            io.input_skip_pending()
            if io.input_bool("Use saved session?", True):
                try:
                    # NOTE(review): pickle.loads on a local session file the
                    # merger itself wrote — acceptable only because the file
                    # is trusted local state, not external input.
                    with open(str(self.merger_session_filepath), "rb") as f:
                        session_data = pickle.loads(f.read())

                except Exception as e:
                    # Unreadable/corrupt session: silently fall back to a fresh run.
                    pass

        self.frames = frames
        self.frames_idxs = [*range(len(self.frames))]
        self.frames_done_idxs = []

        if self.is_interactive and session_data is not None:
            # Loaded session data, check it
            s_frames = session_data.get('frames', None)
            s_frames_idxs = session_data.get('frames_idxs', None)
            s_frames_done_idxs = session_data.get('frames_done_idxs', None)
            s_model_iter = session_data.get('model_iter', None)

            frames_equal = (s_frames is not None) and \
                           (s_frames_idxs is not None) and \
                           (s_frames_done_idxs is not None) and \
                           (s_model_iter is not None) and \
                           (len(frames) == len(s_frames)) # frames count must match

            if frames_equal:
                for i in range(len(frames)):
                    frame = frames[i]
                    s_frame = s_frames[i]
                    # frames filenames must match
                    if frame.frame_info.filepath.name != s_frame.frame_info.filepath.name:
                        frames_equal = False
                    if not frames_equal:
                        break

            if frames_equal:
                io.log_info('Using saved session from ' +
                            '/'.join(self.merger_session_filepath.parts[-2:]))

                for frame in s_frames:
                    if frame.cfg is not None:
                        # recreate MergerConfig class using constructor with get_config() as dict params
                        # so if any new param will be added, old merger session will work properly
                        frame.cfg = frame.cfg.__class__(
                            **frame.cfg.get_config())

                self.frames = s_frames
                self.frames_idxs = s_frames_idxs
                self.frames_done_idxs = s_frames_done_idxs

                rewind_to_begin = len(
                    self.frames_idxs) == 0  # all frames are done?

                if self.model_iter != s_model_iter:
                    # model was more trained, recompute all frames
                    rewind_to_begin = True
                    for frame in self.frames:
                        frame.is_done = False

                if rewind_to_begin:
                    # Push every done frame back onto the pending queue, in order.
                    while len(self.frames_done_idxs) > 0:
                        prev_frame = self.frames[self.frames_done_idxs.pop()]
                        self.frames_idxs.insert(0, prev_frame.idx)

                if len(self.frames_idxs) != 0:
                    cur_frame = self.frames[self.frames_idxs[0]]
                    cur_frame.is_shown = False

            if not frames_equal:
                session_data = None

        if session_data is None:
            # Fresh run: start from a clean output directory.
            for filename in pathex.get_image_paths(
                    self.output_path):  #remove all images in output_path
                Path(filename).unlink()

            frames[0].cfg = self.merger_config.copy()

        # (Re)assign indices and output paths for whichever frame list we kept.
        for i in range(len(self.frames)):
            frame = self.frames[i]
            frame.idx = i
            frame.output_filepath = self.output_path / (
                frame.frame_info.filepath.stem + '.png')

    #override
    def process_info_generator(self):
        """Yield one (name, host_dict, client_dict) tuple per worker process."""
        if MERGER_DEBUG:
            worker_ids = [0]  # single worker while debugging
        else:
            worker_ids = range(self.process_count)

        for i in worker_ids:
            name = 'CPU%d' % (i)
            yield name, {}, {
                'device_idx': i,
                'device_name': name,
                'predictor_func': self.predictor_func,
                'predictor_input_shape': self.predictor_input_shape,
                'superres_func': self.superres_func,
                'fanseg_input_size': self.fanseg_input_size,
                'fanseg_extract_func': self.fanseg_extract_func,
                'stdin_fd': sys.stdin.fileno() if MERGER_DEBUG else None
            }

    #overridable optional
    def on_clients_initialized(self):
        """Open the progress bar and, in interactive mode, build the UI.

        Creates the main/help screens and the keyboard dispatch table that
        maps a key press to a mutation of the current frame's MergerConfig.
        """
        io.progress_bar("Merging",
                        len(self.frames_idxs) + len(self.frames_done_idxs),
                        initial=len(self.frames_done_idxs))

        # Non-interactive runs just churn through all frames.
        self.process_remain_frames = not self.is_interactive
        self.is_interactive_quitting = not self.is_interactive

        if self.is_interactive:
            # Static help overlays shipped next to this module.
            help_images = {
                MergerConfig.TYPE_MASKED:
                cv2_imread(
                    str(
                        Path(__file__).parent / 'gfx' /
                        'help_merger_masked.jpg')),
                MergerConfig.TYPE_FACE_AVATAR:
                cv2_imread(
                    str(
                        Path(__file__).parent / 'gfx' /
                        'help_merger_face_avatar.jpg')),
            }

            self.main_screen = Screen(initial_scale_to_width=1368,
                                      image=None,
                                      waiting_icon=True)
            self.help_screen = Screen(
                initial_scale_to_height=768,
                image=help_images[self.merger_config.type],
                waiting_icon=False)
            self.screen_manager = ScreenManager(
                "Merger", [self.main_screen, self.help_screen],
                capture_keys=True)
            # Start on the help screen so the key bindings are visible first.
            self.screen_manager.set_current(self.help_screen)
            self.screen_manager.show_current()

            # Key -> config-mutation table for TYPE_MASKED configs.
            # Each entry takes (cfg, shift_pressed); shift usually means
            # "apply a step of 5 instead of 1".
            self.masked_keys_funcs = {
                '`':
                lambda cfg, shift_pressed: cfg.set_mode(0),
                '1':
                lambda cfg, shift_pressed: cfg.set_mode(1),
                '2':
                lambda cfg, shift_pressed: cfg.set_mode(2),
                '3':
                lambda cfg, shift_pressed: cfg.set_mode(3),
                '4':
                lambda cfg, shift_pressed: cfg.set_mode(4),
                '5':
                lambda cfg, shift_pressed: cfg.set_mode(5),
                '6':
                lambda cfg, shift_pressed: cfg.set_mode(6),
                '7':
                lambda cfg, shift_pressed: cfg.set_mode(7),
                '8':
                lambda cfg, shift_pressed: cfg.set_mode(8),
                'q':
                lambda cfg, shift_pressed: cfg.add_hist_match_threshold(
                    1 if not shift_pressed else 5),
                'a':
                lambda cfg, shift_pressed: cfg.add_hist_match_threshold(
                    -1 if not shift_pressed else -5),
                'w':
                lambda cfg, shift_pressed: cfg.add_erode_mask_modifier(
                    1 if not shift_pressed else 5),
                's':
                lambda cfg, shift_pressed: cfg.add_erode_mask_modifier(
                    -1 if not shift_pressed else -5),
                'e':
                lambda cfg, shift_pressed: cfg.add_blur_mask_modifier(
                    1 if not shift_pressed else 5),
                'd':
                lambda cfg, shift_pressed: cfg.add_blur_mask_modifier(
                    -1 if not shift_pressed else -5),
                'r':
                lambda cfg, shift_pressed: cfg.add_motion_blur_power(
                    1 if not shift_pressed else 5),
                'f':
                lambda cfg, shift_pressed: cfg.add_motion_blur_power(
                    -1 if not shift_pressed else -5),
                'y':
                lambda cfg, shift_pressed: cfg.add_blursharpen_amount(
                    1 if not shift_pressed else 5),
                'h':
                lambda cfg, shift_pressed: cfg.add_blursharpen_amount(
                    -1 if not shift_pressed else -5),
                'u':
                lambda cfg, shift_pressed: cfg.add_output_face_scale(
                    1 if not shift_pressed else 5),
                'j':
                lambda cfg, shift_pressed: cfg.add_output_face_scale(
                    -1 if not shift_pressed else -5),
                'i':
                lambda cfg, shift_pressed: cfg.add_image_denoise_power(
                    1 if not shift_pressed else 5),
                'k':
                lambda cfg, shift_pressed: cfg.add_image_denoise_power(
                    -1 if not shift_pressed else -5),
                'o':
                lambda cfg, shift_pressed: cfg.add_bicubic_degrade_power(
                    1 if not shift_pressed else 5),
                'l':
                lambda cfg, shift_pressed: cfg.add_bicubic_degrade_power(
                    -1 if not shift_pressed else -5),
                'p':
                lambda cfg, shift_pressed: cfg.add_color_degrade_power(
                    1 if not shift_pressed else 5),
                ';':
                lambda cfg, shift_pressed: cfg.add_color_degrade_power(-1),
                ':':
                lambda cfg, shift_pressed: cfg.add_color_degrade_power(-5),
                'z':
                lambda cfg, shift_pressed: cfg.toggle_masked_hist_match(),
                'x':
                lambda cfg, shift_pressed: cfg.toggle_mask_mode(),
                'c':
                lambda cfg, shift_pressed: cfg.toggle_color_transfer_mode(),
                'v':
                lambda cfg, shift_pressed: cfg.toggle_super_resolution_mode(),
                'b':
                lambda cfg, shift_pressed: cfg.toggle_export_mask_alpha(),
                'n':
                lambda cfg, shift_pressed: cfg.toggle_sharpen_mode(),
            }
            self.masked_keys = list(self.masked_keys_funcs.keys())

    #overridable optional
    def on_clients_finalized(self):
        """Close the progress bar and, in interactive mode, persist the session."""
        io.progress_bar_close()

        if not self.is_interactive:
            return

        self.screen_manager.finalize()

        # Drop per-frame data that should not end up in the pickle.
        for frame in self.frames:
            frame.output_filepath = None
            frame.image = None

        session_data = {
            'frames': self.frames,
            'frames_idxs': self.frames_idxs,
            'frames_done_idxs': self.frames_done_idxs,
            'model_iter': self.model_iter,
        }
        self.merger_session_filepath.write_bytes(
            pickle.dumps(session_data))

        io.log_info("Session is saved to " +
                    '/'.join(self.merger_session_filepath.parts[-2:]))

    #override
    def on_tick(self):
        """One iteration of the host loop: pump RPC queues, handle keys, navigate.

        Returns True when the subprocessor should stop (interactive quit, or
        non-interactive run finished).
        """
        # Serve pending cross-process calls from the workers.
        self.predictor_func_host.process_messages()
        self.superres_host.process_messages()
        self.fanseg_host.process_messages()

        # Navigation intents collected from this tick's key press.
        go_prev_frame = False
        go_first_frame = False
        go_prev_frame_overriding_cfg = False
        go_first_frame_overriding_cfg = False

        go_next_frame = self.process_remain_frames
        go_next_frame_overriding_cfg = False
        go_last_frame_overriding_cfg = False

        cur_frame = None
        if len(self.frames_idxs) != 0:
            cur_frame = self.frames[self.frames_idxs[0]]

        if self.is_interactive:
            self.main_screen.set_waiting_icon(False)

            if not self.is_interactive_quitting and not self.process_remain_frames:
                if cur_frame is not None:
                    if not cur_frame.is_shown:
                        if cur_frame.is_done:
                            cur_frame.is_shown = True
                            io.log_info(
                                cur_frame.cfg.to_string(
                                    cur_frame.frame_info.filepath.name))

                            if cur_frame.image is None:
                                cur_frame.image = cv2_imread(
                                    cur_frame.output_filepath)
                                if cur_frame.image is None:
                                    # unable to read? recompute then
                                    cur_frame.is_done = False
                                    cur_frame.is_shown = False
                            self.main_screen.set_image(cur_frame.image)
                        else:
                            # Frame still being merged: show the spinner.
                            self.main_screen.set_waiting_icon(True)

                else:
                    self.main_screen.set_image(None)
            else:
                self.main_screen.set_image(None)
                self.main_screen.set_waiting_icon(True)

            self.screen_manager.show_current()

            # Only the most recent key event of this tick is handled.
            key_events = self.screen_manager.get_key_events()
            key, chr_key, ctrl_pressed, alt_pressed, shift_pressed = key_events[
                -1] if len(key_events) > 0 else (0, 0, False, False, False)

            if key == 9:  #tab
                self.screen_manager.switch_screens()
            else:
                if key == 27:  #esc
                    self.is_interactive_quitting = True
                elif self.screen_manager.get_current() is self.main_screen:

                    if self.merger_config.type == MergerConfig.TYPE_MASKED and chr_key in self.masked_keys:
                        # Config-editing key: mutate the current frame's cfg.
                        self.process_remain_frames = False

                        if cur_frame is not None:
                            cfg = cur_frame.cfg
                            prev_cfg = cfg.copy()

                            if cfg.type == MergerConfig.TYPE_MASKED:
                                self.masked_keys_funcs[chr_key](cfg,
                                                                shift_pressed)

                            if prev_cfg != cfg:
                                # Config changed: frame must be recomputed.
                                io.log_info(
                                    cfg.to_string(
                                        cur_frame.frame_info.filepath.name))
                                cur_frame.is_done = False
                                cur_frame.is_shown = False
                    else:
                        # Navigation keys: ',' / 'm' backwards, '.' / '/' forwards,
                        # '-' / '=' zoom. 'm' and '/' also copy the current cfg
                        # onto the frames being moved over.
                        if chr_key == ',' or chr_key == 'm':
                            self.process_remain_frames = False
                            go_prev_frame = True

                            if chr_key == ',':
                                if shift_pressed:
                                    go_first_frame = True

                            elif chr_key == 'm':
                                if not shift_pressed:
                                    go_prev_frame_overriding_cfg = True
                                else:
                                    go_first_frame_overriding_cfg = True

                        elif chr_key == '.' or chr_key == '/':
                            self.process_remain_frames = False
                            go_next_frame = True

                            if chr_key == '.':
                                if shift_pressed:
                                    self.process_remain_frames = not self.process_remain_frames

                            elif chr_key == '/':
                                if not shift_pressed:
                                    go_next_frame_overriding_cfg = True
                                else:
                                    go_last_frame_overriding_cfg = True

                        elif chr_key == '-':
                            self.screen_manager.get_current().diff_scale(-0.1)
                        elif chr_key == '=':
                            self.screen_manager.get_current().diff_scale(0.1)

        if go_prev_frame:
            # Only step back once the current frame is settled.
            if cur_frame is None or cur_frame.is_done:
                if cur_frame is not None:
                    cur_frame.image = None

                while True:
                    if len(self.frames_done_idxs) > 0:
                        # Move the last done frame back onto the pending queue.
                        prev_frame = self.frames[self.frames_done_idxs.pop()]
                        self.frames_idxs.insert(0, prev_frame.idx)
                        prev_frame.is_shown = False
                        io.progress_bar_inc(-1)

                        if cur_frame is not None and (
                                go_prev_frame_overriding_cfg
                                or go_first_frame_overriding_cfg):
                            if prev_frame.cfg != cur_frame.cfg:
                                prev_frame.cfg = cur_frame.cfg.copy()
                                prev_frame.is_done = False

                        cur_frame = prev_frame

                    if go_first_frame_overriding_cfg or go_first_frame:
                        # Keep rewinding until nothing is left to pop.
                        if len(self.frames_done_idxs) > 0:
                            continue
                    break

        elif go_next_frame:
            if cur_frame is not None and cur_frame.is_done:
                cur_frame.image = None
                cur_frame.is_shown = True
                self.frames_done_idxs.append(cur_frame.idx)
                self.frames_idxs.pop(0)
                io.progress_bar_inc(1)

                f = self.frames

                if len(self.frames_idxs) != 0:
                    next_frame = f[self.frames_idxs[0]]
                    next_frame.is_shown = False

                    if go_next_frame_overriding_cfg or go_last_frame_overriding_cfg:
                        # Invalidate the cfg of frames ahead so they inherit
                        # the current cfg in the prefetch loop below.
                        if go_next_frame_overriding_cfg:
                            to_frames = next_frame.idx + 1
                        else:
                            to_frames = len(f)

                        for i in range(next_frame.idx, to_frames):
                            f[i].cfg = None

                    # Propagate cfg forward through the prefetch window.
                    for i in range(
                            min(len(self.frames_idxs),
                                self.prefetch_frame_count)):
                        frame = f[self.frames_idxs[i]]
                        if frame.cfg is None:
                            if i == 0:
                                frame.cfg = cur_frame.cfg.copy()
                            else:
                                frame.cfg = f[self.frames_idxs[i -
                                                               1]].cfg.copy()

                            frame.is_done = False  #initiate solve again
                            frame.is_shown = False

            if len(self.frames_idxs) == 0:
                self.process_remain_frames = False

        return (self.is_interactive and self.is_interactive_quitting) or \
               (not self.is_interactive and self.process_remain_frames == False)

    #override
    def on_data_return(self, host_dict, pf):
        """A worker returned `pf` unprocessed; requeue its frame for a retry."""
        returned_frame = self.frames[pf.idx]
        returned_frame.is_processing = False
        returned_frame.is_done = False

    #override
    def on_result(self, host_dict, pf_sent, pf_result):
        """Accept a finished frame unless its cfg changed while it was queued."""
        frame = self.frames[pf_result.idx]
        frame.is_processing = False
        if frame.cfg != pf_result.cfg:
            # Stale result: the user edited the config in the meantime.
            return
        frame.is_done = True
        frame.image = pf_result.image

    #override
    def get_data(self, host_dict):
        """Hand out the next schedulable frame in the prefetch window, or None."""
        if self.is_interactive and self.is_interactive_quitting:
            return None

        window = min(len(self.frames_idxs), self.prefetch_frame_count)
        for i in range(window):
            frame = self.frames[self.frames_idxs[i]]

            schedulable = (not frame.is_done and not frame.is_processing
                           and frame.cfg is not None)
            if not schedulable:
                continue

            frame.is_processing = True
            return MergeSubprocessor.ProcessingFrame(
                idx=frame.idx,
                cfg=frame.cfg.copy(),
                prev_temporal_frame_infos=frame.prev_temporal_frame_infos,
                frame_info=frame.frame_info,
                next_temporal_frame_infos=frame.next_temporal_frame_infos,
                output_filepath=frame.output_filepath,
                need_return_image=True)

        return None

    #override
    def get_result(self):
        # The merger's results are the written files / UI state; the
        # Subprocessor contract still requires returning something.
        return 0