Example #1
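    # Merger settings prompt: sharpen mode, blur/sharpen amount and super resolution mode are collected via io.input_int.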
    def ask_settings(self):
        s = """Choose sharpen mode: \n"""
        for key in self.sharpen_dict.keys():
            s += f"""({key}) {self.sharpen_dict[key]}\n"""
        io.log_info(s)
        self.sharpen_mode = io.input_int(
            "",
            0,
            valid_list=self.sharpen_dict.keys(),
            help_message="Enhance details by applying sharpen filter.")

        if self.sharpen_mode != 0:
            self.blursharpen_amount = np.clip(
                io.input_int("Choose blur/sharpen amount",
                             0,
                             add_info="-100..100"), -100, 100)

        s = """Choose super resolution mode: \n"""
        for key in self.super_res_dict.keys():
            s += f"""({key}) {self.super_res_dict[key]}\n"""
        io.log_info(s)
        self.super_resolution_mode = io.input_int(
            "",
            0,
            valid_list=self.super_res_dict.keys(),
            help_message="Enhance details by applying superresolution network."
        )
Example #2
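    # Model option setup: suggests a batch size from available VRAM, then asks for resolution, training stage and target stage iteration.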
    def on_initialize_options(self):
        device_config = nn.getCurrentDeviceConfig()

        lowest_vram = 2
        if len(device_config.devices) != 0:
            lowest_vram = device_config.devices.get_worst_device().total_mem_gb

        if lowest_vram >= 4:
            suggest_batch_size = 8
        else:
            suggest_batch_size = 4

        yn_str = {True: 'y', False: 'n'}
        ask_override = self.ask_override()

        resolution = default_resolution = self.options[
            'resolution'] = self.load_or_def_option('resolution', 512)

        if self.is_first_run() or ask_override:
            self.ask_batch_size(suggest_batch_size)
            resolution = io.input_int("Resolution",
                                      default_resolution,
                                      add_info="64-1024")

        self.stage_max = stage_max = np.clip(
            mathlib.get_power_of_two(resolution), 6, 10) - 2
        self.options['resolution'] = resolution = 2**(stage_max + 2)

        default_stage = self.load_or_def_option('stage', 0)
        default_target_stage_iter = self.load_or_def_option(
            'target_stage_iter', self.iter + 100000)

        if (self.is_first_run() or ask_override):
            new_stage = np.clip(
                io.input_int("Stage", default_stage,
                             add_info=f"0-{stage_max}"), 0, stage_max)
            if new_stage != default_stage:
                self.options['start_stage_iter'] = self.iter
                default_target_stage_iter = self.iter + 100000
            self.options['stage'] = new_stage
        else:
            self.options['stage'] = default_stage

        if self.options['stage'] == 0:
            if 'start_stage_iter' in self.options:
                self.options.pop('start_stage_iter')

            if 'target_stage_iter' in self.options:
                self.options.pop('target_stage_iter')
        else:
            if (self.is_first_run() or ask_override):
                self.options['target_stage_iter'] = io.input_int(
                    "Target stage iteration", default_target_stage_iter)
            else:
                self.options['target_stage_iter'] = default_target_stage_iter
Example #3
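# Extracts frames from a video with ffmpeg; io.input_int asks for the extraction FPS (0 = full fps).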
def extract_video(input_file,
                  output_dir,
                  output_ext=None,
                  fps=None,
                  start_frame=None,
                  end_frame=None):
    input_file_path = Path(input_file)
    output_path = Path(output_dir)

    if not output_path.exists():
        output_path.mkdir(exist_ok=True)

    if input_file_path.suffix == '.*':
        input_file_path = pathex.get_first_file_by_stem(
            input_file_path.parent, input_file_path.stem)
    else:
        if not input_file_path.exists():
            input_file_path = None

    if input_file_path is None:
        io.log_err("input_file not found.")
        return

    if fps is None:
        fps = io.input_int(
            "Enter FPS",
            0,
            help_message=
            "How many frames of every second of the video will be extracted. 0 - full fps"
        )

    if output_ext is None:
        output_ext = io.input_str(
            "Output image format",
            "png", ["png", "jpg"],
            help_message=
            "png is lossless, but extraction is x10 slower for HDD, requires x10 more disk space than jpg."
        )

    for filename in pathex.get_image_paths(output_path, ['.' + output_ext]):
        Path(filename).unlink()

    job = ffmpeg.input(str(input_file_path))

    if start_frame is not None and end_frame is not None:
        job = job.trim(start_frame=start_frame, end_frame=end_frame)

    kwargs = {'pix_fmt': 'rgb24'}
    if fps != 0:
        kwargs.update({'r': str(fps)})

    if output_ext == 'jpg':
        kwargs.update({'q:v': '2'})  #highest quality for jpg

    job = job.output(str(output_path / ('%5d.' + output_ext)), **kwargs)

    try:
        job = job.run()
    except:
        io.log_err("ffmpeg fail, job commandline:" + str(job.compile()))
Example #4
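# Resizes a faceset directory; io.input_int asks for the new image size, and the result can optionally be merged back.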
def process_folder ( dirpath):
    
    image_size = io.input_int(f"New image size", 512, valid_range=[256,2048])


    output_dirpath = dirpath.parent / (dirpath.name + '_resized')
    output_dirpath.mkdir (exist_ok=True, parents=True)

    dirpath_parts = '/'.join( dirpath.parts[-2:])
    output_dirpath_parts = '/'.join( output_dirpath.parts[-2:] )
    io.log_info (f"Resizing faceset in {dirpath_parts}")
    io.log_info ( f"Processing to {output_dirpath_parts}")

    output_images_paths = pathex.get_image_paths(output_dirpath)
    if len(output_images_paths) > 0:
        for filename in output_images_paths:
            Path(filename).unlink()

    image_paths = [Path(x) for x in pathex.get_image_paths( dirpath )]
    result = FacesetResizerSubprocessor ( image_paths, output_dirpath, image_size).run()

    is_merge = io.input_bool (f"\r\nMerge {output_dirpath_parts} to {dirpath_parts} ?", True)
    if is_merge:
        io.log_info (f"Copying processed files to {dirpath_parts}")

        for (filepath, output_filepath) in result:
            try:
                shutil.copy (output_filepath, filepath)
            except:
                pass

        io.log_info (f"Removing {output_dirpath_parts}")
        shutil.rmtree(output_dirpath)
Example #5
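# Denoises an image sequence with ffmpeg's hqdn3d filter; io.input_int asks for the denoise factor.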
def denoise_image_sequence(input_dir, ext=None, factor=None):
    input_path = Path(input_dir)

    if not input_path.exists():
        io.log_err("input_dir not found.")
        return

    if ext is None:
        ext = io.input_str("Input image format (extension)", "png")

    if factor is None:
        factor = np.clip(io.input_int("Denoise factor?", 5, add_info="1-20"),
                         1, 20)

    kwargs = {}
    if ext == 'jpg':
        kwargs.update({'q:v': '2'})

    job = (ffmpeg.input(str(input_path / ('%5d.' + ext))).filter(
        "hqdn3d", factor, factor, 5,
        5).output(str(input_path / ('%5d.' + ext)), **kwargs))

    try:
        job = job.run()
    except:
        io.log_err("ffmpeg fail, job commandline:" + str(job.compile()))
Example #6
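    # Asks for the training batch size; the value is clipped when a valid range is given.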
    def ask_batch_size(self, suggest_batch_size=None, range=None):
        default_batch_size = self.load_or_def_option('batch_size', suggest_batch_size or self.batch_size)

        batch_size = max(0, io.input_int("Batch_size", default_batch_size, valid_range=range, help_message="Larger batch size is better for NN's generalization, but it can cause Out of Memory error. Tune this value for your videocard manually."))

        if range is not None:
            batch_size = np.clip(batch_size, range[0], range[1])

        self.options['batch_size'] = self.batch_size = batch_size
Example #7
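# Cuts a video segment with ffmpeg; io.input_int asks for the audio track id and the output bitrate.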
def cut_video(input_file,
              from_time=None,
              to_time=None,
              audio_track_id=None,
              bitrate=None):
    input_file_path = Path(input_file)
    if input_file_path is None:
        io.log_err("input_file not found.")
        return

    output_file_path = input_file_path.parent / (
        input_file_path.stem + "_cut" + input_file_path.suffix)

    if from_time is None:
        from_time = io.input_str("From time", "00:00:00.000")

    if to_time is None:
        to_time = io.input_str("To time", "00:00:00.000")

    if audio_track_id is None:
        audio_track_id = io.input_int("Specify audio track id.", 0)

    if bitrate is None:
        bitrate = max(1, io.input_int("输出码率 Bitrate of output file in MB/s",  # "输出码率" = "output bitrate"
                                      25))

    kwargs = {
        "c:v": "libx264",
        "b:v": "%dM" % (bitrate),
        "pix_fmt": "yuv420p",
    }

    job = ffmpeg.input(str(input_file_path), ss=from_time, to=to_time)

    job_v = job['v:0']
    job_a = job['a:' + str(audio_track_id) + '?']

    job = ffmpeg.output(job_v, job_a, str(output_file_path),
                        **kwargs).overwrite_output()

    try:
        job = job.run()
    except:
        io.log_err("ffmpeg fail, job commandline:" + str(job.compile()))
Example #8
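 # Asks how often (in hours) to autobackup model files with a preview.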
 def ask_autobackup_hour(self):
     default_autobackup_hour = self.options[
         'autobackup_hour'] = self.load_or_def_option('autobackup_hour', 0)
     self.options['autobackup_hour'] = io.input_int(
         f"Autobackup every N hour",
         default_autobackup_hour,
         add_info="0..24",
         help_message=
         "Autobackup model files with preview every N hour. Latest backup located in model/<>_autobackups/01"
     )
Example #9
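 # Asks for the training batch size without range clipping.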
 def ask_batch_size(self, suggest_batch_size=None):
     default_batch_size = self.load_or_def_option(
         'batch_size', suggest_batch_size or self.batch_size)
     self.batch_size = max(
         0,
         io.input_int(
             "Batch_size",
             default_batch_size,
             help_message=
             "Larger batch size is better for NN's generalization, but it can cause Out of Memory error. Tune this value for your videocard manually."
         ))
Example #10
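    # Merger settings variant: the sharpen mode comes from a UI config (UIParamReflect) instead of an io.input_int prompt.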
    def ask_settings(self):
        s = """Choose sharpen mode: \n"""
        for key in self.sharpen_dict.keys():
            s += f"""({key}) {self.sharpen_dict[key]}\n"""
        io.log_info(s)
        # self.sharpen_mode = io.input_int ("", 0, valid_list=self.sharpen_dict.keys(), help_message="Enhance details by applying sharpen filter.")
        self.sharpen_mode = UIParamReflect.UIParam2Config.sharpen_mode

        if self.sharpen_mode != 0:
            self.blursharpen_amount = np.clip(
                io.input_int("Choose blur/sharpen amount",
                             0,
                             add_info="-100..100"), -100, 100)
Example #11
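# Resizes a faceset and optionally changes its face type; io.input_int asks for the new image size.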
def process_folder ( dirpath):
    
    image_size = io.input_int(f"New image size", 512, valid_range=[128,2048])
    
    face_type = io.input_str ("Change face type", 'same', ['h','mf','f','wf','head','same']).lower()
    if face_type == 'same':
        face_type = None
    else:
        face_type = {'h'  : FaceType.HALF,
                     'mf' : FaceType.MID_FULL,
                     'f'  : FaceType.FULL,
                     'wf' : FaceType.WHOLE_FACE,
                     'head' : FaceType.HEAD}[face_type]
                     

    output_dirpath = dirpath.parent / (dirpath.name + '_resized')
    output_dirpath.mkdir (exist_ok=True, parents=True)

    dirpath_parts = '/'.join( dirpath.parts[-2:])
    output_dirpath_parts = '/'.join( output_dirpath.parts[-2:] )
    io.log_info (f"Resizing faceset in {dirpath_parts}")
    io.log_info ( f"Processing to {output_dirpath_parts}")

    output_images_paths = pathex.get_image_paths(output_dirpath)
    if len(output_images_paths) > 0:
        for filename in output_images_paths:
            Path(filename).unlink()

    image_paths = [Path(x) for x in pathex.get_image_paths( dirpath )]
    result = FacesetResizerSubprocessor ( image_paths, output_dirpath, image_size, face_type).run()

    is_merge = io.input_bool (f"\r\nMerge {output_dirpath_parts} to {dirpath_parts} ?", True)
    if is_merge:
        io.log_info (f"Copying processed files to {dirpath_parts}")

        for (filepath, output_filepath) in result:
            try:
                shutil.copy (output_filepath, filepath)
            except:
                pass

        io.log_info (f"Removing {output_dirpath_parts}")
        shutil.rmtree(output_dirpath)
Example #12
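# Sort tool entry point: when no method is given, io.input_int selects one from the enumerated list.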
def main(input_path, sort_by_method=None):
    io.log_info("Running sort tool.\r\n")

    if sort_by_method is None:
        io.log_info(f"Choose sorting method:")

        key_list = list(sort_func_methods.keys())
        for i, key in enumerate(key_list):
            desc, func = sort_func_methods[key]
            io.log_info(f"[{i}] {desc}")

        io.log_info("")
        id = io.input_int("", 3, valid_list=[*range(len(key_list))])

        sort_by_method = key_list[id]
    else:
        sort_by_method = sort_by_method.lower()

    desc, func = sort_func_methods[sort_by_method]
    img_list, trash_img_list = func(input_path)

    final_process(input_path, img_list, trash_img_list)
Example #13
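    # Interactive merger configuration: mode, mask mode, erode/blur modifiers and other numeric settings via io.input_int.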
    def ask_settings(self):
        s = """Choose mode: \n"""
        for key in mode_dict.keys():
            s += f"""({key}) {mode_dict[key]}\n"""
        io.log_info(s)
        mode = io.input_int("", mode_str_dict.get(self.default_mode, 1))

        self.mode = mode_dict.get(mode, self.default_mode)

        if 'raw' not in self.mode:
            if self.mode == 'hist-match':
                self.masked_hist_match = io.input_bool("Masked hist match?",
                                                       True)

            if self.mode == 'hist-match' or self.mode == 'seamless-hist-match':
                self.hist_match_threshold = np.clip(
                    io.input_int("Hist match threshold",
                                 255,
                                 add_info="0..255"), 0, 255)

        s = """Choose mask mode: \n"""
        for key in mask_mode_dict.keys():
            s += f"""({key}) {mask_mode_dict[key]}\n"""
        io.log_info(s)
        self.mask_mode = io.input_int("", 1, valid_list=mask_mode_dict.keys())

        if 'raw' not in self.mode:
            self.erode_mask_modifier = np.clip(
                io.input_int("Choose erode mask modifier",
                             0,
                             add_info="-400..400"), -400, 400)
            self.blur_mask_modifier = np.clip(
                io.input_int("Choose blur mask modifier", 0,
                             add_info="0..400"), 0, 400)
            self.motion_blur_power = np.clip(
                io.input_int("Choose motion blur power", 0, add_info="0..100"),
                0, 100)

        self.output_face_scale = np.clip(
            io.input_int("Choose output face scale modifier",
                         0,
                         add_info="-50..50"), -50, 50)

        if 'raw' not in self.mode:
            self.color_transfer_mode = io.input_str(
                "Color transfer to predicted face",
                None,
                valid_list=list(ctm_str_dict.keys())[1:])
            self.color_transfer_mode = ctm_str_dict[self.color_transfer_mode]

        super().ask_settings()

        self.super_resolution_power = np.clip(
            io.input_int(
                "Choose super resolution power",
                0,
                add_info="0..100",
                help_message=
                "Enhance details by applying superresolution network."), 0,
            100)

        if 'raw' not in self.mode:
            self.image_denoise_power = np.clip(
                io.input_int("Choose image degrade by denoise power",
                             0,
                             add_info="0..500"), 0, 500)
            self.bicubic_degrade_power = np.clip(
                io.input_int("Choose image degrade by bicubic rescale power",
                             0,
                             add_info="0..100"), 0, 100)
            self.color_degrade_power = np.clip(
                io.input_int("Degrade color power of final image",
                             0,
                             add_info="0..100"), 0, 100)

        io.log_info("")
Example #14
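# Sorts faces into a "best" subset by yaw, blur and pitch; io.input_int asks for the target number of faces.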
def sort_best(input_path, include_by_blur=True):
    io.log_info("Performing sort by best faces.")

    target_count = io.input_int("Target number of faces?", 2000)

    img_list, trash_img_list = FinalLoaderSubprocessor(
        pathex.get_image_paths(input_path), include_by_blur).run()
    final_img_list = []

    grads = 128
    imgs_per_grad = round(target_count / grads)

    grads_space = np.linspace(-math.pi / 2, math.pi / 2, grads)

    yaws_sample_list = [None] * grads
    for g in io.progress_bar_generator(range(grads), "Sort by yaw"):
        yaw = grads_space[g]
        next_yaw = grads_space[g + 1] if g < grads - 1 else yaw

        yaw_samples = []
        for img in img_list:
            s_yaw = -img[3]
            if (g == 0          and s_yaw < next_yaw) or \
               (g < grads-1     and s_yaw >= yaw and s_yaw < next_yaw) or \
               (g == grads-1    and s_yaw >= yaw):
                yaw_samples += [img]
        if len(yaw_samples) > 0:
            yaws_sample_list[g] = yaw_samples

    total_lack = 0
    for g in io.progress_bar_generator(range(grads), ""):
        img_list = yaws_sample_list[g]
        img_list_len = len(img_list) if img_list is not None else 0

        lack = imgs_per_grad - img_list_len
        total_lack += max(lack, 0)

    imgs_per_grad += total_lack // grads

    if include_by_blur:
        sharpned_imgs_per_grad = imgs_per_grad * 10
        for g in io.progress_bar_generator(range(grads), "Sort by blur"):
            img_list = yaws_sample_list[g]
            if img_list is None:
                continue

            img_list = sorted(img_list,
                              key=operator.itemgetter(1),
                              reverse=True)

            if len(img_list) > sharpned_imgs_per_grad:
                trash_img_list += img_list[sharpned_imgs_per_grad:]
                img_list = img_list[0:sharpned_imgs_per_grad]

            yaws_sample_list[g] = img_list

    yaw_pitch_sample_list = [None] * grads
    pitch_grads = imgs_per_grad

    for g in io.progress_bar_generator(range(grads), "Sort by pitch"):
        img_list = yaws_sample_list[g]
        if img_list is None:
            continue

        pitch_sample_list = [None] * pitch_grads

        grads_space = np.linspace(-math.pi / 2, math.pi / 2, pitch_grads)

        for pg in range(pitch_grads):

            pitch = grads_space[pg]
            next_pitch = grads_space[pg + 1] if pg < pitch_grads - 1 else pitch

            pitch_samples = []
            for img in img_list:
                s_pitch = img[4]
                if (pg == 0                and s_pitch < next_pitch) or \
                   (pg < pitch_grads-1     and s_pitch >= pitch and s_pitch < next_pitch) or \
                   (pg == pitch_grads-1    and s_pitch >= pitch):
                    pitch_samples += [img]

            if len(pitch_samples) > 0:
                pitch_sample_list[pg] = pitch_samples
        yaw_pitch_sample_list[g] = pitch_sample_list

    yaw_pitch_sample_list = FinalHistDissimSubprocessor(
        yaw_pitch_sample_list).run()

    for g in io.progress_bar_generator(range(grads), "Fetching the best"):
        pitch_sample_list = yaw_pitch_sample_list[g]
        if pitch_sample_list is None:
            continue

        n = imgs_per_grad

        while n > 0:
            n_prev = n
            for pg in range(pitch_grads):
                img_list = pitch_sample_list[pg]
                if img_list is None:
                    continue
                final_img_list += [img_list.pop(0)]
                if len(img_list) == 0:
                    pitch_sample_list[pg] = None
                n -= 1
                if n == 0:
                    break
            if n_prev == n:
                break

        for pg in range(pitch_grads):
            img_list = pitch_sample_list[pg]
            if img_list is None:
                continue
            trash_img_list += img_list

    return final_img_list, trash_img_list
Example #15
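    # Model option setup: autoencoder/encoder/decoder dimensions, GAN power and gradient clipping are asked interactively.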
    def on_initialize_options(self):
        device_config = nn.getCurrentDeviceConfig()

        lowest_vram = 2
        if len(device_config.devices) != 0:
            lowest_vram = device_config.devices.get_worst_device().total_mem_gb

        suggest_batch_size = 4

        yn_str = {True: 'y', False: 'n'}
        default_models_opt_on_gpu = self.options[
            'models_opt_on_gpu'] = self.load_or_def_option(
                'models_opt_on_gpu', False)
        default_ae_dims = self.options['ae_dims'] = self.load_or_def_option(
            'ae_dims', 256)
        default_e_dims = self.options['e_dims'] = self.load_or_def_option(
            'e_dims', 64)
        default_d_dims = self.options['d_dims'] = self.load_or_def_option(
            'd_dims', 64)
        default_d_mask_dims = self.options[
            'd_mask_dims'] = self.load_or_def_option('d_mask_dims', 16)
        default_gan_power = self.options[
            'gan_power'] = self.load_or_def_option('gan_power', 0.0)
        default_clipgrad = self.options['clipgrad'] = self.load_or_def_option(
            'clipgrad', False)

        ask_override = self.ask_override()
        if self.is_first_run() or ask_override:
            self.ask_autobackup_hour()
            self.ask_write_preview_history()
            self.ask_target_iter()
            self.ask_batch_size(suggest_batch_size)

        if self.is_first_run():
            self.options['ae_dims'] = np.clip(
                io.input_int(
                    "AutoEncoder dimensions",
                    default_ae_dims,
                    add_info="32-1024",
                    help_message=
                    "All face information will packed to AE dims. If amount of AE dims are not enough, then for example closed eyes will not be recognized. More dims are better, but require more VRAM. You can fine-tune model size to fit your GPU."
                ), 32, 1024)

            e_dims = np.clip(
                io.input_int(
                    "Encoder dimensions",
                    default_e_dims,
                    add_info="16-256",
                    help_message=
                    "More dims help to recognize more facial features and achieve sharper result, but require more VRAM. You can fine-tune model size to fit your GPU."
                ), 16, 256)
            self.options['e_dims'] = e_dims + e_dims % 2

            d_dims = np.clip(
                io.input_int(
                    "Decoder dimensions",
                    default_d_dims,
                    add_info="16-256",
                    help_message=
                    "More dims help to recognize more facial features and achieve sharper result, but require more VRAM. You can fine-tune model size to fit your GPU."
                ), 16, 256)
            self.options['d_dims'] = d_dims + d_dims % 2

            d_mask_dims = np.clip(
                io.input_int(
                    "Decoder mask dimensions",
                    default_d_mask_dims,
                    add_info="16-256",
                    help_message=
                    "Typical mask dimensions = decoder dimensions / 3. If you manually cut out obstacles from the dst mask, you can increase this parameter to achieve better quality."
                ), 16, 256)
            self.options['d_mask_dims'] = d_mask_dims + d_mask_dims % 2

        if self.is_first_run() or ask_override:
            if len(device_config.devices) == 1:
                self.options['models_opt_on_gpu'] = io.input_bool(
                    "Place models and optimizer on GPU",
                    default_models_opt_on_gpu,
                    help_message=
                    "When you train on one GPU, by default model and optimizer weights are placed on GPU to accelerate the process. You can place they on CPU to free up extra VRAM, thus set bigger dimensions."
                )

            self.options['gan_power'] = np.clip(
                io.input_number(
                    "GAN power",
                    default_gan_power,
                    add_info="0.0 .. 10.0",
                    help_message=
                    "Train the network in Generative Adversarial manner. Accelerates the speed of training. Forces the neural network to learn small details of the face. You can enable/disable this option at any time. Typical value is 1.0"
                ), 0.0, 10.0)

            self.options['clipgrad'] = io.input_bool(
                "Enable gradient clipping",
                default_clipgrad,
                help_message=
                "Gradient clipping reduces chance of model collapse, sacrificing speed of training."
            )
Example #16
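# extract_video variant that mirrors its progress into InfoNotifier messages for a GUI front end.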
def extract_video(input_file, output_dir, output_ext=None, fps=None):
    input_file_path = Path(input_file)
    output_path = Path(output_dir)

    if not output_path.exists():
        output_path.mkdir(exist_ok=True)
    # "视频帧输出目录" = "video frame output directory"
    InfoNotifier.InfoNotifier.g_progress_info.append(
        "\n视频帧输出目录: " + str(Path(output_path).absolute()))

    if input_file_path.suffix == '.*':
        input_file_path = pathex.get_first_file_by_stem(
            input_file_path.parent, input_file_path.stem)
    else:
        if not input_file_path.exists():
            input_file_path = None

    # "视频输入路径" = "video input path"
    InfoNotifier.InfoNotifier.g_progress_info.append("\n视频输入路径:" +
                                                     str(input_file_path))

    if input_file_path is None:
        io.log_err("input_file not found.")
        InfoNotifier.InfoNotifier.g_progress_info.append("\n视频输入路径不存在")  # "video input path not found"
        return

    if fps is None:
        fps = io.input_int(
            "Enter FPS",
            0,
            help_message=
            "How many frames of every second of the video will be extracted. 0 - full fps"
        )
    InfoNotifier.InfoNotifier.g_progress_info.append("\n视频帧抽取频率: full fps")  # "video frame extraction rate: full fps"

    if output_ext is None:
        output_ext = io.input_str(
            "Output image format",
            "png", ["png", "jpg"],
            help_message=
            "png is lossless, but extraction is x10 slower for HDD, requires x10 more disk space than jpg."
        )

    # "视频帧输出格式" = "video frame output format"
    InfoNotifier.InfoNotifier.g_progress_info.append("\n视频帧输出格式频率: " +
                                                     output_ext)

    filenames = pathex.get_image_paths(output_path, ['.' + output_ext])
    if len(filenames) != 0:
        # "视频帧输出目录不为空, 该目录将被清空!" = "the video frame output directory is not empty and will be cleared!"
        InfoNotifier.InfoNotifier.g_progress_info.append(
            "\n视频帧输出目录不为空, 该目录将被清空!")

    for filename in filenames:
        Path(filename).unlink()
        QApplication.processEvents()

    job = ffmpeg.input(str(input_file_path))

    kwargs = {'pix_fmt': 'rgb24'}
    if fps != 0:
        kwargs.update({'r': str(fps)})

    if output_ext == 'jpg':
        kwargs.update({'q:v': '2'})  #highest quality for jpg

    job = job.output(str(output_path / ('%5d.' + output_ext)), **kwargs)

    try:
        job, err = job.run(cmd=UIParamReflect.GlobalConfig.ffmpeg_cmd_path)
    except:
        io.log_err("ffmpeg fail, job commandline:" + str(job.compile()))
Example #17
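    # Interactive option setup for a morphable model: resolution, face type, dimensions, morph factor, GAN and pretraining settings.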
    def on_initialize_options(self):
        device_config = nn.getCurrentDeviceConfig()

        lowest_vram = 2
        if len(device_config.devices) != 0:
            lowest_vram = device_config.devices.get_worst_device().total_mem_gb

        if lowest_vram >= 4:
            suggest_batch_size = 8
        else:
            suggest_batch_size = 4

        yn_str = {True:'y',False:'n'}
        min_res = 64
        max_res = 640

        default_resolution         = self.options['resolution']         = self.load_or_def_option('resolution', 224)
        default_face_type          = self.options['face_type']          = self.load_or_def_option('face_type', 'wf')
        default_models_opt_on_gpu  = self.options['models_opt_on_gpu']  = self.load_or_def_option('models_opt_on_gpu', True)

        default_ae_dims            = self.options['ae_dims']            = self.load_or_def_option('ae_dims', 256)
        default_e_dims             = self.options['e_dims']             = self.load_or_def_option('e_dims', 64)
        default_d_dims             = self.options['d_dims']             = self.options.get('d_dims', None)
        default_d_mask_dims        = self.options['d_mask_dims']        = self.options.get('d_mask_dims', None)
        default_morph_factor       = self.options['morph_factor']       = self.options.get('morph_factor', 0.33)
        default_masked_training    = self.options['masked_training']    = self.load_or_def_option('masked_training', True)
        default_eyes_mouth_prio    = self.options['eyes_mouth_prio']    = self.load_or_def_option('eyes_mouth_prio', True)
        default_uniform_yaw        = self.options['uniform_yaw']        = self.load_or_def_option('uniform_yaw', False)

        lr_dropout = self.load_or_def_option('lr_dropout', 'n')
        lr_dropout = {True:'y', False:'n'}.get(lr_dropout, lr_dropout) #backward comp
        default_lr_dropout         = self.options['lr_dropout'] = lr_dropout

        default_random_warp        = self.options['random_warp']        = self.load_or_def_option('random_warp', True)
        default_ct_mode            = self.options['ct_mode']            = self.load_or_def_option('ct_mode', 'none')
        default_clipgrad           = self.options['clipgrad']           = self.load_or_def_option('clipgrad', False)
        default_pretrain           = self.options['pretrain']      = self.load_or_def_option('pretrain', False)


        ask_override = self.ask_override()
        if self.is_first_run() or ask_override:
            self.ask_autobackup_hour()
            self.ask_write_preview_history()
            self.ask_target_iter()
            self.ask_random_src_flip()
            self.ask_random_dst_flip()
            self.ask_batch_size(suggest_batch_size)

        if self.is_first_run():
            resolution = io.input_int("Resolution", default_resolution, add_info="64-640", help_message="More resolution requires more VRAM and time to train. Value will be adjusted to multiple of 32 .")
            resolution = np.clip ( (resolution // 32) * 32, min_res, max_res)
            self.options['resolution'] = resolution
            self.options['face_type'] = io.input_str ("Face type", default_face_type, ['wf','head'], help_message="whole face / head").lower()


        default_d_dims             = self.options['d_dims']             = self.load_or_def_option('d_dims', 64)

        default_d_mask_dims        = default_d_dims // 3
        default_d_mask_dims        += default_d_mask_dims % 2
        default_d_mask_dims        = self.options['d_mask_dims']        = self.load_or_def_option('d_mask_dims', default_d_mask_dims)

        if self.is_first_run():
            self.options['ae_dims'] = np.clip ( io.input_int("AutoEncoder dimensions", default_ae_dims, add_info="32-1024", help_message="All face information will packed to AE dims. If amount of AE dims are not enough, then for example closed eyes will not be recognized. More dims are better, but require more VRAM. You can fine-tune model size to fit your GPU." ), 32, 1024 )

            e_dims = np.clip ( io.input_int("Encoder dimensions", default_e_dims, add_info="16-256", help_message="More dims help to recognize more facial features and achieve sharper result, but require more VRAM. You can fine-tune model size to fit your GPU." ), 16, 256 )
            self.options['e_dims'] = e_dims + e_dims % 2

            d_dims = np.clip ( io.input_int("Decoder dimensions", default_d_dims, add_info="16-256", help_message="More dims help to recognize more facial features and achieve sharper result, but require more VRAM. You can fine-tune model size to fit your GPU." ), 16, 256 )
            self.options['d_dims'] = d_dims + d_dims % 2

            d_mask_dims = np.clip ( io.input_int("Decoder mask dimensions", default_d_mask_dims, add_info="16-256", help_message="Typical mask dimensions = decoder dimensions / 3. If you manually cut out obstacles from the dst mask, you can increase this parameter to achieve better quality." ), 16, 256 )
            self.options['d_mask_dims'] = d_mask_dims + d_mask_dims % 2
            
            morph_factor = np.clip ( io.input_number ("Morph factor.", default_morph_factor, add_info="0.1 .. 0.5", help_message="The smaller the value, the more src-like facial expressions will appear. The larger the value, the less space there is to train a large dst faceset in the neural network. Typical fine value is 0.33"), 0.1, 0.5 )
            self.options['morph_factor'] = morph_factor


        if self.is_first_run() or ask_override:
            if self.options['face_type'] == 'wf' or self.options['face_type'] == 'head':
                self.options['masked_training']  = io.input_bool ("Masked training", default_masked_training, help_message="This option is available only for 'whole_face' or 'head' type. Masked training clips training area to full_face mask or XSeg mask, thus network will train the faces properly.")

            self.options['eyes_mouth_prio'] = io.input_bool ("Eyes and mouth priority", default_eyes_mouth_prio, help_message='Helps to fix eye problems during training like "alien eyes" and wrong eyes direction. Also makes the detail of the teeth higher.')
            self.options['uniform_yaw'] = io.input_bool ("Uniform yaw distribution of samples", default_uniform_yaw, help_message='Helps to fix blurry side faces due to small amount of them in the faceset.')

        default_gan_power          = self.options['gan_power']          = self.load_or_def_option('gan_power', 0.0)
        default_gan_patch_size     = self.options['gan_patch_size']     = self.load_or_def_option('gan_patch_size', self.options['resolution'] // 8)
        default_gan_dims           = self.options['gan_dims']           = self.load_or_def_option('gan_dims', 16)

        if self.is_first_run() or ask_override:
            self.options['models_opt_on_gpu'] = io.input_bool ("Place models and optimizer on GPU", default_models_opt_on_gpu, help_message="When you train on one GPU, by default model and optimizer weights are placed on GPU to accelerate the process. You can place they on CPU to free up extra VRAM, thus set bigger dimensions.")

            self.options['lr_dropout']  = io.input_str (f"Use learning rate dropout", default_lr_dropout, ['n','y','cpu'], help_message="When the face is trained enough, you can enable this option to get extra sharpness and reduce subpixel shake for less amount of iterations. Enabled it before `disable random warp` and before GAN. \nn - disabled.\ny - enabled\ncpu - enabled on CPU. This allows not to use extra VRAM, sacrificing 20% time of iteration.")

            self.options['random_warp'] = io.input_bool ("Enable random warp of samples", default_random_warp, help_message="Random warp is required to generalize facial expressions of both faces. When the face is trained enough, you can disable it to get extra sharpness and reduce subpixel shake for less amount of iterations.")

            self.options['gan_power'] = np.clip ( io.input_number ("GAN power", default_gan_power, add_info="0.0 .. 1.0", help_message="Forces the neural network to learn small details of the face. Enable it only when the face is trained enough with lr_dropout(on) and random_warp(off), and don't disable. The higher the value, the higher the chances of artifacts. Typical fine value is 0.1"), 0.0, 1.0 )

            if self.options['gan_power'] != 0.0:
                gan_patch_size = np.clip ( io.input_int("GAN patch size", default_gan_patch_size, add_info="3-640", help_message="The higher patch size, the higher the quality, the more VRAM is required. You can get sharper edges even at the lowest setting. Typical fine value is resolution / 8." ), 3, 640 )
                self.options['gan_patch_size'] = gan_patch_size

                gan_dims = np.clip ( io.input_int("GAN dimensions", default_gan_dims, add_info="4-64", help_message="The dimensions of the GAN network. The higher dimensions, the more VRAM is required. You can get sharper edges even at the lowest setting. Typical fine value is 16." ), 4, 64 )
                self.options['gan_dims'] = gan_dims

            self.options['ct_mode'] = io.input_str (f"Color transfer for src faceset", default_ct_mode, ['none','rct','lct','mkl','idt','sot'], help_message="Change color distribution of src samples close to dst samples. Try all modes to find the best.")
            self.options['clipgrad'] = io.input_bool ("Enable gradient clipping", default_clipgrad, help_message="Gradient clipping reduces chance of model collapse, sacrificing speed of training.")
            
            self.options['pretrain'] = io.input_bool ("Enable pretraining mode", default_pretrain, help_message="Pretrain the model with large amount of various faces. After that, model can be used to train the fakes more quickly. Forces random_warp=N, random_flips=Y, gan_power=0.0, lr_dropout=N, uniform_yaw=Y")
        
        self.gan_model_changed = (default_gan_patch_size != self.options['gan_patch_size']) or (default_gan_dims != self.options['gan_dims'])
        self.pretrain_just_disabled = (default_pretrain == True and self.options['pretrain'] == False)
Example #18
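# Denoises an arbitrarily named image sequence: files are renamed to a numeric sequence, filtered with ffmpeg hqdn3d, then renamed back.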
def denoise_image_sequence(input_dir, ext=None, factor=None):
    input_path = Path(input_dir)

    if not input_path.exists():
        io.log_err("input_dir not found.")
        return

    image_paths = [
        Path(filepath) for filepath in pathex.get_image_paths(input_path)
    ]

    # Check extension of all images
    image_paths_suffix = None
    for filepath in image_paths:
        if image_paths_suffix is None:
            image_paths_suffix = filepath.suffix
        else:
            if filepath.suffix != image_paths_suffix:
                io.log_err(
                    f"All images in {input_path.name} should be with the same extension."
                )
                return

    if factor is None:
        factor = np.clip(io.input_int("Denoise factor?", 7, add_info="1-20"),
                         1, 20)

    # Rename to temporary filenames
    for i, filepath in io.progress_bar_generator(enumerate(image_paths),
                                                 "Renaming",
                                                 leave=False):
        src = filepath
        dst = filepath.parent / (f'{i+1:06}_{filepath.name}')
        try:
            src.rename(dst)
        except:
            io.log_err('fail to rename %s' % (src.name))
            return

    # Rename to sequential filenames
    for i, filepath in io.progress_bar_generator(enumerate(image_paths),
                                                 "Renaming",
                                                 leave=False):

        src = filepath.parent / (f'{i+1:06}_{filepath.name}')
        dst = filepath.parent / (f'{i+1:06}{filepath.suffix}')
        try:
            src.rename(dst)
        except:
            io.log_err('fail to rename %s' % (src.name))
            return

    # Process image sequence in ffmpeg
    kwargs = {}
    if image_paths_suffix == '.jpg':
        kwargs.update({'q:v': '2'})

    job = (ffmpeg.input(str(input_path / ('%6d' + image_paths_suffix))).filter(
        "hqdn3d", factor, factor, 5,
        5).output(str(input_path / ('%6d' + image_paths_suffix)), **kwargs))

    try:
        job = job.run()
    except:
        io.log_err("ffmpeg fail, job commandline:" + str(job.compile()))

    # Rename to temporary filenames
    for i, filepath in io.progress_bar_generator(enumerate(image_paths),
                                                 "Renaming",
                                                 leave=False):
        src = filepath.parent / (f'{i+1:06}{filepath.suffix}')
        dst = filepath.parent / (f'{i+1:06}_{filepath.name}')
        try:
            src.rename(dst)
        except:
            io.log_err('fail to rename %s' % (src.name))
            return

    # Rename to initial filenames
    for i, filepath in io.progress_bar_generator(enumerate(image_paths),
                                                 "Renaming",
                                                 leave=False):
        src = filepath.parent / (f'{i+1:06}_{filepath.name}')
        dst = filepath

        try:
            src.rename(dst)
        except:
            io.log_err('fail to rename %s' % (src.name))
            return
Example #19
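    # Interactive option setup for a SAEHD-style model: resolution, face type, architecture, dimensions, style powers and pretraining.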
    def on_initialize_options(self):
        device_config = nn.getCurrentDeviceConfig()

        lowest_vram = 2
        if len(device_config.devices) != 0:
            lowest_vram = device_config.devices.get_worst_device().total_mem_gb

        if lowest_vram >= 4:
            suggest_batch_size = 8
        else:
            suggest_batch_size = 4

        yn_str = {True:'y',False:'n'}

        default_resolution         = self.options['resolution']         = self.load_or_def_option('resolution', 128)
        default_face_type          = self.options['face_type']          = self.load_or_def_option('face_type', 'f')
        default_models_opt_on_gpu  = self.options['models_opt_on_gpu']  = self.load_or_def_option('models_opt_on_gpu', True)
        default_archi              = self.options['archi']              = self.load_or_def_option('archi', 'df')
        default_ae_dims            = self.options['ae_dims']            = self.load_or_def_option('ae_dims', 256)
        default_e_dims             = self.options['e_dims']             = self.load_or_def_option('e_dims', 64)
        default_d_dims             = self.options['d_dims']             = self.options.get('d_dims', None)
        default_d_mask_dims        = self.options['d_mask_dims']        = self.options.get('d_mask_dims', None)
        default_masked_training    = self.options['masked_training']    = self.load_or_def_option('masked_training', True)
        default_eyes_prio          = self.options['eyes_prio']          = self.load_or_def_option('eyes_prio', False)
        default_lr_dropout         = self.options['lr_dropout']         = self.load_or_def_option('lr_dropout', False)
        default_random_warp        = self.options['random_warp']        = self.load_or_def_option('random_warp', True)
        default_gan_power          = self.options['gan_power']          = self.load_or_def_option('gan_power', 0.0)
        default_true_face_power    = self.options['true_face_power']    = self.load_or_def_option('true_face_power', 0.0)
        default_face_style_power   = self.options['face_style_power']   = self.load_or_def_option('face_style_power', 0.0)
        default_bg_style_power     = self.options['bg_style_power']     = self.load_or_def_option('bg_style_power', 0.0)
        default_ct_mode            = self.options['ct_mode']            = self.load_or_def_option('ct_mode', 'none')
        default_clipgrad           = self.options['clipgrad']           = self.load_or_def_option('clipgrad', False)
        default_pretrain           = self.options['pretrain']           = self.load_or_def_option('pretrain', False)

        ask_override = self.ask_override()
        if self.is_first_run() or ask_override:
            self.ask_autobackup_hour()
            self.ask_write_preview_history()
            self.ask_target_iter()
            self.ask_random_flip()
            self.ask_batch_size(suggest_batch_size)

        if self.is_first_run():
            resolution = io.input_int("Resolution", default_resolution, add_info="64-512", help_message="More resolution requires more VRAM and time to train. Value will be adjusted to multiple of 16.")
            resolution = np.clip ( (resolution // 16) * 16, 64, 512)
            self.options['resolution'] = resolution
            self.options['face_type'] = io.input_str ("Face type", default_face_type, ['h','mf','f','wf'], help_message="Half / mid face / full face / whole face. Half face has better resolution, but covers less area of cheeks. Mid face is 30% wider than half face. 'Whole face' covers full area of face include forehead, but requires manual merge in Adobe After Effects.").lower()
            self.options['archi'] = io.input_str ("AE architecture", default_archi, ['df','liae','dfhd','liaehd'], help_message="'df' keeps faces more natural.\n'liae' can fix overly different face shapes.\n'hd' are experimental versions.").lower()

        default_d_dims             = 48 if self.options['archi'] == 'dfhd' else 64
        default_d_dims             = self.options['d_dims']             = self.load_or_def_option('d_dims', default_d_dims)

        default_d_mask_dims        = default_d_dims // 3
        default_d_mask_dims        += default_d_mask_dims % 2
        default_d_mask_dims        = self.options['d_mask_dims']        = self.load_or_def_option('d_mask_dims', default_d_mask_dims)

        if self.is_first_run():
            self.options['ae_dims'] = np.clip ( io.input_int("AutoEncoder dimensions", default_ae_dims, add_info="32-1024", help_message="All face information will packed to AE dims. If amount of AE dims are not enough, then for example closed eyes will not be recognized. More dims are better, but require more VRAM. You can fine-tune model size to fit your GPU." ), 32, 1024 )

            e_dims = np.clip ( io.input_int("Encoder dimensions", default_e_dims, add_info="16-256", help_message="More dims help to recognize more facial features and achieve sharper result, but require more VRAM. You can fine-tune model size to fit your GPU." ), 16, 256 )
            self.options['e_dims'] = e_dims + e_dims % 2


            d_dims = np.clip ( io.input_int("Decoder dimensions", default_d_dims, add_info="16-256", help_message="More dims help to recognize more facial features and achieve sharper result, but require more VRAM. You can fine-tune model size to fit your GPU." ), 16, 256 )
            self.options['d_dims'] = d_dims + d_dims % 2

            d_mask_dims = np.clip ( io.input_int("Decoder mask dimensions", default_d_mask_dims, add_info="16-256", help_message="Typical mask dimensions = decoder dimensions / 3. If you manually cut out obstacles from the dst mask, you can increase this parameter to achieve better quality." ), 16, 256 )
            self.options['d_mask_dims'] = d_mask_dims + d_mask_dims % 2

        if self.is_first_run() or ask_override:
            if self.options['face_type'] == 'wf':
                self.options['masked_training']  = io.input_bool ("Masked training", default_masked_training, help_message="This option is available only for 'whole_face' type. Masked training clips training area to full_face mask, thus network will train the faces properly.  When the face is trained enough, disable this option to train all area of the frame. Merge with 'raw-rgb' mode, then use Adobe After Effects to manually mask and compose whole face include forehead.")
            
            self.options['eyes_prio']  = io.input_bool ("Eyes priority", default_eyes_prio, help_message='Helps to fix eye problems during training like "alien eyes" and wrong eyes direction ( especially on HD architectures ) by forcing the neural network to train eyes with higher priority. before/after https://i.imgur.com/YQHOuSR.jpg ')
      
        if self.is_first_run() or ask_override:
            self.options['models_opt_on_gpu'] = io.input_bool ("Place models and optimizer on GPU", default_models_opt_on_gpu, help_message="When you train on one GPU, by default model and optimizer weights are placed on GPU to accelerate the process. You can place they on CPU to free up extra VRAM, thus set bigger dimensions.")

            self.options['lr_dropout']  = io.input_bool ("Use learning rate dropout", default_lr_dropout, help_message="When the face is trained enough, you can enable this option to get extra sharpness and reduce subpixel shake for less amount of iterations.")
            self.options['random_warp'] = io.input_bool ("Enable random warp of samples", default_random_warp, help_message="Random warp is required to generalize facial expressions of both faces. When the face is trained enough, you can disable it to get extra sharpness and reduce subpixel shake for less amount of iterations.")

            self.options['gan_power'] = np.clip ( io.input_number ("GAN power", default_gan_power, add_info="0.0 .. 10.0", help_message="Train the network in Generative Adversarial manner. Accelerates the speed of training. Forces the neural network to learn small details of the face. You can enable/disable this option at any time. Typical value is 1.0"), 0.0, 10.0 )

            if 'df' in self.options['archi']:
                self.options['true_face_power'] = np.clip ( io.input_number ("'True face' power.", default_true_face_power, add_info="0.0000 .. 1.0", help_message="Experimental option. Discriminates result face to be more like src face. Higher value - stronger discrimination. Typical value is 0.01 . Comparison - https://i.imgur.com/czScS9q.png"), 0.0, 1.0 )
            else:
                self.options['true_face_power'] = 0.0

            if self.options['face_type'] != 'wf':
                self.options['face_style_power'] = np.clip ( io.input_number("Face style power", default_face_style_power, add_info="0.0..100.0", help_message="Learn to transfer face style details such as light and color conditions. Warning: Enable it only after 10k iters, when predicted face is clear enough to start learn style. Start from 0.001 value and check history changes. Enabling this option increases the chance of model collapse."), 0.0, 100.0 )
                self.options['bg_style_power'] = np.clip ( io.input_number("Background style power", default_bg_style_power, add_info="0.0..100.0", help_message="Learn to transfer background around face. This can make face more like dst. Enabling this option increases the chance of model collapse. Typical value is 2.0"), 0.0, 100.0 )
                
            self.options['ct_mode'] = io.input_str (f"Color transfer for src faceset", default_ct_mode, ['none','rct','lct','mkl','idt','sot'], help_message="Change color distribution of src samples close to dst samples. Try all modes to find the best.")
            self.options['clipgrad'] = io.input_bool ("Enable gradient clipping", default_clipgrad, help_message="Gradient clipping reduces chance of model collapse, sacrificing speed of training.")
            
            self.options['pretrain'] = io.input_bool ("Enable pretraining mode", default_pretrain, help_message="Pretrain the model with large amount of various faces. After that, model can be used to train the fakes more quickly.")

        if self.options['pretrain'] and self.get_pretraining_data_path() is None:
            raise Exception("pretraining_data_path is not defined")

        self.pretrain_just_disabled = (default_pretrain == True and self.options['pretrain'] == False)
Example #20
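 # Asks for the maximum number of autobackups to keep (0 = unlimited).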
 def ask_maximum_n_backups(self, default_value=24):
     default_maximum_n_backups = self.options['maximum_n_backups'] = self.load_or_def_option('maximum_n_backups', default_value)
     self.options['maximum_n_backups'] = io.input_int(f"Maximum N backups", default_maximum_n_backups, help_message="Maximum amount of backups that are located in model/<>_autobackups. Inputting 0 here would allow it to autobackup as many times as it occurs.")
Example #21
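 # Asks how often (in hours) to autobackup model files, with a configurable default.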
 def ask_autobackup_hour(self, default_value=0):
     default_autobackup_hour = self.options['autobackup_hour'] = self.load_or_def_option('autobackup_hour', default_value)
     self.options['autobackup_hour'] = io.input_int(f"Autobackup every N hour", default_autobackup_hour, add_info="0..24", help_message="Autobackup model files with preview every N hour. Latest backup is the last folder when sorted by name ascending located in model/<>_autobackups")
Example #22
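# Face extraction entry point: io.input_int picks the detector, then ExtractSubprocessor runs the extraction passes.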
def main(
    detector=None,
    input_path=None,
    output_path=None,
    output_debug=None,
    manual_fix=False,
    manual_output_debug_fix=False,
    manual_window_size=1368,
    image_size=256,
    face_type='full_face',
    max_faces_from_image=0,
    cpu_only=False,
    force_gpu_idxs=None,
):
    face_type = FaceType.fromString(face_type)

    if not input_path.exists():
        io.log_err('Input directory not found. Please ensure it exists.')
        return

    if detector is None:
        io.log_info("Choose detector type.")
        io.log_info("[0] S3FD")
        io.log_info("[1] manual")
        detector = {0: 's3fd', 1: 'manual'}[io.input_int("", 0, [0, 1])]

    device_config = nn.DeviceConfig.GPUIndexes( force_gpu_idxs or nn.ask_choose_device_idxs(choose_only_one=detector=='manual', suggest_all_gpu=True) ) \
                    if not cpu_only else nn.DeviceConfig.CPU()

    output_debug_path = output_path.parent / (output_path.name + '_debug')

    if output_debug is None:
        output_debug = io.input_bool(
            f"Write debug images to {output_debug_path.name}?", False)

    if output_path.exists():
        if not manual_output_debug_fix and input_path != output_path:
            output_images_paths = pathex.get_image_paths(output_path)
            if len(output_images_paths) > 0:
                io.input(
                    f"WARNING !!! \n {output_path} contains files! \n They will be deleted. \n Press enter to continue."
                )
                for filename in output_images_paths:
                    Path(filename).unlink()
    else:
        output_path.mkdir(parents=True, exist_ok=True)

    input_path_image_paths = pathex.get_image_unique_filestem_paths(
        input_path, verbose_print_func=io.log_info)

    if manual_output_debug_fix:
        if not output_debug_path.exists():
            io.log_err(
                f'{output_debug_path} not found. Re-extract faces with "Write debug images" option.'
            )
            return
        else:
            detector = 'manual'
            io.log_info(
                'Performing re-extract frames which were deleted from _debug directory.'
            )

            input_path_image_paths = DeletedFilesSearcherSubprocessor(
                input_path_image_paths,
                pathex.get_image_paths(output_debug_path)).run()
            input_path_image_paths = sorted(input_path_image_paths)
            io.log_info('Found %d images.' % (len(input_path_image_paths)))
    else:
        if output_debug_path.exists():
            for filename in pathex.get_image_paths(output_debug_path):
                Path(filename).unlink()
        else:
            output_debug_path.mkdir(parents=True, exist_ok=True)

    images_found = len(input_path_image_paths)
    faces_detected = 0
    if images_found != 0:
        if detector == 'manual':
            io.log_info('Performing manual extract...')
            data = ExtractSubprocessor(
                [
                    ExtractSubprocessor.Data(Path(filename))
                    for filename in input_path_image_paths
                ],
                'landmarks-manual',
                image_size,
                face_type,
                output_debug_path if output_debug else None,
                manual_window_size=manual_window_size,
                device_config=device_config).run()

            io.log_info('Performing 3rd pass...')
            data = ExtractSubprocessor(
                data,
                'final',
                image_size,
                face_type,
                output_debug_path if output_debug else None,
                final_output_path=output_path,
                device_config=device_config).run()

        else:
            io.log_info('Extracting faces...')
            data = ExtractSubprocessor(
                [
                    ExtractSubprocessor.Data(Path(filename))
                    for filename in input_path_image_paths
                ],
                'all',
                image_size,
                face_type,
                output_debug_path if output_debug else None,
                max_faces_from_image=max_faces_from_image,
                final_output_path=output_path,
                device_config=device_config).run()

        faces_detected += sum([d.faces_detected for d in data])

        if manual_fix:
            if all(np.array([d.faces_detected > 0 for d in data]) == True):
                io.log_info('All faces are detected, manual fix not needed.')
            else:
                fix_data = [
                    ExtractSubprocessor.Data(d.filepath) for d in data
                    if d.faces_detected == 0
                ]
                io.log_info('Performing manual fix for %d images...' %
                            (len(fix_data)))
                fix_data = ExtractSubprocessor(
                    fix_data,
                    'landmarks-manual',
                    image_size,
                    face_type,
                    output_debug_path if output_debug else None,
                    manual_window_size=manual_window_size,
                    device_config=device_config).run()
                fix_data = ExtractSubprocessor(
                    fix_data,
                    'final',
                    image_size,
                    face_type,
                    output_debug_path if output_debug else None,
                    final_output_path=output_path,
                    device_config=device_config).run()
                faces_detected += sum([d.faces_detected for d in fix_data])

    io.log_info('-------------------------')
    io.log_info('Images found:        %d' % (images_found))
    io.log_info('Faces detected:      %d' % (faces_detected))
    io.log_info('-------------------------')
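
The deleted-frames search above can be approximated without the subprocessor; a minimal single-threaded sketch, assuming frames and their _debug copies share the same file stem (the helper name below is hypothetical, not part of DeepFaceLab):

from pathlib import Path

def find_frames_missing_debug(frame_paths, debug_paths):
    # Hypothetical helper: keep only frames whose stem no longer exists in the
    # _debug directory, i.e. frames the user deleted and wants re-extracted.
    debug_stems = {Path(p).stem for p in debug_paths}
    return sorted(p for p in frame_paths if Path(p).stem not in debug_stems)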
Example #23
0
def mask_editor_main(input_dir,
                     confirmed_dir=None,
                     skipped_dir=None,
                     no_default_mask=False):
    input_path = Path(input_dir)

    confirmed_path = Path(confirmed_dir)
    skipped_path = Path(skipped_dir)

    if not input_path.exists():
        raise ValueError('Input directory not found. Please ensure it exists.')

    if not confirmed_path.exists():
        confirmed_path.mkdir(parents=True)

    if not skipped_path.exists():
        skipped_path.mkdir(parents=True)

    if not no_default_mask:
        eyebrows_expand_mod = np.clip(
            io.input_int("Default eyebrows expand modifier?",
                         100,
                         add_info="0..400"), 0, 400) / 100.0
    else:
        eyebrows_expand_mod = None

    wnd_name = "MaskEditor tool"
    io.named_window(wnd_name)
    io.capture_mouse(wnd_name)
    io.capture_keys(wnd_name)

    cached_images = {}

    image_paths = [Path(x) for x in pathex.get_image_paths(input_path)]
    done_paths = []
    done_images_types = {}
    image_paths_total = len(image_paths)
    saved_ie_polys = IEPolys()
    zoom_factor = 1.0
    preview_images_count = 9
    target_wh = 256

    do_prev_count = 0
    do_save_move_count = 0
    do_save_count = 0
    do_skip_move_count = 0
    do_skip_count = 0

    def jobs_count():
        return do_prev_count + do_save_move_count + do_save_count + do_skip_move_count + do_skip_count

    is_exit = False
    while not is_exit:

        if len(image_paths) > 0:
            filepath = image_paths.pop(0)
        else:
            filepath = None

        next_image_paths = image_paths[0:preview_images_count]
        next_image_paths_names = [path.name for path in next_image_paths]
        prev_image_paths = done_paths[-preview_images_count:]
        prev_image_paths_names = [path.name for path in prev_image_paths]

        for key in list(cached_images.keys()):
            if key not in prev_image_paths_names and \
               key not in next_image_paths_names:
                cached_images.pop(key)

        for paths in [prev_image_paths, next_image_paths]:
            for path in paths:
                if path.name not in cached_images:
                    cached_images[path.name] = cv2_imread(str(path)) / 255.0

        if filepath is not None:
            dflimg = DFLIMG.load(filepath)

            if dflimg is None:
                io.log_err("%s is not a dfl image file" % (filepath.name))
                continue
            else:
                lmrks = dflimg.get_landmarks()
                ie_polys = IEPolys.load(dflimg.get_ie_polys())
                fanseg_mask = dflimg.get_fanseg_mask()

                if filepath.name in cached_images:
                    img = cached_images[filepath.name]
                else:
                    img = cached_images[filepath.name] = cv2_imread(
                        str(filepath)) / 255.0

                if fanseg_mask is not None:
                    mask = fanseg_mask
                else:
                    if no_default_mask:
                        mask = np.zeros((target_wh, target_wh, 3))
                    else:
                        mask = LandmarksProcessor.get_image_hull_mask(
                            img.shape,
                            lmrks,
                            eyebrows_expand_mod=eyebrows_expand_mod)
        else:
            img = np.zeros((target_wh, target_wh, 3))
            mask = np.ones((target_wh, target_wh, 3))
            ie_polys = None

        def get_status_lines_func():
            return [
                'Progress: %d / %d . Current file: %s' %
                (len(done_paths), image_paths_total,
                 str(filepath.name) if filepath is not None else "end"),
                '[Left mouse button] - mark include mask.',
                '[Right mouse button] - mark exclude mask.',
                '[Middle mouse button] - finish current poly.',
                '[Mouse wheel] - undo/redo poly or point. [+ctrl] - undo to begin/redo to end',
                '[r] - applies edits made to last saved image.',
                '[q] - prev image. [w] - skip and move to %s. [e] - save and move to %s. '
                % (skipped_path.name, confirmed_path.name),
                '[z] - prev image. [x] - skip. [c] - save. ',
                'hold [shift] - speed up the frame counter by 10.',
                '[-/+] - window zoom [esc] - quit',
            ]

        try:
            ed = MaskEditor(img,
                            [(done_images_types[name], cached_images[name])
                             for name in prev_image_paths_names],
                            [(0, cached_images[name])
                             for name in next_image_paths_names], mask,
                            ie_polys, get_status_lines_func)
        except Exception as e:
            print(e)
            continue

        next = False
        while not next:
            io.process_messages(0.005)

            if jobs_count() == 0:
                for (x, y, ev, flags) in io.get_mouse_events(wnd_name):
                    x, y = int(x / zoom_factor), int(y / zoom_factor)
                    ed.set_mouse_pos(x, y)
                    if filepath is not None:
                        if ev == io.EVENT_LBUTTONDOWN:
                            ed.mask_point(1)
                        elif ev == io.EVENT_RBUTTONDOWN:
                            ed.mask_point(0)
                        elif ev == io.EVENT_MBUTTONDOWN:
                            ed.mask_finish()
                        elif ev == io.EVENT_MOUSEWHEEL:
                            if flags & 0x80000000 != 0:
                                if flags & 0x8 != 0:
                                    ed.undo_to_begin_point()
                                else:
                                    ed.undo_point()
                            else:
                                if flags & 0x8 != 0:
                                    ed.redo_to_end_point()
                                else:
                                    ed.redo_point()

                for key, chr_key, ctrl_pressed, alt_pressed, shift_pressed in io.get_key_events(
                        wnd_name):
                    if chr_key == 'q' or chr_key == 'z':
                        do_prev_count = 1 if not shift_pressed else 10
                    elif chr_key == '-':
                        zoom_factor = np.clip(zoom_factor - 0.1, 0.1, 4.0)
                        ed.set_screen_changed()
                    elif chr_key == '+':
                        zoom_factor = np.clip(zoom_factor + 0.1, 0.1, 4.0)
                        ed.set_screen_changed()
                    elif key == 27:  #esc
                        is_exit = True
                        next = True
                        break
                    elif filepath is not None:
                        if chr_key == 'e':
                            saved_ie_polys = ed.ie_polys
                            do_save_move_count = 1 if not shift_pressed else 10
                        elif chr_key == 'c':
                            saved_ie_polys = ed.ie_polys
                            do_save_count = 1 if not shift_pressed else 10
                        elif chr_key == 'w':
                            do_skip_move_count = 1 if not shift_pressed else 10
                        elif chr_key == 'x':
                            do_skip_count = 1 if not shift_pressed else 10
                        elif chr_key == 'r' and saved_ie_polys is not None:
                            ed.set_ie_polys(saved_ie_polys)

            if do_prev_count > 0:
                do_prev_count -= 1
                if len(done_paths) > 0:
                    if filepath is not None:
                        image_paths.insert(0, filepath)

                    filepath = done_paths.pop(-1)
                    done_images_types[filepath.name] = 0

                    if filepath.parent != input_path:
                        new_filename_path = input_path / filepath.name
                        filepath.rename(new_filename_path)
                        image_paths.insert(0, new_filename_path)
                    else:
                        image_paths.insert(0, filepath)

                    next = True
            elif filepath is not None:
                if do_save_move_count > 0:
                    do_save_move_count -= 1

                    ed.mask_finish()
                    dflimg.embed_and_set(
                        str(filepath),
                        ie_polys=ed.get_ie_polys(),
                        eyebrows_expand_mod=eyebrows_expand_mod)

                    done_paths += [confirmed_path / filepath.name]
                    done_images_types[filepath.name] = 2
                    filepath.rename(done_paths[-1])

                    next = True
                elif do_save_count > 0:
                    do_save_count -= 1

                    ed.mask_finish()
                    dflimg.embed_and_set(
                        str(filepath),
                        ie_polys=ed.get_ie_polys(),
                        eyebrows_expand_mod=eyebrows_expand_mod)

                    done_paths += [filepath]
                    done_images_types[filepath.name] = 2

                    next = True
                elif do_skip_move_count > 0:
                    do_skip_move_count -= 1

                    done_paths += [skipped_path / filepath.name]
                    done_images_types[filepath.name] = 1
                    filepath.rename(done_paths[-1])

                    next = True
                elif do_skip_count > 0:
                    do_skip_count -= 1

                    done_paths += [filepath]
                    done_images_types[filepath.name] = 1

                    next = True
            else:
                do_save_move_count = do_save_count = do_skip_move_count = do_skip_count = 0

            if jobs_count() == 0:
                if ed.switch_screen_changed():
                    screen = ed.make_screen()
                    if zoom_factor != 1.0:
                        h, w, c = screen.shape
                        screen = cv2.resize(
                            screen,
                            (int(w * zoom_factor), int(h * zoom_factor)))
                    io.show_image(wnd_name, screen)

        io.process_messages(0.005)

    io.destroy_all_windows()
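
A hedged reading of the mouse-wheel handling in the loop above, assuming the flag bits follow the OpenCV convention (the sign bit of `flags` gives the scroll direction, bit 0x8 marks a held Ctrl key):

def decode_wheel_flags(flags):
    # Assumed bit layout, mirroring the checks in the editor loop above:
    # high bit set -> wheel scrolled back -> undo; bit 0x8 set -> Ctrl held
    # -> jump to begin/end of the poly history.
    scrolled_back = (flags & 0x80000000) != 0
    ctrl_held = (flags & 0x8) != 0
    return scrolled_back, ctrl_held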
Example #24
0
    def on_initialize_options(self):
        device_config = nn.getCurrentDeviceConfig()
        
        lowest_vram = 2
        if len(device_config.devices) != 0:
            lowest_vram = device_config.devices.get_worst_device().total_mem_gb
            
        if lowest_vram >= 4:
            suggest_batch_size = 8
        else:
            suggest_batch_size = 4
        
        yn_str = {True:'y',False:'n'}
        ask_override = self.ask_override()

        if self.is_first_run() or ask_override:
            self.ask_autobackup_hour()
            self.ask_write_preview_history()
            self.ask_target_iter()
            self.ask_random_flip()
            self.ask_batch_size(suggest_batch_size)

        default_resolution         = self.options['resolution']         = self.load_or_def_option('resolution', 128)
        default_face_type          = self.options['face_type']          = self.load_or_def_option('face_type', 'f')
        default_models_opt_on_gpu  = self.options['models_opt_on_gpu']  = self.load_or_def_option('models_opt_on_gpu', True)
        default_archi              = self.options['archi']              = self.load_or_def_option('archi', 'dfhd')
        default_ae_dims            = self.options['ae_dims']            = self.load_or_def_option('ae_dims', 256)
        default_e_dims             = self.options['e_dims']             = self.load_or_def_option('e_dims', 64)
        default_d_dims             = self.options['d_dims']             = self.load_or_def_option('d_dims', 64)
        
        default_d_mask_dims        = default_d_dims // 3
        default_d_mask_dims        += default_d_mask_dims % 2
        default_d_mask_dims        = self.options['d_mask_dims']        = self.load_or_def_option('d_mask_dims', default_d_mask_dims)
        
        default_learn_mask         = self.options['learn_mask']         = self.load_or_def_option('learn_mask', True)
        default_lr_dropout         = self.options['lr_dropout']         = self.load_or_def_option('lr_dropout', False)
        default_random_warp        = self.options['random_warp']        = self.load_or_def_option('random_warp', True)
        default_true_face_training = self.options['true_face_training'] = self.load_or_def_option('true_face_training', False)
        default_face_style_power   = self.options['face_style_power']   = self.load_or_def_option('face_style_power', 0.0)
        default_bg_style_power     = self.options['bg_style_power']     = self.load_or_def_option('bg_style_power', 0.0)
        default_ct_mode            = self.options['ct_mode']            = self.load_or_def_option('ct_mode', 'none')
        default_clipgrad           = self.options['clipgrad']           = self.load_or_def_option('clipgrad', False)
        default_pretrain           = self.options['pretrain']           = self.load_or_def_option('pretrain', False)

        if self.is_first_run():
            resolution = io.input_int("Resolution", default_resolution, add_info="64-256", help_message="More resolution requires more VRAM and time to train. The value will be adjusted to a multiple of 16.")
            resolution = np.clip ( (resolution // 16) * 16, 64, 256)
            self.options['resolution'] = resolution
            self.options['face_type'] = io.input_str ("Face type", default_face_type, ['h','mf','f'], help_message="Half / mid face / full face. Half face has better resolution, but covers less area of cheeks. Mid face is 30% wider than half face.").lower()

        if (self.is_first_run() or ask_override) and len(device_config.devices) == 1:
            self.options['models_opt_on_gpu'] = io.input_bool ("Place models and optimizer on GPU", default_models_opt_on_gpu, help_message="When you train on one GPU, by default the model and optimizer weights are placed on GPU to accelerate the process. You can place them on CPU to free up extra VRAM, allowing you to set bigger dimensions.")

        if self.is_first_run():
            self.options['archi'] = io.input_str ("AE architecture", default_archi, ['dfhd','liaehd','df','liae'], help_message="'df' keeps faces more natural. 'liae' can fix overly different face shapes. 'hd' is a heavyweight version for the best quality.").lower() # the -s version is slower, but has a decreased chance to collapse.
            self.options['ae_dims'] = np.clip ( io.input_int("AutoEncoder dimensions", default_ae_dims, add_info="32-1024", help_message="All face information will be packed into AE dims. If the amount of AE dims is not enough, then for example closed eyes will not be recognized. More dims are better, but require more VRAM. You can fine-tune model size to fit your GPU." ), 32, 1024 )
            
            e_dims = np.clip ( io.input_int("Encoder dimensions", default_e_dims, add_info="16-256", help_message="More dims help to recognize more facial features and achieve sharper result, but require more VRAM. You can fine-tune model size to fit your GPU." ), 16, 256 )
            self.options['e_dims'] = e_dims + e_dims % 2
            
            d_dims = np.clip ( io.input_int("Decoder dimensions", default_d_dims, add_info="16-256", help_message="More dims help to recognize more facial features and achieve sharper result, but require more VRAM. You can fine-tune model size to fit your GPU." ), 16, 256 )
            self.options['d_dims'] = d_dims + d_dims % 2
            
            d_mask_dims = np.clip ( io.input_int("Decoder mask dimensions", default_d_mask_dims, add_info="16-256", help_message="Typical mask dimensions = decoder dimensions / 3. If you manually cut out obstacles from the dst mask, you can increase this parameter to achieve better quality." ), 16, 256 )
            self.options['d_mask_dims'] = d_mask_dims + d_mask_dims % 2
            
        if self.is_first_run() or ask_override:
            self.options['learn_mask']  = io.input_bool ("Learn mask", default_learn_mask, help_message="Learning the mask can help the model recognize face directions. Learning without the mask can reduce model size; in that case the merger is forced to use a 'not predicted mask' that is not as smooth as a predicted one.")
            self.options['lr_dropout']  = io.input_bool ("Use learning rate dropout", default_lr_dropout, help_message="When the face is trained enough, you can enable this option to get extra sharpness in fewer iterations.")
            self.options['random_warp'] = io.input_bool ("Enable random warp of samples", default_random_warp, help_message="Random warp is required to generalize facial expressions of both faces. When the face is trained enough, you can disable it to get extra sharpness in fewer iterations.")

            if 'df' in self.options['archi']:
                self.options['true_face_training'] = io.input_bool ("Enable 'true face' training", default_true_face_training, help_message="The resulting face will be more like src and will get extra sharpness. Enable it for the last 10-20k iterations before conversion.")
            else:
                self.options['true_face_training'] = False

            self.options['face_style_power'] = np.clip ( io.input_number("Face style power", default_face_style_power, add_info="0.0..100.0", help_message="Learn to transfer face style details such as light and color conditions. Warning: Enable it only after 10k iters, when the predicted face is clear enough to start learning style. Start from a 0.1 value and check history changes. Enabling this option increases the chance of model collapse."), 0.0, 100.0 )
            self.options['bg_style_power'] = np.clip ( io.input_number("Background style power", default_bg_style_power, add_info="0.0..100.0", help_message="Learn to transfer the background around the face. This can make the face more like dst. Enabling this option increases the chance of model collapse."), 0.0, 100.0 )
            self.options['ct_mode'] = io.input_str (f"Color transfer for src faceset", default_ct_mode, ['none','rct','lct','mkl','idt','sot'], help_message="Change the color distribution of src samples to be closer to dst samples. Try all modes to find the best.")
            self.options['clipgrad'] = io.input_bool ("Enable gradient clipping", default_clipgrad, help_message="Gradient clipping reduces chance of model collapse, sacrificing speed of training.")
            self.options['pretrain'] = io.input_bool ("Enable pretraining mode", default_pretrain, help_message="Pretrain the model with large amount of various faces. After that, model can be used to train the fakes more quickly.")

        if self.options['pretrain'] and self.get_pretraining_data_path() is None:
            raise Exception("pretraining_data_path is not defined") 

        self.pretrain_just_disabled = (default_pretrain and not self.options['pretrain'])
        
        if self.pretrain_just_disabled:
            self.set_iter(1)
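
The default decoder-mask dimensions above are derived from the decoder dimensions and rounded up to an even value; a small worked example:

d_dims = 64
d_mask_dims = d_dims // 3        # 21
d_mask_dims += d_mask_dims % 2   # 22, rounded up to the nearest even value
# The same even-rounding (x + x % 2) is applied to the user-entered e_dims and d_dims.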
Example #25
0
def main(
    detector=None,
    input_path=None,
    output_path=None,
    output_debug=None,
    manual_fix=False,
    manual_output_debug_fix=False,
    manual_window_size=1368,
    face_type='full_face',
    max_faces_from_image=None,
    image_size=None,
    jpeg_quality=None,
    cpu_only=False,
    force_gpu_idxs=None,
):

    if not input_path.exists():
        io.log_err('Input directory not found. Please ensure it exists.')
        return

    if not output_path.exists():
        output_path.mkdir(parents=True, exist_ok=True)

    if face_type is not None:
        face_type = FaceType.fromString(face_type)

    if face_type is None:
        if manual_output_debug_fix:
            files = pathex.get_image_paths(output_path)
            if len(files) != 0:
                dflimg = DFLIMG.load(Path(files[0]))
                if dflimg is not None and dflimg.has_data():
                    face_type = FaceType.fromString(dflimg.get_face_type())

    input_image_paths = pathex.get_image_unique_filestem_paths(
        input_path, verbose_print_func=io.log_info)
    output_images_paths = pathex.get_image_paths(output_path)
    output_debug_path = output_path.parent / (output_path.name + '_debug')

    continue_extraction = False
    if not manual_output_debug_fix and len(output_images_paths) > 0:
        if len(output_images_paths) > 128:
            continue_extraction = io.input_bool(
                "Continue extraction?",
                True,
                help_message=
                "Extraction can be continued, but you must specify the same options again."
            )

        if len(output_images_paths) > 128 and continue_extraction:
            try:
                input_image_paths = input_image_paths[
                    [Path(x).stem for x in input_image_paths].
                    index(Path(output_images_paths[-128]).stem.split('_')[0]):]
            except Exception:
                io.log_err(
                    "Error in fetching the last index. Extraction cannot be continued."
                )
                return
        elif input_path != output_path:
            io.input(
                f"\n WARNING !!! \n {output_path} contains files! \n They will be deleted. \n Press enter to continue.\n"
            )
            for filename in output_images_paths:
                Path(filename).unlink()

    device_config = nn.DeviceConfig.GPUIndexes( force_gpu_idxs or nn.ask_choose_device_idxs(choose_only_one=detector=='manual', suggest_all_gpu=True) ) \
                    if not cpu_only else nn.DeviceConfig.CPU()

    if face_type is None:
        face_type = io.input_str(
            "Face type",
            'wf', ['f', 'wf', 'head'],
            help_message=
            "Full face / whole face / head. 'Whole face' covers full area of face include forehead. 'head' covers full head, but requires XSeg for src and dst faceset."
        ).lower()
        face_type = {
            'f': FaceType.FULL,
            'wf': FaceType.WHOLE_FACE,
            'head': FaceType.HEAD
        }[face_type]

    if max_faces_from_image is None:
        max_faces_from_image = io.input_int(
            f"Max number of faces from image",
            0,
            help_message=
            "If you extract a src faceset that has frames with a large number of faces, it is advisable to set max faces to 3 to speed up extraction. 0 - unlimited"
        )

    if image_size is None:
        image_size = io.input_int(
            f"Image size",
            512 if face_type < FaceType.HEAD else 768,
            valid_range=[256, 2048],
            help_message=
            "Output image size. The higher image size, the worse face-enhancer works. Use higher than 512 value only if the source image is sharp enough and the face does not need to be enhanced."
        )

    if jpeg_quality is None:
        jpeg_quality = io.input_int(
            f"Jpeg quality",
            90,
            valid_range=[1, 100],
            help_message=
            "Jpeg quality. The higher jpeg quality the larger the output file size."
        )

    if detector is None:
        io.log_info("Choose detector type.")
        io.log_info("[0] S3FD")
        io.log_info("[1] manual")
        detector = {0: 's3fd', 1: 'manual'}[io.input_int("", 0, [0, 1])]

    if output_debug is None:
        output_debug = io.input_bool(
            f"Write debug images to {output_debug_path.name}?", False)

    if output_debug:
        output_debug_path.mkdir(parents=True, exist_ok=True)

    if manual_output_debug_fix:
        if not output_debug_path.exists():
            io.log_err(
                f'{output_debug_path} not found. Re-extract faces with "Write debug images" option.'
            )
            return
        else:
            detector = 'manual'
            io.log_info(
                'Re-extracting frames which were deleted from the _debug directory.'
            )

            input_image_paths = DeletedFilesSearcherSubprocessor(
                input_image_paths,
                pathex.get_image_paths(output_debug_path)).run()
            input_image_paths = sorted(input_image_paths)
            io.log_info('Found %d images.' % (len(input_image_paths)))
    else:
        if not continue_extraction and output_debug_path.exists():
            for filename in pathex.get_image_paths(output_debug_path):
                Path(filename).unlink()

    images_found = len(input_image_paths)
    faces_detected = 0
    if images_found != 0:
        if detector == 'manual':
            io.log_info('Performing manual extract...')
            data = ExtractSubprocessor(
                [
                    ExtractSubprocessor.Data(Path(filename))
                    for filename in input_image_paths
                ],
                'landmarks-manual',
                image_size,
                jpeg_quality,
                face_type,
                output_debug_path if output_debug else None,
                manual_window_size=manual_window_size,
                device_config=device_config).run()

            io.log_info('Performing 3rd pass...')
            data = ExtractSubprocessor(
                data,
                'final',
                image_size,
                jpeg_quality,
                face_type,
                output_debug_path if output_debug else None,
                final_output_path=output_path,
                device_config=device_config).run()

        else:
            io.log_info('Extracting faces...')
            data = ExtractSubprocessor(
                [
                    ExtractSubprocessor.Data(Path(filename))
                    for filename in input_image_paths
                ],
                'all',
                image_size,
                jpeg_quality,
                face_type,
                output_debug_path if output_debug else None,
                max_faces_from_image=max_faces_from_image,
                final_output_path=output_path,
                device_config=device_config).run()

        faces_detected += sum([d.faces_detected for d in data])

        if manual_fix:
            if all(d.faces_detected > 0 for d in data):
                io.log_info('All faces are detected, manual fix not needed.')
            else:
                fix_data = [
                    ExtractSubprocessor.Data(d.filepath) for d in data
                    if d.faces_detected == 0
                ]
                io.log_info('Performing manual fix for %d images...' %
                            (len(fix_data)))
                fix_data = ExtractSubprocessor(
                    fix_data,
                    'landmarks-manual',
                    image_size,
                    jpeg_quality,
                    face_type,
                    output_debug_path if output_debug else None,
                    manual_window_size=manual_window_size,
                    device_config=device_config).run()
                fix_data = ExtractSubprocessor(
                    fix_data,
                    'final',
                    image_size,
                    jpeg_quality,
                    face_type,
                    output_debug_path if output_debug else None,
                    final_output_path=output_path,
                    device_config=device_config).run()
                faces_detected += sum([d.faces_detected for d in fix_data])

    io.log_info('-------------------------')
    io.log_info('Images found:        %d' % (images_found))
    io.log_info('Faces detected:      %d' % (faces_detected))
    io.log_info('-------------------------')
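
The continue-extraction slice above relies on aligned faces being named `<source_stem>_<face_index>.<ext>`; a worked example with hypothetical file names:

from pathlib import Path

# Hypothetical names only, to illustrate the indexing used above.
input_stems = ['0001', '0002', '0003', '0004']
last_written_face = Path('0003_0.jpg')            # the 128th-from-last output in the real code
restart_idx = input_stems.index(last_written_face.stem.split('_')[0])
remaining = input_stems[restart_idx:]             # ['0003', '0004']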
Example #26
0
    def ask_target_iter(self, default_value=0):
        default_target_iter = self.load_or_def_option('target_iter', default_value)
        self.options['target_iter'] = max(0, io.input_int("Target iteration", default_target_iter))
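
A minimal sketch of what load_or_def_option is assumed to do (not the actual DeepFaceLab implementation): reuse a previously saved option value if one exists, otherwise fall back to the supplied default.

def load_or_def_option(options, name, def_value):
    # Assumed behaviour: a saved value wins, the default is used only on first run.
    return options[name] if name in options else def_value

options = {'target_iter': 200000}
print(load_or_def_option(options, 'target_iter', 0))  # 200000
print(load_or_def_option(options, 'batch_size', 8))   # 8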
Example #27
0
    def on_initialize_options(self):
        device_config = nn.getCurrentDeviceConfig()

        lowest_vram = 2
        if len(device_config.devices) != 0:
            lowest_vram = device_config.devices.get_worst_device().total_mem_gb

        if lowest_vram >= 4:
            suggest_batch_size = 8
        else:
            suggest_batch_size = 4

        yn_str = {True:'y',False:'n'}
        min_res = 64
        max_res = 640

        default_resolution         = self.options['resolution']         = self.load_or_def_option('resolution', 128)
        default_face_type          = self.options['face_type']          = self.load_or_def_option('face_type', 'f')
        default_models_opt_on_gpu  = self.options['models_opt_on_gpu']  = self.load_or_def_option('models_opt_on_gpu', True)

        archi = self.load_or_def_option('archi', 'df')
        archi = {'dfuhd':'df-u','liaeuhd':'liae-u'}.get(archi, archi) #backward comp
        default_archi              = self.options['archi'] = archi

        default_ae_dims            = self.options['ae_dims']            = self.load_or_def_option('ae_dims', 256)
        default_e_dims             = self.options['e_dims']             = self.load_or_def_option('e_dims', 64)
        default_d_dims             = self.options['d_dims']             = self.options.get('d_dims', None)
        default_d_mask_dims        = self.options['d_mask_dims']        = self.options.get('d_mask_dims', None)
        default_masked_training    = self.options['masked_training']    = self.load_or_def_option('masked_training', True)
        default_eyes_prio          = self.options['eyes_prio']          = self.load_or_def_option('eyes_prio', False)
        default_uniform_yaw        = self.options['uniform_yaw']        = self.load_or_def_option('uniform_yaw', False)

        lr_dropout = self.load_or_def_option('lr_dropout', 'n')
        lr_dropout = {True:'y', False:'n'}.get(lr_dropout, lr_dropout) #backward comp
        default_lr_dropout         = self.options['lr_dropout'] = lr_dropout

        default_random_warp        = self.options['random_warp']        = self.load_or_def_option('random_warp', True)
        default_gan_power          = self.options['gan_power']          = self.load_or_def_option('gan_power', 0.0)
        default_true_face_power    = self.options['true_face_power']    = self.load_or_def_option('true_face_power', 0.0)
        default_face_style_power   = self.options['face_style_power']   = self.load_or_def_option('face_style_power', 0.0)
        default_bg_style_power     = self.options['bg_style_power']     = self.load_or_def_option('bg_style_power', 0.0)
        default_ct_mode            = self.options['ct_mode']            = self.load_or_def_option('ct_mode', 'none')
        default_clipgrad           = self.options['clipgrad']           = self.load_or_def_option('clipgrad', False)
        default_pretrain           = self.options['pretrain']           = self.load_or_def_option('pretrain', False)

        ask_override = self.ask_override()
        if self.is_first_run() or ask_override:
            self.ask_autobackup_hour()
            self.ask_write_preview_history()
            self.ask_target_iter()
            self.ask_random_flip()
            self.ask_batch_size(suggest_batch_size)

        if self.is_first_run():
            resolution = io.input_int("Resolution", default_resolution, add_info="64-640", help_message="More resolution requires more VRAM and time to train. The value will be adjusted to a multiple of 16, or 32 when the -d archi option is used.")
            resolution = np.clip ( (resolution // 16) * 16, min_res, max_res)
            self.options['resolution'] = resolution
            self.options['face_type'] = io.input_str ("Face type", default_face_type, ['h','mf','f','wf','head'], help_message="Half / mid face / full face / whole face / head. Half face has better resolution, but covers less area of the cheeks. Mid face is 30% wider than half face. 'Whole face' covers the full area of the face including the forehead. 'head' covers the full head, but requires XSeg for src and dst facesets.").lower()

            while True:
                archi = io.input_str ("AE architecture", default_archi, help_message=\
"""
'df' keeps the face more identity-preserved.
'liae' can fix overly different face shapes.
'-u' increases likeness of the face.
'-d' (experimental) doubles the resolution at the same computation cost.
Examples: df, liae, df-d, df-ud, liae-ud, ...
""").lower()

                archi_split = archi.split('-')

                if len(archi_split) == 2:
                    archi_type, archi_opts = archi_split
                elif len(archi_split) == 1:
                    archi_type, archi_opts = archi_split[0], None
                else:
                    continue

                if archi_type not in ['df', 'liae']:
                    continue

                if archi_opts is not None:
                    if len(archi_opts) == 0:
                        continue
                    if len([ 1 for opt in archi_opts if opt not in ['u','d'] ]) != 0:
                        continue

                    if 'd' in archi_opts:
                        self.options['resolution'] = np.clip ( (self.options['resolution'] // 32) * 32, min_res, max_res)

                break
            self.options['archi'] = archi

        default_d_dims             = self.options['d_dims']             = self.load_or_def_option('d_dims', 64)

        default_d_mask_dims        = default_d_dims // 3
        default_d_mask_dims        += default_d_mask_dims % 2
        default_d_mask_dims        = self.options['d_mask_dims']        = self.load_or_def_option('d_mask_dims', default_d_mask_dims)

        if self.is_first_run():
            self.options['ae_dims'] = np.clip ( io.input_int("AutoEncoder dimensions", default_ae_dims, add_info="32-1024", help_message="All face information will be packed into AE dims. If the amount of AE dims is not enough, then for example closed eyes will not be recognized. More dims are better, but require more VRAM. You can fine-tune model size to fit your GPU." ), 32, 1024 )

            e_dims = np.clip ( io.input_int("Encoder dimensions", default_e_dims, add_info="16-256", help_message="More dims help to recognize more facial features and achieve sharper result, but require more VRAM. You can fine-tune model size to fit your GPU." ), 16, 256 )
            self.options['e_dims'] = e_dims + e_dims % 2

            d_dims = np.clip ( io.input_int("Decoder dimensions", default_d_dims, add_info="16-256", help_message="More dims help to recognize more facial features and achieve sharper result, but require more VRAM. You can fine-tune model size to fit your GPU." ), 16, 256 )
            self.options['d_dims'] = d_dims + d_dims % 2

            d_mask_dims = np.clip ( io.input_int("Decoder mask dimensions", default_d_mask_dims, add_info="16-256", help_message="Typical mask dimensions = decoder dimensions / 3. If you manually cut out obstacles from the dst mask, you can increase this parameter to achieve better quality." ), 16, 256 )
            self.options['d_mask_dims'] = d_mask_dims + d_mask_dims % 2

        if self.is_first_run() or ask_override:
            if self.options['face_type'] == 'wf' or self.options['face_type'] == 'head':
                self.options['masked_training']  = io.input_bool ("Masked training", default_masked_training, help_message="This option is available only for 'whole_face' or 'head' type. Masked training clips the training area to the full_face mask or XSeg mask, so the network trains the faces properly.")

            self.options['eyes_prio'] = io.input_bool ("Eyes priority", default_eyes_prio, help_message='Helps to fix eye problems during training like "alien eyes" and wrong eye direction (especially on HD architectures) by forcing the neural network to train eyes with higher priority. before/after https://i.imgur.com/YQHOuSR.jpg ')
            self.options['uniform_yaw'] = io.input_bool ("Uniform yaw distribution of samples", default_uniform_yaw, help_message='Helps to fix blurry side faces caused by the small number of them in the faceset.')

        if self.is_first_run() or ask_override:
            self.options['models_opt_on_gpu'] = io.input_bool ("Place models and optimizer on GPU", default_models_opt_on_gpu, help_message="When you train on one GPU, by default the model and optimizer weights are placed on GPU to accelerate the process. You can place them on CPU to free up extra VRAM, allowing you to set bigger dimensions.")

            self.options['lr_dropout']  = io.input_str (f"Use learning rate dropout", default_lr_dropout, ['n','y','cpu'], help_message="When the face is trained enough, you can enable this option to get extra sharpness and reduce subpixel shake in fewer iterations. Enable it before disabling random warp and before GAN. \nn - disabled.\ny - enabled\ncpu - enabled on CPU. This avoids using extra VRAM, sacrificing 20% of iteration time.")

            self.options['random_warp'] = io.input_bool ("Enable random warp of samples", default_random_warp, help_message="Random warp is required to generalize facial expressions of both faces. When the face is trained enough, you can disable it to get extra sharpness and reduce subpixel shake in fewer iterations.")

            self.options['gan_power'] = np.clip ( io.input_number ("GAN power", default_gan_power, add_info="0.0 .. 10.0", help_message="Train the network in a Generative Adversarial manner. Forces the neural network to learn small details of the face. Enable it only when the face is trained enough, and do not disable it afterwards. Typical value is 0.1"), 0.0, 10.0 )

            if 'df' in self.options['archi']:
                self.options['true_face_power'] = np.clip ( io.input_number ("'True face' power.", default_true_face_power, add_info="0.0000 .. 1.0", help_message="Experimental option. Discriminates result face to be more like src face. Higher value - stronger discrimination. Typical value is 0.01 . Comparison - https://i.imgur.com/czScS9q.png"), 0.0, 1.0 )
            else:
                self.options['true_face_power'] = 0.0

            self.options['face_style_power'] = np.clip ( io.input_number("Face style power", default_face_style_power, add_info="0.0..100.0", help_message="Learn the color of the predicted face to be the same as dst inside the mask. If you want to use this option with 'whole_face' you have to use an XSeg trained mask. Warning: Enable it only after 10k iters, when the predicted face is clear enough to start learning style. Start from a 0.001 value and check history changes. Enabling this option increases the chance of model collapse."), 0.0, 100.0 )
            self.options['bg_style_power'] = np.clip ( io.input_number("Background style power", default_bg_style_power, add_info="0.0..100.0", help_message="Learn the area outside the mask of the predicted face to be the same as dst. If you want to use this option with 'whole_face' you have to use an XSeg trained mask. This can make the face more like dst. Enabling this option increases the chance of model collapse. Typical value is 2.0"), 0.0, 100.0 )

            self.options['ct_mode'] = io.input_str (f"Color transfer for src faceset", default_ct_mode, ['none','rct','lct','mkl','idt','sot'], help_message="Change the color distribution of src samples to be closer to dst samples. Try all modes to find the best.")
            self.options['clipgrad'] = io.input_bool ("Enable gradient clipping", default_clipgrad, help_message="Gradient clipping reduces chance of model collapse, sacrificing speed of training.")

            self.options['pretrain'] = io.input_bool ("Enable pretraining mode", default_pretrain, help_message="Pretrain the model with large amount of various faces. After that, model can be used to train the fakes more quickly.")

        if self.options['pretrain'] and self.get_pretraining_data_path() is None:
            raise Exception("pretraining_data_path is not defined")

        self.pretrain_just_disabled = (default_pretrain and not self.options['pretrain'])
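
Worked example of the resolution rounding above, assuming a user enters 250: plain architectures round down to a multiple of 16, and '-d' additionally rounds to a multiple of 32.

import numpy as np

min_res, max_res = 64, 640
requested = 250
res = int(np.clip((requested // 16) * 16, min_res, max_res))   # 240
res_d = int(np.clip((res // 32) * 32, min_res, max_res))       # 224 when 'd' is in archi_opts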
Example #28
0
def main(model_class_name=None,
         saved_models_path=None,
         training_data_src_path=None,
         force_model_name=None,
         input_path=None,
         output_path=None,
         output_mask_path=None,
         aligned_path=None,
         force_gpu_idxs=None,
         cpu_only=None):
    io.log_info("Running merger.\r\n")

    try:
        if not input_path.exists():
            io.log_err('Input directory not found. Please ensure it exists.')
            return

        if not output_path.exists():
            output_path.mkdir(parents=True, exist_ok=True)

        if not output_mask_path.exists():
            output_mask_path.mkdir(parents=True, exist_ok=True)

        if not saved_models_path.exists():
            io.log_err('Model directory not found. Please ensure it exists.')
            return

        # Initialize model
        import models
        model = models.import_model(model_class_name)(
            is_training=False,
            saved_models_path=saved_models_path,
            force_gpu_idxs=force_gpu_idxs,
            force_model_name=force_model_name,
            cpu_only=cpu_only)

        predictor_func, predictor_input_shape, cfg = model.get_MergerConfig()

        # Preparing MP functions
        predictor_func = MPFunc(predictor_func)

        run_on_cpu = len(nn.getCurrentDeviceConfig().devices) == 0
        xseg_256_extract_func = MPClassFuncOnDemand(
            XSegNet,
            'extract',
            name='XSeg',
            resolution=256,
            weights_file_root=saved_models_path,
            place_model_on_cpu=True,
            run_on_cpu=run_on_cpu)

        face_enhancer_func = MPClassFuncOnDemand(FaceEnhancer,
                                                 'enhance',
                                                 place_model_on_cpu=True,
                                                 run_on_cpu=run_on_cpu)

        is_interactive = io.input_bool("Use interactive merger?",
                                       True) if not io.is_colab() else False

        if not is_interactive:
            cfg.ask_settings()

        subprocess_count = io.input_int(
            "Number of workers?",
            max(8, multiprocessing.cpu_count()),
            valid_range=[1, multiprocessing.cpu_count()],
            help_message=
            "Specify the number of threads to process. A low value may affect performance. A high value may result in memory error. The value may not be greater than CPU cores."
        )

        input_path_image_paths = pathex.get_image_paths(input_path)

        if cfg.type == MergerConfig.TYPE_MASKED:
            if not aligned_path.exists():
                io.log_err(
                    'Aligned directory not found. Please ensure it exists.')
                return

            packed_samples = None
            try:
                packed_samples = samplelib.PackedFaceset.load(aligned_path)
            except:
                io.log_err(
                    f"Error occured while loading samplelib.PackedFaceset.load {str(aligned_path)}, {traceback.format_exc()}"
                )

            if packed_samples is not None:
                io.log_info("Using packed faceset.")

                def generator():
                    for sample in io.progress_bar_generator(
                            packed_samples, "Collecting alignments"):
                        filepath = Path(sample.filename)
                        yield filepath, DFLIMG.load(
                            filepath,
                            loader_func=lambda x: sample.read_raw_file())
            else:

                def generator():
                    for filepath in io.progress_bar_generator(
                            pathex.get_image_paths(aligned_path),
                            "Collecting alignments"):
                        filepath = Path(filepath)
                        yield filepath, DFLIMG.load(filepath)

            alignments = {}
            multiple_faces_detected = False

            for filepath, dflimg in generator():
                if dflimg is None or not dflimg.has_data():
                    io.log_err(f"{filepath.name} is not a dfl image file")
                    continue

                source_filename = dflimg.get_source_filename()
                if source_filename is None:
                    continue

                source_filepath = Path(source_filename)
                source_filename_stem = source_filepath.stem

                if source_filename_stem not in alignments.keys():
                    alignments[source_filename_stem] = []

                alignments_ar = alignments[source_filename_stem]
                alignments_ar.append(
                    (dflimg.get_source_landmarks(), filepath, source_filepath))

                if len(alignments_ar) > 1:
                    multiple_faces_detected = True

            if multiple_faces_detected:
                io.log_info("")
                io.log_info(
                    "Warning: multiple faces detected. Only one alignment file should refer one source file."
                )
                io.log_info("")

            for a_key in list(alignments.keys()):
                a_ar = alignments[a_key]
                if len(a_ar) > 1:
                    for _, filepath, source_filepath in a_ar:
                        io.log_info(
                            f"alignment {filepath.name} refers to {source_filepath.name} "
                        )
                    io.log_info("")

                alignments[a_key] = [a[0] for a in a_ar]

            if multiple_faces_detected:
                io.log_info(
                    "It is strongly recommended to process the faces separatelly."
                )
                io.log_info(
                    "Use 'recover original filename' to determine the exact duplicates."
                )
                io.log_info("")

            frames = [
                InteractiveMergerSubprocessor.Frame(frame_info=FrameInfo(
                    filepath=Path(p),
                    landmarks_list=alignments.get(Path(p).stem, None)))
                for p in input_path_image_paths
            ]

            if multiple_faces_detected:
                io.log_info(
                    "Warning: multiple faces detected. Motion blur will not be used."
                )
                io.log_info("")
            else:
                s = 256
                local_pts = [(s // 2 - 1, s // 2 - 1),
                             (s // 2 - 1, 0)]  #center+up
                frames_len = len(frames)
                for i in io.progress_bar_generator(range(len(frames)),
                                                   "Computing motion vectors"):
                    fi_prev = frames[max(0, i - 1)].frame_info
                    fi = frames[i].frame_info
                    fi_next = frames[min(i + 1, frames_len - 1)].frame_info
                    if len(fi_prev.landmarks_list) == 0 or \
                       len(fi.landmarks_list) == 0 or \
                       len(fi_next.landmarks_list) == 0:
                        continue

                    mat_prev = LandmarksProcessor.get_transform_mat(
                        fi_prev.landmarks_list[0], s, face_type=FaceType.FULL)
                    mat = LandmarksProcessor.get_transform_mat(
                        fi.landmarks_list[0], s, face_type=FaceType.FULL)
                    mat_next = LandmarksProcessor.get_transform_mat(
                        fi_next.landmarks_list[0], s, face_type=FaceType.FULL)

                    pts_prev = LandmarksProcessor.transform_points(
                        local_pts, mat_prev, True)
                    pts = LandmarksProcessor.transform_points(
                        local_pts, mat, True)
                    pts_next = LandmarksProcessor.transform_points(
                        local_pts, mat_next, True)

                    prev_vector = pts[0] - pts_prev[0]
                    next_vector = pts_next[0] - pts[0]

                    motion_vector = pts_next[0] - pts_prev[0]
                    fi.motion_power = npla.norm(motion_vector)

                    motion_vector = motion_vector / fi.motion_power if fi.motion_power != 0 else np.array(
                        [0, 0], dtype=np.float32)

                    fi.motion_deg = -math.atan2(
                        motion_vector[1], motion_vector[0]) * 180 / math.pi

        if len(frames) == 0:
            io.log_info("No frames to merge in input_dir.")
        else:
            InteractiveMergerSubprocessor(
                is_interactive=is_interactive,
                merger_session_filepath=model.get_strpath_storage_for_file(
                    'merger_session.dat'),
                predictor_func=predictor_func,
                predictor_input_shape=predictor_input_shape,
                face_enhancer_func=face_enhancer_func,
                xseg_256_extract_func=xseg_256_extract_func,
                merger_config=cfg,
                frames=frames,
                frames_root_path=input_path,
                output_path=output_path,
                output_mask_path=output_mask_path,
                model_iter=model.get_iter(),
                subprocess_count=subprocess_count,
            ).run()

        model.finalize()

    except Exception:
        print(traceback.format_exc())
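
A minimal standalone sketch of the motion-vector math in the masked branch above, assuming 2D landmark-derived points as numpy arrays:

import math
import numpy as np

pt_prev = np.array([100.0, 120.0])   # face centre in the previous frame
pt_next = np.array([110.0, 115.0])   # face centre in the next frame

motion_vector = pt_next - pt_prev
motion_power = np.linalg.norm(motion_vector)                                  # motion blur strength
motion_deg = -math.atan2(motion_vector[1], motion_vector[0]) * 180 / math.pi  # blur direction in degrees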
Example #29
0
def video_from_sequence_(input_dir,
                         output_file,
                         reference_file=None,
                         ext=None,
                         fps=None,
                         bitrate=None,
                         include_audio=False,
                         lossless=None):
    input_path = Path(input_dir)
    output_file_path = Path(output_file)
    reference_file_path = Path(
        reference_file) if reference_file is not None else None

    if not input_path.exists():
        io.log_err("input_dir not found.")
        return

    if not output_file_path.parent.exists():
        output_file_path.parent.mkdir(parents=True, exist_ok=True)

    out_ext = output_file_path.suffix

    if ext is None:
        ext = io.input_str("Input image format (extension)", "png")

    if lossless is None:
        lossless = io.input_bool("Use lossless codec", False)

    video_id = None
    audio_id = None
    ref_in_a = None
    if reference_file_path is not None:
        if reference_file_path.suffix == '.*':
            reference_file_path = pathex.get_first_file_by_stem(
                reference_file_path.parent, reference_file_path.stem)
        else:
            if not reference_file_path.exists():
                reference_file_path = None

        if reference_file_path is None:
            io.log_err("reference_file not found.")
            return

        #probing reference file
        probe = ffmpeg.probe(str(reference_file_path))

        #getting first video and audio streams id with fps
        for stream in probe['streams']:
            if video_id is None and stream['codec_type'] == 'video':
                video_id = stream['index']
                fps = stream['r_frame_rate']

            if audio_id is None and stream['codec_type'] == 'audio':
                audio_id = stream['index']

        if audio_id is not None:
            #has audio track
            ref_in_a = ffmpeg.input(str(reference_file_path))[str(audio_id)]

    if fps is None:
        #if fps not specified and not overwritten by reference-file
        fps = max(1, io.input_int("Enter FPS", 25))

    if not lossless and bitrate is None:
        bitrate = 1  #max (1, settings.bitrate)#io.input_int ("Bitrate of output file in MB/s", 16)

    input_image_paths = pathex.get_image_paths(input_path)

    i_in = ffmpeg.input('pipe:', format='image2pipe', r=fps)

    output_args = [i_in]

    if include_audio and ref_in_a is not None:
        output_args += [ref_in_a]

    output_args += [str(output_file_path)]

    output_kwargs = {}

    if lossless:
        output_kwargs.update({
            "c:v": "libx264",
            "crf": "0",
            "pix_fmt": "yuv420p",
        })
    else:
        output_kwargs.update({
            "c:v": "libx264",
            "b:v": "%dM" % (1),
            "pix_fmt": "yuv420p",
        })

    if include_audio and ref_in_a is not None:
        output_kwargs.update({
            "c:a": "aac",
            "b:a": "192k",
            "ar": "48000",
            "strict": "experimental"
        })

    job = (ffmpeg.output(*output_args, **output_kwargs).overwrite_output())

    try:
        job_run = job.run_async(pipe_stdin=True)

        for image_path in input_image_paths:
            with open(image_path, "rb") as f:
                image_bytes = f.read()
                job_run.stdin.write(image_bytes)

        job_run.stdin.close()
        job_run.wait()
    except Exception:
        io.log_err("ffmpeg fail, job commandline:" + str(job.compile()))
Example #30
0
    def ask_settings(self):
        s = """Choose mode: \n"""
        for key in mode_dict.keys():
            s += f"""({key}) {mode_dict[key]}\n"""
        io.log_info(s)
        mode = io.input_int ("", mode_str_dict.get(self.default_mode, 1) )

        self.mode = mode_dict.get (mode, self.default_mode )

        if 'raw' not in self.mode:
            if self.mode == 'hist-match':
                self.masked_hist_match = io.input_bool("Masked hist match?", True)

            if self.mode == 'hist-match' or self.mode == 'seamless-hist-match':
                self.hist_match_threshold = np.clip ( io.input_int("Hist match threshold", 255, add_info="0..255"), 0, 255)

        if self.face_type == FaceType.FULL:
            s = """Choose mask mode: \n"""
            for key in full_face_mask_mode_dict.keys():
                s += f"""({key}) {full_face_mask_mode_dict[key]}\n"""
            io.log_info(s)

            self.mask_mode = io.input_int ("", 1, valid_list=full_face_mask_mode_dict.keys(), help_message="If you learned the mask, then option 1 should be chosen. 'dst' mask is the raw shaky mask from dst aligned images. 'FAN-prd' - use the super smooth mask produced by the pretrained FAN model from the predicted face. 'FAN-dst' - use the super smooth mask produced by the pretrained FAN model from the dst face. 'FAN-prd*FAN-dst' or 'learned*FAN-prd*FAN-dst' - use multiplied masks.")
        else:
            s = """Choose mask mode: \n"""
            for key in half_face_mask_mode_dict.keys():
                s += f"""({key}) {half_face_mask_mode_dict[key]}\n"""
            io.log_info(s)
            self.mask_mode = io.input_int ("", 1, valid_list=half_face_mask_mode_dict.keys(), help_message="If you learned the mask, then option 1 should be chosen. 'dst' mask is the raw shaky mask from dst aligned images.")

        if 'raw' not in self.mode:
            self.erode_mask_modifier = np.clip ( io.input_int ("Choose erode mask modifier", 0, add_info="-400..400"), -400, 400)
            self.blur_mask_modifier =  np.clip ( io.input_int ("Choose blur mask modifier", 0, add_info="0..400"), 0, 400)
            self.motion_blur_power = np.clip ( io.input_int ("Choose motion blur power", 0, add_info="0..100"), 0, 100)

        self.output_face_scale = np.clip (io.input_int ("Choose output face scale modifier", 0, add_info="-50..50" ), -50, 50)

        if 'raw' not in self.mode:
            self.color_transfer_mode = io.input_str ( "Color transfer to predicted face", None, valid_list=list(ctm_str_dict.keys())[1:] )
            self.color_transfer_mode = ctm_str_dict[self.color_transfer_mode]

        super().ask_settings()
 
        self.super_resolution_power = np.clip ( io.input_int ("Choose super resolution power", 0, add_info="0..100", help_message="Enhance details by applying superresolution network."), 0, 100)

        if 'raw' not in self.mode:
            self.image_denoise_power = np.clip ( io.input_int ("Choose image degrade by denoise power", 0, add_info="0..500"), 0, 500)
            self.bicubic_degrade_power = np.clip ( io.input_int ("Choose image degrade by bicubic rescale power", 0, add_info="0..100"), 0, 100)
            self.color_degrade_power = np.clip (  io.input_int ("Degrade color power of final image", 0, add_info="0..100"), 0, 100)

        io.log_info ("")