Example #1
    def onInitializeOptions(self, is_first_run, ask_override):
        default_resolution = 128
        default_archi = 'df'
        default_face_type = 'f'

        if is_first_run:
            resolution = io.input_int("Resolution ( 64-256 ?:help skip:128) : ", default_resolution, help_message="More resolution requires more VRAM and time to train. Value will be adjusted to multiple of 16.")
            resolution = np.clip (resolution, 64, 256)
            while np.modf(resolution / 16)[0] != 0.0:
                resolution -= 1
            self.options['resolution'] = resolution

            self.options['face_type'] = io.input_str ("Half or Full face? (h/f, ?:help skip:f) : ", default_face_type, ['h','f'], help_message="Half face has better resolution, but covers less area of cheeks.").lower()
            self.options['learn_mask'] = io.input_bool ("Learn mask? (y/n, ?:help skip:y) : ", True, help_message="Learning mask can help model to recognize face directions. Learn without mask can reduce model size, in this case converter forced to use 'not predicted mask' that is not smooth as predicted. Model with style values can be learned without mask and produce same quality result.")
            self.options['archi'] = io.input_str ("AE architecture (df, liae, vg ?:help skip:%s) : " % (default_archi) , default_archi, ['df','liae','vg'], help_message="'df' keeps faces more natural. 'liae' can fix overly different face shapes. 'vg' - currently testing.").lower()
        else:
            self.options['resolution'] = self.options.get('resolution', default_resolution)
            self.options['face_type'] = self.options.get('face_type', default_face_type)
            self.options['learn_mask'] = self.options.get('learn_mask', True)
            self.options['archi'] = self.options.get('archi', default_archi)

        default_ae_dims = 256 if self.options['archi'] == 'liae' else 512
        default_ed_ch_dims = 42
        if is_first_run:
            self.options['ae_dims'] = np.clip ( io.input_int("AutoEncoder dims (32-1024 ?:help skip:%d) : " % (default_ae_dims) , default_ae_dims, help_message="More dims are better, but requires more VRAM. You can fine-tune model size to fit your GPU." ), 32, 1024 )
            self.options['ed_ch_dims'] = np.clip ( io.input_int("Encoder/Decoder dims per channel (21-85 ?:help skip:%d) : " % (default_ed_ch_dims) , default_ed_ch_dims, help_message="More dims are better, but requires more VRAM. You can fine-tune model size to fit your GPU." ), 21, 85 )
        else:
            self.options['ae_dims'] = self.options.get('ae_dims', default_ae_dims)
            self.options['ed_ch_dims'] = self.options.get('ed_ch_dims', default_ed_ch_dims)

        if is_first_run:
            self.options['lighter_encoder'] = io.input_bool ("Use lightweight encoder? (y/n, ?:help skip:n) : ", False, help_message="Lightweight encoder is 35% faster, requires less VRAM, but sacrificing overall quality.")

            if self.options['archi'] != 'vg':
                self.options['multiscale_decoder'] = io.input_bool ("Use multiscale decoder? (y/n, ?:help skip:n) : ", False, help_message="Multiscale decoder helps to get better details.")
        else:
            self.options['lighter_encoder'] = self.options.get('lighter_encoder', False)

            if self.options['archi'] != 'vg':
                self.options['multiscale_decoder'] = self.options.get('multiscale_decoder', False)

        default_face_style_power = 0.0
        default_bg_style_power = 0.0
        if is_first_run or ask_override:
            def_pixel_loss = self.options.get('pixel_loss', False)
            self.options['pixel_loss'] = io.input_bool ("Use pixel loss? (y/n, ?:help skip: n/default ) : ", def_pixel_loss, help_message="Default DSSIM loss good for initial understanding structure of faces. Use pixel loss after 15-25k epochs to enhance fine details and decrease face jitter.")

            default_face_style_power = default_face_style_power if is_first_run else self.options.get('face_style_power', default_face_style_power)
            self.options['face_style_power'] = np.clip ( io.input_number("Face style power ( 0.0 .. 100.0 ?:help skip:%.2f) : " % (default_face_style_power), default_face_style_power,
                                                                               help_message="Learn to transfer face style details such as light and color conditions. Warning: Enable it only after 10k epochs, when predicted face is clear enough to start learn style. Start from 0.1 value and check history changes."), 0.0, 100.0 )

            default_bg_style_power = default_bg_style_power if is_first_run else self.options.get('bg_style_power', default_bg_style_power)
            self.options['bg_style_power'] = np.clip ( io.input_number("Background style power ( 0.0 .. 100.0 ?:help skip:%.2f) : " % (default_bg_style_power), default_bg_style_power,
                                                                               help_message="Learn to transfer image around face. This can make face more like dst."), 0.0, 100.0 )
        else:
            self.options['pixel_loss'] = self.options.get('pixel_loss', False)
            self.options['face_style_power'] = self.options.get('face_style_power', default_face_style_power)
            self.options['bg_style_power'] = self.options.get('bg_style_power', default_bg_style_power)
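
The clip-and-decrement loop near the top of this snippet simply snaps the requested resolution down to the nearest multiple of 16 within the 64-256 range. A minimal equivalent sketch (assuming numpy is imported as np; the helper name is illustrative, not part of the source):

def snap_resolution(resolution, lo=64, hi=256, step=16):
    # clamp to [lo, hi], then round down to a multiple of step
    return int(np.clip(resolution, lo, hi)) // step * step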
Example #2
def cut_video(input_file,
              from_time=None,
              to_time=None,
              audio_track_id=None,
              bitrate=None):
    input_file_path = Path(input_file)
    if not input_file_path.exists():
        io.log_err("input_file not found.")
        return

    output_file_path = input_file_path.parent / (
        input_file_path.stem + "_cut" + input_file_path.suffix)

    if from_time is None:
        from_time = io.input_str("From time (skip: 00:00:00.000) : ",
                                 "00:00:00.000")

    if to_time is None:
        to_time = io.input_str("To time (skip: 00:00:00.000) : ",
                               "00:00:00.000")

    if audio_track_id is None:
        audio_track_id = io.input_int("Specify audio track id. ( skip:0 ) : ",
                                      0)

    if bitrate is None:
        bitrate = max(
            1,
            io.input_int("Bitrate of output file in MB/s ? (default:25) : ",
                         25))

    kwargs = {
        "c:v": "libx264",
        "b:v": "%dM" % (bitrate),
        "pix_fmt": "yuv420p",
    }

    job = ffmpeg.input(str(input_file_path), ss=from_time, to=to_time)

    job_v = job['v:0']
    job_a = job['a:' + str(audio_track_id) + '?']

    job = ffmpeg.output(job_v, job_a, str(output_file_path),
                        **kwargs).overwrite_output()

    try:
        job = job.run()
    except Exception:
        io.log_err("ffmpeg fail, job commandline:" + str(job.compile()))
Example #3
    def onInitializeOptions(self, is_first_run, ask_override):
        default_face_type = 'f'
        if is_first_run:
            self.options['resolution'] = io.input_int(
                "Resolution ( 128,224 ?:help skip:128) : ", 128, [128, 224])
        else:
            self.options['resolution'] = self.options.get('resolution', 128)

        if is_first_run:
            self.options['face_type'] = io.input_str(
                "Half or Full face? (h/f, ?:help skip:f) : ",
                default_face_type, ['h', 'f'],
                help_message="").lower()
        else:
            self.options['face_type'] = self.options.get(
                'face_type', default_face_type)

        if (is_first_run or
                ask_override) and 'tensorflow' in self.device_config.backend:
            def_optimizer_mode = self.options.get('optimizer_mode', 1)
            self.options['optimizer_mode'] = io.input_int(
                "Optimizer mode? ( 1,2,3 ?:help skip:%d) : " %
                (def_optimizer_mode),
                def_optimizer_mode,
                help_message=
                "1 - no changes. 2 - allows you to train x2 bigger network consuming RAM. 3 - allows you to train x3 bigger network consuming huge amount of RAM and slower, depends on CPU power."
            )
        else:
            self.options['optimizer_mode'] = self.options.get(
                'optimizer_mode', 1)
Example #4
def denoise_image_sequence(input_dir, ext=None, factor=None):
    input_path = Path(input_dir)

    if not input_path.exists():
        io.log_err("input_dir not found.")
        return

    if ext is None:
        ext = io.input_str(
            "Input image format (extension)? ( default:png ) : ", "png")

    if factor is None:
        factor = np.clip(
            io.input_int("Denoise factor? (1-20 default:5) : ", 5), 1, 20)

    kwargs = {}
    if ext == 'jpg':
        kwargs.update({'q:v': '2'})

    job = (ffmpeg.input(str(input_path / ('%5d.' + ext))).filter(
        "hqdn3d", factor, factor, 5,
        5).output(str(input_path / ('%5d.' + ext)), **kwargs))

    try:
        job = job.run()
    except Exception:
        io.log_err("ffmpeg fail, job commandline:" + str(job.compile()))
Example #5
    def onInitializeOptions(self, is_first_run, ask_override):
        default_face_type = 'f'
        if is_first_run:
            self.options['face_type'] = io.input_str(
                "Half or Full face? (h/f, ?:help skip:f) : ",
                default_face_type, ['h', 'f'],
                help_message="").lower()
        else:
            self.options['face_type'] = self.options.get(
                'face_type', default_face_type)
Example #6
def extract_video(input_file, output_dir, output_ext=None, fps=None):
    input_file_path = Path(input_file)
    output_path = Path(output_dir)

    if not output_path.exists():
        output_path.mkdir(exist_ok=True)

    if input_file_path.suffix == '.*':
        input_file_path = Path_utils.get_first_file_by_stem(
            input_file_path.parent, input_file_path.stem)
    else:
        if not input_file_path.exists():
            input_file_path = None

    if input_file_path is None:
        io.log_err("input_file not found.")
        return

    if fps is None:
        fps = io.input_int(
            "Enter FPS ( ?:help skip:fullfps ) : ",
            0,
            help_message=
            "How many frames of every second of the video will be extracted.")

    if output_ext is None:
        output_ext = io.input_str(
            "Output image format? ( jpg png ?:help skip:png ) : ",
            "png", ["png", "jpg"],
            help_message=
            "png is lossless, but extraction is x10 slower for HDD, requires x10 more disk space than jpg."
        )

    for filename in Path_utils.get_image_paths(output_path,
                                               ['.' + output_ext]):
        Path(filename).unlink()

    job = ffmpeg.input(str(input_file_path))

    kwargs = {'pix_fmt': 'rgb24'}
    if fps != 0:
        kwargs.update({'r': str(fps)})

    if output_ext == 'jpg':
        kwargs.update({'q:v': '2'})  #highest quality for jpg

    job = job.output(str(output_path / ('%5d.' + output_ext)), **kwargs)

    try:
        job = job.run()
    except Exception:
        io.log_err("ffmpeg fail, job commandline:" + str(job.compile()))
Example #7
    def onInitializeOptions(self, is_first_run, ask_override):
        yn_str = {True:'y',False:'n'}

        default_face_type = 'f'
        if is_first_run:
            self.options['face_type'] = io.input_str ("Half or Full face? (h/f, ?:help skip:f) : ", default_face_type, ['h','f'], help_message="Half face has better resolution, but covers less area of cheeks.").lower()
        else:
            self.options['face_type'] = self.options.get('face_type', default_face_type)

        def_train_bgr = self.options.get('train_bgr', True)
        if is_first_run or ask_override:
            self.options['train_bgr'] = io.input_bool ("Train bgr? (y/n, ?:help skip: %s) : " % (yn_str[def_train_bgr]), def_train_bgr)
        else:
            self.options['train_bgr'] = self.options.get('train_bgr', def_train_bgr)
Example #8
    def ask_settings(self):

        s = """Choose mode: \n"""
        for key in self.mode_dict.keys():
            s += f"""({key}) {self.mode_dict[key]}\n"""
        s += f"""Default: {self.default_mode} : """

        mode = io.input_int (s, self.default_mode)

        self.mode = self.mode_dict.get (mode, self.mode_dict[self.default_mode] )

        if 'raw' not in self.mode:
            if self.mode == 'hist-match' or self.mode == 'hist-match-bw':
                self.masked_hist_match = io.input_bool("Masked hist match? (y/n skip:y) : ", True)

            if self.mode == 'hist-match' or self.mode == 'hist-match-bw' or self.mode == 'seamless-hist-match':
                self.hist_match_threshold = np.clip ( io.input_int("Hist match threshold [0..255] (skip:255) :  ", 255), 0, 255)

        if self.face_type == FaceType.FULL:
            s = """Choose mask mode: \n"""
            for key in self.full_face_mask_mode_dict.keys():
                s += f"""({key}) {self.full_face_mask_mode_dict[key]}\n"""
            s += f"""?:help Default: 1 : """

            self.mask_mode = io.input_int (s, 1, valid_list=self.full_face_mask_mode_dict.keys(), help_message="If you learned the mask, then option 1 should be choosed. 'dst' mask is raw shaky mask from dst aligned images. 'FAN-prd' - using super smooth mask by pretrained FAN-model from predicted face. 'FAN-dst' - using super smooth mask by pretrained FAN-model from dst face. 'FAN-prd*FAN-dst' or 'learned*FAN-prd*FAN-dst' - using multiplied masks.")
        else:
            s = """Choose mask mode: \n"""
            for key in self.half_face_mask_mode_dict.keys():
                s += f"""({key}) {self.half_face_mask_mode_dict[key]}\n"""
            s += f"""?:help , Default: 1 : """
            self.mask_mode = io.input_int (s, 1, valid_list=self.half_face_mask_mode_dict.keys(), help_message="If you learned the mask, then option 1 should be choosed. 'dst' mask is raw shaky mask from dst aligned images.")

        if 'raw' not in self.mode:
            self.erode_mask_modifier = self.base_erode_mask_modifier + np.clip ( io.input_int ("Choose erode mask modifier [-200..200] (skip:%d) : " % (self.default_erode_mask_modifier), self.default_erode_mask_modifier), -200, 200)
            self.blur_mask_modifier = self.base_blur_mask_modifier + np.clip ( io.input_int ("Choose blur mask modifier [-200..200] (skip:%d) : " % (self.default_blur_mask_modifier), self.default_blur_mask_modifier), -200, 200)
            self.motion_blur_power = np.clip ( io.input_int ("Choose motion blur power [0..100] (skip:%d) : " % (0), 0), 0, 100)

        self.output_face_scale = np.clip (io.input_int ("Choose output face scale modifier [-50..50] (skip:0) : ", 0), -50, 50)

        if 'raw' not in self.mode:
            self.color_transfer_mode = io.input_str ("Apply color transfer to predicted face? Choose mode ( rct/lct skip:None ) : ", None, ['rct','lct'])
            self.color_transfer_mode = self.ctm_str_dict[self.color_transfer_mode]

        super().ask_settings()

        if 'raw' not in self.mode:
            self.color_degrade_power = np.clip (  io.input_int ("Degrade color power of final image [0..100] (skip:0) : ", 0), 0, 100)
            self.export_mask_alpha = io.input_bool("Export png with alpha channel of the mask? (y/n skip:n) : ", False)

        io.log_info ("")
Example #9
def extract_video(input_file, output_dir, output_ext=None, fps=None):
    input_file_path = Path(input_file)
    output_path = Path(output_dir)

    if not output_path.exists():
        output_path.mkdir(exist_ok=True)

    if input_file_path.suffix == '.*':
        input_file_path = Path_utils.get_first_file_by_stem(
            input_file_path.parent, input_file_path.stem)
    else:
        if not input_file_path.exists():
            input_file_path = None

    if input_file_path is None:
        io.log_err("input_file not found.")
        return

    if fps is None:
        fps = io.input_int("输入帧率[FPS] ( ?:帮助 跳过:默认帧率 ) : ",
                           0,
                           help_message="FPS是指每秒多少张图片,一般视频为24,推荐输入12")

    if output_ext is None:
        output_ext = io.input_str(
            "输出格式? ( jpg还是png ?:帮助 默认为png ) : ",
            "png", ["png", "jpg"],
            help_message="png 为无损格式, 但是比JPG慢10倍, 空间也比JPG大十倍,建议使用JPG格式.")

    for filename in Path_utils.get_image_paths(output_path,
                                               ['.' + output_ext]):
        Path(filename).unlink()

    job = ffmpeg.input(str(input_file_path))

    kwargs = {'pix_fmt': 'rgb24'}
    if fps != 0:
        kwargs.update({'r': str(fps)})

    if output_ext == 'jpg':
        kwargs.update({'q:v': '2'})  #highest quality for jpg

    job = job.output(str(output_path / ('%5d.' + output_ext)), **kwargs)

    try:
        job = job.run()
    except Exception:
        io.log_err("ffmpeg 调用失败, 错误提示:" + str(job.compile()))
Example #10
    def onInitializeOptions(self, is_first_run, ask_override):
        default_face_type = 'f'
        if is_first_run:
            self.options['resolution'] = io.input_int(
                "分辨率 ( 128,256 帮助:? 跳过:128) : ",
                128, [128, 256],
                help_message="更高的分辨率需要更多的VRAM和训练时间。 数值调整成16的倍数。")
            self.options['face_type'] = io.input_str(
                "半脸(h)全脸(f)? (帮助:? 跳过:f) : ",
                default_face_type, ['h', 'f'],
                help_message="半脸有更好的分辨率,但覆盖的脸颊面积较小。").lower()

        else:
            self.options['resolution'] = self.options.get('resolution', 128)
            self.options['face_type'] = self.options.get(
                'face_type', default_face_type)
Example #11
    def onInitializeOptions(self, is_first_run, ask_override):
        default_resolution = 128
        default_face_type = 'f'

        if is_first_run:
            resolution = self.options['resolution'] = io.input_int(f"Resolution ( 64-256 ?:help skip:{default_resolution}) : ", default_resolution, help_message="More resolution requires more VRAM and time to train. Value will be adjusted to multiple of 16.")
            resolution = np.clip (resolution, 64, 256)
            while np.modf(resolution / 16)[0] != 0.0:
                resolution -= 1
        else:
            self.options['resolution'] = self.options.get('resolution', default_resolution)

        if is_first_run:
            self.options['face_type'] = io.input_str ("Half or Full face? (h/f, ?:help skip:f) : ", default_face_type, ['h','f'], help_message="").lower()
        else:
            self.options['face_type'] = self.options.get('face_type', default_face_type)
Example #12
def extract_video(input_file, output_dir, output_ext=None, fps=None):
    input_file_path = Path(input_file)
    output_path = Path(output_dir)

    if not output_path.exists():
        output_path.mkdir(exist_ok=True)

    if input_file_path.suffix == '.*':
        input_file_path = Path_utils.get_first_file_by_stem(
            input_file_path.parent, input_file_path.stem)
    else:
        if not input_file_path.exists():
            input_file_path = None

    if input_file_path is None:
        io.log_err("input_file not found.")
        return

    if output_ext is None:
        output_ext = io.input_str(
            "Output image format (extension)? ( default:png ) : ", "png")

    if fps is None:
        fps = io.input_int(
            "Enter FPS ( ?:help skip:fullfps ) : ",
            0,
            help_message=
            "How many frames of every second of the video will be extracted.")

    for filename in Path_utils.get_image_paths(output_path,
                                               ['.' + output_ext]):
        Path(filename).unlink()

    job = ffmpeg.input(str(input_file_path))

    kwargs = {}
    if fps != 0:
        kwargs.update({'r': str(fps)})

    job = job.output(str(output_path / ('%5d.' + output_ext)), **kwargs)

    try:
        job = job.run()
    except:
        io.log_err("ffmpeg fail, job commandline:" + str(job.compile()))
Example #13
    def onInitializeOptions(self, is_first_run, ask_override):
        default_face_type = 'f'
        if is_first_run:
            self.options['resolution'] = io.input_int(
                "Resolution ( 128,256 ?:help skip:128) : ",
                128, [128, 256],
                help_message=
                "More resolution requires more VRAM and time to train. Value will be adjusted to multiple of 16."
            )
            self.options['face_type'] = io.input_str(
                "Half or Full face? (h/f, ?:help skip:f) : ",
                default_face_type, ['h', 'f'],
                help_message=
                "Half face has better resolution, but covers less area of cheeks."
            ).lower()

        else:
            self.options['resolution'] = self.options.get('resolution', 128)
            self.options['face_type'] = self.options.get(
                'face_type', default_face_type)
Example #14
File: F.py Project: wa407/YML
def change_workspace():
    wss = []
    for f in os.listdir(get_root_path()):
        fpath = os.path.join(get_root_path(), f)
        if os.path.isfile(fpath) and f.startswith("@workspace"):
            os.remove(fpath)
        elif os.path.isdir(fpath) and f.startswith("workspace"):
            wss.append(f)
    inputs = "1234567890"[0:len(wss)]
    for i in range(0, len(wss)):
        io.log_info("[ %s ] %s" % (inputs[i], wss[i]))
    # default must be a string, otherwise skipping the prompt would try to index an int
    no = io.input_str("Select Workspace:", "1")[0]
    idx = inputs.find(no)
    if idx < 0:
        raise Exception("Invalid Idx " + no)
    ws = wss[idx]
    io.log_info("Select " + ws)
    with open(os.path.join(get_root_path(), "@" + ws), 'w') as f:
        f.write(ws)
Example #15
    def onInitializeOptions(self, is_first_run, ask_override):
        default_resolution = 128
        default_face_type = 'f'

        if is_first_run:
            resolution = self.options['resolution'] = io.input_int(
                f"Resolution ( 64-256 ?:help skip:{default_resolution}) : ",
                default_resolution,
                help_message=
                "More resolution requires more VRAM and time to train. Value will be adjusted to multiple of 16."
            )
            resolution = np.clip(resolution, 64, 256)
            while np.modf(resolution / 16)[0] != 0.0:
                resolution -= 1
        else:
            self.options['resolution'] = self.options.get(
                'resolution', default_resolution)

        if is_first_run:
            self.options['face_type'] = io.input_str(
                "Half or Full face? (h/f, ?:help skip:f) : ",
                default_face_type, ['h', 'f'],
                help_message="").lower()
        else:
            self.options['face_type'] = self.options.get(
                'face_type', default_face_type)

        if (is_first_run or
                ask_override) and 'tensorflow' in self.device_config.backend:
            def_optimizer_mode = self.options.get('optimizer_mode', 1)
            self.options['optimizer_mode'] = io.input_int(
                "Optimizer mode? ( 1,2,3 ?:help skip:%d) : " %
                (def_optimizer_mode),
                def_optimizer_mode,
                help_message=
                "1 - no changes. 2 - allows you to train x2 bigger network consuming RAM. 3 - allows you to train x3 bigger network consuming huge amount of RAM and slower, depends on CPU power."
            )
        else:
            self.options['optimizer_mode'] = self.options.get(
                'optimizer_mode', 1)
Example #16
    def __init__(self,
                 predictor_func,
                 predictor_input_size=0,
                 predictor_masked=True,
                 face_type=FaceType.FULL,
                 default_mode=4,
                 base_erode_mask_modifier=0,
                 base_blur_mask_modifier=0,
                 default_erode_mask_modifier=0,
                 default_blur_mask_modifier=0,
                 clip_hborder_mask_per=0):

        super().__init__(predictor_func, Converter.TYPE_FACE)

        #dummy predict and sleep so tensorflow caches kernels. If removed, conversion speed will be x2 slower
        predictor_func(
            np.zeros((predictor_input_size, predictor_input_size, 3),
                     dtype=np.float32))
        time.sleep(2)

        predictor_func_host, predictor_func = SubprocessFunctionCaller.make_pair(
            predictor_func)
        self.predictor_func_host = AntiPickler(predictor_func_host)
        self.predictor_func = predictor_func

        self.predictor_masked = predictor_masked
        self.predictor_input_size = predictor_input_size
        self.face_type = face_type
        self.clip_hborder_mask_per = clip_hborder_mask_per

        mode = io.input_int(
            "Choose mode: (1) overlay, (2) hist match, (3) hist match bw, (4) seamless, (5) raw. Default - %d : "
            % (default_mode), default_mode)

        mode_dict = {
            1: 'overlay',
            2: 'hist-match',
            3: 'hist-match-bw',
            4: 'seamless',
            5: 'raw'
        }

        self.mode = mode_dict.get(mode, mode_dict[default_mode])

        if self.mode == 'raw':
            mode = io.input_int(
                "Choose raw mode: (1) rgb, (2) rgb+mask (default), (3) mask only, (4) predicted only : ",
                2)
            self.raw_mode = {
                1: 'rgb',
                2: 'rgb-mask',
                3: 'mask-only',
                4: 'predicted-only'
            }.get(mode, 'rgb-mask')

        if self.mode != 'raw':

            if self.mode == 'seamless':
                if io.input_bool("Seamless hist match? (y/n skip:n) : ",
                                 False):
                    self.mode = 'seamless-hist-match'

            if self.mode == 'hist-match' or self.mode == 'hist-match-bw':
                self.masked_hist_match = io.input_bool(
                    "Masked hist match? (y/n skip:y) : ", True)

            if self.mode == 'hist-match' or self.mode == 'hist-match-bw' or self.mode == 'seamless-hist-match':
                self.hist_match_threshold = np.clip(
                    io.input_int(
                        "Hist match threshold [0..255] (skip:255) :  ", 255),
                    0, 255)

        if face_type == FaceType.FULL:
            self.mask_mode = np.clip(
                io.input_int(
                    "Mask mode: (1) learned, (2) dst, (3) FAN-prd, (4) FAN-dst , (5) FAN-prd*FAN-dst (6) learned*FAN-prd*FAN-dst (?) help. Default - %d : "
                    % (1),
                    1,
                    help_message=
                    "If you learned mask, then option 1 should be choosed. 'dst' mask is raw shaky mask from dst aligned images. 'FAN-prd' - using super smooth mask by pretrained FAN-model from predicted face. 'FAN-dst' - using super smooth mask by pretrained FAN-model from dst face. 'FAN-prd*FAN-dst' or 'learned*FAN-prd*FAN-dst' - using multiplied masks."
                ), 1, 6)
        else:
            self.mask_mode = np.clip(
                io.input_int(
                    "Mask mode: (1) learned, (2) dst . Default - %d : " % (1),
                    1), 1, 2)

        if self.mask_mode >= 3 and self.mask_mode <= 6:
            self.fan_seg = None

        if self.mode != 'raw':
            self.erode_mask_modifier = base_erode_mask_modifier + np.clip(
                io.input_int(
                    "Choose erode mask modifier [-200..200] (skip:%d) : " %
                    (default_erode_mask_modifier),
                    default_erode_mask_modifier), -200, 200)
            self.blur_mask_modifier = base_blur_mask_modifier + np.clip(
                io.input_int(
                    "Choose blur mask modifier [-200..200] (skip:%d) : " %
                    (default_blur_mask_modifier), default_blur_mask_modifier),
                -200, 200)

        self.output_face_scale = np.clip(
            1.0 + io.input_int(
                "Choose output face scale modifier [-50..50] (skip:0) : ", 0) *
            0.01, 0.5, 1.5)

        if self.mode != 'raw':
            self.color_transfer_mode = io.input_str(
                "Apply color transfer to predicted face? Choose mode ( rct/lct skip:None ) : ",
                None, ['rct', 'lct'])

        self.super_resolution = io.input_bool(
            "Apply super resolution? (y/n ?:help skip:n) : ",
            False,
            help_message="Enhance details by applying DCSCN network.")

        if self.mode != 'raw':
            self.final_image_color_degrade_power = np.clip(
                io.input_int(
                    "Degrade color power of final image [0..100] (skip:0) : ",
                    0), 0, 100)
            self.alpha = io.input_bool(
                "Export png with alpha channel? (y/n skip:n) : ", False)

        io.log_info("")

        if self.super_resolution:
            host_proc, dc_upscale = SubprocessFunctionCaller.make_pair(
                imagelib.DCSCN().upscale)
            self.dc_host = AntiPickler(host_proc)
            self.dc_upscale = dc_upscale
        else:
            self.dc_host = None
Example #17
    def onInitializeOptions(self, is_first_run, ask_override):
        yn_str = {True: 'y', False: 'n'}

        default_resolution = 128
        default_archi = 'df'
        default_face_type = 'f'

        if is_first_run:
            resolution = io.input_int(
                "Resolution ( 64-256 ?:help skip:128) : ",
                default_resolution,
                help_message=
                "More resolution requires more VRAM and time to train. Value will be adjusted to multiple of 16."
            )
            resolution = np.clip(resolution, 64, 256)
            while np.modf(resolution / 16)[0] != 0.0:
                resolution -= 1
            self.options['resolution'] = resolution

            self.options['face_type'] = io.input_str(
                "Half or Full face? (h/f, ?:help skip:f) : ",
                default_face_type, ['h', 'f'],
                help_message=
                "Half face has better resolution, but covers less area of cheeks."
            ).lower()
            self.options['learn_mask'] = io.input_bool(
                "Learn mask? (y/n, ?:help skip:y) : ",
                True,
                help_message=
                "Learning mask can help model to recognize face directions. Learn without mask can reduce model size, in this case converter forced to use 'not predicted mask' that is not smooth as predicted. Model with style values can be learned without mask and produce same quality result."
            )
        else:
            self.options['resolution'] = self.options.get(
                'resolution', default_resolution)
            self.options['face_type'] = self.options.get(
                'face_type', default_face_type)
            self.options['learn_mask'] = self.options.get('learn_mask', True)

        if (is_first_run or
                ask_override) and 'tensorflow' in self.device_config.backend:
            def_optimizer_mode = self.options.get('optimizer_mode', 1)
            self.options['optimizer_mode'] = io.input_int(
                "Optimizer mode? ( 1,2,3 ?:help skip:%d) : " %
                (def_optimizer_mode),
                def_optimizer_mode,
                help_message=
                "1 - no changes. 2 - allows you to train x2 bigger network consuming RAM. 3 - allows you to train x3 bigger network consuming huge amount of RAM and slower, depends on CPU power."
            )
        else:
            self.options['optimizer_mode'] = self.options.get(
                'optimizer_mode', 1)

        if is_first_run:
            self.options['archi'] = io.input_str(
                "AE architecture (df, liae ?:help skip:%s) : " %
                (default_archi),
                default_archi, ['df', 'liae'],
                help_message=
                "'df' keeps faces more natural. 'liae' can fix overly different face shapes."
            ).lower(
            )  #-s version is slower, but has decreased chance to collapse.
        else:
            self.options['archi'] = self.options.get('archi', default_archi)

        default_ae_dims = 256 if 'liae' in self.options['archi'] else 512
        default_e_ch_dims = 42
        default_d_ch_dims = default_e_ch_dims // 2
        def_ca_weights = False

        if is_first_run:
            self.options['ae_dims'] = np.clip(
                io.input_int(
                    "AutoEncoder dims (32-1024 ?:help skip:%d) : " %
                    (default_ae_dims),
                    default_ae_dims,
                    help_message=
                    "All face information will packed to AE dims. If amount of AE dims are not enough, then for example closed eyes will not be recognized. More dims are better, but require more VRAM. You can fine-tune model size to fit your GPU."
                ), 32, 1024)
            self.options['e_ch_dims'] = np.clip(
                io.input_int(
                    "Encoder dims per channel (21-85 ?:help skip:%d) : " %
                    (default_e_ch_dims),
                    default_e_ch_dims,
                    help_message=
                    "More encoder dims help to recognize more facial features, but require more VRAM. You can fine-tune model size to fit your GPU."
                ), 21, 85)
            default_d_ch_dims = self.options['e_ch_dims'] // 2
            self.options['d_ch_dims'] = np.clip(
                io.input_int(
                    "Decoder dims per channel (10-85 ?:help skip:%d) : " %
                    (default_d_ch_dims),
                    default_d_ch_dims,
                    help_message=
                    "More decoder dims help to get better details, but require more VRAM. You can fine-tune model size to fit your GPU."
                ), 10, 85)
            self.options['multiscale_decoder'] = io.input_bool(
                "Use multiscale decoder? (y/n, ?:help skip:n) : ",
                False,
                help_message="Multiscale decoder helps to get better details.")
            self.options['ca_weights'] = io.input_bool(
                "Use CA weights? (y/n, ?:help skip: %s ) : " %
                (yn_str[def_ca_weights]),
                def_ca_weights,
                help_message=
                "Initialize network with 'Convolution Aware' weights. This may help to achieve a higher accuracy model, but consumes a time at first run."
            )
        else:
            self.options['ae_dims'] = self.options.get('ae_dims',
                                                       default_ae_dims)
            self.options['e_ch_dims'] = self.options.get(
                'e_ch_dims', default_e_ch_dims)
            self.options['d_ch_dims'] = self.options.get(
                'd_ch_dims', default_d_ch_dims)
            self.options['multiscale_decoder'] = self.options.get(
                'multiscale_decoder', False)
            self.options['ca_weights'] = self.options.get(
                'ca_weights', def_ca_weights)

        default_face_style_power = 0.0
        default_bg_style_power = 0.0
        if is_first_run or ask_override:
            def_pixel_loss = self.options.get('pixel_loss', False)
            self.options['pixel_loss'] = io.input_bool(
                "Use pixel loss? (y/n, ?:help skip: %s ) : " %
                (yn_str[def_pixel_loss]),
                def_pixel_loss,
                help_message=
                "Pixel loss may help to enhance fine details and stabilize face color. Use it only if quality does not improve over time. Enabling this option too early increases the chance of model collapse."
            )

            default_face_style_power = default_face_style_power if is_first_run else self.options.get(
                'face_style_power', default_face_style_power)
            self.options['face_style_power'] = np.clip(
                io.input_number(
                    "Face style power ( 0.0 .. 100.0 ?:help skip:%.2f) : " %
                    (default_face_style_power),
                    default_face_style_power,
                    help_message=
                    "Learn to transfer face style details such as light and color conditions. Warning: Enable it only after 10k iters, when predicted face is clear enough to start learn style. Start from 0.1 value and check history changes. Enabling this option increases the chance of model collapse."
                ), 0.0, 100.0)

            default_bg_style_power = default_bg_style_power if is_first_run else self.options.get(
                'bg_style_power', default_bg_style_power)
            self.options['bg_style_power'] = np.clip(
                io.input_number(
                    "Background style power ( 0.0 .. 100.0 ?:help skip:%.2f) : "
                    % (default_bg_style_power),
                    default_bg_style_power,
                    help_message=
                    "Learn to transfer image around face. This can make face more like dst. Enabling this option increases the chance of model collapse."
                ), 0.0, 100.0)

            default_apply_random_ct = False if is_first_run else self.options.get(
                'apply_random_ct', False)
            self.options['apply_random_ct'] = io.input_bool(
                "Apply random color transfer to src faceset? (y/n, ?:help skip:%s) : "
                % (yn_str[default_apply_random_ct]),
                default_apply_random_ct,
                help_message=
                "Increase variativity of src samples by apply LCT color transfer from random dst samples. It is like 'face_style' learning, but more precise color transfer and without risk of model collapse, also it does not require additional GPU resources, but the training time may be longer, due to the src faceset is becoming more diverse."
            )

            if nnlib.device.backend != 'plaidML':  # todo https://github.com/plaidml/plaidml/issues/301
                default_clipgrad = False if is_first_run else self.options.get(
                    'clipgrad', False)
                self.options['clipgrad'] = io.input_bool(
                    "Enable gradient clipping? (y/n, ?:help skip:%s) : " %
                    (yn_str[default_clipgrad]),
                    default_clipgrad,
                    help_message=
                    "Gradient clipping reduces chance of model collapse, sacrificing speed of training."
                )
            else:
                self.options['clipgrad'] = False

        else:
            self.options['pixel_loss'] = self.options.get('pixel_loss', False)
            self.options['face_style_power'] = self.options.get(
                'face_style_power', default_face_style_power)
            self.options['bg_style_power'] = self.options.get(
                'bg_style_power', default_bg_style_power)
            self.options['apply_random_ct'] = self.options.get(
                'apply_random_ct', False)
            self.options['clipgrad'] = self.options.get('clipgrad', False)

        if is_first_run:
            self.options['pretrain'] = io.input_bool(
                "Pretrain the model? (y/n, ?:help skip:n) : ",
                False,
                help_message=
                "Pretrain the model with large amount of various faces. This technique may help to train the fake with overly different face shapes and light conditions of src/dst data. Face will be look more like a morphed. To reduce the morph effect, some model files will be initialized but not be updated after pretrain: LIAE: inter_AB.h5 DF: encoder.h5. The longer you pretrain the model the more morphed face will look. After that, save and run the training again."
            )
        else:
            self.options['pretrain'] = False
Example #18
    def onInitializeOptions(self, is_first_run, ask_override):
        yn_str = {True:'y',False:'n'}

        default_resolution = 128
        default_archi = 'df'
        default_face_type = 'f'

        if is_first_run:
            resolution = io.input_int("Resolution ( 64-256 ?:help skip:128) : ", default_resolution, help_message="More resolution requires more VRAM and time to train. Value will be adjusted to multiple of 16.")
            resolution = np.clip (resolution, 64, 256)
            while np.modf(resolution / 16)[0] != 0.0:
                resolution -= 1
            self.options['resolution'] = resolution

            self.options['face_type'] = io.input_str ("Half or Full face? (h/f, ?:help skip:f) : ", default_face_type, ['h','f'], help_message="Half face has better resolution, but covers less area of cheeks.").lower()
            self.options['learn_mask'] = io.input_bool ("Learn mask? (y/n, ?:help skip:y) : ", True, help_message="Learning mask can help model to recognize face directions. Learn without mask can reduce model size, in this case converter forced to use 'not predicted mask' that is not smooth as predicted. Model with style values can be learned without mask and produce same quality result.")
        else:
            self.options['resolution'] = self.options.get('resolution', default_resolution)
            self.options['face_type'] = self.options.get('face_type', default_face_type)
            self.options['learn_mask'] = self.options.get('learn_mask', True)


        if (is_first_run or ask_override) and 'tensorflow' in self.device_config.backend:
            def_optimizer_mode = self.options.get('optimizer_mode', 1)
            self.options['optimizer_mode'] = io.input_int ("Optimizer mode? ( 1,2,3 ?:help skip:%d) : " % (def_optimizer_mode), def_optimizer_mode, help_message="1 - no changes. 2 - allows you to train x2 bigger network consuming RAM. 3 - allows you to train x3 bigger network consuming huge amount of RAM and slower, depends on CPU power.")
        else:
            self.options['optimizer_mode'] = self.options.get('optimizer_mode', 1)

        if is_first_run:
            self.options['archi'] = io.input_str ("AE architecture (df, liae ?:help skip:%s) : " % (default_archi) , default_archi, ['df','liae'], help_message="'df' keeps faces more natural. 'liae' can fix overly different face shapes.").lower()
        else:
            self.options['archi'] = self.options.get('archi', default_archi)

        default_ae_dims = 256 if self.options['archi'] == 'liae' else 512
        default_e_ch_dims = 42
        default_d_ch_dims = default_e_ch_dims // 2

        if is_first_run:
            self.options['ae_dims'] = np.clip ( io.input_int("AutoEncoder dims (32-1024 ?:help skip:%d) : " % (default_ae_dims) , default_ae_dims, help_message="All face information will packed to AE dims. If amount of AE dims are not enough, then for example closed eyes will not be recognized. More dims are better, but require more VRAM. You can fine-tune model size to fit your GPU." ), 32, 1024 )
            self.options['e_ch_dims'] = np.clip ( io.input_int("Encoder dims per channel (21-85 ?:help skip:%d) : " % (default_e_ch_dims) , default_e_ch_dims, help_message="More encoder dims help to recognize more facial features, but require more VRAM. You can fine-tune model size to fit your GPU." ), 21, 85 )
            default_d_ch_dims = self.options['e_ch_dims'] // 2
            self.options['d_ch_dims'] = np.clip ( io.input_int("Decoder dims per channel (10-85 ?:help skip:%d) : " % (default_d_ch_dims) , default_d_ch_dims, help_message="More decoder dims help to get better details, but require more VRAM. You can fine-tune model size to fit your GPU." ), 10, 85 )
            self.options['d_residual_blocks'] = io.input_bool ("Add residual blocks to decoder? (y/n, ?:help skip:n) : ", False, help_message="These blocks help to get better details, but require more computing time.")
            self.options['remove_gray_border'] = io.input_bool ("Remove gray border? (y/n, ?:help skip:n) : ", False, help_message="Removes gray border of predicted face, but requires more computing resources.")
        else:
            self.options['ae_dims'] = self.options.get('ae_dims', default_ae_dims)
            self.options['e_ch_dims'] = self.options.get('e_ch_dims', default_e_ch_dims)
            self.options['d_ch_dims'] = self.options.get('d_ch_dims', default_d_ch_dims)
            self.options['d_residual_blocks'] = self.options.get('d_residual_blocks', False)
            self.options['remove_gray_border'] = self.options.get('remove_gray_border', False)

        if is_first_run:
            self.options['lighter_encoder'] = io.input_bool ("Use lightweight encoder? (y/n, ?:help skip:n) : ", False, help_message="Lightweight encoder is 35% faster, requires less VRAM, but sacrificing overall quality.")

            self.options['multiscale_decoder'] = io.input_bool ("Use multiscale decoder? (y/n, ?:help skip:n) : ", False, help_message="Multiscale decoder helps to get better details.")
        else:
            self.options['lighter_encoder'] = self.options.get('lighter_encoder', False)

            self.options['multiscale_decoder'] = self.options.get('multiscale_decoder', False)

        default_face_style_power = 0.0
        default_bg_style_power = 0.0
        if is_first_run or ask_override:
            def_pixel_loss = self.options.get('pixel_loss', False)
            self.options['pixel_loss'] = io.input_bool ("Use pixel loss? (y/n, ?:help skip: %s ) : " % (yn_str[def_pixel_loss]), def_pixel_loss, help_message="Default DSSIM loss good for initial understanding structure of faces. Use pixel loss after 15-25k iters to enhance fine details and decrease face jitter.")

            default_face_style_power = default_face_style_power if is_first_run else self.options.get('face_style_power', default_face_style_power)
            self.options['face_style_power'] = np.clip ( io.input_number("Face style power ( 0.0 .. 100.0 ?:help skip:%.2f) : " % (default_face_style_power), default_face_style_power,
                                                                               help_message="Learn to transfer face style details such as light and color conditions. Warning: Enable it only after 10k iters, when predicted face is clear enough to start learn style. Start from 0.1 value and check history changes."), 0.0, 100.0 )

            default_bg_style_power = default_bg_style_power if is_first_run else self.options.get('bg_style_power', default_bg_style_power)
            self.options['bg_style_power'] = np.clip ( io.input_number("Background style power ( 0.0 .. 100.0 ?:help skip:%.2f) : " % (default_bg_style_power), default_bg_style_power,
                                                                               help_message="Learn to transfer image around face. This can make face more like dst."), 0.0, 100.0 )
        else:
            self.options['pixel_loss'] = self.options.get('pixel_loss', False)
            self.options['face_style_power'] = self.options.get('face_style_power', default_face_style_power)
            self.options['bg_style_power'] = self.options.get('bg_style_power', default_bg_style_power)
Example #19
    def ask_settings(self):
        s = """选择模式: \n"""
        for key in mode_dict.keys():
            s += f"""({key}) {mode_dict[key]}\n"""
        s += f"""默认: { mode_str_dict.get(self.default_mode, 1)  } : """

        mode = io.input_int(s, mode_str_dict.get(self.default_mode, 1))

        self.mode = mode_dict.get(mode, self.default_mode)

        if 'raw' not in self.mode:
            if self.mode == 'hist-match' or self.mode == 'hist-match-bw':
                self.masked_hist_match = io.input_bool("蒙面组合匹配?(y/n 跳过:y):  ",
                                                       True)

            if self.mode == 'hist-match' or self.mode == 'hist-match-bw' or self.mode == 'seamless-hist-match':
                self.hist_match_threshold = np.clip(
                    io.input_int("组合匹配阈值[0..255](跳过:255) :  ", 255), 0, 255)

        if self.face_type == FaceType.FULL:
            s = """选择遮罩模式: \n"""
            for key in full_face_mask_mode_dict.keys():
                s += f"""({key}) {full_face_mask_mode_dict[key]}\n"""
            s += f"""帮助:? 默认: 1 : """

            self.mask_mode = io.input_int(
                s,
                1,
                valid_list=full_face_mask_mode_dict.keys(),
                help_message=
                "(1)如果你学习面具,那么应该选择选项。(2)'dst'面具是来自dst对齐图像的原始摇晃面具。(3)'FAN-prd' - 使用来自预测面部的预训练FAN模型的超光滑面具。(4)'FAN-dst'  - 使用来自dst face的预训练FAN模型使用超光滑遮罩。(5)'FAN-prd * FAN-dst'或'learned * FAN-prd * FAN-dst' - 使用乘法遮罩。"
            )
        else:
            s = """选择遮罩模式: \n"""
            for key in half_face_mask_mode_dict.keys():
                s += f"""({key}) {half_face_mask_mode_dict[key]}\n"""
            s += f"""帮助:? ,  默认: 1 : """
            self.mask_mode = io.input_int(
                s,
                1,
                valid_list=half_face_mask_mode_dict.keys(),
                help_message="(1)如果你学习面具,那么应该选择选项。(2)'dst'面具是来自dst对齐图像的原始摇晃面具。"
            )

        if 'raw' not in self.mode:
            self.erode_mask_modifier = np.clip(
                io.input_int("选择侵蚀面具修改器 [-400..400] (跳过:%d) : " % 0, 0), -400,
                400)
            self.blur_mask_modifier = np.clip(
                io.input_int("选择模糊遮罩修改器 [-400..400] (跳过:%d) : " % 0, 0), -400,
                400)
            self.motion_blur_power = np.clip(
                io.input_int("选择运动模糊力度 [0..100] (跳过:%d) : " % (0), 0), 0, 100)

        self.output_face_scale = np.clip(
            io.input_int("选择输出面部比例修改器 [-50..50] (跳过:0) : ", 0), -50, 50)

        if 'raw' not in self.mode:
            self.color_transfer_mode = io.input_str(
                f"将颜色转移应用于预测的脸部吗? 选择模式 ( {' / '.join ([str(x) for x in list(ctm_str_dict.keys())])} 跳过:None ) : ",
                None, ctm_str_dict.keys())
            self.color_transfer_mode = ctm_str_dict[self.color_transfer_mode]

        super().ask_settings()

        if 'raw' not in self.mode:
            self.image_denoise_power = np.clip(
                io.input_int("选择图像降噪强度 [0..500] (跳过:%d) : " % (0), 0), 0, 500)
            self.bicubic_degrade_power = np.clip(
                io.input_int("选择图像双三次降低功率 [0..100] (跳过:%d) : " % (0), 0), 0,
                100)
            self.color_degrade_power = np.clip(
                io.input_int("降低最终图像的色彩力度 [0..100] (跳过:0) : ", 0), 0, 100)
            self.export_mask_alpha = io.input_bool(
                "用alpha通道导出png格式图片? (y/n 跳过:n) : ", False)

        io.log_info("")
Example #20
    def onInitializeOptions(self, is_first_run, ask_override):
        yn_str = {True: 'y', False: 'n'}

        default_resolution = 128
        default_archi = 'df'
        default_face_type = 'f'

        if is_first_run:
            resolution = io.input_int(
                "Resolution ( 64-256 ?:help skip:128) : ",
                default_resolution,
                help_message=
                "More resolution requires more VRAM and time to train. Value will be adjusted to multiple of 16."
            )
            resolution = np.clip(resolution, 64, 256)
            while np.modf(resolution / 16)[0] != 0.0:
                resolution -= 1
            self.options['resolution'] = resolution
            self.options['face_type'] = io.input_str(
                "Half, mid full, or full face? (h/mf/f, ?:help skip:f) : ",
                default_face_type, ['h', 'mf', 'f'],
                help_message=
                "Half face has better resolution, but covers less area of cheeks. Mid face is 30% wider than half face."
            ).lower()
        else:
            self.options['resolution'] = self.options.get(
                'resolution', default_resolution)
            self.options['face_type'] = self.options.get(
                'face_type', default_face_type)

        default_learn_mask = self.options.get('learn_mask', True)
        if is_first_run or ask_override:
            self.options['learn_mask'] = io.input_bool(
                f"Learn mask? (y/n, ?:help skip:{yn_str[default_learn_mask]} ) : ",
                default_learn_mask,
                help_message=
                "Learning mask can help model to recognize face directions. Learn without mask can reduce model size, in this case converter forced to use 'not predicted mask' that is not smooth as predicted."
            )
        else:
            self.options['learn_mask'] = self.options.get(
                'learn_mask', default_learn_mask)

        if (is_first_run or
                ask_override) and 'tensorflow' in self.device_config.backend:
            def_optimizer_mode = self.options.get('optimizer_mode', 1)
            self.options['optimizer_mode'] = io.input_int(
                "Optimizer mode? ( 1,2,3 ?:help skip:%d) : " %
                (def_optimizer_mode),
                def_optimizer_mode,
                help_message=
                "1 - no changes. 2 - allows you to train x2 bigger network consuming RAM. 3 - allows you to train x3 bigger network consuming huge amount of RAM and slower, depends on CPU power."
            )
        else:
            self.options['optimizer_mode'] = self.options.get(
                'optimizer_mode', 1)

        if is_first_run:
            self.options['archi'] = io.input_str(
                "AE architecture (df, liae ?:help skip:%s) : " %
                (default_archi),
                default_archi, ['df', 'liae'],
                help_message=
                "'df' keeps faces more natural. 'liae' can fix overly different face shapes."
            ).lower(
            )  #-s version is slower, but has decreased chance to collapse.
        else:
            self.options['archi'] = self.options.get('archi', default_archi)

        default_ae_dims = 256
        default_ed_ch_dims = 21

        if is_first_run:
            self.options['ae_dims'] = np.clip(
                io.input_int(
                    "AutoEncoder dims (32-1024 ?:help skip:%d) : " %
                    (default_ae_dims),
                    default_ae_dims,
                    help_message=
                    "All face information will packed to AE dims. If amount of AE dims are not enough, then for example closed eyes will not be recognized. More dims are better, but require more VRAM. You can fine-tune model size to fit your GPU."
                ), 32, 1024)
            self.options['ed_ch_dims'] = np.clip(
                io.input_int(
                    "Encoder/Decoder dims per channel (10-85 ?:help skip:%d) : "
                    % (default_ed_ch_dims),
                    default_ed_ch_dims,
                    help_message=
                    "More dims help to recognize more facial features and achieve sharper result, but require more VRAM. You can fine-tune model size to fit your GPU."
                ), 10, 85)
        else:
            self.options['ae_dims'] = self.options.get('ae_dims',
                                                       default_ae_dims)
            self.options['ed_ch_dims'] = self.options.get(
                'ed_ch_dims', default_ed_ch_dims)

        default_true_face_training = self.options.get('true_face_training',
                                                      False)
        default_face_style_power = self.options.get('face_style_power', 0.0)
        default_bg_style_power = self.options.get('bg_style_power', 0.0)

        if is_first_run or ask_override:
            default_lr_dropout = self.options.get('lr_dropout', False)
            self.options['lr_dropout'] = io.input_bool(
                f"Use learning rate dropout? (y/n, ?:help skip:{yn_str[default_lr_dropout]} ) : ",
                default_lr_dropout,
                help_message=
                "When the face is trained enough, you can enable this option to get extra sharpness for less amount of iterations."
            )

            default_random_warp = self.options.get('random_warp', True)
            self.options['random_warp'] = io.input_bool(
                f"Enable random warp of samples? ( y/n, ?:help skip:{yn_str[default_random_warp]}) : ",
                default_random_warp,
                help_message=
                "Random warp is required to generalize facial expressions of both faces. When the face is trained enough, you can disable it to get extra sharpness for less amount of iterations."
            )

            self.options['true_face_training'] = io.input_bool(
                f"Enable 'true face' training? (y/n, ?:help skip:{yn_str[default_true_face_training]}) : ",
                default_true_face_training,
                help_message=
                "The result face will be more like src and will get extra sharpness. Enable it for last 10-20k iterations before conversion."
            )

            self.options['face_style_power'] = np.clip(
                io.input_number(
                    "Face style power ( 0.0 .. 100.0 ?:help skip:%.2f) : " %
                    (default_face_style_power),
                    default_face_style_power,
                    help_message=
                    "Learn to transfer face style details such as light and color conditions. Warning: Enable it only after 10k iters, when predicted face is clear enough to start learn style. Start from 0.1 value and check history changes. Enabling this option increases the chance of model collapse."
                ), 0.0, 100.0)

            self.options['bg_style_power'] = np.clip(
                io.input_number(
                    "Background style power ( 0.0 .. 100.0 ?:help skip:%.2f) : "
                    % (default_bg_style_power),
                    default_bg_style_power,
                    help_message=
                    "Learn to transfer image around face. This can make face more like dst. Enabling this option increases the chance of model collapse."
                ), 0.0, 100.0)

            default_ct_mode = self.options.get('ct_mode', 'none')
            self.options['ct_mode'] = io.input_str(
                f"Color transfer mode apply to src faceset. ( none/rct/lct/mkl/idt/sot, ?:help skip:{default_ct_mode}) : ",
                default_ct_mode, ['none', 'rct', 'lct', 'mkl', 'idt', 'sot'],
                help_message=
                "Change color distribution of src samples close to dst samples. Try all modes to find the best."
            )

            if nnlib.device.backend != 'plaidML':  # todo https://github.com/plaidml/plaidml/issues/301
                default_clipgrad = False if is_first_run else self.options.get(
                    'clipgrad', False)
                self.options['clipgrad'] = io.input_bool(
                    f"Enable gradient clipping? (y/n, ?:help skip:{yn_str[default_clipgrad]}) : ",
                    default_clipgrad,
                    help_message=
                    "Gradient clipping reduces chance of model collapse, sacrificing speed of training."
                )
            else:
                self.options['clipgrad'] = False
        else:
            self.options['lr_dropout'] = self.options.get('lr_dropout', False)
            self.options['random_warp'] = self.options.get('random_warp', True)
            self.options['true_face_training'] = self.options.get(
                'true_face_training', default_true_face_training)
            self.options['face_style_power'] = self.options.get(
                'face_style_power', default_face_style_power)
            self.options['bg_style_power'] = self.options.get(
                'bg_style_power', default_bg_style_power)
            self.options['ct_mode'] = self.options.get('ct_mode', 'none')
            self.options['clipgrad'] = self.options.get('clipgrad', False)

        if is_first_run:
            self.options['pretrain'] = io.input_bool(
                "Pretrain the model? (y/n, ?:help skip:n) : ",
                False,
                help_message=
                "Pretrain the model with large amount of various faces. This technique may help to train the fake with overly different face shapes and light conditions of src/dst data. Face will be look more like a morphed. To reduce the morph effect, some model files will be initialized but not be updated after pretrain: LIAE: inter_AB.h5 DF: encoder.h5. The longer you pretrain the model the more morphed face will look. After that, save and run the training again."
            )
        else:
            self.options['pretrain'] = False
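
The block above shows the option-persistence pattern used throughout: on the first run each value is asked interactively, on later runs it is pulled from the saved options dict with a fallback default. A minimal stand-alone sketch of that idea (the resolve_option helper and the saved dict are hypothetical, for illustration only):

def resolve_option(saved, key, default, is_first_run, ask):
    # ask on the first run, otherwise reuse the stored value (or the default)
    if is_first_run:
        saved[key] = ask(default)
    else:
        saved[key] = saved.get(key, default)
    return saved[key]

saved = {}
# first run: the "ask" callback here simply accepts the default
resolve_option(saved, 'ae_dims', 256, True, lambda d: d)
# later run: the stored value wins, no prompt is needed
assert resolve_option(saved, 'ae_dims', 512, False, None) == 256
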
    def __init__(self,
                 predictor_func,
                 predictor_input_size=0,
                 output_size=0,
                 face_type=FaceType.FULL,
                 default_mode=4,
                 base_erode_mask_modifier=0,
                 base_blur_mask_modifier=0,
                 default_erode_mask_modifier=0,
                 default_blur_mask_modifier=0,
                 clip_hborder_mask_per=0):

        super().__init__(predictor_func, Converter.TYPE_FACE)
        self.predictor_input_size = predictor_input_size
        self.output_size = output_size
        self.face_type = face_type
        self.clip_hborder_mask_per = clip_hborder_mask_per

        mode = io.input_int(
            "Choose mode: (1) overlay, (2) hist match, (3) hist match bw, (4) seamless, (5) raw. Default - %d : "
            % (default_mode), default_mode)

        mode_dict = {
            1: 'overlay',
            2: 'hist-match',
            3: 'hist-match-bw',
            4: 'seamless',
            5: 'raw'
        }

        self.mode = mode_dict.get(mode, mode_dict[default_mode])
        self.suppress_seamless_jitter = False

        if self.mode == 'raw':
            mode = io.input_int(
                "Choose raw mode: (1) rgb, (2) rgb+mask (default), (3) mask only, (4) predicted only : ",
                2)
            self.raw_mode = {
                1: 'rgb',
                2: 'rgb-mask',
                3: 'mask-only',
                4: 'predicted-only'
            }.get(mode, 'rgb-mask')

        if self.mode != 'raw':

            if self.mode == 'seamless':
                self.suppress_seamless_jitter = io.input_bool(
                    "Suppress seamless jitter? [ y/n ] (?:help skip:n ) : ",
                    False,
                    help_message=
                    "Seamless clone produces face jitter. You can suppress it, but the process can take a long time."
                )

                if io.input_bool("Seamless hist match? (y/n skip:n) : ",
                                 False):
                    self.mode = 'seamless-hist-match'

            if self.mode == 'hist-match' or self.mode == 'hist-match-bw':
                self.masked_hist_match = io.input_bool(
                    "Masked hist match? (y/n skip:y) : ", True)

            if self.mode == 'hist-match' or self.mode == 'hist-match-bw' or self.mode == 'seamless-hist-match':
                self.hist_match_threshold = np.clip(
                    io.input_int(
                        "Hist match threshold [0..255] (skip:255) :  ", 255),
                    0, 255)

        if face_type == FaceType.FULL:
            self.mask_mode = io.input_int(
                "Mask mode: (1) learned, (2) dst, (3) FAN-prd, (4) FAN-dst (?) help. Default - %d : "
                % (1),
                1,
                help_message=
                "If you learned mask, then option 1 should be choosed. 'dst' mask is raw shaky mask from dst aligned images. 'FAN-prd' - using super smooth mask by pretrained FAN-model from predicted face. 'FAN-dst' - using super smooth mask by pretrained FAN-model from dst face."
            )
        else:
            self.mask_mode = io.input_int(
                "Mask mode: (1) learned, (2) dst . Default - %d : " % (1), 1)

        if self.mask_mode == 3 or self.mask_mode == 4:
            self.fan_seg = None

        if self.mode != 'raw':
            self.erode_mask_modifier = base_erode_mask_modifier + np.clip(
                io.input_int(
                    "Choose erode mask modifier [-200..200] (skip:%d) : " %
                    (default_erode_mask_modifier),
                    default_erode_mask_modifier), -200, 200)
            self.blur_mask_modifier = base_blur_mask_modifier + np.clip(
                io.input_int(
                    "Choose blur mask modifier [-200..200] (skip:%d) : " %
                    (default_blur_mask_modifier), default_blur_mask_modifier),
                -200, 200)

            self.seamless_erode_mask_modifier = 0
            if 'seamless' in self.mode:
                self.seamless_erode_mask_modifier = np.clip(
                    io.input_int(
                        "Choose seamless erode mask modifier [-100..100] (skip:0) : ",
                        0), -100, 100)

        self.output_face_scale = np.clip(
            1.0 + io.input_int(
                "Choose output face scale modifier [-50..50] (skip:0) : ", 0) *
            0.01, 0.5, 1.5)
        self.color_transfer_mode = io.input_str(
            "Apply color transfer to predicted face? Choose mode ( rct/lct skip:None ) : ",
            None, ['rct', 'lct'])

        if self.mode != 'raw':
            self.final_image_color_degrade_power = np.clip(
                io.input_int(
                    "Degrade color power of final image [0..100] (skip:0) : ",
                    0), 0, 100)
            self.alpha = io.input_bool(
                "Export png with alpha channel? (y/n skip:n) : ", False)

        io.log_info("")
        self.over_res = 4 if self.suppress_seamless_jitter else 1
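
Note how the integer returned by io.input_int is mapped to a mode string: mode_dict.get(mode, mode_dict[default_mode]) silently falls back to the default entry when the user types a number outside the menu. A small stand-alone check of that behaviour, using a plain dict with no io dependency (mirroring the mode_dict above):

mode_dict = {1: 'overlay', 2: 'hist-match', 3: 'hist-match-bw', 4: 'seamless', 5: 'raw'}
default_mode = 4

def resolve_mode(user_choice):
    # unknown choices (e.g. 9) fall back to the default instead of raising KeyError
    return mode_dict.get(user_choice, mode_dict[default_mode])

assert resolve_mode(2) == 'hist-match'
assert resolve_mode(9) == 'seamless'
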
Beispiel #22
0
    def __init__(self,
                 predictor_func,
                 predictor_input_size=0,
                 output_size=0,
                 face_type=FaceType.FULL,
                 default_mode=4,
                 base_erode_mask_modifier=0,
                 base_blur_mask_modifier=0,
                 default_erode_mask_modifier=0,
                 default_blur_mask_modifier=0,
                 clip_hborder_mask_per=0):

        super().__init__(predictor_func, Converter.TYPE_FACE)
        self.predictor_input_size = predictor_input_size
        self.output_size = output_size
        self.face_type = face_type
        self.clip_hborder_mask_per = clip_hborder_mask_per

        mode = io.input_int(
            "Choose mode: (1) overlay, (2) hist match, (3) hist match bw, (4) seamless, (5) seamless hist match, (6) raw. Default - %d : "
            % (default_mode), default_mode)

        mode_dict = {
            1: 'overlay',
            2: 'hist-match',
            3: 'hist-match-bw',
            4: 'seamless',
            5: 'seamless-hist-match',
            6: 'raw'
        }

        self.mode = mode_dict.get(mode, mode_dict[default_mode])

        if self.mode == 'raw':
            mode = io.input_int(
                "Choose raw mode: (1) rgb, (2) rgb+mask (default), (3) mask only, (4) predicted only : ",
                2)
            self.raw_mode = {
                1: 'rgb',
                2: 'rgb-mask',
                3: 'mask-only',
                4: 'predicted-only'
            }.get(mode, 'rgb-mask')

        if self.mode != 'raw':
            if self.mode == 'hist-match' or self.mode == 'hist-match-bw':
                self.masked_hist_match = io.input_bool(
                    "Masked hist match? (y/n skip:y) : ", True)

            if self.mode == 'hist-match' or self.mode == 'hist-match-bw' or self.mode == 'seamless-hist-match':
                self.hist_match_threshold = np.clip(
                    io.input_int(
                        "Hist match threshold [0..255] (skip:255) :  ", 255),
                    0, 255)

        self.use_predicted_mask = io.input_bool(
            "Use predicted mask? (y/n skip:y) : ", True)

        if self.mode != 'raw':
            self.erode_mask_modifier = base_erode_mask_modifier + np.clip(
                io.input_int(
                    "Choose erode mask modifier [-200..200] (skip:%d) : " %
                    (default_erode_mask_modifier),
                    default_erode_mask_modifier), -200, 200)
            self.blur_mask_modifier = base_blur_mask_modifier + np.clip(
                io.input_int(
                    "Choose blur mask modifier [-200..200] (skip:%d) : " %
                    (default_blur_mask_modifier), default_blur_mask_modifier),
                -200, 200)

            self.seamless_erode_mask_modifier = 0
            if self.mode == 'seamless' or self.mode == 'seamless-hist-match':
                self.seamless_erode_mask_modifier = np.clip(
                    io.input_int(
                        "Choose seamless erode mask modifier [-100..100] (skip:0) : ",
                        0), -100, 100)

        self.output_face_scale = np.clip(
            1.0 + io.input_int(
                "Choose output face scale modifier [-50..50] (skip:0) : ", 0) *
            0.01, 0.5, 1.5)
        self.color_transfer_mode = io.input_str(
            "Apply color transfer to predicted face? Choose mode ( rct/lct skip:None ) : ",
            None, ['rct', 'lct'])

        if self.mode != 'raw':
            self.final_image_color_degrade_power = np.clip(
                io.input_int(
                    "Degrade color power of final image [0..100] (skip:0) : ",
                    0), 0, 100)
            self.alpha = io.input_bool(
                "Export png with alpha channel? (y/n skip:n) : ", False)

        io.log_info("")
Beispiel #23
0
def video_from_sequence( input_dir, output_file, reference_file=None, ext=None, fps=None, bitrate=None, lossless=None ):
    input_path = Path(input_dir)
    output_file_path = Path(output_file)
    reference_file_path = Path(reference_file) if reference_file is not None else None

    if not input_path.exists():
        io.log_err("input_dir not found.")
        return

    if not output_file_path.parent.exists():
        #create the output directory if it does not exist yet, then continue
        output_file_path.parent.mkdir(parents=True, exist_ok=True)

    out_ext = output_file_path.suffix

    if ext is None:
        ext = io.input_str ("Input image format (extension)? ( default:png ) : ", "png")

    if lossless is None:
        lossless = io.input_bool ("Use lossless codec ? ( default:no ) : ", False)

    video_id = None
    audio_id = None
    ref_in_a = None
    if reference_file_path is not None:
        if reference_file_path.suffix == '.*':
            reference_file_path = Path_utils.get_first_file_by_stem (reference_file_path.parent, reference_file_path.stem)
        else:
            if not reference_file_path.exists():
                reference_file_path = None

        if reference_file_path is None:
            io.log_err("reference_file not found.")
            return

        #probing reference file
        probe = ffmpeg.probe (str(reference_file_path))

        #getting first video and audio streams id with fps
        for stream in probe['streams']:
            if video_id is None and stream['codec_type'] == 'video':
                video_id = stream['index']
                fps = stream['r_frame_rate']

            if audio_id is None and stream['codec_type'] == 'audio':
                audio_id = stream['index']

        if audio_id is not None:
            #has audio track
            ref_in_a = ffmpeg.input (str(reference_file_path))[str(audio_id)]

    if fps is None:
        #if fps not specified and not overwritten by reference-file
        fps = max (1, io.input_int ("FPS ? (default:25) : ", 25) )

    if not lossless and bitrate is None:
        bitrate = max (1, io.input_int ("Bitrate of output file in MB/s ? (default:16) : ", 16) )

    i_in = ffmpeg.input(str (input_path / ('%5d.'+ext)), r=fps)

    output_args = [i_in]

    if ref_in_a is not None:
        output_args += [ref_in_a]

    output_args += [str (output_file_path)]

    output_kwargs = {}

    if lossless:
        output_kwargs.update ({"c:v": "png"
                              })
    else:
        output_kwargs.update ({"c:v": "libx264",
                               "b:v": "%dM" %(bitrate),
                               "pix_fmt": "yuv420p",
                              })

    output_kwargs.update ({"c:a": "aac",
                           "b:a": "192k",
                           "ar" : "48000",
                           "strict": "experimental"
                          })

    job = ( ffmpeg.output(*output_args, **output_kwargs).overwrite_output() )
    try:
        job = job.run()
    except Exception:
        io.log_err ("ffmpeg fail, job commandline: " + str(job.compile()) )
Beispiel #24
0
    def __init__(self,
                 predictor_func,
                 predictor_input_size=0,
                 predictor_masked=True,
                 face_type=FaceType.FULL,
                 default_mode=4,
                 base_erode_mask_modifier=0,
                 base_blur_mask_modifier=0,
                 default_erode_mask_modifier=0,
                 default_blur_mask_modifier=0,
                 clip_hborder_mask_per=0,
                 force_mask_mode=-1):

        super().__init__(predictor_func, Converter.TYPE_FACE)

        # dummy predict and sleep so tensorflow caches its kernels. If removed, conversion speed will be 2x slower
        predictor_func(
            np.zeros((predictor_input_size, predictor_input_size, 3),
                     dtype=np.float32))
        time.sleep(2)

        predictor_func_host, predictor_func = SubprocessFunctionCaller.make_pair(
            predictor_func)
        self.predictor_func_host = AntiPickler(predictor_func_host)
        self.predictor_func = predictor_func

        self.predictor_masked = predictor_masked
        self.predictor_input_size = predictor_input_size
        self.face_type = face_type
        self.clip_hborder_mask_per = clip_hborder_mask_per

        mode = io.input_int(
            "选择模式: (1)覆盖,(2)直方图匹配,(3)直方图匹配白平衡,(4)无缝,(5)raw. 默认 - %d : " %
            (default_mode), default_mode)

        mode_dict = {
            1: 'overlay',
            2: 'hist-match',
            3: 'hist-match-bw',
            4: 'seamless',
            5: 'raw'
        }

        self.mode = mode_dict.get(mode, mode_dict[default_mode])

        if self.mode == 'raw':
            mode = io.input_int(
                "选择raw模式: (1) rgb, (2) rgb+掩码(默认),(3)仅掩码,(4)仅预测 : ", 2)
            self.raw_mode = {
                1: 'rgb',
                2: 'rgb-mask',
                3: 'mask-only',
                4: 'predicted-only'
            }.get(mode, 'rgb-mask')

        if self.mode != 'raw':

            if self.mode == 'seamless':
                if io.input_bool("无缝直方图匹配? (y/n 默认:n) : ", False):
                    self.mode = 'seamless-hist-match'

            if self.mode == 'hist-match' or self.mode == 'hist-match-bw':
                self.masked_hist_match = io.input_bool(
                    "面部遮罩直方图匹配? (y/n 默认:y) : ", True)

            if self.mode == 'hist-match' or self.mode == 'hist-match-bw' or self.mode == 'seamless-hist-match':
                self.hist_match_threshold = np.clip(
                    io.input_int("直方图匹配阈值 [0..255] (skip:255) :  ", 255), 0,
                    255)

        if force_mask_mode != -1:
            self.mask_mode = force_mask_mode
        else:
            if face_type == FaceType.FULL:
                self.mask_mode = np.clip(
                    io.input_int(
                        "面部遮罩模式: (1) 学习, (2) dst原始视频, (3) FAN-prd, (4) FAN-dst , (5) FAN-prd*FAN-dst (6) learned*FAN-prd*FAN-dst (?) 帮助. 默认 - %d : "
                        % (1),
                        1,
                        help_message=
                        "如果你学过蒙版,那么选择选项1.“dst”遮罩是原始的抖动遮罩从dst对齐的图像.“扇-prd”-使用超光滑的面具,通过预先训练的扇模型从预测的脸.“风扇-dst”-使用超光滑的面具,由预先训练的风扇模型从dst的脸.“FAN-prd*FAN-dst”或“learned*FAN-prd*FAN-dst”——使用多个口罩."
                    ), 1, 6)
            else:
                self.mask_mode = np.clip(
                    io.input_int("面部遮罩模式: (1) 学习, (2) dst . 默认 - %d : " % (1),
                                 1), 1, 2)

        if self.mask_mode >= 3 and self.mask_mode <= 6:
            self.fan_seg = None

        if self.mode != 'raw':
            self.erode_mask_modifier = base_erode_mask_modifier + np.clip(
                io.input_int(
                    "侵蚀遮罩 [-200..200] (默认:%d) : " %
                    (default_erode_mask_modifier),
                    default_erode_mask_modifier), -200, 200)
            self.blur_mask_modifier = base_blur_mask_modifier + np.clip(
                io.input_int(
                    "选择模糊遮罩边缘 [-200..200] (默认:%d) : " %
                    (default_blur_mask_modifier), default_blur_mask_modifier),
                -200, 200)

        self.output_face_scale = np.clip(
            1.0 + io.input_int("选择输出脸部比例调整器 [-50..50] (默认:0) : ", 0) * 0.01,
            0.5, 1.5)

        if self.mode != 'raw':
            self.color_transfer_mode = io.input_str(
                "应用颜色转移到预测的脸? 选择模式 ( rct/lct 默认:None ) : ", None,
                ['rct', 'lct'])

        self.super_resolution = io.input_bool("应用超分辨率? (y/n ?:帮助 默认:n) : ",
                                              False,
                                              help_message="通过应用DCSCN网络增强细节.")

        if self.mode != 'raw':
            self.final_image_color_degrade_power = np.clip(
                io.input_int("降低最终图像色权 [0..100] (默认:0) : ", 0), 0, 100)
            self.alpha = io.input_bool("使用alpha通道导出png? (y/n 默认:n) : ", False)

        io.log_info("")

        if self.super_resolution:
            host_proc, dc_upscale = SubprocessFunctionCaller.make_pair(
                imagelib.DCSCN().upscale)
            self.dc_host = AntiPickler(host_proc)
            self.dc_upscale = dc_upscale
        else:
            self.dc_host = None
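
The "dummy predict" at the top of this constructor is a warm-up call: running the predictor once on a zero image forces the backend to build and cache its kernels before the conversion loop starts. A minimal stand-alone sketch of the same trick, with a hypothetical predictor standing in for the real one:

import time
import numpy as np

def warm_up(predictor, input_size):
    # one throwaway prediction on a zero image so kernels get compiled/cached up front
    dummy = np.zeros((input_size, input_size, 3), dtype=np.float32)
    predictor(dummy)
    time.sleep(2)  # give the backend a moment to finish background initialization

# Hypothetical usage with a trivial predictor:
warm_up(lambda img: img * 0.5, 128)
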