Example #1
0
 def DCSCN_upscale(self, *args, **kwargs):
     """Upscale an image with a lazily-created DCSCN network.

     Fix: the original signature was ``def DCSCN_upscale(*args, **kwargs)``
     yet the body reads/writes ``self.dcscn`` — every call raised
     ``NameError``. The missing ``self`` parameter is restored here.

     The DCSCN instance is constructed on first use and cached on
     ``self.dcscn`` so subsequent calls reuse the same network. All
     positional and keyword arguments are forwarded unchanged to
     ``DCSCN.upscale`` and its result is returned.
     """
     if self.dcscn is None:
         # Lazy init: building the network is expensive, so defer it
         # until the first upscale request.
         self.dcscn = imagelib.DCSCN()
     return self.dcscn.upscale(*args, **kwargs)
Example #2
0
    def __init__(self,
                 predictor_func,
                 predictor_input_size=0,
                 predictor_masked=True,
                 face_type=FaceType.FULL,
                 default_mode=4,
                 base_erode_mask_modifier=0,
                 base_blur_mask_modifier=0,
                 default_erode_mask_modifier=0,
                 default_blur_mask_modifier=0,
                 clip_hborder_mask_per=0):
        """Interactively configure this face converter.

        Warms up ``predictor_func`` with a dummy frame, wraps it for
        cross-process calling, then prompts the user (through ``io``) for
        every conversion option: overall mode, mask mode, erode/blur mask
        modifiers, output face scale, color transfer, super resolution,
        final color-degrade power and alpha export.

        Args:
            predictor_func: callable invoked with an (H, W, 3) float32
                image; wrapped via SubprocessFunctionCaller for use from
                worker processes.
            predictor_input_size: side length of the square image the
                predictor expects.
            predictor_masked: stored as-is on ``self``; not read here.
            face_type: FaceType value; FULL unlocks the FAN mask modes.
            default_mode: key of the mode table used when the user skips
                the mode prompt or answers out of range.
            base_erode_mask_modifier: fixed offset added to the chosen
                erode modifier.
            base_blur_mask_modifier: fixed offset added to the chosen
                blur modifier.
            default_erode_mask_modifier: default offered at the erode
                prompt.
            default_blur_mask_modifier: default offered at the blur
                prompt.
            clip_hborder_mask_per: stored as-is on ``self``; not read
                here.
        """

        super().__init__(predictor_func, Converter.TYPE_FACE)

        #dummy predict and sleep, tensorflow caching kernels. If remove it, conversion speed will be x2 slower
        predictor_func(
            np.zeros((predictor_input_size, predictor_input_size, 3),
                     dtype=np.float32))
        time.sleep(2)

        # Split the predictor into a host-side handle (protected from
        # pickling) and a proxy callable safe to hand to subprocesses.
        predictor_func_host, predictor_func = SubprocessFunctionCaller.make_pair(
            predictor_func)
        self.predictor_func_host = AntiPickler(predictor_func_host)
        self.predictor_func = predictor_func

        self.predictor_masked = predictor_masked
        self.predictor_input_size = predictor_input_size
        self.face_type = face_type
        self.clip_hborder_mask_per = clip_hborder_mask_per

        mode = io.input_int(
            "Choose mode: (1) overlay, (2) hist match, (3) hist match bw, (4) seamless, (5) raw. Default - %d : "
            % (default_mode), default_mode)

        mode_dict = {
            1: 'overlay',
            2: 'hist-match',
            3: 'hist-match-bw',
            4: 'seamless',
            5: 'raw'
        }

        # Any answer outside 1..5 falls back to the default mode.
        self.mode = mode_dict.get(mode, mode_dict[default_mode])

        if self.mode == 'raw':
            mode = io.input_int(
                "Choose raw mode: (1) rgb, (2) rgb+mask (default), (3) mask only, (4) predicted only : ",
                2)
            self.raw_mode = {
                1: 'rgb',
                2: 'rgb-mask',
                3: 'mask-only',
                4: 'predicted-only'
            }.get(mode, 'rgb-mask')

        if self.mode != 'raw':

            if self.mode == 'seamless':
                if io.input_bool("Seamless hist match? (y/n skip:n) : ",
                                 False):
                    self.mode = 'seamless-hist-match'

            # NOTE(review): masked_hist_match / hist_match_threshold are
            # only set on these branches — later code presumably checks
            # self.mode before reading them; verify against callers.
            if self.mode == 'hist-match' or self.mode == 'hist-match-bw':
                self.masked_hist_match = io.input_bool(
                    "Masked hist match? (y/n skip:y) : ", True)

            if self.mode == 'hist-match' or self.mode == 'hist-match-bw' or self.mode == 'seamless-hist-match':
                self.hist_match_threshold = np.clip(
                    io.input_int(
                        "Hist match threshold [0..255] (skip:255) :  ", 255),
                    0, 255)

        # Full faces may use the FAN-based mask modes (3..6); any other
        # face type only offers learned/dst.
        if face_type == FaceType.FULL:
            self.mask_mode = np.clip(
                io.input_int(
                    "Mask mode: (1) learned, (2) dst, (3) FAN-prd, (4) FAN-dst , (5) FAN-prd*FAN-dst (6) learned*FAN-prd*FAN-dst (?) help. Default - %d : "
                    % (1),
                    1,
                    help_message=
                    "If you learned mask, then option 1 should be choosed. 'dst' mask is raw shaky mask from dst aligned images. 'FAN-prd' - using super smooth mask by pretrained FAN-model from predicted face. 'FAN-dst' - using super smooth mask by pretrained FAN-model from dst face. 'FAN-prd*FAN-dst' or 'learned*FAN-prd*FAN-dst' - using multiplied masks."
                ), 1, 6)
        else:
            self.mask_mode = np.clip(
                io.input_int(
                    "Mask mode: (1) learned, (2) dst . Default - %d : " % (1),
                    1), 1, 2)

        # FAN modes need a segmentation model; created lazily elsewhere,
        # so only the placeholder attribute is declared here.
        if self.mask_mode >= 3 and self.mask_mode <= 6:
            self.fan_seg = None

        if self.mode != 'raw':
            # User choice is clamped to [-200, 200] BEFORE the fixed base
            # offset is added, so the final value may exceed that range.
            self.erode_mask_modifier = base_erode_mask_modifier + np.clip(
                io.input_int(
                    "Choose erode mask modifier [-200..200] (skip:%d) : " %
                    (default_erode_mask_modifier),
                    default_erode_mask_modifier), -200, 200)
            self.blur_mask_modifier = base_blur_mask_modifier + np.clip(
                io.input_int(
                    "Choose blur mask modifier [-200..200] (skip:%d) : " %
                    (default_blur_mask_modifier), default_blur_mask_modifier),
                -200, 200)

        # Integer percentage [-50..50] mapped to a scale factor in
        # [0.5, 1.5].
        self.output_face_scale = np.clip(
            1.0 + io.input_int(
                "Choose output face scale modifier [-50..50] (skip:0) : ", 0) *
            0.01, 0.5, 1.5)

        if self.mode != 'raw':
            self.color_transfer_mode = io.input_str(
                "Apply color transfer to predicted face? Choose mode ( rct/lct skip:None ) : ",
                None, ['rct', 'lct'])

        self.super_resolution = io.input_bool(
            "Apply super resolution? (y/n ?:help skip:n) : ",
            False,
            help_message="Enhance details by applying DCSCN network.")

        if self.mode != 'raw':
            self.final_image_color_degrade_power = np.clip(
                io.input_int(
                    "Degrade color power of final image [0..100] (skip:0) : ",
                    0), 0, 100)
            self.alpha = io.input_bool(
                "Export png with alpha channel? (y/n skip:n) : ", False)

        io.log_info("")

        if self.super_resolution:
            # Same host/proxy split as predictor_func, for the DCSCN
            # upscaler.
            host_proc, dc_upscale = SubprocessFunctionCaller.make_pair(
                imagelib.DCSCN().upscale)
            self.dc_host = AntiPickler(host_proc)
            self.dc_upscale = dc_upscale
        else:
            # NOTE(review): dc_upscale is left unset on this path —
            # callers presumably gate on super_resolution/dc_host first;
            # verify.
            self.dc_host = None
Example #3
0
    def __init__(self,
                 predictor_func,
                 predictor_input_size=0,
                 predictor_masked=True,
                 face_type=FaceType.FULL,
                 default_mode=4,
                 base_erode_mask_modifier=0,
                 base_blur_mask_modifier=0,
                 default_erode_mask_modifier=0,
                 default_blur_mask_modifier=0,
                 clip_hborder_mask_per=0,
                 force_mask_mode=-1):
        """Interactively configure this face converter (Chinese-language prompts).

        Warms up ``predictor_func`` with a dummy frame, wraps it for
        cross-process calling, then prompts the user (through ``io``) for
        conversion options. Identical flow to the English variant except
        that ``force_mask_mode`` can preset the mask mode and skip its
        prompt. Prompt strings are user-facing runtime text and are kept
        verbatim.

        Args:
            predictor_func: callable invoked with an (H, W, 3) float32
                image; wrapped via SubprocessFunctionCaller for use from
                worker processes.
            predictor_input_size: side length of the square image the
                predictor expects.
            predictor_masked: stored as-is on ``self``; not read here.
            face_type: FaceType value; FULL unlocks the FAN mask modes.
            default_mode: key of the mode table used when the user skips
                the mode prompt or answers out of range.
            base_erode_mask_modifier: fixed offset added to the chosen
                erode modifier.
            base_blur_mask_modifier: fixed offset added to the chosen
                blur modifier.
            default_erode_mask_modifier: default offered at the erode
                prompt.
            default_blur_mask_modifier: default offered at the blur
                prompt.
            clip_hborder_mask_per: stored as-is on ``self``; not read
                here.
            force_mask_mode: if not -1, used directly as the mask mode
                (unclipped) and the mask-mode prompt is skipped.
        """

        super().__init__(predictor_func, Converter.TYPE_FACE)

        # dummy predict and sleep, tensorflow caching kernels. If remove it, conversion speed will be x2 slower
        predictor_func(
            np.zeros((predictor_input_size, predictor_input_size, 3),
                     dtype=np.float32))
        time.sleep(2)

        # Split the predictor into a host-side handle (protected from
        # pickling) and a proxy callable safe to hand to subprocesses.
        predictor_func_host, predictor_func = SubprocessFunctionCaller.make_pair(
            predictor_func)
        self.predictor_func_host = AntiPickler(predictor_func_host)
        self.predictor_func = predictor_func

        self.predictor_masked = predictor_masked
        self.predictor_input_size = predictor_input_size
        self.face_type = face_type
        self.clip_hborder_mask_per = clip_hborder_mask_per

        mode = io.input_int(
            "选择模式: (1)覆盖,(2)直方图匹配,(3)直方图匹配白平衡,(4)无缝,(5)raw. 默认 - %d : " %
            (default_mode), default_mode)

        mode_dict = {
            1: 'overlay',
            2: 'hist-match',
            3: 'hist-match-bw',
            4: 'seamless',
            5: 'raw'
        }

        # Any answer outside 1..5 falls back to the default mode.
        self.mode = mode_dict.get(mode, mode_dict[default_mode])

        if self.mode == 'raw':
            mode = io.input_int(
                "选择raw模式: (1) rgb, (2) rgb+掩码(默认),(3)仅掩码,(4)仅预测 : ", 2)
            self.raw_mode = {
                1: 'rgb',
                2: 'rgb-mask',
                3: 'mask-only',
                4: 'predicted-only'
            }.get(mode, 'rgb-mask')

        if self.mode != 'raw':

            if self.mode == 'seamless':
                if io.input_bool("无缝直方图匹配? (y/n 默认:n) : ", False):
                    self.mode = 'seamless-hist-match'

            # NOTE(review): masked_hist_match / hist_match_threshold are
            # only set on these branches — later code presumably checks
            # self.mode before reading them; verify against callers.
            if self.mode == 'hist-match' or self.mode == 'hist-match-bw':
                self.masked_hist_match = io.input_bool(
                    "面部遮罩直方图匹配? (y/n 默认:y) : ", True)

            if self.mode == 'hist-match' or self.mode == 'hist-match-bw' or self.mode == 'seamless-hist-match':
                self.hist_match_threshold = np.clip(
                    io.input_int("直方图匹配阈值 [0..255] (skip:255) :  ", 255), 0,
                    255)

        # force_mask_mode bypasses the prompt entirely (and is not
        # clipped); otherwise FULL faces get the FAN modes (3..6) and
        # other face types only learned/dst.
        if force_mask_mode != -1:
            self.mask_mode = force_mask_mode
        else:
            if face_type == FaceType.FULL:
                self.mask_mode = np.clip(
                    io.input_int(
                        "面部遮罩模式: (1) 学习, (2) dst原始视频, (3) FAN-prd, (4) FAN-dst , (5) FAN-prd*FAN-dst (6) learned*FAN-prd*FAN-dst (?) 帮助. 默认 - %d : "
                        % (1),
                        1,
                        help_message=
                        "如果你学过蒙版,那么选择选项1.“dst”遮罩是原始的抖动遮罩从dst对齐的图像.“扇-prd”-使用超光滑的面具,通过预先训练的扇模型从预测的脸.“风扇-dst”-使用超光滑的面具,由预先训练的风扇模型从dst的脸.“FAN-prd*FAN-dst”或“learned*FAN-prd*FAN-dst”——使用多个口罩."
                    ), 1, 6)
            else:
                self.mask_mode = np.clip(
                    io.input_int("面部遮罩模式: (1) 学习, (2) dst . 默认 - %d : " % (1),
                                 1), 1, 2)

        # FAN modes need a segmentation model; created lazily elsewhere,
        # so only the placeholder attribute is declared here.
        if self.mask_mode >= 3 and self.mask_mode <= 6:
            self.fan_seg = None

        if self.mode != 'raw':
            # User choice is clamped to [-200, 200] BEFORE the fixed base
            # offset is added, so the final value may exceed that range.
            self.erode_mask_modifier = base_erode_mask_modifier + np.clip(
                io.input_int(
                    "侵蚀遮罩 [-200..200] (默认:%d) : " %
                    (default_erode_mask_modifier),
                    default_erode_mask_modifier), -200, 200)
            self.blur_mask_modifier = base_blur_mask_modifier + np.clip(
                io.input_int(
                    "选择模糊遮罩边缘 [-200..200] (默认:%d) : " %
                    (default_blur_mask_modifier), default_blur_mask_modifier),
                -200, 200)

        # Integer percentage [-50..50] mapped to a scale factor in
        # [0.5, 1.5].
        self.output_face_scale = np.clip(
            1.0 + io.input_int("选择输出脸部比例调整器 [-50..50] (默认:0) : ", 0) * 0.01,
            0.5, 1.5)

        if self.mode != 'raw':
            self.color_transfer_mode = io.input_str(
                "应用颜色转移到预测的脸? 选择模式 ( rct/lct 默认:None ) : ", None,
                ['rct', 'lct'])

        self.super_resolution = io.input_bool("应用超分辨率? (y/n ?:帮助 默认:n) : ",
                                              False,
                                              help_message="通过应用DCSCN网络增强细节.")

        if self.mode != 'raw':
            self.final_image_color_degrade_power = np.clip(
                io.input_int("降低最终图像色权 [0..100] (默认:0) : ", 0), 0, 100)
            self.alpha = io.input_bool("使用alpha通道导出png? (y/n 默认:n) : ", False)

        io.log_info("")

        if self.super_resolution:
            # Same host/proxy split as predictor_func, for the DCSCN
            # upscaler.
            host_proc, dc_upscale = SubprocessFunctionCaller.make_pair(
                imagelib.DCSCN().upscale)
            self.dc_host = AntiPickler(host_proc)
            self.dc_upscale = dc_upscale
        else:
            # NOTE(review): dc_upscale is left unset on this path —
            # callers presumably gate on super_resolution/dc_host first;
            # verify.
            self.dc_host = None