def ask_write_preview_history(self, default_value=False):
        default_write_preview_history = self.load_or_def_option('write_preview_history', default_value)
        self.options['write_preview_history'] = io.input_bool(f"Write preview history", default_write_preview_history, help_message="Preview history will be written to the <ModelName>_history folder.")

        if self.options['write_preview_history']:
            if io.is_support_windows():
                self.choose_preview_history = io.input_bool("Choose image for the preview history", False)
            elif io.is_colab():
                self.choose_preview_history = io.input_bool("Randomly choose new image for preview history", False, help_message="Preview image history will stay stuck with old faces if you reuse the same model on different celebs. Choose no unless you are changing src/dst to a new person")
Example #2
def process_folder(dirpath, cpu_only=False, force_gpu_idxs=None):
    device_config = nn.DeviceConfig.GPUIndexes( force_gpu_idxs or nn.ask_choose_device_idxs(suggest_all_gpu=True) ) \
                    if not cpu_only else nn.DeviceConfig.CPU()

    output_dirpath = dirpath.parent / (dirpath.name + '_enhanced')
    output_dirpath.mkdir(exist_ok=True, parents=True)

    dirpath_parts = '/'.join(dirpath.parts[-2:])
    output_dirpath_parts = '/'.join(output_dirpath.parts[-2:])
    io.log_info(f"Enhancing faceset in {dirpath_parts}")
    io.log_info(f"Processing to {output_dirpath_parts}")

    output_images_paths = pathex.get_image_paths(output_dirpath)
    if len(output_images_paths) > 0:
        for filename in output_images_paths:
            Path(filename).unlink()

    image_paths = [Path(x) for x in pathex.get_image_paths(dirpath)]
    result = FacesetEnhancerSubprocessor(image_paths,
                                         output_dirpath,
                                         device_config=device_config).run()

    is_merge = io.input_bool(
        f"\r\nMerge {output_dirpath_parts} to {dirpath_parts} ?", True)
    if is_merge:
        io.log_info(f"Copying processed files to {dirpath_parts}")

        for (filepath, output_filepath) in result:
            try:
                shutil.copy(output_filepath, filepath)
            except Exception:
                # Skip files that could not be copied back.
                pass

        io.log_info(f"Removing {output_dirpath_parts}")
        shutil.rmtree(output_dirpath)
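
A hypothetical invocation of the enhancer (the workspace path is illustrative, not from the source):

from pathlib import Path

# Enhanced copies land in .../aligned_enhanced; the function then offers to
# merge them back over the originals and remove the temporary folder.
process_folder(Path('workspace/data_src/aligned'), cpu_only=False, force_gpu_idxs=None)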
Example #3
def fetch_xseg(input_path):
    if not input_path.exists():
        raise ValueError(f'{input_path} not found. Please ensure it exists.')
    
    output_path = input_path.parent / (input_path.name + '_xseg')
    output_path.mkdir(exist_ok=True, parents=True)
    
    io.log_info(f'Copying faces containing XSeg polygons to {output_path.name}/ folder.')
    
    images_paths = pathex.get_image_paths(input_path, return_Path_class=True)

    files_copied = []
    for filepath in io.progress_bar_generator(images_paths, "Processing"):
        dflimg = DFLIMG.load(filepath)
        if dflimg is None or not dflimg.has_data():
            io.log_info(f'{filepath} is not a DFLIMG')
            continue
        
        ie_polys = dflimg.get_seg_ie_polys()

        if ie_polys.has_polys():
            files_copied.append(filepath)
            shutil.copy ( str(filepath), str(output_path / filepath.name) )
    
    io.log_info(f'Files copied: {len(files_copied)}')
    
    is_delete = io.input_bool (f"\r\nDelete original files?", True)
    if is_delete:
        for filepath in files_copied:
            Path(filepath).unlink()
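
A hypothetical call (the path is illustrative); faces whose metadata contains XSeg polygons are copied into a sibling _xseg folder:

from pathlib import Path

fetch_xseg(Path('workspace/data_dst/aligned'))
# -> copies to workspace/data_dst/aligned_xseg, reports the count,
#    and optionally deletes the originals that were copied.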
Example #4
def process_folder ( dirpath):
    
    image_size = io.input_int(f"New image size", 512, valid_range=[256,2048])


    output_dirpath = dirpath.parent / (dirpath.name + '_resized')
    output_dirpath.mkdir (exist_ok=True, parents=True)

    dirpath_parts = '/'.join( dirpath.parts[-2:])
    output_dirpath_parts = '/'.join( output_dirpath.parts[-2:] )
    io.log_info (f"Resizing faceset in {dirpath_parts}")
    io.log_info ( f"Processing to {output_dirpath_parts}")

    output_images_paths = pathex.get_image_paths(output_dirpath)
    if len(output_images_paths) > 0:
        for filename in output_images_paths:
            Path(filename).unlink()

    image_paths = [Path(x) for x in pathex.get_image_paths( dirpath )]
    result = FacesetResizerSubprocessor ( image_paths, output_dirpath, image_size).run()

    is_merge = io.input_bool (f"\r\nMerge {output_dirpath_parts} to {dirpath_parts} ?", True)
    if is_merge:
        io.log_info (f"Copying processed files to {dirpath_parts}")

        for (filepath, output_filepath) in result:
            try:
                shutil.copy (output_filepath, filepath)
            except Exception:
                # Skip files that could not be copied back.
                pass

        io.log_info (f"Removing {output_dirpath_parts}")
        shutil.rmtree(output_dirpath)
Example #5
    def ask_random_flip(self):
        default_random_flip = self.load_or_def_option('random_flip', True)
        self.options['random_flip'] = io.input_bool(
            "Flip faces randomly",
            default_random_flip,
            help_message="The predicted face will look more natural without this option, but the src faceset should then cover all the face directions the dst faceset has."
        )
Example #6
    def ask_random_dst_flip(self):
        default_random_dst_flip = self.load_or_def_option('random_dst_flip', True)
        self.options['random_dst_flip'] = io.input_bool(
            "Flip DST faces randomly",
            default_random_dst_flip,
            help_message="Randomly flip the DST faceset horizontally. Improves src->dst generalization when src random flip is not enabled."
        )
Example #7
    def ask_enable_autobackup(self):
        default_autobackup = self.load_or_def_option('autobackup', False)
        self.options['autobackup'] = io.input_bool(
            f"Enable autobackup",
            default_autobackup,
            help_message="Autobackup model files with previews every hour for the last 15 hours. The latest backup is located in model/<>_autobackups/01."
        )
Example #8
    def ask_random_src_flip(self):
        default_random_src_flip = self.load_or_def_option('random_src_flip', False)
        self.options['random_src_flip'] = io.input_bool(
            "Flip SRC faces randomly",
            default_random_src_flip,
            help_message="Randomly flip the SRC faceset horizontally. Covers more angles, but the face may look less natural."
        )
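
The four ask_* helpers above are one pattern with different keys and defaults. If you were consolidating them, a single parameterized helper could cover all four; a sketch (ask_flag is our name, not DFL's):

def ask_flag(self, key, prompt, default, help_message=None):
    # Generic form of ask_random_flip / ask_random_src_flip / ask_random_dst_flip /
    # ask_enable_autobackup: load saved value or default, prompt, store back.
    saved_default = self.load_or_def_option(key, default)
    self.options[key] = io.input_bool(prompt, saved_default, help_message=help_message)

# e.g. self.ask_flag('random_src_flip', "Flip SRC faces randomly", False)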
Example #9
    def ask_settings(self):
        s = """Choose mode: \n"""
        for key in mode_dict.keys():
            s += f"""({key}) {mode_dict[key]}\n"""
        io.log_info(s)
        mode = io.input_int ("", mode_str_dict.get(self.default_mode, 1) )

        self.mode = mode_dict.get (mode, self.default_mode )

        if 'raw' not in self.mode:
            if self.mode == 'hist-match':
                self.masked_hist_match = io.input_bool("Masked hist match?", True)

            if self.mode == 'hist-match' or self.mode == 'seamless-hist-match':
                self.hist_match_threshold = np.clip ( io.input_int("Hist match threshold", 255, add_info="0..255"), 0, 255)

        if self.face_type == FaceType.FULL:
            s = """Choose mask mode: \n"""
            for key in full_face_mask_mode_dict.keys():
                s += f"""({key}) {full_face_mask_mode_dict[key]}\n"""
            io.log_info(s)

            self.mask_mode = io.input_int ("", 1, valid_list=full_face_mask_mode_dict.keys(), help_message="If you learned the mask, then option 1 should be choosed. 'dst' mask is raw shaky mask from dst aligned images. 'FAN-prd' - using super smooth mask by pretrained FAN-model from predicted face. 'FAN-dst' - using super smooth mask by pretrained FAN-model from dst face. 'FAN-prd*FAN-dst' or 'learned*FAN-prd*FAN-dst' - using multiplied masks.")
        else:
            s = """Choose mask mode: \n"""
            for key in half_face_mask_mode_dict.keys():
                s += f"""({key}) {half_face_mask_mode_dict[key]}\n"""
            io.log_info(s)
            self.mask_mode = io.input_int ("", 1, valid_list=half_face_mask_mode_dict.keys(), help_message="If you learned the mask, then option 1 should be choosed. 'dst' mask is raw shaky mask from dst aligned images.")

        if 'raw' not in self.mode:
            self.erode_mask_modifier = np.clip ( io.input_int ("Choose erode mask modifier", 0, add_info="-400..400"), -400, 400)
            self.blur_mask_modifier =  np.clip ( io.input_int ("Choose blur mask modifier", 0, add_info="0..400"), 0, 400)
            self.motion_blur_power = np.clip ( io.input_int ("Choose motion blur power", 0, add_info="0..100"), 0, 100)

        self.output_face_scale = np.clip (io.input_int ("Choose output face scale modifier", 0, add_info="-50..50" ), -50, 50)

        if 'raw' not in self.mode:
            self.color_transfer_mode = io.input_str ( "Color transfer to predicted face", None, valid_list=list(ctm_str_dict.keys())[1:] )
            self.color_transfer_mode = ctm_str_dict[self.color_transfer_mode]

        super().ask_settings()
 
        self.super_resolution_power = np.clip ( io.input_int ("Choose super resolution power", 0, add_info="0..100", help_message="Enhance details by applying superresolution network."), 0, 100)

        if 'raw' not in self.mode:
            self.image_denoise_power = np.clip ( io.input_int ("Choose image degrade by denoise power", 0, add_info="0..500"), 0, 500)
            self.bicubic_degrade_power = np.clip ( io.input_int ("Choose image degrade by bicubic rescale power", 0, add_info="0..100"), 0, 100)
            self.color_degrade_power = np.clip (  io.input_int ("Degrade color power of final image", 0, add_info="0..100"), 0, 100)

        io.log_info ("")
Example #10
    def on_initialize_options(self):
        ask_override = self.ask_override()

        if not self.is_first_run() and ask_override:
            if io.input_bool(f"Restart training?", False, help_message="Reset model weights and start training from scratch."):
                self.set_iter(0)

        default_face_type          = self.options['face_type']          = self.load_or_def_option('face_type', 'wf')

        if self.is_first_run():
            self.options['face_type'] = io.input_str ("Face type", default_face_type, ['h','mf','f','wf','head'], help_message="Half / mid face / full face / whole face / head. Choose the same as your deepfake model.").lower()

        if self.is_first_run() or ask_override:
            self.ask_batch_size(4, range=[2,16])
Example #11
    def on_initialize_options(self):
        ask_override = self.ask_override()

        if not self.is_first_run() and ask_override:
            if io.input_bool(
                    f"Restart training?",
                    False,
                    help_message=
                    "Reset model weights and start training from scratch."):
                self.set_iter(0)

        default_face_type = self.options[
            'face_type'] = self.load_or_def_option('face_type', 'wf')
        default_pretrain = self.options['pretrain'] = self.load_or_def_option(
            'pretrain', False)

        if self.is_first_run():
            self.options['face_type'] = io.input_str(
                "Face type",
                default_face_type, ['h', 'mf', 'f', 'wf', 'head'],
                help_message=
                "Half / mid face / full face / whole face / head. Choose the same as your deepfake model."
            ).lower()

        if self.is_first_run() or ask_override:
            self.ask_batch_size(4, range=[2, 16])
            self.options['pretrain'] = io.input_bool("Enable pretraining mode",
                                                     default_pretrain)

        if not self.is_exporting and (
                self.options['pretrain']
                and self.get_pretraining_data_path() is None):
            raise Exception("pretraining_data_path is not defined")

        self.pretrain_just_disabled = (default_pretrain == True
                                       and self.options['pretrain'] == False)
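
pretrain_just_disabled marks the single run on which a previously saved pretrain=True is answered with False, so the model can react to the transition. The predicate in isolation:

def pretrain_just_disabled(saved: bool, chosen: bool) -> bool:
    # True only on the run where pretraining was just switched off.
    return saved and not chosen

assert pretrain_just_disabled(True, False) is True
assert pretrain_just_disabled(True, True) is False
assert pretrain_just_disabled(False, False) is False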
Example #12
    def ask_settings(self):
        s = """Choose mode: \n"""
        for key in mode_dict.keys():
            s += f"""({key}) {mode_dict[key]}\n"""
        io.log_info(s)
        mode = io.input_int ("", mode_str_dict.get(self.default_mode, 1) )

        self.mode = mode_dict.get (mode, self.default_mode )

        if 'raw' not in self.mode:
            if self.mode == 'hist-match':
                self.masked_hist_match = io.input_bool("Masked hist match?", True)

            if self.mode == 'hist-match' or self.mode == 'seamless-hist-match':
                self.hist_match_threshold = np.clip ( io.input_int("Hist match threshold", 255, add_info="0..255"), 0, 255)

        s = """Choose mask mode: \n"""
        for key in mask_mode_dict.keys():
            s += f"""({key}) {mask_mode_dict[key]}\n"""
        io.log_info(s)
        self.mask_mode = io.input_int ("", 4, valid_list=mask_mode_dict.keys() ) #

        if 'raw' not in self.mode:
            self.erode_mask_modifier = np.clip ( io.input_int ("Choose erode mask modifier", 0, add_info="-400..400"), -400, 400)
            self.blur_mask_modifier =  np.clip ( io.input_int ("Choose blur mask modifier", 0, add_info="0..400"), 0, 400)
            self.motion_blur_power = np.clip ( io.input_int ("Choose motion blur power", 0, add_info="0..100"), 0, 100)

        self.output_face_scale = np.clip (io.input_int ("Choose output face scale modifier", 0, add_info="-50..50" ), -50, 50)

        if 'raw' not in self.mode:
            self.color_transfer_mode = io.input_str ( "Color transfer to predicted face", "rct", valid_list=list(ctm_str_dict.keys())[1:] )
            self.color_transfer_mode = ctm_str_dict[self.color_transfer_mode]

        super().ask_settings()

        self.super_resolution_power = np.clip ( io.input_int ("Choose super resolution power", 0, add_info="0..100", help_message="Enhance details by applying superresolution network."), 0, 100)

        if 'raw' not in self.mode:
            self.image_denoise_power = np.clip ( io.input_int ("Choose image degrade by denoise power", 0, add_info="0..500"), 0, 500)
            self.bicubic_degrade_power = np.clip ( io.input_int ("Choose image degrade by bicubic rescale power", 0, add_info="0..100"), 0, 100)
            self.color_degrade_power = np.clip (  io.input_int ("Degrade color power of final image", 0, add_info="0..100"), 0, 100)

        io.log_info ("")
Example #13
def process_folder ( dirpath):
    
    image_size = io.input_int(f"New image size", 512, valid_range=[128,2048])
    
    face_type = io.input_str ("Change face type", 'same', ['h','mf','f','wf','head','same']).lower()
    if face_type == 'same':
        face_type = None
    else:
        face_type = {'h'  : FaceType.HALF,
                     'mf' : FaceType.MID_FULL,
                     'f'  : FaceType.FULL,
                     'wf' : FaceType.WHOLE_FACE,
                     'head' : FaceType.HEAD}[face_type]

    output_dirpath = dirpath.parent / (dirpath.name + '_resized')
    output_dirpath.mkdir (exist_ok=True, parents=True)

    dirpath_parts = '/'.join( dirpath.parts[-2:])
    output_dirpath_parts = '/'.join( output_dirpath.parts[-2:] )
    io.log_info (f"Resizing faceset in {dirpath_parts}")
    io.log_info ( f"Processing to {output_dirpath_parts}")

    output_images_paths = pathex.get_image_paths(output_dirpath)
    if len(output_images_paths) > 0:
        for filename in output_images_paths:
            Path(filename).unlink()

    image_paths = [Path(x) for x in pathex.get_image_paths( dirpath )]
    result = FacesetResizerSubprocessor ( image_paths, output_dirpath, image_size, face_type).run()

    is_merge = io.input_bool (f"\r\nMerge {output_dirpath_parts} to {dirpath_parts} ?", True)
    if is_merge:
        io.log_info (f"Copying processed files to {dirpath_parts}")

        for (filepath, output_filepath) in result:
            try:
                shutil.copy (output_filepath, filepath)
            except Exception:
                # Skip files that could not be copied back.
                pass

        io.log_info (f"Removing {output_dirpath_parts}")
        shutil.rmtree(output_dirpath)
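
A hypothetical call to the resizer (the path is illustrative); it prompts for the new size and an optional face type change, then offers to merge the results back:

from pathlib import Path

process_folder(Path('workspace/data_src/aligned'))
# -> resized faces in workspace/data_src/aligned_resized; answering 'y' to the
#    merge prompt copies them over the originals and removes the temp folder.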
Example #14
    def on_initialize_options(self):
        device_config = nn.getCurrentDeviceConfig()
        yn_str = {True: 'y', False: 'n'}

        ask_override = self.ask_override()
        if self.is_first_run() or ask_override:
            self.ask_autobackup_hour()
            self.ask_target_iter()
            self.ask_batch_size(24)

        default_lr_dropout = self.options[
            'lr_dropout'] = self.load_or_def_option('lr_dropout', False)

        if self.is_first_run() or ask_override:
            self.options['lr_dropout'] = io.input_bool(
                "Use learning rate dropout",
                default_lr_dropout,
                help_message=
                "When the face is trained enough, you can enable this option to get extra sharpness and reduce subpixel shake for less amount of iterations."
            )
Example #15
    def pack(samples_path):
        samples_dat_path = samples_path / packed_faceset_filename

        if samples_dat_path.exists():
            io.log_info(f"{samples_dat_path} : file already exists !")
            io.input("Press enter to continue and overwrite.")

        as_person_faceset = False
        dir_names = pathex.get_all_dir_names(samples_path)
        if len(dir_names) != 0:
            as_person_faceset = io.input_bool(f"{len(dir_names)} subdirectories found, process as person faceset?", True)

        if as_person_faceset:
            image_paths = []

            for dir_name in dir_names:
                image_paths += pathex.get_image_paths(samples_path / dir_name)
        else:
            image_paths = pathex.get_image_paths(samples_path)

        samples = samplelib.SampleLoader.load_face_samples(image_paths)
        samples_len = len(samples)

        samples_configs = []
        for sample in io.progress_bar_generator (samples, "Processing"):
            sample_filepath = Path(sample.filename)
            sample.filename = sample_filepath.name

            if as_person_faceset:
                sample.person_name = sample_filepath.parent.name
            samples_configs.append ( sample.get_config() )
        samples_bytes = pickle.dumps(samples_configs, 4)

        of = open(samples_dat_path, "wb")
        of.write ( struct.pack ("Q", PackedFaceset.VERSION ) )
        of.write ( struct.pack ("Q", len(samples_bytes) ) )
        of.write ( samples_bytes )

        del samples_bytes   #just free mem
        del samples_configs

        sample_data_table_offset = of.tell()
        of.write ( bytes( 8*(samples_len+1) ) ) #sample data offset table

        data_start_offset = of.tell()
        offsets = []

        for sample in io.progress_bar_generator(samples, "Packing"):
            try:
                if sample.person_name is not None:
                    sample_path = samples_path / sample.person_name / sample.filename
                else:
                    sample_path = samples_path / sample.filename

                with open(sample_path, "rb") as f:
                   b = f.read()

                offsets.append ( of.tell() - data_start_offset )
                of.write(b)
            except Exception as e:
                raise Exception(f"error while processing sample {sample_path}") from e

        offsets.append ( of.tell() - data_start_offset ) # final end offset, relative like the others

        of.seek(sample_data_table_offset, 0)
        for offset in offsets:
            of.write ( struct.pack("Q", offset) )
        of.seek(0,2)
        of.close()

        for filename in io.progress_bar_generator(image_paths, "Deleting files"):
            Path(filename).unlink()

        if as_person_faceset:
            for dir_name in io.progress_bar_generator(dir_names, "Deleting dirs"):
                dir_path = samples_path / dir_name
                try:
                    shutil.rmtree(dir_path)
                except Exception:
                    io.log_info (f"unable to remove: {dir_path} ")
Example #16
    def on_initialize_options(self):
        device_config = nn.getCurrentDeviceConfig()

        lowest_vram = 2
        if len(device_config.devices) != 0:
            lowest_vram = device_config.devices.get_worst_device().total_mem_gb

        if lowest_vram >= 4:
            suggest_batch_size = 8
        else:
            suggest_batch_size = 4

        yn_str = {True:'y',False:'n'}
        min_res = 64
        max_res = 640

        default_resolution         = self.options['resolution']         = self.load_or_def_option('resolution', 128)
        default_face_type          = self.options['face_type']          = self.load_or_def_option('face_type', 'f')
        default_models_opt_on_gpu  = self.options['models_opt_on_gpu']  = self.load_or_def_option('models_opt_on_gpu', True)

        archi = self.load_or_def_option('archi', 'df')
        archi = {'dfuhd':'df-u','liaeuhd':'liae-u'}.get(archi, archi) #backward comp
        default_archi              = self.options['archi'] = archi

        default_ae_dims            = self.options['ae_dims']            = self.load_or_def_option('ae_dims', 256)
        default_e_dims             = self.options['e_dims']             = self.load_or_def_option('e_dims', 64)
        default_d_dims             = self.options['d_dims']             = self.options.get('d_dims', None)
        default_d_mask_dims        = self.options['d_mask_dims']        = self.options.get('d_mask_dims', None)
        default_masked_training    = self.options['masked_training']    = self.load_or_def_option('masked_training', True)
        default_eyes_prio          = self.options['eyes_prio']          = self.load_or_def_option('eyes_prio', False)
        default_uniform_yaw        = self.options['uniform_yaw']        = self.load_or_def_option('uniform_yaw', False)

        lr_dropout = self.load_or_def_option('lr_dropout', 'n')
        lr_dropout = {True:'y', False:'n'}.get(lr_dropout, lr_dropout) #backward comp
        default_lr_dropout         = self.options['lr_dropout'] = lr_dropout

        default_random_warp        = self.options['random_warp']        = self.load_or_def_option('random_warp', True)
        default_gan_power          = self.options['gan_power']          = self.load_or_def_option('gan_power', 0.0)
        default_true_face_power    = self.options['true_face_power']    = self.load_or_def_option('true_face_power', 0.0)
        default_face_style_power   = self.options['face_style_power']   = self.load_or_def_option('face_style_power', 0.0)
        default_bg_style_power     = self.options['bg_style_power']     = self.load_or_def_option('bg_style_power', 0.0)
        default_ct_mode            = self.options['ct_mode']            = self.load_or_def_option('ct_mode', 'none')
        default_clipgrad           = self.options['clipgrad']           = self.load_or_def_option('clipgrad', False)
        default_pretrain           = self.options['pretrain']           = self.load_or_def_option('pretrain', False)

        ask_override = self.ask_override()
        if self.is_first_run() or ask_override:
            self.ask_autobackup_hour()
            self.ask_write_preview_history()
            self.ask_target_iter()
            self.ask_random_flip()
            self.ask_batch_size(suggest_batch_size)

        if self.is_first_run():
            resolution = io.input_int("Resolution", default_resolution, add_info="64-640", help_message="More resolution requires more VRAM and time to train. Value will be adjusted to multiple of 16 and 32 for -d archi.")
            resolution = np.clip ( (resolution // 16) * 16, min_res, max_res)
            self.options['resolution'] = resolution
            self.options['face_type'] = io.input_str ("Face type", default_face_type, ['h','mf','f','wf','head'], help_message="Half / mid face / full face / whole face / head. Half face has better resolution, but covers less area of cheeks. Mid face is 30% wider than half face. 'Whole face' covers full area of face include forehead. 'head' covers full head, but requires XSeg for src and dst faceset.").lower()

            while True:
                archi = io.input_str ("AE architecture", default_archi, help_message=\
"""
'df' keeps more identity-preserved face.
'liae' can fix overly different face shapes.
'-u' increases likeness of the face.
'-d' (experimental) doubles the resolution at the same computation cost.
Examples: df, liae, df-d, df-ud, liae-ud, ...
""").lower()

                archi_split = archi.split('-')

                if len(archi_split) == 2:
                    archi_type, archi_opts = archi_split
                elif len(archi_split) == 1:
                    archi_type, archi_opts = archi_split[0], None
                else:
                    continue

                if archi_type not in ['df', 'liae']:
                    continue

                if archi_opts is not None:
                    if len(archi_opts) == 0:
                        continue
                    if any(opt not in ['u','d'] for opt in archi_opts):
                        continue

                    if 'd' in archi_opts:
                        self.options['resolution'] = np.clip ( (self.options['resolution'] // 32) * 32, min_res, max_res)

                break
            self.options['archi'] = archi

        default_d_dims             = self.options['d_dims']             = self.load_or_def_option('d_dims', 64)

        default_d_mask_dims        = default_d_dims // 3
        default_d_mask_dims        += default_d_mask_dims % 2
        default_d_mask_dims        = self.options['d_mask_dims']        = self.load_or_def_option('d_mask_dims', default_d_mask_dims)

        if self.is_first_run():
            self.options['ae_dims'] = np.clip ( io.input_int("AutoEncoder dimensions", default_ae_dims, add_info="32-1024", help_message="All face information will be packed into the AE dims. If the amount of AE dims is not enough, then, for example, closed eyes will not be recognized. More dims are better, but require more VRAM. You can fine-tune the model size to fit your GPU." ), 32, 1024 )

            e_dims = np.clip ( io.input_int("Encoder dimensions", default_e_dims, add_info="16-256", help_message="More dims help to recognize more facial features and achieve a sharper result, but require more VRAM. You can fine-tune the model size to fit your GPU." ), 16, 256 )
            self.options['e_dims'] = e_dims + e_dims % 2

            d_dims = np.clip ( io.input_int("Decoder dimensions", default_d_dims, add_info="16-256", help_message="More dims help to recognize more facial features and achieve a sharper result, but require more VRAM. You can fine-tune the model size to fit your GPU." ), 16, 256 )
            self.options['d_dims'] = d_dims + d_dims % 2

            d_mask_dims = np.clip ( io.input_int("Decoder mask dimensions", default_d_mask_dims, add_info="16-256", help_message="Typical mask dimensions = decoder dimensions / 3. If you manually cut out obstacles from the dst mask, you can increase this parameter to achieve better quality." ), 16, 256 )
            self.options['d_mask_dims'] = d_mask_dims + d_mask_dims % 2

        if self.is_first_run() or ask_override:
            if self.options['face_type'] == 'wf' or self.options['face_type'] == 'head':
                self.options['masked_training']  = io.input_bool ("Masked training", default_masked_training, help_message="This option is available only for the 'whole_face' or 'head' face type. Masked training clips the training area to the full_face or XSeg mask, so the network trains the face area properly.")

            self.options['eyes_prio'] = io.input_bool ("Eyes priority", default_eyes_prio, help_message='Helps to fix eye problems during training, like "alien eyes" and wrong eye direction (especially on HD architectures), by forcing the neural network to train the eyes with higher priority. before/after https://i.imgur.com/YQHOuSR.jpg ')
            self.options['uniform_yaw'] = io.input_bool ("Uniform yaw distribution of samples", default_uniform_yaw, help_message='Helps to fix blurry side faces caused by the small number of them in the faceset.')

        if self.is_first_run() or ask_override:
            self.options['models_opt_on_gpu'] = io.input_bool ("Place models and optimizer on GPU", default_models_opt_on_gpu, help_message="When you train on one GPU, by default the model and optimizer weights are placed on the GPU to accelerate the process. You can place them on the CPU instead to free up extra VRAM and thus set bigger dimensions.")

            self.options['lr_dropout']  = io.input_str (f"Use learning rate dropout", default_lr_dropout, ['n','y','cpu'], help_message="When the face is trained enough, you can enable this option to get extra sharpness and reduce subpixel shake for fewer iterations. Enable it before disabling random warp and before GAN. \nn - disabled.\ny - enabled\ncpu - enabled on CPU. This avoids using extra VRAM, sacrificing 20% of iteration time.")

            self.options['random_warp'] = io.input_bool ("Enable random warp of samples", default_random_warp, help_message="Random warp is required to generalize the facial expressions of both faces. When the face is trained enough, you can disable it to get extra sharpness and reduce subpixel shake for fewer iterations.")

            self.options['gan_power'] = np.clip ( io.input_number ("GAN power", default_gan_power, add_info="0.0 .. 10.0", help_message="Train the network in a Generative Adversarial manner. Forces the neural network to learn small details of the face. Enable it only when the face is trained enough, and don't disable it afterwards. Typical value is 0.1"), 0.0, 10.0 )

            if 'df' in self.options['archi']:
                self.options['true_face_power'] = np.clip ( io.input_number ("'True face' power.", default_true_face_power, add_info="0.0000 .. 1.0", help_message="Experimental option. Discriminates the result face to be more like the src face. The higher the value, the stronger the discrimination. Typical value is 0.01. Comparison - https://i.imgur.com/czScS9q.png"), 0.0, 1.0 )
            else:
                self.options['true_face_power'] = 0.0

            self.options['face_style_power'] = np.clip ( io.input_number("Face style power", default_face_style_power, add_info="0.0..100.0", help_message="Learn the color of the predicted face to be the same as dst inside the mask. If you want to use this option with 'whole_face' you have to use an XSeg trained mask. Warning: enable it only after 10k iters, when the predicted face is clear enough to start learning style. Start from a value of 0.001 and check the history changes. Enabling this option increases the chance of model collapse."), 0.0, 100.0 )
            self.options['bg_style_power'] = np.clip ( io.input_number("Background style power", default_bg_style_power, add_info="0.0..100.0", help_message="Learn the area outside the mask of the predicted face to be the same as dst. If you want to use this option with 'whole_face' you have to use an XSeg trained mask. This can make the face more like dst. Enabling this option increases the chance of model collapse. Typical value is 2.0"), 0.0, 100.0 )

            self.options['ct_mode'] = io.input_str (f"Color transfer for src faceset", default_ct_mode, ['none','rct','lct','mkl','idt','sot'], help_message="Change the color distribution of the src samples to be closer to the dst samples. Try all modes to find the best one.")
            self.options['clipgrad'] = io.input_bool ("Enable gradient clipping", default_clipgrad, help_message="Gradient clipping reduces the chance of model collapse at the cost of training speed.")

            self.options['pretrain'] = io.input_bool ("Enable pretraining mode", default_pretrain, help_message="Pretrain the model with a large amount of various faces. After that, the model can be used to train fakes more quickly.")

        if self.options['pretrain'] and self.get_pretraining_data_path() is None:
            raise Exception("pretraining_data_path is not defined")

        self.pretrain_just_disabled = (default_pretrain == True and self.options['pretrain'] == False)
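
Resolution handling in the options above snaps the value to a multiple of 16 (32 when the '-d' architecture option is chosen) and clamps it to [64, 640]. The rounding in isolation:

import numpy as np

def snap_resolution(res, d_opt=False, min_res=64, max_res=640):
    # Floor to the nearest multiple of 16 (32 for '-d' archis), then clamp.
    step = 32 if d_opt else 16
    return int(np.clip((res // step) * step, min_res, max_res))

assert snap_resolution(200) == 192
assert snap_resolution(200, d_opt=True) == 192
assert snap_resolution(50) == 64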
Example #17
def extract_umd_csv(input_file_csv, face_type='full_face', device_args={}):

    # Extract faces from the umdfaces.io dataset csv file with pitch/yaw/roll info.
    multi_gpu = device_args.get('multi_gpu', False)
    cpu_only = device_args.get('cpu_only', False)
    face_type = FaceType.fromString(face_type)

    input_file_csv_path = Path(input_file_csv)
    if not input_file_csv_path.exists():
        raise ValueError('input_file_csv not found. Please ensure it exists.')

    input_file_csv_root_path = input_file_csv_path.parent
    output_path = input_file_csv_path.parent / ('aligned_' +
                                                input_file_csv_path.name)

    io.log_info("Output dir is %s." % (str(output_path)))

    if output_path.exists():
        output_images_paths = pathex.get_image_paths(output_path)
        if len(output_images_paths) > 0:
            io.input_bool(
                "WARNING !!! \n %s contains files! \n They will be deleted. \n Press enter to continue."
                % (str(output_path)), False)
            for filename in output_images_paths:
                Path(filename).unlink()
    else:
        output_path.mkdir(parents=True, exist_ok=True)

    try:
        with open(str(input_file_csv_path), 'r') as f:
            csv_file = f.read()
    except Exception as e:
        io.log_err("Unable to open or read file " + str(input_file_csv_path) +
                   ": " + str(e))
        return

    strings = csv_file.split('\n')
    keys = strings[0].split(',')
    keys_len = len(keys)
    csv_data = []
    for i in range(1, len(strings)):
        values = strings[i].split(',')
        if keys_len != len(values):
            io.log_err("Wrong string in csv file, skipping.")
            continue

        csv_data += [{keys[n]: values[n] for n in range(keys_len)}]

    data = []
    for d in csv_data:
        filename = input_file_csv_root_path / d['FILE']

        x, y, w, h = float(d['FACE_X']), float(d['FACE_Y']), float(
            d['FACE_WIDTH']), float(d['FACE_HEIGHT'])

        data += [
            ExtractSubprocessor.Data(filename=filename,
                                     rects=[[x, y, x + w, y + h]])
        ]

    images_found = len(data)
    faces_detected = 0
    if len(data) > 0:
        io.log_info("Performing 2nd pass from csv file...")
        data = ExtractSubprocessor(data,
                                   'landmarks',
                                   multi_gpu=multi_gpu,
                                   cpu_only=cpu_only).run()

        io.log_info('Performing 3rd pass...')
        data = ExtractSubprocessor(data,
                                   'final',
                                   face_type,
                                   None,
                                   multi_gpu=multi_gpu,
                                   cpu_only=cpu_only,
                                   manual=False,
                                   final_output_path=output_path).run()
        faces_detected += sum([d.faces_detected for d in data])

    io.log_info('-------------------------')
    io.log_info('Images found:        %d' % (images_found))
    io.log_info('Faces detected:      %d' % (faces_detected))
    io.log_info('-------------------------')
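
The hand-rolled split(',') parsing above can also be expressed with the standard csv module; a sketch of equivalent row loading (the field names FILE, FACE_X, FACE_Y, FACE_WIDTH, FACE_HEIGHT are the ones the function uses):

import csv
from pathlib import Path

def load_umd_rects(csv_path, root):
    # Equivalent of the manual split-based parsing in extract_umd_csv.
    with open(csv_path, newline='') as f:
        for row in csv.DictReader(f):
            x, y = float(row['FACE_X']), float(row['FACE_Y'])
            w, h = float(row['FACE_WIDTH']), float(row['FACE_HEIGHT'])
            yield Path(root) / row['FILE'], [x, y, x + w, y + h]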
Example #18
def main(
    detector=None,
    input_path=None,
    output_path=None,
    output_debug=None,
    manual_fix=False,
    manual_output_debug_fix=False,
    manual_window_size=1368,
    face_type='full_face',
    max_faces_from_image=None,
    image_size=None,
    jpeg_quality=None,
    cpu_only=False,
    force_gpu_idxs=None,
):

    if not input_path.exists():
        io.log_err('Input directory not found. Please ensure it exists.')
        return

    if not output_path.exists():
        output_path.mkdir(parents=True, exist_ok=True)

    if face_type is not None:
        face_type = FaceType.fromString(face_type)

    if face_type is None:
        if manual_output_debug_fix:
            files = pathex.get_image_paths(output_path)
            if len(files) != 0:
                dflimg = DFLIMG.load(Path(files[0]))
                if dflimg is not None and dflimg.has_data():
                    face_type = FaceType.fromString(dflimg.get_face_type())

    input_image_paths = pathex.get_image_unique_filestem_paths(
        input_path, verbose_print_func=io.log_info)
    output_images_paths = pathex.get_image_paths(output_path)
    output_debug_path = output_path.parent / (output_path.name + '_debug')

    continue_extraction = False
    if not manual_output_debug_fix and len(output_images_paths) > 0:
        if len(output_images_paths) > 128:
            continue_extraction = io.input_bool(
                "Continue extraction?",
                True,
                help_message=
                "Extraction can be continued, but you must specify the same options again."
            )

        if len(output_images_paths) > 128 and continue_extraction:
            try:
                input_image_paths = input_image_paths[
                    [Path(x).stem for x in input_image_paths].
                    index(Path(output_images_paths[-128]).stem.split('_')[0]):]
            except Exception:
                io.log_err(
                    "Error in fetching the last index. Extraction cannot be continued."
                )
                return
        elif input_path != output_path:
            io.input(
                f"\n WARNING !!! \n {output_path} contains files! \n They will be deleted. \n Press enter to continue.\n"
            )
            for filename in output_images_paths:
                Path(filename).unlink()

    device_config = nn.DeviceConfig.GPUIndexes( force_gpu_idxs or nn.ask_choose_device_idxs(choose_only_one=detector=='manual', suggest_all_gpu=True) ) \
                    if not cpu_only else nn.DeviceConfig.CPU()

    if face_type is None:
        face_type = io.input_str(
            "Face type",
            'wf', ['f', 'wf', 'head'],
            help_message=
            "Full face / whole face / head. 'Whole face' covers full area of face include forehead. 'head' covers full head, but requires XSeg for src and dst faceset."
        ).lower()
        face_type = {
            'f': FaceType.FULL,
            'wf': FaceType.WHOLE_FACE,
            'head': FaceType.HEAD
        }[face_type]

    if max_faces_from_image is None:
        max_faces_from_image = io.input_int(
            f"Max number of faces from image",
            0,
            help_message=
            "If you extract a src faceset that has frames with a large number of faces, it is advisable to set max faces to 3 to speed up extraction. 0 - unlimited"
        )

    if image_size is None:
        image_size = io.input_int(
            f"Image size",
            512 if face_type < FaceType.HEAD else 768,
            valid_range=[256, 2048],
            help_message=
            "Output image size. The higher image size, the worse face-enhancer works. Use higher than 512 value only if the source image is sharp enough and the face does not need to be enhanced."
        )

    if jpeg_quality is None:
        jpeg_quality = io.input_int(
            f"Jpeg quality",
            90,
            valid_range=[1, 100],
            help_message=
            "Jpeg quality. The higher jpeg quality the larger the output file size."
        )

    if detector is None:
        io.log_info("Choose detector type.")
        io.log_info("[0] S3FD")
        io.log_info("[1] manual")
        detector = {0: 's3fd', 1: 'manual'}[io.input_int("", 0, [0, 1])]

    if output_debug is None:
        output_debug = io.input_bool(
            f"Write debug images to {output_debug_path.name}?", False)

    if output_debug:
        output_debug_path.mkdir(parents=True, exist_ok=True)

    if manual_output_debug_fix:
        if not output_debug_path.exists():
            io.log_err(
                f'{output_debug_path} not found. Re-extract faces with "Write debug images" option.'
            )
            return
        else:
            detector = 'manual'
            io.log_info(
                'Performing re-extract frames which were deleted from _debug directory.'
            )

            input_image_paths = DeletedFilesSearcherSubprocessor(
                input_image_paths,
                pathex.get_image_paths(output_debug_path)).run()
            input_image_paths = sorted(input_image_paths)
            io.log_info('Found %d images.' % (len(input_image_paths)))
    else:
        if not continue_extraction and output_debug_path.exists():
            for filename in pathex.get_image_paths(output_debug_path):
                Path(filename).unlink()

    images_found = len(input_image_paths)
    faces_detected = 0
    if images_found != 0:
        if detector == 'manual':
            io.log_info('Performing manual extract...')
            data = ExtractSubprocessor(
                [
                    ExtractSubprocessor.Data(Path(filename))
                    for filename in input_image_paths
                ],
                'landmarks-manual',
                image_size,
                jpeg_quality,
                face_type,
                output_debug_path if output_debug else None,
                manual_window_size=manual_window_size,
                device_config=device_config).run()

            io.log_info('Performing 3rd pass...')
            data = ExtractSubprocessor(
                data,
                'final',
                image_size,
                jpeg_quality,
                face_type,
                output_debug_path if output_debug else None,
                final_output_path=output_path,
                device_config=device_config).run()

        else:
            io.log_info('Extracting faces...')
            data = ExtractSubprocessor(
                [
                    ExtractSubprocessor.Data(Path(filename))
                    for filename in input_image_paths
                ],
                'all',
                image_size,
                jpeg_quality,
                face_type,
                output_debug_path if output_debug else None,
                max_faces_from_image=max_faces_from_image,
                final_output_path=output_path,
                device_config=device_config).run()

        faces_detected += sum([d.faces_detected for d in data])

        if manual_fix:
            if all(d.faces_detected > 0 for d in data):
                io.log_info('All faces are detected, manual fix not needed.')
            else:
                fix_data = [
                    ExtractSubprocessor.Data(d.filepath) for d in data
                    if d.faces_detected == 0
                ]
                io.log_info('Performing manual fix for %d images...' %
                            (len(fix_data)))
                fix_data = ExtractSubprocessor(
                    fix_data,
                    'landmarks-manual',
                    image_size,
                    jpeg_quality,
                    face_type,
                    output_debug_path if output_debug else None,
                    manual_window_size=manual_window_size,
                    device_config=device_config).run()
                fix_data = ExtractSubprocessor(
                    fix_data,
                    'final',
                    image_size,
                    jpeg_quality,
                    face_type,
                    output_debug_path if output_debug else None,
                    final_output_path=output_path,
                    device_config=device_config).run()
                faces_detected += sum([d.faces_detected for d in fix_data])

    io.log_info('-------------------------')
    io.log_info('Images found:        %d' % (images_found))
    io.log_info('Faces detected:      %d' % (faces_detected))
    io.log_info('-------------------------')
Example #19
    def ask_settings(self):
        self.add_source_image = io.input_bool(
            "Add source image?",
            False,
            help_message="Add source image for comparison.")
        super().ask_settings()
Example #20
def sort_by_absdiff(input_path):
    io.log_info("Sorting by absolute difference...")

    is_sim = io.input_bool("Sort by similar?",
                           True,
                           help_message="Otherwise sort by dissimilar.")

    from core.leras import nn

    device_config = nn.ask_choose_device_idxs(choose_only_one=True,
                                              return_device_config=True)
    nn.initialize(device_config=device_config)
    tf = nn.tf

    image_paths = pathex.get_image_paths(input_path)
    image_paths_len = len(image_paths)

    batch_size = 1024
    batch_size_remain = image_paths_len % batch_size

    i_t = tf.placeholder(tf.float32, (None, 256, 256, 3))
    j_t = tf.placeholder(tf.float32, (None, 256, 256, 3))

    outputs_full = []
    outputs_remain = []

    for i in range(batch_size):
        diff_t = tf.reduce_sum(tf.abs(i_t - j_t[i]), axis=[1, 2, 3])
        outputs_full.append(diff_t)
        if i < batch_size_remain:
            outputs_remain.append(diff_t)

    def func_bs_full(i, j):
        return nn.tf_sess.run(outputs_full, feed_dict={i_t: i, j_t: j})

    def func_bs_remain(i, j):
        return nn.tf_sess.run(outputs_remain, feed_dict={i_t: i, j_t: j})

    import h5py
    db_file_path = Path(tempfile.gettempdir()) / 'sort_cache.hdf5'
    db_file = h5py.File(str(db_file_path), "w")
    db = db_file.create_dataset("results", (image_paths_len, image_paths_len),
                                compression="gzip")

    pg_len = image_paths_len // batch_size
    if batch_size_remain != 0:
        pg_len += 1

    pg_len = int((pg_len * pg_len - pg_len) / 2 + pg_len)

    io.progress_bar("Computing", pg_len)
    j = 0
    while j < image_paths_len:
        j_images = [cv2_imread(x) for x in image_paths[j:j + batch_size]]
        j_images_len = len(j_images)

        func = func_bs_remain if image_paths_len - j < batch_size else func_bs_full

        i = 0
        while i < image_paths_len:
            if i >= j:
                i_images = [
                    cv2_imread(x) for x in image_paths[i:i + batch_size]
                ]
                i_images_len = len(i_images)
                result = func(i_images, j_images)
                db[j:j + j_images_len, i:i + i_images_len] = np.array(result)
                io.progress_bar_inc(1)

            i += batch_size
        db_file.flush()
        j += batch_size

    io.progress_bar_close()

    next_id = 0
    sorted_order = [next_id]  # renamed from 'sorted' to avoid shadowing the builtin
    for i in io.progress_bar_generator(range(image_paths_len - 1), "Sorting"):
        id_ar = np.concatenate([db[:next_id, next_id], db[next_id, next_id:]])
        id_ar = np.argsort(id_ar)

        next_id = np.setdiff1d(id_ar, sorted_order, True)[0 if is_sim else -1]
        sorted_order += [next_id]
    db_file.close()
    db_file_path.unlink()

    img_list = [(image_paths[x], ) for x in sorted_order]
    return img_list, []
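
The sort above is a greedy nearest-neighbor chain over the pairwise L1-difference matrix: start at image 0 and repeatedly jump to the closest (or, for dissimilar, the farthest) unused image. The same idea on a small in-memory matrix:

import numpy as np

def greedy_chain(dist, most_similar=True):
    # dist: symmetric (n, n) matrix of pairwise distances; image 0 seeds the chain.
    n = len(dist)
    order, used = [0], {0}
    for _ in range(n - 1):
        ranked = np.argsort(dist[order[-1]])
        if not most_similar:
            ranked = ranked[::-1]
        nxt = int(next(i for i in ranked if i not in used))
        order.append(nxt)
        used.add(nxt)
    return order

d = np.array([[0., 2., 1.],
              [2., 0., 3.],
              [1., 3., 0.]])
print(greedy_chain(d))  # [0, 2, 1]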
Example #21
def dev_segmented_extract(input_dir, output_dir):
    # Extract faces and embed the labelme .json polygon annotations into them.

    device_config = nn.DeviceConfig.GPUIndexes( nn.ask_choose_device_idxs(suggest_all_gpu=True) )

    input_path = Path(input_dir)
    if not input_path.exists():
        raise ValueError('input_dir not found. Please ensure it exists.')

    output_path = Path(output_dir)
    io.log_info("Performing extract segmented faces.")
    io.log_info(f'Output dir is {output_path}')

    if output_path.exists():
        output_images_paths = pathex.get_image_paths(output_path, subdirs=True)
        if len(output_images_paths) > 0:
            io.input_bool("WARNING !!! \n %s contains files! \n They will be deleted. \n Press enter to continue." % (str(output_path)), False )
            for filename in output_images_paths:
                Path(filename).unlink()
            shutil.rmtree(str(output_path))
    else:
        output_path.mkdir(parents=True, exist_ok=True)

    images_paths = pathex.get_image_paths(input_path, subdirs=True, return_Path_class=True)

    extract_data = []
    images_jsons = {}
    images_processed = 0

    for filepath in io.progress_bar_generator(images_paths, "Processing"):
        json_filepath = filepath.parent / (filepath.stem+'.json')

        if json_filepath.exists():
            try:
                json_dict = json.loads(json_filepath.read_text())
                images_jsons[filepath] = json_dict

                total_points = [ [x,y] for shape in json_dict['shapes'] for x,y in shape['points'] ]
                total_points = np.array(total_points)

                if len(total_points) == 0:
                    io.log_info(f"No points found in {json_filepath}, skipping.")
                    continue

                l,r = int(total_points[:,0].min()), int(total_points[:,0].max())
                t,b = int(total_points[:,1].min()), int(total_points[:,1].max())

                force_output_path=output_path / filepath.relative_to(input_path).parent
                force_output_path.mkdir(exist_ok=True, parents=True)

                extract_data.append ( ExtractSubprocessor.Data(filepath,
                                                               rects=[ [l,t,r,b] ],
                                                               force_output_path=force_output_path ) )
                images_processed += 1
            except Exception:
                io.log_err(f"err {filepath}, {traceback.format_exc()}")
                return
        else:
            io.log_info(f"No .json file for {filepath.relative_to(input_path)}, skipping.")
            continue

    image_size = 1024
    face_type = FaceType.HEAD
    extract_data = ExtractSubprocessor (extract_data, 'landmarks', image_size, face_type, device_config=device_config).run()
    extract_data = ExtractSubprocessor (extract_data, 'final', image_size, face_type, device_config=device_config).run()

    for data in extract_data:
        filepath = data.force_output_path / (data.filepath.stem+'_0.jpg')

        dflimg = DFLIMG.load(filepath)
        image_to_face_mat = dflimg.get_image_to_face_mat()

        json_dict = images_jsons[data.filepath]

        ie_polys = IEPolys()
        for shape in json_dict['shapes']:
            ie_poly = ie_polys.add(1)

            points = np.array( [ [x,y] for x,y in shape['points'] ] )
            points = LandmarksProcessor.transform_points(points, image_to_face_mat)

            for x,y in points:
                ie_poly.add( int(x), int(y) )

        dflimg.embed_and_set (filepath, ie_polys=ie_polys)

    io.log_info(f"Images found:     {len(images_paths)}")
    io.log_info(f"Images processed: {images_processed}")
Example #22
def main(model_class_name=None,
         saved_models_path=None,
         training_data_src_path=None,
         force_model_name=None,
         input_path=None,
         output_path=None,
         aligned_path=None,
         force_gpu_idxs=None,
         cpu_only=None):
    io.log_info("Running merger.\r\n")

    try:
        if not input_path.exists():
            io.log_err('Input directory not found. Please ensure it exists.')
            return

        if not output_path.exists():
            output_path.mkdir(parents=True, exist_ok=True)

        if not saved_models_path.exists():
            io.log_err('Model directory not found. Please ensure it exists.')
            return

        is_interactive = io.input_bool("Use interactive merger?",
                                       True) if not io.is_colab() else False

        import models
        model = models.import_model(model_class_name)(
            is_training=False,
            saved_models_path=saved_models_path,
            training_data_src_path=training_data_src_path,
            force_gpu_idxs=force_gpu_idxs,
            cpu_only=cpu_only)
        merger_session_filepath = model.get_strpath_storage_for_file(
            'merger_session.dat')
        predictor_func, predictor_input_shape, cfg = model.get_MergerConfig()

        if not is_interactive:
            cfg.ask_settings()

        input_path_image_paths = pathex.get_image_paths(input_path)

        if cfg.type == MergerConfig.TYPE_MASKED:
            if not aligned_path.exists():
                io.log_err(
                    'Aligned directory not found. Please ensure it exists.')
                return

            packed_samples = None
            try:
                packed_samples = samplelib.PackedFaceset.load(aligned_path)
            except Exception:
                io.log_err(
                    f"Error occurred while loading samplelib.PackedFaceset.load {str(aligned_path)}, {traceback.format_exc()}"
                )

            if packed_samples is not None:
                io.log_info("Using packed faceset.")

                def generator():
                    for sample in io.progress_bar_generator(
                            packed_samples, "Collecting alignments"):
                        filepath = Path(sample.filename)
                        yield filepath, DFLIMG.load(
                            filepath,
                            loader_func=lambda x: sample.read_raw_file())
            else:

                def generator():
                    for filepath in io.progress_bar_generator(
                            pathex.get_image_paths(aligned_path),
                            "Collecting alignments"):
                        filepath = Path(filepath)
                        yield filepath, DFLIMG.load(filepath)

            alignments = {}
            multiple_faces_detected = False

            # Yield the path together with the image so the error message
            # below can name the offending file (dflimg alone does not carry it).
            for filepath, dflimg in generator():
                if dflimg is None:
                    io.log_err("%s is not a dfl image file" % (filepath.name))
                    continue

                source_filename = dflimg.get_source_filename()
                if source_filename is None or source_filename == "_":
                    continue

                source_filename = Path(source_filename)
                source_filename_stem = source_filename.stem

                if source_filename_stem not in alignments.keys():
                    alignments[source_filename_stem] = []

                alignments_ar = alignments[source_filename_stem]
                alignments_ar.append(dflimg.get_source_landmarks())
                if len(alignments_ar) > 1:
                    multiple_faces_detected = True

            if multiple_faces_detected:
                io.log_info(
                    "Warning: multiple faces detected. Strongly recommended to process them separately."
                )

            frames = [
                MergeSubprocessor.Frame(frame_info=FrameInfo(
                    filepath=Path(p),
                    landmarks_list=alignments.get(Path(p).stem, None)))
                for p in input_path_image_paths
            ]

            if multiple_faces_detected:
                io.log_info(
                    "Warning: multiple faces detected. Motion blur will not be used."
                )
            else:
                s = 256
                local_pts = [(s // 2 - 1, s // 2 - 1),
                             (s // 2 - 1, 0)]  #center+up
                frames_len = len(frames)
                for i in io.progress_bar_generator(range(len(frames)),
                                                   "Computing motion vectors"):
                    fi_prev = frames[max(0, i - 1)].frame_info
                    fi = frames[i].frame_info
                    fi_next = frames[min(i + 1, frames_len - 1)].frame_info
                    if len(fi_prev.landmarks_list) == 0 or \
                       len(fi.landmarks_list) == 0 or \
                       len(fi_next.landmarks_list) == 0:
                        continue

                    mat_prev = LandmarksProcessor.get_transform_mat(
                        fi_prev.landmarks_list[0], s, face_type=FaceType.FULL)
                    mat = LandmarksProcessor.get_transform_mat(
                        fi.landmarks_list[0], s, face_type=FaceType.FULL)
                    mat_next = LandmarksProcessor.get_transform_mat(
                        fi_next.landmarks_list[0], s, face_type=FaceType.FULL)

                    pts_prev = LandmarksProcessor.transform_points(
                        local_pts, mat_prev, True)
                    pts = LandmarksProcessor.transform_points(
                        local_pts, mat, True)
                    pts_next = LandmarksProcessor.transform_points(
                        local_pts, mat_next, True)

                    prev_vector = pts[0] - pts_prev[0]
                    next_vector = pts_next[0] - pts[0]

                    motion_vector = pts_next[0] - pts_prev[0]
                    fi.motion_power = npla.norm(motion_vector)

                    motion_vector = motion_vector / fi.motion_power if fi.motion_power != 0 else np.array(
                        [0, 0], dtype=np.float32)

                    fi.motion_deg = -math.atan2(
                        motion_vector[1], motion_vector[0]) * 180 / math.pi

        elif cfg.type == MergerConfig.TYPE_FACE_AVATAR:
            filesdata = []
            for filepath in io.progress_bar_generator(input_path_image_paths,
                                                      "Collecting info"):
                filepath = Path(filepath)

                dflimg = DFLIMG.load(filepath)
                if dflimg is None:
                    io.log_err("%s is not a dfl image file" % (filepath.name))
                    continue
                filesdata += [
                    (FrameInfo(filepath=filepath,
                               landmarks_list=[dflimg.get_landmarks()]),
                     dflimg.get_source_filename())
                ]

            filesdata = sorted(
                filesdata,
                key=operator.itemgetter(1))  #sort by source_filename
            frames = []
            filesdata_len = len(filesdata)
            for i in range(len(filesdata)):
                frame_info = filesdata[i][0]

                prev_temporal_frame_infos = []
                next_temporal_frame_infos = []

                for t in range(cfg.temporal_face_count):
                    prev_frame_info = filesdata[max(i - t, 0)][0]
                    next_frame_info = filesdata[min(i + t,
                                                    filesdata_len - 1)][0]

                    prev_temporal_frame_infos.insert(0, prev_frame_info)
                    next_temporal_frame_infos.append(next_frame_info)

                frames.append(
                    MergeSubprocessor.Frame(
                        prev_temporal_frame_infos=prev_temporal_frame_infos,
                        frame_info=frame_info,
                        next_temporal_frame_infos=next_temporal_frame_infos))

        if len(frames) == 0:
            io.log_info("No frames to merge in input_dir.")
        else:
            MergeSubprocessor(is_interactive=is_interactive,
                              merger_session_filepath=merger_session_filepath,
                              predictor_func=predictor_func,
                              predictor_input_shape=predictor_input_shape,
                              merger_config=cfg,
                              frames=frames,
                              frames_root_path=input_path,
                              output_path=output_path,
                              model_iter=model.get_iter()).run()

        model.finalize()

    except Exception as e:
        print('Error: %s' % (str(e)))
        traceback.print_exc()
Beispiel #23
    def __init__(self, is_interactive, merger_session_filepath, predictor_func,
                 predictor_input_shape, merger_config, frames,
                 frames_root_path, output_path, model_iter):
        if len(frames) == 0:
            raise ValueError("len (frames) == 0")

        super().__init__('Merger',
                         MergeSubprocessor.Cli,
                         86400 if MERGER_DEBUG else 60,
                         io_loop_sleep_time=0.001)

        self.is_interactive = is_interactive
        self.merger_session_filepath = Path(merger_session_filepath)
        self.merger_config = merger_config

        self.predictor_func_host, self.predictor_func = SubprocessFunctionCaller.make_pair(
            predictor_func)
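        # make_pair presumably returns a (host, proxy) pair: the proxy side can be
        # handed to worker subprocesses and forwards calls back to this process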
        self.predictor_input_shape = predictor_input_shape

        self.face_enhancer = None

        def superres_func(mode, face_bgr):
            if mode == 1:
                if self.face_enhancer is None:
                    self.face_enhancer = FaceEnhancer(place_model_on_cpu=True)

                return self.face_enhancer.enhance(face_bgr,
                                                  is_tanh=True,
                                                  preserve_size=False)

        self.superres_host, self.superres_func = SubprocessFunctionCaller.make_pair(
            superres_func)

        self.fanseg_by_face_type = {}
        self.fanseg_input_size = 256

        def fanseg_extract_func(face_type, *args, **kwargs):
            fanseg = self.fanseg_by_face_type.get(face_type, None)
            if fanseg is None:
                cpu_only = len(nn.getCurrentDeviceConfig().devices) == 0

                with nn.tf.device('/CPU:0' if cpu_only else '/GPU:0'):
                    fanseg = TernausNet("FANSeg",
                                        self.fanseg_input_size,
                                        FaceType.toString(face_type),
                                        place_model_on_cpu=True)

                self.fanseg_by_face_type[face_type] = fanseg
            return fanseg.extract(*args, **kwargs)

        self.fanseg_host, self.fanseg_extract_func = SubprocessFunctionCaller.make_pair(
            fanseg_extract_func)

        self.frames_root_path = frames_root_path
        self.output_path = output_path
        self.model_iter = model_iter

        self.prefetch_frame_count = self.process_count = min(
            6, multiprocessing.cpu_count())

        session_data = None
        if self.is_interactive and self.merger_session_filepath.exists():
            io.input_skip_pending()
            if io.input_bool("Use saved session?", True):
                try:
                    with open(str(self.merger_session_filepath), "rb") as f:
                        session_data = pickle.loads(f.read())

                except Exception:
                    # a corrupted or incompatible session file is ignored; a fresh session is started
                    pass

        self.frames = frames
        self.frames_idxs = [*range(len(self.frames))]
        self.frames_done_idxs = []

        if self.is_interactive and session_data is not None:
            # Loaded session data, check it
            s_frames = session_data.get('frames', None)
            s_frames_idxs = session_data.get('frames_idxs', None)
            s_frames_done_idxs = session_data.get('frames_done_idxs', None)
            s_model_iter = session_data.get('model_iter', None)

            frames_equal = (s_frames is not None) and \
                           (s_frames_idxs is not None) and \
                           (s_frames_done_idxs is not None) and \
                           (s_model_iter is not None) and \
                           (len(frames) == len(s_frames)) # frames count must match

            if frames_equal:
                for i in range(len(frames)):
                    frame = frames[i]
                    s_frame = s_frames[i]
                    # frames filenames must match
                    if frame.frame_info.filepath.name != s_frame.frame_info.filepath.name:
                        frames_equal = False
                    if not frames_equal:
                        break

            if frames_equal:
                io.log_info('Using saved session from ' +
                            '/'.join(self.merger_session_filepath.parts[-2:]))

                for frame in s_frames:
                    if frame.cfg is not None:
                        # recreate MergerConfig class using constructor with get_config() as dict params
                        # so if any new param will be added, old merger session will work properly
                        frame.cfg = frame.cfg.__class__(
                            **frame.cfg.get_config())

                self.frames = s_frames
                self.frames_idxs = s_frames_idxs
                self.frames_done_idxs = s_frames_done_idxs

                rewind_to_begin = len(
                    self.frames_idxs) == 0  # all frames are done?

                if self.model_iter != s_model_iter:
                    # model was more trained, recompute all frames
                    rewind_to_begin = True
                    for frame in self.frames:
                        frame.is_done = False

                if rewind_to_begin:
                    while len(self.frames_done_idxs) > 0:
                        prev_frame = self.frames[self.frames_done_idxs.pop()]
                        self.frames_idxs.insert(0, prev_frame.idx)

                if len(self.frames_idxs) != 0:
                    cur_frame = self.frames[self.frames_idxs[0]]
                    cur_frame.is_shown = False

            if not frames_equal:
                session_data = None

        if session_data is None:
            for filename in pathex.get_image_paths(
                    self.output_path):  #remove all images in output_path
                Path(filename).unlink()

            frames[0].cfg = self.merger_config.copy()

        for i in range(len(self.frames)):
            frame = self.frames[i]
            frame.idx = i
            frame.output_filepath = self.output_path / (
                frame.frame_info.filepath.stem + '.png')
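
The session file loaded above is a pickled dict with the keys 'frames', 'frames_idxs', 'frames_done_idxs' and 'model_iter'. A minimal sketch of a writer that would produce this format (a hypothetical helper for illustration; the original listing does not show the saving side):

    def save_session(self):
        # persist merger progress in the format expected by __init__ above
        session_data = {'frames': self.frames,
                        'frames_idxs': self.frames_idxs,
                        'frames_done_idxs': self.frames_done_idxs,
                        'model_iter': self.model_iter}
        with open(str(self.merger_session_filepath), "wb") as f:
            f.write(pickle.dumps(session_data))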
Beispiel #24
    def on_initialize_options(self):
        device_config = nn.getCurrentDeviceConfig()
        
        lowest_vram = 2
        if len(device_config.devices) != 0:
            lowest_vram = device_config.devices.get_worst_device().total_mem_gb
            
        if lowest_vram >= 4:
            suggest_batch_size = 8
        else:
            suggest_batch_size = 4
        
        yn_str = {True:'y',False:'n'}
        ask_override = self.ask_override()

        if self.is_first_run() or ask_override:
            self.ask_autobackup_hour()
            self.ask_write_preview_history()
            self.ask_target_iter()
            self.ask_random_flip()
            self.ask_batch_size(suggest_batch_size)

        default_resolution         = self.options['resolution']         = self.load_or_def_option('resolution', 128)
        default_face_type          = self.options['face_type']          = self.load_or_def_option('face_type', 'f')
        default_models_opt_on_gpu  = self.options['models_opt_on_gpu']  = self.load_or_def_option('models_opt_on_gpu', True)
        default_archi              = self.options['archi']              = self.load_or_def_option('archi', 'dfhd')
        default_ae_dims            = self.options['ae_dims']            = self.load_or_def_option('ae_dims', 256)
        default_e_dims             = self.options['e_dims']             = self.load_or_def_option('e_dims', 64)
        default_d_dims             = self.options['d_dims']             = self.load_or_def_option('d_dims', 64)
        
        # default mask decoder dims: one third of the decoder dims, rounded up to an even number
        default_d_mask_dims        = default_d_dims // 3
        default_d_mask_dims        += default_d_mask_dims % 2
        default_d_mask_dims        = self.options['d_mask_dims']        = self.load_or_def_option('d_mask_dims', default_d_mask_dims)
        
        default_learn_mask         = self.options['learn_mask']         = self.load_or_def_option('learn_mask', True)
        default_lr_dropout         = self.options['lr_dropout']         = self.load_or_def_option('lr_dropout', False)
        default_random_warp        = self.options['random_warp']        = self.load_or_def_option('random_warp', True)
        default_true_face_training = self.options['true_face_training'] = self.load_or_def_option('true_face_training', False)
        default_face_style_power   = self.options['face_style_power']   = self.load_or_def_option('face_style_power', 0.0)
        default_bg_style_power     = self.options['bg_style_power']     = self.load_or_def_option('bg_style_power', 0.0)
        default_ct_mode            = self.options['ct_mode']            = self.load_or_def_option('ct_mode', 'none')
        default_clipgrad           = self.options['clipgrad']           = self.load_or_def_option('clipgrad', False)
        default_pretrain           = self.options['pretrain']           = self.load_or_def_option('pretrain', False)

        if self.is_first_run():
            resolution = io.input_int("Resolution", default_resolution, add_info="64-256", help_message="More resolution requires more VRAM and time to train. Value will be adjusted to multiple of 16.")
            resolution = np.clip ( (resolution // 16) * 16, 64, 256)
            self.options['resolution'] = resolution
            self.options['face_type'] = io.input_str ("Face type", default_face_type, ['h','mf','f'], help_message="Half / mid face / full face. Half face has better resolution, but covers less area of cheeks. Mid face is 30% wider than half face.").lower()

        if (self.is_first_run() or ask_override) and len(device_config.devices) == 1:
            self.options['models_opt_on_gpu'] = io.input_bool ("Place models and optimizer on GPU", default_models_opt_on_gpu, help_message="When you train on one GPU, by default model and optimizer weights are placed on GPU to accelerate the process. You can place them on CPU to free up extra VRAM and thus set bigger dimensions.")

        if self.is_first_run():
            self.options['archi'] = io.input_str ("AE architecture", default_archi, ['dfhd','liaehd','df','liae'], help_message="'df' keeps faces more natural. 'liae' can fix overly different face shapes. 'hd' is a heavyweight version for the best quality.").lower() #-s version is slower, but has a decreased chance to collapse.
            self.options['ae_dims'] = np.clip ( io.input_int("AutoEncoder dimensions", default_ae_dims, add_info="32-1024", help_message="All face information is packed into AE dims. If the amount of AE dims is not enough, then for example closed eyes will not be recognized. More dims are better, but require more VRAM. You can fine-tune model size to fit your GPU." ), 32, 1024 )
            
            e_dims = np.clip ( io.input_int("Encoder dimensions", default_e_dims, add_info="16-256", help_message="More dims help to recognize more facial features and achieve sharper result, but require more VRAM. You can fine-tune model size to fit your GPU." ), 16, 256 )
            self.options['e_dims'] = e_dims + e_dims % 2  # "+ x % 2" rounds an odd value up to the next even number
            
            d_dims = np.clip ( io.input_int("Decoder dimensions", default_d_dims, add_info="16-256", help_message="More dims help to recognize more facial features and achieve sharper result, but require more VRAM. You can fine-tune model size to fit your GPU." ), 16, 256 )
            self.options['d_dims'] = d_dims + d_dims % 2
            
            d_mask_dims = np.clip ( io.input_int("Decoder mask dimensions", default_d_mask_dims, add_info="16-256", help_message="Typical mask dimensions = decoder dimensions / 3. If you manually cut out obstacles from the dst mask, you can increase this parameter to achieve better quality." ), 16, 256 )
            self.options['d_mask_dims'] = d_mask_dims + d_mask_dims % 2
            
        if self.is_first_run() or ask_override:
            self.options['learn_mask']  = io.input_bool ("Learn mask", default_learn_mask, help_message="Learning the mask can help the model to recognize face directions. Training without a mask reduces model size; in that case the merger is forced to use a 'not predicted mask' that is not as smooth as the predicted one.")
            self.options['lr_dropout']  = io.input_bool ("Use learning rate dropout", default_lr_dropout, help_message="When the face is trained enough, you can enable this option to get extra sharpness in fewer iterations.")
            self.options['random_warp'] = io.input_bool ("Enable random warp of samples", default_random_warp, help_message="Random warp is required to generalize facial expressions of both faces. When the face is trained enough, you can disable it to get extra sharpness in fewer iterations.")

            if 'df' in self.options['archi']:
                self.options['true_face_training'] = io.input_bool ("Enable 'true face' training", default_true_face_training, help_message="The result face will be more like src and will get extra sharpness. Enable it for the last 10-20k iterations before conversion.")
            else:
                self.options['true_face_training'] = False

            self.options['face_style_power'] = np.clip ( io.input_number("Face style power", default_face_style_power, add_info="0.0..100.0", help_message="Learn to transfer face style details such as light and color conditions. Warning: Enable it only after 10k iters, when the predicted face is clear enough to start learning style. Start from 0.1 and check history changes. Enabling this option increases the chance of model collapse."), 0.0, 100.0 )
            self.options['bg_style_power'] = np.clip ( io.input_number("Background style power", default_bg_style_power, add_info="0.0..100.0", help_message="Learn to transfer background around face. This can make face more like dst. Enabling this option increases the chance of model collapse."), 0.0, 100.0 )
            self.options['ct_mode'] = io.input_str (f"Color transfer for src faceset", default_ct_mode, ['none','rct','lct','mkl','idt','sot'], help_message="Change color distribution of src samples close to dst samples. Try all modes to find the best.")
            self.options['clipgrad'] = io.input_bool ("Enable gradient clipping", default_clipgrad, help_message="Gradient clipping reduces chance of model collapse, sacrificing speed of training.")
            self.options['pretrain'] = io.input_bool ("Enable pretraining mode", default_pretrain, help_message="Pretrain the model with large amount of various faces. After that, model can be used to train the fakes more quickly.")

        if self.options['pretrain'] and self.get_pretraining_data_path() is None:
            raise Exception("pretraining_data_path is not defined") 

        # pretraining was just switched off in this run: reset the iteration counter below
        self.pretrain_just_disabled = default_pretrain and not self.options['pretrain']
        
        if self.pretrain_just_disabled:
            self.set_iter(1)
Beispiel #25
    def on_initialize_options(self):
        device_config = nn.getCurrentDeviceConfig()

        lowest_vram = 2
        if len(device_config.devices) != 0:
            lowest_vram = device_config.devices.get_worst_device().total_mem_gb

        if lowest_vram >= 4:
            suggest_batch_size = 8
        else:
            suggest_batch_size = 4

        yn_str = {True:'y',False:'n'}

        default_resolution         = self.options['resolution']         = self.load_or_def_option('resolution', 128)
        default_face_type          = self.options['face_type']          = self.load_or_def_option('face_type', 'f')
        default_models_opt_on_gpu  = self.options['models_opt_on_gpu']  = self.load_or_def_option('models_opt_on_gpu', True)
        default_archi              = self.options['archi']              = self.load_or_def_option('archi', 'df')
        default_ae_dims            = self.options['ae_dims']            = self.load_or_def_option('ae_dims', 256)
        default_e_dims             = self.options['e_dims']             = self.load_or_def_option('e_dims', 64)
        default_d_dims             = self.options['d_dims']             = self.options.get('d_dims', None)        # real default is computed below once the architecture is known
        default_d_mask_dims        = self.options['d_mask_dims']        = self.options.get('d_mask_dims', None)   # likewise derived from d_dims below
        default_masked_training    = self.options['masked_training']    = self.load_or_def_option('masked_training', True)
        default_eyes_prio          = self.options['eyes_prio']          = self.load_or_def_option('eyes_prio', False)
        default_lr_dropout         = self.options['lr_dropout']         = self.load_or_def_option('lr_dropout', False)
        default_random_warp        = self.options['random_warp']        = self.load_or_def_option('random_warp', True)
        default_gan_power          = self.options['gan_power']          = self.load_or_def_option('gan_power', 0.0)
        default_true_face_power    = self.options['true_face_power']    = self.load_or_def_option('true_face_power', 0.0)
        default_face_style_power   = self.options['face_style_power']   = self.load_or_def_option('face_style_power', 0.0)
        default_bg_style_power     = self.options['bg_style_power']     = self.load_or_def_option('bg_style_power', 0.0)
        default_ct_mode            = self.options['ct_mode']            = self.load_or_def_option('ct_mode', 'none')
        default_clipgrad           = self.options['clipgrad']           = self.load_or_def_option('clipgrad', False)
        default_pretrain           = self.options['pretrain']           = self.load_or_def_option('pretrain', False)

        ask_override = self.ask_override()
        if self.is_first_run() or ask_override:
            self.ask_autobackup_hour()
            self.ask_write_preview_history()
            self.ask_target_iter()
            self.ask_random_flip()
            self.ask_batch_size(suggest_batch_size)

        if self.is_first_run():
            resolution = io.input_int("Resolution", default_resolution, add_info="64-512", help_message="More resolution requires more VRAM and time to train. Value will be adjusted to multiple of 16.")
            resolution = np.clip ( (resolution // 16) * 16, 64, 512)
            self.options['resolution'] = resolution
            self.options['face_type'] = io.input_str ("Face type", default_face_type, ['h','mf','f','wf'], help_message="Half / mid face / full face / whole face. Half face has better resolution, but covers less area of cheeks. Mid face is 30% wider than half face. 'Whole face' covers the full area of the face including the forehead, but requires manual merging in Adobe After Effects.").lower()
            self.options['archi'] = io.input_str ("AE architecture", default_archi, ['df','liae','dfhd','liaehd'], help_message="'df' keeps faces more natural.\n'liae' can fix overly different face shapes.\n'hd' are experimental versions.").lower()

        default_d_dims             = 48 if self.options['archi'] == 'dfhd' else 64
        default_d_dims             = self.options['d_dims']             = self.load_or_def_option('d_dims', default_d_dims)

        default_d_mask_dims        = default_d_dims // 3
        default_d_mask_dims        += default_d_mask_dims % 2
        default_d_mask_dims        = self.options['d_mask_dims']        = self.load_or_def_option('d_mask_dims', default_d_mask_dims)

        if self.is_first_run():
            self.options['ae_dims'] = np.clip ( io.input_int("AutoEncoder dimensions", default_ae_dims, add_info="32-1024", help_message="All face information is packed into AE dims. If the amount of AE dims is not enough, then for example closed eyes will not be recognized. More dims are better, but require more VRAM. You can fine-tune model size to fit your GPU." ), 32, 1024 )

            e_dims = np.clip ( io.input_int("Encoder dimensions", default_e_dims, add_info="16-256", help_message="More dims help to recognize more facial features and achieve sharper result, but require more VRAM. You can fine-tune model size to fit your GPU." ), 16, 256 )
            self.options['e_dims'] = e_dims + e_dims % 2


            d_dims = np.clip ( io.input_int("Decoder dimensions", default_d_dims, add_info="16-256", help_message="More dims help to recognize more facial features and achieve sharper result, but require more VRAM. You can fine-tune model size to fit your GPU." ), 16, 256 )
            self.options['d_dims'] = d_dims + d_dims % 2

            d_mask_dims = np.clip ( io.input_int("Decoder mask dimensions", default_d_mask_dims, add_info="16-256", help_message="Typical mask dimensions = decoder dimensions / 3. If you manually cut out obstacles from the dst mask, you can increase this parameter to achieve better quality." ), 16, 256 )
            self.options['d_mask_dims'] = d_mask_dims + d_mask_dims % 2

        if self.is_first_run() or ask_override:
            if self.options['face_type'] == 'wf':
                self.options['masked_training']  = io.input_bool ("Masked training", default_masked_training, help_message="This option is available only for the 'whole_face' type. Masked training clips the training area to the full_face mask, so the network will train the faces properly. When the face is trained enough, disable this option to train the whole area of the frame. Merge with 'raw-rgb' mode, then use Adobe After Effects to manually mask and compose the whole face including the forehead.")
            
            self.options['eyes_prio']  = io.input_bool ("Eyes priority", default_eyes_prio, help_message='Helps to fix eye problems during training like "alien eyes" and wrong eyes direction ( especially on HD architectures ) by forcing the neural network to train eyes with higher priority. before/after https://i.imgur.com/YQHOuSR.jpg ')
      
        if self.is_first_run() or ask_override:
            self.options['models_opt_on_gpu'] = io.input_bool ("Place models and optimizer on GPU", default_models_opt_on_gpu, help_message="When you train on one GPU, by default model and optimizer weights are placed on GPU to accelerate the process. You can place them on CPU to free up extra VRAM and thus set bigger dimensions.")

            self.options['lr_dropout']  = io.input_bool ("Use learning rate dropout", default_lr_dropout, help_message="When the face is trained enough, you can enable this option to get extra sharpness and reduce subpixel shake in fewer iterations.")
            self.options['random_warp'] = io.input_bool ("Enable random warp of samples", default_random_warp, help_message="Random warp is required to generalize facial expressions of both faces. When the face is trained enough, you can disable it to get extra sharpness and reduce subpixel shake in fewer iterations.")

            self.options['gan_power'] = np.clip ( io.input_number ("GAN power", default_gan_power, add_info="0.0 .. 10.0", help_message="Train the network in Generative Adversarial manner. Accelerates the speed of training. Forces the neural network to learn small details of the face. You can enable/disable this option at any time. Typical value is 1.0"), 0.0, 10.0 )

            if 'df' in self.options['archi']:
                self.options['true_face_power'] = np.clip ( io.input_number ("'True face' power.", default_true_face_power, add_info="0.0000 .. 1.0", help_message="Experimental option. Discriminates result face to be more like src face. Higher value - stronger discrimination. Typical value is 0.01 . Comparison - https://i.imgur.com/czScS9q.png"), 0.0, 1.0 )
            else:
                self.options['true_face_power'] = 0.0

            if self.options['face_type'] != 'wf':
                self.options['face_style_power'] = np.clip ( io.input_number("Face style power", default_face_style_power, add_info="0.0..100.0", help_message="Learn to transfer face style details such as light and color conditions. Warning: Enable it only after 10k iters, when the predicted face is clear enough to start learning style. Start from 0.001 and check history changes. Enabling this option increases the chance of model collapse."), 0.0, 100.0 )
                self.options['bg_style_power'] = np.clip ( io.input_number("Background style power", default_bg_style_power, add_info="0.0..100.0", help_message="Learn to transfer background around face. This can make face more like dst. Enabling this option increases the chance of model collapse. Typical value is 2.0"), 0.0, 100.0 )
                
            self.options['ct_mode'] = io.input_str (f"Color transfer for src faceset", default_ct_mode, ['none','rct','lct','mkl','idt','sot'], help_message="Change color distribution of src samples close to dst samples. Try all modes to find the best.")
            self.options['clipgrad'] = io.input_bool ("Enable gradient clipping", default_clipgrad, help_message="Gradient clipping reduces chance of model collapse, sacrificing speed of training.")
            
            self.options['pretrain'] = io.input_bool ("Enable pretraining mode", default_pretrain, help_message="Pretrain the model with large amount of various faces. After that, model can be used to train the fakes more quickly.")

        if self.options['pretrain'] and self.get_pretraining_data_path() is None:
            raise Exception("pretraining_data_path is not defined")

        self.pretrain_just_disabled = default_pretrain and not self.options['pretrain']
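
Both option dialogs above rely on load_or_def_option. A minimal sketch of its presumable behavior: return the value previously saved with the model if present, otherwise the supplied default (the 'saved_options' attribute is an assumption for illustration):

    def load_or_def_option(self, name, def_value):
        # fall back to the default when the option was never saved with the model
        return self.saved_options.get(name, def_value)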
Beispiel #26
    def __init__(self,
                 is_interactive,
                 merger_session_filepath,
                 predictor_func,
                 predictor_input_shape,
                 face_enhancer_func,
                 xseg_256_extract_func,
                 merger_config,
                 frames,
                 frames_root_path,
                 output_path,
                 output_mask_path,
                 model_iter,
                 subprocess_count=4):
        if len(frames) == 0:
            raise ValueError("len (frames) == 0")

        super().__init__('Merger',
                         InteractiveMergerSubprocessor.Cli,
                         io_loop_sleep_time=0.001)

        self.is_interactive = is_interactive
        self.merger_session_filepath = Path(merger_session_filepath)
        self.merger_config = merger_config

        self.predictor_func = predictor_func
        self.predictor_input_shape = predictor_input_shape

        self.face_enhancer_func = face_enhancer_func
        self.xseg_256_extract_func = xseg_256_extract_func

        self.frames_root_path = frames_root_path
        self.output_path = output_path
        self.output_mask_path = output_mask_path
        self.model_iter = model_iter

        self.prefetch_frame_count = self.process_count = subprocess_count

        session_data = None
        if self.is_interactive and self.merger_session_filepath.exists():
            io.input_skip_pending()
            if io.input_bool("Use saved session?", True):
                try:
                    with open(str(self.merger_session_filepath), "rb") as f:
                        session_data = pickle.loads(f.read())

                except Exception:
                    pass

        rewind_to_frame_idx = None
        self.frames = frames
        self.frames_idxs = [*range(len(self.frames))]
        self.frames_done_idxs = []

        if self.is_interactive and session_data is not None:
            # Loaded session data, check it
            s_frames = session_data.get('frames', None)
            s_frames_idxs = session_data.get('frames_idxs', None)
            s_frames_done_idxs = session_data.get('frames_done_idxs', None)
            s_model_iter = session_data.get('model_iter', None)

            frames_equal = (s_frames is not None) and \
                           (s_frames_idxs is not None) and \
                           (s_frames_done_idxs is not None) and \
                           (s_model_iter is not None) and \
                           (len(frames) == len(s_frames)) # frames count must match

            if frames_equal:
                for i in range(len(frames)):
                    frame = frames[i]
                    s_frame = s_frames[i]
                    # frames filenames must match
                    if frame.frame_info.filepath.name != s_frame.frame_info.filepath.name:
                        frames_equal = False
                    if not frames_equal:
                        break

            if frames_equal:
                io.log_info('Using saved session from ' +
                            '/'.join(self.merger_session_filepath.parts[-2:]))

                for frame in s_frames:
                    if frame.cfg is not None:
                        # recreate MergerConfig class using constructor with get_config() as dict params
                        # so if any new param will be added, old merger session will work properly
                        frame.cfg = frame.cfg.__class__(
                            **frame.cfg.get_config())

                self.frames = s_frames
                self.frames_idxs = s_frames_idxs
                self.frames_done_idxs = s_frames_done_idxs

                if self.model_iter != s_model_iter:
                    # model was more trained, recompute all frames
                    rewind_to_frame_idx = -1
                    for frame in self.frames:
                        frame.is_done = False
                elif len(self.frames_idxs) == 0:
                    # all frames are done?
                    rewind_to_frame_idx = -1

                if len(self.frames_idxs) != 0:
                    cur_frame = self.frames[self.frames_idxs[0]]
                    cur_frame.is_shown = False

            if not frames_equal:
                session_data = None

        if session_data is None:
            for filename in pathex.get_image_paths(
                    self.output_path):  #remove all images in output_path
                Path(filename).unlink()

            for filename in pathex.get_image_paths(
                    self.output_mask_path
            ):  #remove all images in output_mask_path
                Path(filename).unlink()

            frames[0].cfg = self.merger_config.copy()

        for i in range(len(self.frames)):
            frame = self.frames[i]
            frame.idx = i
            frame.output_filepath = self.output_path / (
                frame.frame_info.filepath.stem + '.png')
            frame.output_mask_filepath = self.output_mask_path / (
                frame.frame_info.filepath.stem + '.png')

            if not frame.output_filepath.exists() or \
               not frame.output_mask_filepath.exists():
                # if some frame does not exist, recompute and rewind
                frame.is_done = False
                frame.is_shown = False

                if rewind_to_frame_idx is None:
                    rewind_to_frame_idx = i - 1
                else:
                    rewind_to_frame_idx = min(rewind_to_frame_idx, i - 1)

        if rewind_to_frame_idx is not None:
            while len(self.frames_done_idxs) > 0:
                if self.frames_done_idxs[-1] > rewind_to_frame_idx:
                    prev_frame = self.frames[self.frames_done_idxs.pop()]
                    self.frames_idxs.insert(0, prev_frame.idx)
                else:
                    break
Beispiel #27
def main(
    detector=None,
    input_path=None,
    output_path=None,
    output_debug=None,
    manual_fix=False,
    manual_output_debug_fix=False,
    manual_window_size=1368,
    image_size=256,
    face_type='full_face',
    max_faces_from_image=0,
    cpu_only=False,
    force_gpu_idxs=None,
):
    face_type = FaceType.fromString(face_type)

    if not input_path.exists():
        io.log_err('Input directory not found. Please ensure it exists.')
        return

    if detector is None:
        io.log_info("Choose detector type.")
        io.log_info("[0] S3FD")
        io.log_info("[1] manual")
        detector = {0: 's3fd', 1: 'manual'}[io.input_int("", 0, [0, 1])]

    device_config = nn.DeviceConfig.GPUIndexes( force_gpu_idxs or nn.ask_choose_device_idxs(choose_only_one=detector=='manual', suggest_all_gpu=True) ) \
                    if not cpu_only else nn.DeviceConfig.CPU()

    output_debug_path = output_path.parent / (output_path.name + '_debug')

    if output_debug is None:
        output_debug = io.input_bool(
            f"Write debug images to {output_debug_path.name}?", False)

    if output_path.exists():
        if not manual_output_debug_fix and input_path != output_path:
            output_images_paths = pathex.get_image_paths(output_path)
            if len(output_images_paths) > 0:
                io.input(
                    f"WARNING !!! \n {output_path} contains files! \n They will be deleted. \n Press enter to continue."
                )
                for filename in output_images_paths:
                    Path(filename).unlink()
    else:
        output_path.mkdir(parents=True, exist_ok=True)

    input_path_image_paths = pathex.get_image_unique_filestem_paths(
        input_path, verbose_print_func=io.log_info)

    if manual_output_debug_fix:
        if not output_debug_path.exists():
            io.log_err(
                f'{output_debug_path} not found. Re-extract faces with "Write debug images" option.'
            )
            return
        else:
            detector = 'manual'
            io.log_info(
                'Re-extracting frames which were deleted from the _debug directory.'
            )

            input_path_image_paths = DeletedFilesSearcherSubprocessor(
                input_path_image_paths,
                pathex.get_image_paths(output_debug_path)).run()
            input_path_image_paths = sorted(input_path_image_paths)
            io.log_info('Found %d images.' % (len(input_path_image_paths)))
    else:
        if output_debug_path.exists():
            for filename in pathex.get_image_paths(output_debug_path):
                Path(filename).unlink()
        else:
            output_debug_path.mkdir(parents=True, exist_ok=True)

    images_found = len(input_path_image_paths)
    faces_detected = 0
    if images_found != 0:
        if detector == 'manual':
            io.log_info('Performing manual extract...')
            data = ExtractSubprocessor(
                [
                    ExtractSubprocessor.Data(Path(filename))
                    for filename in input_path_image_paths
                ],
                'landmarks-manual',
                image_size,
                face_type,
                output_debug_path if output_debug else None,
                manual_window_size=manual_window_size,
                device_config=device_config).run()

            io.log_info('Performing 3rd pass...')
            data = ExtractSubprocessor(
                data,
                'final',
                image_size,
                face_type,
                output_debug_path if output_debug else None,
                final_output_path=output_path,
                device_config=device_config).run()

        else:
            io.log_info('Extracting faces...')
            data = ExtractSubprocessor(
                [
                    ExtractSubprocessor.Data(Path(filename))
                    for filename in input_path_image_paths
                ],
                'all',
                image_size,
                face_type,
                output_debug_path if output_debug else None,
                max_faces_from_image=max_faces_from_image,
                final_output_path=output_path,
                device_config=device_config).run()

        faces_detected += sum([d.faces_detected for d in data])

        if manual_fix:
            if all(d.faces_detected > 0 for d in data):
                io.log_info('All faces are detected, manual fix not needed.')
            else:
                fix_data = [
                    ExtractSubprocessor.Data(d.filepath) for d in data
                    if d.faces_detected == 0
                ]
                io.log_info('Performing manual fix for %d images...' %
                            (len(fix_data)))
                fix_data = ExtractSubprocessor(
                    fix_data,
                    'landmarks-manual',
                    image_size,
                    face_type,
                    output_debug_path if output_debug else None,
                    manual_window_size=manual_window_size,
                    device_config=device_config).run()
                fix_data = ExtractSubprocessor(
                    fix_data,
                    'final',
                    image_size,
                    face_type,
                    output_debug_path if output_debug else None,
                    final_output_path=output_path,
                    device_config=device_config).run()
                faces_detected += sum([d.faces_detected for d in fix_data])

    io.log_info('-------------------------')
    io.log_info('Images found:        %d' % (images_found))
    io.log_info('Faces detected:      %d' % (faces_detected))
    io.log_info('-------------------------')
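
A minimal usage sketch for this extractor entry point, assuming it is importable as shown (module path, detector choice and directories are illustrative, not from the original):

    from pathlib import Path
    from mainscripts import Extractor  # assumed module location

    Extractor.main(detector='s3fd',
                   input_path=Path('workspace/data_dst'),
                   output_path=Path('workspace/data_dst/aligned'),
                   output_debug=False,
                   face_type='full_face',
                   cpu_only=True)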
Beispiel #28
def main(model_class_name=None,
         saved_models_path=None,
         training_data_src_path=None,
         force_model_name=None,
         input_path=None,
         output_path=None,
         output_mask_path=None,
         aligned_path=None,
         force_gpu_idxs=None,
         cpu_only=None):
    io.log_info("Running merger.\r\n")

    try:
        if not input_path.exists():
            io.log_err('Input directory not found. Please ensure it exists.')
            return

        if not output_path.exists():
            output_path.mkdir(parents=True, exist_ok=True)

        if not output_mask_path.exists():
            output_mask_path.mkdir(parents=True, exist_ok=True)

        if not saved_models_path.exists():
            io.log_err('Model directory not found. Please ensure it exists.')
            return

        # Initialize model
        import models
        model = models.import_model(model_class_name)(
            is_training=False,
            saved_models_path=saved_models_path,
            force_gpu_idxs=force_gpu_idxs,
            cpu_only=cpu_only)

        predictor_func, predictor_input_shape, cfg = model.get_MergerConfig()

        # Preparing MP functions
        predictor_func = MPFunc(predictor_func)
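        # MPFunc and MPClassFuncOnDemand presumably wrap these callables so merger
        # worker subprocesses can invoke them across process boundaries; the heavy
        # XSeg and FaceEnhancer models below are only instantiated on first use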

        run_on_cpu = len(nn.getCurrentDeviceConfig().devices) == 0
        xseg_256_extract_func = MPClassFuncOnDemand(
            XSegNet,
            'extract',
            name='XSeg',
            resolution=256,
            weights_file_root=saved_models_path,
            place_model_on_cpu=True,
            run_on_cpu=run_on_cpu)

        face_enhancer_func = MPClassFuncOnDemand(FaceEnhancer,
                                                 'enhance',
                                                 place_model_on_cpu=True,
                                                 run_on_cpu=run_on_cpu)

        is_interactive = io.input_bool("Use interactive merger?",
                                       True) if not io.is_colab() else False

        #         if not is_interactive:
        #             cfg.ask_settings()

        subprocess_count = multiprocessing.cpu_count()
        #         subprocess_count = io.input_int("Number of workers?", max(8, multiprocessing.cpu_count()),
        #                                         valid_range=[1, multiprocessing.cpu_count()], help_message="Specify the number of threads to process. A low value may affect performance. A high value may result in memory error. The value may not be greater than CPU cores." )

        input_path_image_paths = pathex.get_image_paths(input_path)

        if cfg.type == MergerConfig.TYPE_MASKED:
            if not aligned_path.exists():
                io.log_err(
                    'Aligned directory not found. Please ensure it exists.')
                return

            packed_samples = None
            try:
                packed_samples = samplelib.PackedFaceset.load(aligned_path)
            except:
                io.log_err(
                    f"Error occured while loading samplelib.PackedFaceset.load {str(aligned_path)}, {traceback.format_exc()}"
                )

            if packed_samples is not None:
                io.log_info("Using packed faceset.")

                def generator():
                    for sample in io.progress_bar_generator(
                            packed_samples, "Collecting alignments"):
                        filepath = Path(sample.filename)
                        yield filepath, DFLIMG.load(
                            filepath,
                            loader_func=lambda x: sample.read_raw_file())
            else:

                def generator():
                    for filepath in io.progress_bar_generator(
                            pathex.get_image_paths(aligned_path),
                            "Collecting alignments"):
                        filepath = Path(filepath)
                        yield filepath, DFLIMG.load(filepath)

            alignments = {}
            multiple_faces_detected = False

            for filepath, dflimg in generator():
                if dflimg is None or not dflimg.has_data():
                    io.log_err(f"{filepath.name} is not a dfl image file")
                    continue

                source_filename = dflimg.get_source_filename()
                if source_filename is None:
                    continue

                source_filepath = Path(source_filename)
                source_filename_stem = source_filepath.stem

                if source_filename_stem not in alignments.keys():
                    alignments[source_filename_stem] = []

                alignments_ar = alignments[source_filename_stem]
                alignments_ar.append(
                    (dflimg.get_source_landmarks(), filepath, source_filepath))

                if len(alignments_ar) > 1:
                    multiple_faces_detected = True

            if multiple_faces_detected:
                io.log_info("")
                io.log_info(
                    "Warning: multiple faces detected. Only one alignment file should refer one source file."
                )
                io.log_info("")

            for a_key in list(alignments.keys()):
                a_ar = alignments[a_key]
                if len(a_ar) > 1:
                    for _, filepath, source_filepath in a_ar:
                        io.log_info(
                            f"alignment {filepath.name} refers to {source_filepath.name} "
                        )
                    io.log_info("")

                alignments[a_key] = [a[0] for a in a_ar]

            if multiple_faces_detected:
                io.log_info(
                    "It is strongly recommended to process the faces separatelly."
                )
                io.log_info(
                    "Use 'recover original filename' to determine the exact duplicates."
                )
                io.log_info("")

            frames = [
                InteractiveMergerSubprocessor.Frame(frame_info=FrameInfo(
                    filepath=Path(p),
                    landmarks_list=alignments.get(Path(p).stem, None)))
                for p in input_path_image_paths
            ]

            if multiple_faces_detected:
                io.log_info(
                    "Warning: multiple faces detected. Motion blur will not be used."
                )
                io.log_info("")
            else:
                s = 256
                local_pts = [(s // 2 - 1, s // 2 - 1),
                             (s // 2 - 1, 0)]  #center+up
                frames_len = len(frames)
                for i in io.progress_bar_generator(range(len(frames)),
                                                   "Computing motion vectors"):
                    fi_prev = frames[max(0, i - 1)].frame_info
                    fi = frames[i].frame_info
                    fi_next = frames[min(i + 1, frames_len - 1)].frame_info
                    if len(fi_prev.landmarks_list) == 0 or \
                       len(fi.landmarks_list) == 0 or \
                       len(fi_next.landmarks_list) == 0:
                        continue

                    mat_prev = LandmarksProcessor.get_transform_mat(
                        fi_prev.landmarks_list[0], s, face_type=FaceType.FULL)
                    mat = LandmarksProcessor.get_transform_mat(
                        fi.landmarks_list[0], s, face_type=FaceType.FULL)
                    mat_next = LandmarksProcessor.get_transform_mat(
                        fi_next.landmarks_list[0], s, face_type=FaceType.FULL)

                    pts_prev = LandmarksProcessor.transform_points(
                        local_pts, mat_prev, True)
                    pts = LandmarksProcessor.transform_points(
                        local_pts, mat, True)
                    pts_next = LandmarksProcessor.transform_points(
                        local_pts, mat_next, True)

                    prev_vector = pts[0] - pts_prev[0]
                    next_vector = pts_next[0] - pts[0]

                    motion_vector = pts_next[0] - pts_prev[0]
                    fi.motion_power = npla.norm(motion_vector)

                    motion_vector = motion_vector / fi.motion_power if fi.motion_power != 0 else np.array(
                        [0, 0], dtype=np.float32)

                    fi.motion_deg = -math.atan2(
                        motion_vector[1], motion_vector[0]) * 180 / math.pi

        if len(frames) == 0:
            io.log_info("No frames to merge in input_dir.")
        else:
            InteractiveMergerSubprocessor(
                is_interactive=is_interactive,
                merger_session_filepath=model.get_strpath_storage_for_file(
                    'merger_session.dat'),
                predictor_func=predictor_func,
                predictor_input_shape=predictor_input_shape,
                face_enhancer_func=face_enhancer_func,
                xseg_256_extract_func=xseg_256_extract_func,
                merger_config=cfg,
                frames=frames,
                frames_root_path=input_path,
                output_path=output_path,
                output_mask_path=output_mask_path,
                model_iter=model.get_iter(),
                subprocess_count=subprocess_count,
            ).run()

        model.finalize()

    except Exception as e:
        print(traceback.format_exc())
Beispiel #29
def video_from_sequence_(input_dir,
                         output_file,
                         reference_file=None,
                         ext=None,
                         fps=None,
                         bitrate=None,
                         include_audio=False,
                         lossless=None):
    input_path = Path(input_dir)
    output_file_path = Path(output_file)
    reference_file_path = Path(
        reference_file) if reference_file is not None else None

    if not input_path.exists():
        io.log_err("input_dir not found.")
        return

    if not output_file_path.parent.exists():
        output_file_path.parent.mkdir(parents=True, exist_ok=True)

    out_ext = output_file_path.suffix

    if ext is None:
        ext = io.input_str("Input image format (extension)", "png")

    if lossless is None:
        lossless = io.input_bool("Use lossless codec", False)

    video_id = None
    audio_id = None
    ref_in_a = None
    if reference_file_path is not None:
        if reference_file_path.suffix == '.*':
            reference_file_path = pathex.get_first_file_by_stem(
                reference_file_path.parent, reference_file_path.stem)
        else:
            if not reference_file_path.exists():
                reference_file_path = None

        if reference_file_path is None:
            io.log_err("reference_file not found.")
            return

        #probing reference file
        probe = ffmpeg.probe(str(reference_file_path))

        #getting first video and audio streams id with fps
        for stream in probe['streams']:
            if video_id is None and stream['codec_type'] == 'video':
                video_id = stream['index']
                fps = stream['r_frame_rate']

            if audio_id is None and stream['codec_type'] == 'audio':
                audio_id = stream['index']

        if audio_id is not None:
            #has audio track
            ref_in_a = ffmpeg.input(str(reference_file_path))[str(audio_id)]

    if fps is None:
        #if fps not specified and not overwritten by reference-file
        fps = max(1, io.input_int("Enter FPS", 25))

    if not lossless and bitrate is None:
        bitrate = 1  # fixed default; alternatively: max(1, io.input_int("Bitrate of output file in MB/s", 16))

    input_image_paths = pathex.get_image_paths(input_path)

    i_in = ffmpeg.input('pipe:', format='image2pipe', r=fps)

    output_args = [i_in]

    if include_audio and ref_in_a is not None:
        output_args += [ref_in_a]

    output_args += [str(output_file_path)]

    output_kwargs = {}

    if lossless:
        output_kwargs.update({
            "c:v": "libx264",
            "crf": "0",
            "pix_fmt": "yuv420p",
        })
    else:
        output_kwargs.update({
            "c:v": "libx264",
            "b:v": "%dM" % (1),
            "pix_fmt": "yuv420p",
        })

    if include_audio and ref_in_a is not None:
        output_kwargs.update({
            "c:a": "aac",
            "b:a": "192k",
            "ar": "48000",
            "strict": "experimental"
        })

    job = (ffmpeg.output(*output_args, **output_kwargs).overwrite_output())

    try:
        job_run = job.run_async(pipe_stdin=True)

        for image_path in input_image_paths:
            with open(image_path, "rb") as f:
                image_bytes = f.read()
                job_run.stdin.write(image_bytes)

        job_run.stdin.close()
        job_run.wait()
    except Exception:
        io.log_err("ffmpeg failed, job commandline: " + str(job.compile()))
Beispiel #30
def dev_test_68(input_dir ):
    # process 68 landmarks dataset with .pts files
    input_path = Path(input_dir)
    if not input_path.exists():
        raise ValueError('input_dir not found. Please ensure it exists.')

    output_path = input_path.parent / (input_path.name+'_aligned')

    io.log_info(f'Output dir is {output_path}')

    if output_path.exists():
        output_images_paths = pathex.get_image_paths(output_path)
        if len(output_images_paths) > 0:
            io.input_bool("WARNING !!! \n %s contains files! \n They will be deleted. \n Press enter to continue." % (str(output_path)), False )
            for filename in output_images_paths:
                Path(filename).unlink()
    else:
        output_path.mkdir(parents=True, exist_ok=True)

    images_paths = pathex.get_image_paths(input_path)

    for filepath in io.progress_bar_generator(images_paths, "Processing"):
        filepath = Path(filepath)


        pts_filepath = filepath.parent / (filepath.stem+'.pts')
        if pts_filepath.exists():
            pts = pts_filepath.read_text()
            pts_lines = pts.split('\n')

            lmrk_lines = None
            for pts_line in pts_lines:
                if pts_line == '{':
                    lmrk_lines = []
                elif pts_line == '}':
                    break
                else:
                    if lmrk_lines is not None:
                        lmrk_lines.append (pts_line)

            if lmrk_lines is not None and len(lmrk_lines) == 68:
                try:
                    lmrks = [ np.array ( lmrk_line.strip().split(' ') ).astype(np.float32).tolist() for lmrk_line in lmrk_lines]
                except Exception as e:
                    print(e)
                    print(filepath)
                    continue

                rect = LandmarksProcessor.get_rect_from_landmarks(lmrks)

                output_filepath = output_path / (filepath.stem+'.jpg')

                img = cv2_imread(filepath)
                img = imagelib.normalize_channels(img, 3)
                cv2_imwrite(output_filepath, img, [int(cv2.IMWRITE_JPEG_QUALITY), 95] )

                DFLJPG.embed_data(output_filepath, face_type=FaceType.toString(FaceType.MARK_ONLY),
                                                landmarks=lmrks,
                                                source_filename=filepath.name,
                                                source_rect=rect,
                                                source_landmarks=lmrks
                                    )

    io.log_info("Done.")