def process_folder(dirpath, cpu_only=False, force_gpu_idxs=None):
    device_config = nn.DeviceConfig.GPUIndexes( force_gpu_idxs or nn.ask_choose_device_idxs(suggest_all_gpu=True) ) \
                    if not cpu_only else nn.DeviceConfig.CPU()

    output_dirpath = dirpath.parent / (dirpath.name + '_enhanced')
    output_dirpath.mkdir(exist_ok=True, parents=True)

    dirpath_parts = '/'.join(dirpath.parts[-2:])
    output_dirpath_parts = '/'.join(output_dirpath.parts[-2:])
    io.log_info(f"Enhancing faceset in {dirpath_parts}")
    io.log_info(f"Processing to {output_dirpath_parts}")

    output_images_paths = pathex.get_image_paths(output_dirpath)
    if len(output_images_paths) > 0:
        for filename in output_images_paths:
            Path(filename).unlink()

    image_paths = [Path(x) for x in pathex.get_image_paths(dirpath)]
    result = FacesetEnhancerSubprocessor(image_paths,
                                         output_dirpath,
                                         device_config=device_config).run()

    is_merge = io.input_bool(
        f"\r\nMerge {output_dirpath_parts} to {dirpath_parts} ?", True)
    if is_merge:
        io.log_info(f"Copying processed files to {dirpath_parts}")

        for (filepath, output_filepath) in result:
            try:
                shutil.copy(output_filepath, filepath)
            except Exception:
                # Ignore files that could not be copied back (e.g. missing or locked).
                pass

        io.log_info(f"Removing {output_dirpath_parts}")
        shutil.rmtree(output_dirpath)
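
A minimal usage sketch, assuming the helper above is importable from its DeepFaceLab module and a typical workspace layout (the path and GPU index below are placeholders, not part of the original code):

from pathlib import Path

# Hypothetical call: enhance every image of an aligned faceset on GPU 0.
process_folder(Path('workspace/data_src/aligned'),
               cpu_only=False,
               force_gpu_idxs=[0])
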
Example #2
def main(
    detector=None,
    input_path=None,
    output_path=None,
    output_debug=None,
    manual_fix=False,
    manual_output_debug_fix=False,
    manual_window_size=1368,
    image_size=256,
    face_type='full_face',
    max_faces_from_image=0,
    cpu_only=False,
    force_gpu_idxs=None,
):
    face_type = FaceType.fromString(face_type)

    if not input_path.exists():
        io.log_err('Input directory not found. Please ensure it exists.')
        return

    if detector is None:
        io.log_info("Choose detector type.")
        io.log_info("[0] S3FD")
        io.log_info("[1] manual")
        detector = {0: 's3fd', 1: 'manual'}[io.input_int("", 0, [0, 1])]

    device_config = nn.DeviceConfig.GPUIndexes( force_gpu_idxs or nn.ask_choose_device_idxs(choose_only_one=detector=='manual', suggest_all_gpu=True) ) \
                    if not cpu_only else nn.DeviceConfig.CPU()

    output_debug_path = output_path.parent / (output_path.name + '_debug')

    if output_debug is None:
        output_debug = io.input_bool(
            f"Write debug images to {output_debug_path.name}?", False)

    if output_path.exists():
        if not manual_output_debug_fix and input_path != output_path:
            output_images_paths = pathex.get_image_paths(output_path)
            if len(output_images_paths) > 0:
                io.input(
                    f"WARNING !!! \n {output_path} contains files! \n They will be deleted. \n Press enter to continue."
                )
                for filename in output_images_paths:
                    Path(filename).unlink()
    else:
        output_path.mkdir(parents=True, exist_ok=True)

    input_path_image_paths = pathex.get_image_unique_filestem_paths(
        input_path, verbose_print_func=io.log_info)

    if manual_output_debug_fix:
        if not output_debug_path.exists():
            io.log_err(
                f'{output_debug_path} not found. Re-extract faces with "Write debug images" option.'
            )
            return
        else:
            detector = 'manual'
            io.log_info(
                'Performing re-extract frames which were deleted from _debug directory.'
            )

            input_path_image_paths = DeletedFilesSearcherSubprocessor(
                input_path_image_paths,
                pathex.get_image_paths(output_debug_path)).run()
            input_path_image_paths = sorted(input_path_image_paths)
            io.log_info('Found %d images.' % (len(input_path_image_paths)))
    else:
        if output_debug_path.exists():
            for filename in pathex.get_image_paths(output_debug_path):
                Path(filename).unlink()
        else:
            output_debug_path.mkdir(parents=True, exist_ok=True)

    images_found = len(input_path_image_paths)
    faces_detected = 0
    if images_found != 0:
        if detector == 'manual':
            io.log_info('Performing manual extract...')
            data = ExtractSubprocessor(
                [
                    ExtractSubprocessor.Data(Path(filename))
                    for filename in input_path_image_paths
                ],
                'landmarks-manual',
                image_size,
                face_type,
                output_debug_path if output_debug else None,
                manual_window_size=manual_window_size,
                device_config=device_config).run()

            io.log_info('Performing 3rd pass...')
            data = ExtractSubprocessor(
                data,
                'final',
                image_size,
                face_type,
                output_debug_path if output_debug else None,
                final_output_path=output_path,
                device_config=device_config).run()

        else:
            io.log_info('Extracting faces...')
            data = ExtractSubprocessor(
                [
                    ExtractSubprocessor.Data(Path(filename))
                    for filename in input_path_image_paths
                ],
                'all',
                image_size,
                face_type,
                output_debug_path if output_debug else None,
                max_faces_from_image=max_faces_from_image,
                final_output_path=output_path,
                device_config=device_config).run()

        faces_detected += sum([d.faces_detected for d in data])

        if manual_fix:
            if all(d.faces_detected > 0 for d in data):
                io.log_info('All faces are detected, manual fix not needed.')
            else:
                fix_data = [
                    ExtractSubprocessor.Data(d.filepath) for d in data
                    if d.faces_detected == 0
                ]
                io.log_info('Performing manual fix for %d images...' %
                            (len(fix_data)))
                fix_data = ExtractSubprocessor(
                    fix_data,
                    'landmarks-manual',
                    image_size,
                    face_type,
                    output_debug_path if output_debug else None,
                    manual_window_size=manual_window_size,
                    device_config=device_config).run()
                fix_data = ExtractSubprocessor(
                    fix_data,
                    'final',
                    image_size,
                    face_type,
                    output_debug_path if output_debug else None,
                    final_output_path=output_path,
                    device_config=device_config).run()
                faces_detected += sum([d.faces_detected for d in fix_data])

    io.log_info('-------------------------')
    io.log_info('Images found:        %d' % (images_found))
    io.log_info('Faces detected:      %d' % (faces_detected))
    io.log_info('-------------------------')
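
For reference, a hedged sketch of invoking this extractor entry point directly (paths and GPU index are assumptions; DeepFaceLab normally drives it through its command-line scripts):

from pathlib import Path

# Hypothetical invocation: S3FD extraction of 256 px full-face crops on GPU 0,
# writing debug overlays next to the output directory.
main(detector='s3fd',
     input_path=Path('workspace/data_dst'),
     output_path=Path('workspace/data_dst/aligned'),
     output_debug=True,
     image_size=256,
     face_type='full_face',
     force_gpu_idxs=[0])
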
Example #3
def main(
    detector=None,
    input_path=None,
    output_path=None,
    output_debug=None,
    manual_fix=False,
    manual_output_debug_fix=False,
    manual_window_size=1368,
    face_type='full_face',
    max_faces_from_image=None,
    image_size=None,
    jpeg_quality=None,
    cpu_only=False,
    force_gpu_idxs=None,
):

    if not input_path.exists():
        io.log_err('Input directory not found. Please ensure it exists.')
        return

    if not output_path.exists():
        output_path.mkdir(parents=True, exist_ok=True)

    if face_type is not None:
        face_type = FaceType.fromString(face_type)

    if face_type is None:
        if manual_output_debug_fix:
            files = pathex.get_image_paths(output_path)
            if len(files) != 0:
                dflimg = DFLIMG.load(Path(files[0]))
                if dflimg is not None and dflimg.has_data():
                    face_type = FaceType.fromString(dflimg.get_face_type())

    input_image_paths = pathex.get_image_unique_filestem_paths(
        input_path, verbose_print_func=io.log_info)
    output_images_paths = pathex.get_image_paths(output_path)
    output_debug_path = output_path.parent / (output_path.name + '_debug')

    continue_extraction = False
    if not manual_output_debug_fix and len(output_images_paths) > 0:
        if len(output_images_paths) > 128:
            continue_extraction = io.input_bool(
                "Continue extraction?",
                True,
                help_message=
                "Extraction can be continued, but you must specify the same options again."
            )

        if len(output_images_paths) > 128 and continue_extraction:
            try:
                input_image_paths = input_image_paths[
                    [Path(x).stem for x in input_image_paths].
                    index(Path(output_images_paths[-128]).stem.split('_')[0]):]
            except:
                io.log_err(
                    "Error in fetching the last index. Extraction cannot be continued."
                )
                return
        elif input_path != output_path:
            io.input(
                f"\n WARNING !!! \n {output_path} contains files! \n They will be deleted. \n Press enter to continue.\n"
            )
            for filename in output_images_paths:
                Path(filename).unlink()

    device_config = nn.DeviceConfig.GPUIndexes( force_gpu_idxs or nn.ask_choose_device_idxs(choose_only_one=detector=='manual', suggest_all_gpu=True) ) \
                    if not cpu_only else nn.DeviceConfig.CPU()

    if face_type is None:
        face_type = io.input_str(
            "Face type",
            'wf', ['f', 'wf', 'head'],
            help_message=
            "Full face / whole face / head. 'Whole face' covers full area of face include forehead. 'head' covers full head, but requires XSeg for src and dst faceset."
        ).lower()
        face_type = {
            'f': FaceType.FULL,
            'wf': FaceType.WHOLE_FACE,
            'head': FaceType.HEAD
        }[face_type]

    if max_faces_from_image is None:
        max_faces_from_image = io.input_int(
            f"Max number of faces from image",
            0,
            help_message=
            "If you extract a src faceset that has frames with a large number of faces, it is advisable to set max faces to 3 to speed up extraction. 0 - unlimited"
        )

    if image_size is None:
        image_size = io.input_int(
            f"Image size",
            512 if face_type < FaceType.HEAD else 768,
            valid_range=[256, 2048],
            help_message=
            "Output image size. The higher image size, the worse face-enhancer works. Use higher than 512 value only if the source image is sharp enough and the face does not need to be enhanced."
        )

    if jpeg_quality is None:
        jpeg_quality = io.input_int(
            f"Jpeg quality",
            90,
            valid_range=[1, 100],
            help_message=
            "Jpeg quality. The higher jpeg quality the larger the output file size."
        )

    if detector is None:
        io.log_info("Choose detector type.")
        io.log_info("[0] S3FD")
        io.log_info("[1] manual")
        detector = {0: 's3fd', 1: 'manual'}[io.input_int("", 0, [0, 1])]

    if output_debug is None:
        output_debug = io.input_bool(
            f"Write debug images to {output_debug_path.name}?", False)

    if output_debug:
        output_debug_path.mkdir(parents=True, exist_ok=True)

    if manual_output_debug_fix:
        if not output_debug_path.exists():
            io.log_err(
                f'{output_debug_path} not found. Re-extract faces with "Write debug images" option.'
            )
            return
        else:
            detector = 'manual'
            io.log_info(
                'Performing re-extract frames which were deleted from _debug directory.'
            )

            input_image_paths = DeletedFilesSearcherSubprocessor(
                input_image_paths,
                pathex.get_image_paths(output_debug_path)).run()
            input_image_paths = sorted(input_image_paths)
            io.log_info('Found %d images.' % (len(input_image_paths)))
    else:
        if not continue_extraction and output_debug_path.exists():
            for filename in pathex.get_image_paths(output_debug_path):
                Path(filename).unlink()

    images_found = len(input_image_paths)
    faces_detected = 0
    if images_found != 0:
        if detector == 'manual':
            io.log_info('Performing manual extract...')
            data = ExtractSubprocessor(
                [
                    ExtractSubprocessor.Data(Path(filename))
                    for filename in input_image_paths
                ],
                'landmarks-manual',
                image_size,
                jpeg_quality,
                face_type,
                output_debug_path if output_debug else None,
                manual_window_size=manual_window_size,
                device_config=device_config).run()

            io.log_info('Performing 3rd pass...')
            data = ExtractSubprocessor(
                data,
                'final',
                image_size,
                jpeg_quality,
                face_type,
                output_debug_path if output_debug else None,
                final_output_path=output_path,
                device_config=device_config).run()

        else:
            io.log_info('Extracting faces...')
            data = ExtractSubprocessor(
                [
                    ExtractSubprocessor.Data(Path(filename))
                    for filename in input_image_paths
                ],
                'all',
                image_size,
                jpeg_quality,
                face_type,
                output_debug_path if output_debug else None,
                max_faces_from_image=max_faces_from_image,
                final_output_path=output_path,
                device_config=device_config).run()

        faces_detected += sum([d.faces_detected for d in data])

        if manual_fix:
            if all(d.faces_detected > 0 for d in data):
                io.log_info('All faces are detected, manual fix not needed.')
            else:
                fix_data = [
                    ExtractSubprocessor.Data(d.filepath) for d in data
                    if d.faces_detected == 0
                ]
                io.log_info('Performing manual fix for %d images...' %
                            (len(fix_data)))
                fix_data = ExtractSubprocessor(
                    fix_data,
                    'landmarks-manual',
                    image_size,
                    jpeg_quality,
                    face_type,
                    output_debug_path if output_debug else None,
                    manual_window_size=manual_window_size,
                    device_config=device_config).run()
                fix_data = ExtractSubprocessor(
                    fix_data,
                    'final',
                    image_size,
                    jpeg_quality,
                    face_type,
                    output_debug_path if output_debug else None,
                    final_output_path=output_path,
                    device_config=device_config).run()
                faces_detected += sum([d.faces_detected for d in fix_data])

    io.log_info('-------------------------')
    io.log_info('Images found:        %d' % (images_found))
    io.log_info('Faces detected:      %d' % (faces_detected))
    io.log_info('-------------------------')
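
The "Continue extraction?" branch above rewinds the input frame list to the source frame of the 128th-from-last face file already written (face files are named <frame_stem>_<face_idx>.jpg). A self-contained sketch of that indexing with invented file names, rewinding by 2 instead of 128 for brevity:

from pathlib import Path

input_image_paths = [f'frames/{i:04d}.png' for i in range(1, 7)]        # frames 0001..0006
output_images_paths = [f'aligned/{i:04d}_0.jpg' for i in range(1, 5)]   # faces for 0001..0004

# Stem of the frame that produced an earlier output file, e.g. '0003'.
restart_stem = Path(output_images_paths[-2]).stem.split('_')[0]
# Resume extraction from that frame onwards.
restart_idx = [Path(x).stem for x in input_image_paths].index(restart_stem)
input_image_paths = input_image_paths[restart_idx:]
print(input_image_paths)  # ['frames/0003.png', ..., 'frames/0006.png']
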
Example #4
def sort_by_absdiff(input_path):
    io.log_info("Sorting by absolute difference...")

    is_sim = io.input_bool("Sort by similar?",
                           True,
                           help_message="Otherwise sort by dissimilar.")

    from core.leras import nn

    device_config = nn.ask_choose_device_idxs(choose_only_one=True,
                                              return_device_config=True)
    nn.initialize(device_config=device_config)
    tf = nn.tf

    image_paths = pathex.get_image_paths(input_path)
    image_paths_len = len(image_paths)

    batch_size = 1024
    batch_size_remain = image_paths_len % batch_size

    i_t = tf.placeholder(tf.float32, (None, 256, 256, 3))
    j_t = tf.placeholder(tf.float32, (None, 256, 256, 3))

    outputs_full = []
    outputs_remain = []

    for i in range(batch_size):
        diff_t = tf.reduce_sum(tf.abs(i_t - j_t[i]), axis=[1, 2, 3])
        outputs_full.append(diff_t)
        if i < batch_size_remain:
            outputs_remain.append(diff_t)

    def func_bs_full(i, j):
        return nn.tf_sess.run(outputs_full, feed_dict={i_t: i, j_t: j})

    def func_bs_remain(i, j):
        return nn.tf_sess.run(outputs_remain, feed_dict={i_t: i, j_t: j})

    import h5py
    db_file_path = Path(tempfile.gettempdir()) / 'sort_cache.hdf5'
    db_file = h5py.File(str(db_file_path), "w")
    db = db_file.create_dataset("results", (image_paths_len, image_paths_len),
                                compression="gzip")

    pg_len = image_paths_len // batch_size
    if batch_size_remain != 0:
        pg_len += 1

    pg_len = int((pg_len * pg_len - pg_len) / 2 + pg_len)

    io.progress_bar("Computing", pg_len)
    j = 0
    while j < image_paths_len:
        j_images = [cv2_imread(x) for x in image_paths[j:j + batch_size]]
        j_images_len = len(j_images)

        func = func_bs_remain if image_paths_len - j < batch_size else func_bs_full

        i = 0
        while i < image_paths_len:
            if i >= j:
                i_images = [
                    cv2_imread(x) for x in image_paths[i:i + batch_size]
                ]
                i_images_len = len(i_images)
                result = func(i_images, j_images)
                db[j:j + j_images_len, i:i + i_images_len] = np.array(result)
                io.progress_bar_inc(1)

            i += batch_size
        db_file.flush()
        j += batch_size

    io.progress_bar_close()

    next_id = 0
    sorted_ids = [next_id]  # renamed to avoid shadowing the built-in sorted()
    for i in io.progress_bar_generator(range(image_paths_len - 1), "Sorting"):
        id_ar = np.concatenate([db[:next_id, next_id], db[next_id, next_id:]])
        id_ar = np.argsort(id_ar)

        next_id = np.setdiff1d(id_ar, sorted_ids, True)[0 if is_sim else -1]
        sorted_ids += [next_id]
    db_file.close()
    db_file_path.unlink()

    img_list = [(image_paths[x], ) for x in sorted_ids]
    return img_list, []
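
The TensorFlow graph above fills an image_paths_len x image_paths_len matrix of summed absolute pixel differences, one batch row at a time, then greedily orders the images by that distance. A tiny NumPy-only sketch of the same metric on random data standing in for real images:

import numpy as np

rng = np.random.default_rng(0)
images = rng.random((5, 64, 64, 3), dtype=np.float32)  # 5 fake "images"

# diff[j, i] = sum of |images[i] - images[j]| over all pixels and channels.
diff = np.zeros((5, 5), dtype=np.float32)
for j in range(5):
    diff[j] = np.abs(images - images[j]).sum(axis=(1, 2, 3))

# Ascending order of distance from image 0: most similar images come first.
print(np.argsort(diff[0]))
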
Example #5
def dev_segmented_extract(input_dir, output_dir ):
    # extract faces using labelme .json polygons and embed the polygons into the extracted images

    device_config = nn.DeviceConfig.GPUIndexes( nn.ask_choose_device_idxs(suggest_all_gpu=True) )

    input_path = Path(input_dir)
    if not input_path.exists():
        raise ValueError('input_dir not found. Please ensure it exists.')

    output_path = Path(output_dir)
    io.log_info("Performing extract segmented faces.")
    io.log_info(f'Output dir is {output_path}')

    if output_path.exists():
        output_images_paths = pathex.get_image_paths(output_path, subdirs=True)
        if len(output_images_paths) > 0:
            io.input_bool("WARNING !!! \n %s contains files! \n They will be deleted. \n Press enter to continue." % (str(output_path)), False )
            for filename in output_images_paths:
                Path(filename).unlink()
            shutil.rmtree(str(output_path))
    else:
        output_path.mkdir(parents=True, exist_ok=True)

    images_paths = pathex.get_image_paths(input_path, subdirs=True, return_Path_class=True)

    extract_data = []
    images_jsons = {}
    images_processed = 0


    for filepath in io.progress_bar_generator(images_paths, "Processing"):
        json_filepath = filepath.parent / (filepath.stem+'.json')

        if json_filepath.exists():
            try:
                json_dict = json.loads(json_filepath.read_text())
                images_jsons[filepath] = json_dict

                total_points = [ [x,y] for shape in json_dict['shapes'] for x,y in shape['points'] ]
                total_points = np.array(total_points)

                if len(total_points) == 0:
                    io.log_info(f"No points found in {json_filepath}, skipping.")
                    continue

                l,r = int(total_points[:,0].min()), int(total_points[:,0].max())
                t,b = int(total_points[:,1].min()), int(total_points[:,1].max())

                force_output_path=output_path / filepath.relative_to(input_path).parent
                force_output_path.mkdir(exist_ok=True, parents=True)

                extract_data.append ( ExtractSubprocessor.Data(filepath,
                                                               rects=[ [l,t,r,b] ],
                                                               force_output_path=force_output_path ) )
                images_processed += 1
            except:
                io.log_err(f"err {filepath}, {traceback.format_exc()}")
                return
        else:
            io.log_info(f"No .json file for {filepath.relative_to(input_path)}, skipping.")
            continue

    image_size = 1024
    face_type = FaceType.HEAD
    extract_data = ExtractSubprocessor (extract_data, 'landmarks', image_size, face_type, device_config=device_config).run()
    extract_data = ExtractSubprocessor (extract_data, 'final', image_size, face_type, device_config=device_config).run()

    for data in extract_data:
        filepath = data.force_output_path / (data.filepath.stem+'_0.jpg')

        dflimg = DFLIMG.load(filepath)
        image_to_face_mat = dflimg.get_image_to_face_mat()

        json_dict = images_jsons[data.filepath]

        ie_polys = IEPolys()
        for shape in json_dict['shapes']:
            ie_poly = ie_polys.add(1)

            points = np.array( [ [x,y] for x,y in shape['points'] ] )
            points = LandmarksProcessor.transform_points(points, image_to_face_mat)

            for x,y in points:
                ie_poly.add( int(x), int(y) )

        dflimg.embed_and_set (filepath, ie_polys=ie_polys)

    io.log_info(f"Images found:     {len(images_paths)}")
    io.log_info(f"Images processed: {images_processed}")
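
The rectangle handed to ExtractSubprocessor above is simply the bounding box of all labelme polygon points. A self-contained sketch of that step (the 'shapes' structure mirrors labelme's JSON; the coordinates are invented):

import numpy as np

json_dict = {'shapes': [{'points': [[120, 80], [200, 90], [180, 210]]},
                        {'points': [[130, 95], [190, 200]]}]}

total_points = np.array([[x, y] for shape in json_dict['shapes']
                                for x, y in shape['points']])

l, r = int(total_points[:, 0].min()), int(total_points[:, 0].max())
t, b = int(total_points[:, 1].min()), int(total_points[:, 1].max())
print([l, t, r, b])  # [120, 80, 200, 210]
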
Example #6
    def __init__(self, is_training=False,
                       saved_models_path=None,
                       training_data_src_path=None,
                       training_data_dst_path=None,
                       pretraining_data_path=None,
                       pretrained_model_path=None,
                       no_preview=False,
                       force_model_name=None,
                       force_gpu_idxs=None,
                       cpu_only=False,
                       debug=False,
                       force_model_class_name=None,
                       **kwargs):
        self.is_training = is_training
        self.saved_models_path = saved_models_path
        self.training_data_src_path = training_data_src_path
        self.training_data_dst_path = training_data_dst_path
        self.pretraining_data_path = pretraining_data_path
        self.pretrained_model_path = pretrained_model_path
        self.no_preview = no_preview
        self.debug = debug

        self.model_class_name = model_class_name = Path(inspect.getmodule(self).__file__).parent.name.rsplit("_", 1)[1]

        if force_model_class_name is None:
            if force_model_name is not None:
                self.model_name = force_model_name
            else:
                while True:
                    # gather all model dat files
                    saved_models_names = []
                    for filepath in pathex.get_file_paths(saved_models_path):
                        filepath_name = filepath.name
                        if filepath_name.endswith(f'{model_class_name}_data.dat'):
                            saved_models_names += [ (filepath_name.split('_')[0], os.path.getmtime(filepath)) ]

                    # sort by modified datetime
                    saved_models_names = sorted(saved_models_names, key=operator.itemgetter(1), reverse=True )
                    saved_models_names = [ x[0] for x in saved_models_names ]

                    if len(saved_models_names) != 0:
                        io.log_info ("Choose one of saved models, or enter a name to create a new model.")
                        io.log_info ("[r] : rename")
                        io.log_info ("[d] : delete")
                        io.log_info ("")
                        for i, model_name in enumerate(saved_models_names):
                            s = f"[{i}] : {model_name} "
                            if i == 0:
                                s += "- latest"
                            io.log_info (s)

                        inp = io.input_str(f"", "0", show_default_value=False )
                        model_idx = -1
                        try:
                            model_idx = np.clip ( int(inp), 0, len(saved_models_names)-1 )
                        except:
                            pass

                        if model_idx == -1:
                            if len(inp) == 1:
                                is_rename = inp[0] == 'r'
                                is_delete = inp[0] == 'd'

                                if is_rename or is_delete:
                                    if len(saved_models_names) != 0:

                                        if is_rename:
                                            name = io.input_str(f"Enter the name of the model you want to rename")
                                        elif is_delete:
                                            name = io.input_str(f"Enter the name of the model you want to delete")

                                        if name in saved_models_names:

                                            if is_rename:
                                                new_model_name = io.input_str(f"Enter new name of the model")

                                            for filepath in pathex.get_paths(saved_models_path):
                                                filepath_name = filepath.name

                                                model_filename, remain_filename = filepath_name.split('_', 1)
                                                if model_filename == name:

                                                    if is_rename:
                                                        new_filepath = filepath.parent / ( new_model_name + '_' + remain_filename )
                                                        filepath.rename (new_filepath)
                                                    elif is_delete:
                                                        filepath.unlink()
                                    continue

                            self.model_name = inp
                        else:
                            self.model_name = saved_models_names[model_idx]

                    else:
                        self.model_name = io.input_str(f"No saved models found. Enter a name of a new model", "new")
                        self.model_name = self.model_name.replace('_', ' ')
                    break

        
            self.model_name = self.model_name + '_' + self.model_class_name
        else:
            self.model_name = force_model_class_name

        self.iter = 0
        self.options = {}
        self.loss_history = []
        self.sample_for_preview = None
        self.choosed_gpu_indexes = None

        model_data = {}
        self.model_data_path = Path( self.get_strpath_storage_for_file('data.dat') )
        if self.model_data_path.exists():
            io.log_info (f"Loading {self.model_name} model...")
            model_data = pickle.loads ( self.model_data_path.read_bytes() )
            self.iter = model_data.get('iter',0)
            if self.iter != 0:
                self.options = model_data['options']
                self.loss_history = model_data.get('loss_history', [])
                self.sample_for_preview = model_data.get('sample_for_preview', None)
                self.choosed_gpu_indexes = model_data.get('choosed_gpu_indexes', None)

        if self.is_first_run():
            io.log_info ("\nModel first run.")

        self.device_config = nn.DeviceConfig.GPUIndexes( force_gpu_idxs or nn.ask_choose_device_idxs(suggest_best_multi_gpu=True)) \
                             if not cpu_only else nn.DeviceConfig.CPU()

        nn.initialize(self.device_config)

        ####
        self.default_options_path = saved_models_path / f'{self.model_class_name}_default_options.dat'
        self.default_options = {}
        if self.default_options_path.exists():
            try:
                self.default_options = pickle.loads ( self.default_options_path.read_bytes() )
            except:
                pass

        self.choose_preview_history = False
        self.batch_size = self.load_or_def_option('batch_size', 1)
        #####

        io.input_skip_pending()
        self.on_initialize_options()

        if self.is_first_run():
            # save as default options only for first run model initialize
            self.default_options_path.write_bytes( pickle.dumps (self.options) )

        self.autobackup_hour = self.options.get('autobackup_hour', 0)
        self.write_preview_history = self.options.get('write_preview_history', False)
        self.target_iter = self.options.get('target_iter',0)
        self.random_flip = self.options.get('random_flip',True)

        self.on_initialize()
        self.options['batch_size'] = self.batch_size

        if self.is_training:
            self.preview_history_path = self.saved_models_path / ( f'{self.get_model_name()}_history' )
            self.autobackups_path     = self.saved_models_path / ( f'{self.get_model_name()}_autobackups' )

            if self.write_preview_history or io.is_colab():
                if not self.preview_history_path.exists():
                    self.preview_history_path.mkdir(exist_ok=True)
                else:
                    if self.iter == 0:
                        for filename in pathex.get_image_paths(self.preview_history_path):
                            Path(filename).unlink()

            if self.generator_list is None:
                raise ValueError("You didn't call set_training_data_generators()")
            else:
                for i, generator in enumerate(self.generator_list):
                    if not isinstance(generator, SampleGeneratorBase):
                        raise ValueError('training data generator is not subclass of SampleGeneratorBase')

            self.update_sample_for_preview(choose_preview_history=self.choose_preview_history)
            
            if self.autobackup_hour != 0:
                self.autobackup_start_time = time.time()

                if not self.autobackups_path.exists():
                    self.autobackups_path.mkdir(exist_ok=True)

        io.log_info( self.get_summary_text() )
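
The model-selection loop above builds its menu by listing every <model_name>_<ModelClass>_data.dat file in the saved-models folder and sorting the names by modification time, newest first. A condensed, hedged sketch of that listing step (directory and class name are placeholders, and pathlib replaces pathex.get_file_paths for self-containment):

import operator, os
from pathlib import Path

def list_saved_models(saved_models_path, model_class_name):
    # Collect (model_name, mtime) for files such as 'mymodel_SAEHD_data.dat'.
    found = []
    for filepath in Path(saved_models_path).iterdir():
        if filepath.name.endswith(f'{model_class_name}_data.dat'):
            found.append((filepath.name.split('_')[0], os.path.getmtime(filepath)))
    # Most recently modified model first; return the names only.
    found = sorted(found, key=operator.itemgetter(1), reverse=True)
    return [name for name, _ in found]

# Hypothetical usage:
# print(list_saved_models('workspace/model', 'SAEHD'))
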
Example #7
def main(
    detector=None,
    input_path=None,
    output_path=None,
    output_debug=None,
    manual_fix=False,
    manual_output_debug_fix=False,
    manual_window_size=1368,
    face_type='full_face',
    max_faces_from_image=0,
    cpu_only=False,
    force_gpu_idxs=None,
):
    print("------------start_extractor")

    if not input_path.exists():
        io.log_err('Input directory not found. Please ensure it exists.')
        return

    if face_type is not None:
        face_type = FaceType.fromString(face_type)

    if face_type is None:
        if manual_output_debug_fix and output_path.exists():
            files = pathex.get_image_paths(output_path)
            if len(files) != 0:
                dflimg = DFLIMG.load(Path(files[0]))
                if dflimg is not None and dflimg.has_data():
                    face_type = FaceType.fromString(dflimg.get_face_type())

    if face_type is None:
        face_type = io.input_str(
            "Face type",
            'wf', ['f', 'wf', 'head'],
            help_message=
            "Full face / whole face / head. 'Whole face' covers full area of face include forehead. 'head' covers full head, but requires XSeg for src and dst faceset."
        ).lower()
        face_type = {
            'f': FaceType.FULL,
            'wf': FaceType.WHOLE_FACE,
            'head': FaceType.HEAD
        }[face_type]

    image_size = 512 if face_type < FaceType.HEAD else 768

    if detector is None:
        io.log_info("Choose detector type.")
        io.log_info("[0] S3FD")
        io.log_info("[1] manual")
        detector = {0: 's3fd', 1: 'manual'}[io.input_int("", 0, [0, 1])]

    device_config = nn.DeviceConfig.GPUIndexes( force_gpu_idxs or nn.ask_choose_device_idxs(choose_only_one=detector=='manual', suggest_all_gpu=True) ) \
                    if not cpu_only else nn.DeviceConfig.CPU()

    output_debug_path = output_path.parent / (output_path.name + '_debug')

    if output_debug is None:
        output_debug = io.input_bool(
            f"Write debug images to {output_debug_path.name}?", False)

    if output_path.exists():
        if not manual_output_debug_fix and input_path != output_path:
            output_images_paths = pathex.get_image_paths(output_path)
            if len(output_images_paths) > 0:
                #io.input(f"\n WARNING !!! \n {output_path} contains files! \n They will be deleted. \n Press enter to continue.\n")
                for filename in output_images_paths:
                    Path(filename).unlink()
    else:
        output_path.mkdir(parents=True, exist_ok=True)

    input_path_image_paths = pathex.get_image_unique_filestem_paths(
        input_path, verbose_print_func=io.log_info)

    if manual_output_debug_fix:
        if not output_debug_path.exists():
            io.log_err(
                f'{output_debug_path} not found. Re-extract faces with "Write debug images" option.'
            )
            return
        else:
            detector = 'manual'
            io.log_info(
                'Performing re-extract frames which were deleted from _debug directory.'
            )

            input_path_image_paths = DeletedFilesSearcherSubprocessor(
                input_path_image_paths,
                pathex.get_image_paths(output_debug_path)).run()
            input_path_image_paths = sorted(input_path_image_paths)
            io.log_info('Found %d images.' % (len(input_path_image_paths)))
    else:
        if output_debug_path.exists():
            for filename in pathex.get_image_paths(output_debug_path):
                Path(filename).unlink()
        else:
            output_debug_path.mkdir(parents=True, exist_ok=True)

    images_found = len(input_path_image_paths)
    faces_detected = 0
    if images_found != 0:
        if detector == 'manual':
            print('Performing manual extract...')
            data = ExtractSubprocessor(
                [
                    ExtractSubprocessor.Data(Path(filename))
                    for filename in input_path_image_paths
                ],
                'landmarks-manual',
                image_size,
                face_type,
                output_debug_path if output_debug else None,
                manual_window_size=manual_window_size,
                device_config=device_config).run()

            print('Performing 3rd pass...')
            data = ExtractSubprocessor(
                data,
                'final',
                image_size,
                face_type,
                output_debug_path if output_debug else None,
                final_output_path=output_path,
                device_config=device_config).run()

        else:
            print('Extracting faces...')
            data = ExtractSubprocessor(
                [
                    ExtractSubprocessor.Data(Path(filename))
                    for filename in input_path_image_paths
                ],
                'all',
                image_size,
                face_type,
                output_debug_path if output_debug else None,
                max_faces_from_image=max_faces_from_image,
                final_output_path=output_path,
                device_config=device_config).run()

        faces_detected += sum([d.faces_detected for d in data])

        if manual_fix:
            if all(d.faces_detected > 0 for d in data):
                print('All faces are detected, manual fix not needed.')
            else:
                fix_data = [
                    ExtractSubprocessor.Data(d.filepath) for d in data
                    if d.faces_detected == 0
                ]
                print('Performing manual fix for %d images...' %
                      (len(fix_data)))
                fix_data = ExtractSubprocessor(
                    fix_data,
                    'landmarks-manual',
                    image_size,
                    face_type,
                    output_debug_path if output_debug else None,
                    manual_window_size=manual_window_size,
                    device_config=device_config).run()
                fix_data = ExtractSubprocessor(
                    fix_data,
                    'final',
                    image_size,
                    face_type,
                    output_debug_path if output_debug else None,
                    final_output_path=output_path,
                    device_config=device_config).run()
                faces_detected += sum([d.faces_detected for d in fix_data])

    print('-------------------------')
    print('Images found:        %d' % (images_found))
    print('Faces detected:      %d' % (faces_detected))
    print('-------------------------')