Code example #1
File: Util.py Project: zhd8757/DeepFaceLab
def convert_png_to_jpg_file(filepath):
    filepath = Path(filepath)

    if filepath.suffix != '.png':
        return

    dflpng = DFLPNG.load(str(filepath))
    if dflpng is None:
        io.log_err("%s is not a dfl image file" % (filepath.name))
        return

    dfl_dict = dflpng.getDFLDictData()

    img = cv2_imread(str(filepath))
    new_filepath = str(filepath.parent / (filepath.stem + '.jpg'))
    cv2_imwrite(new_filepath, img, [int(cv2.IMWRITE_JPEG_QUALITY), 85])

    DFLJPG.embed_data(new_filepath,
                      face_type=dfl_dict.get('face_type', None),
                      landmarks=dfl_dict.get('landmarks', None),
                      source_filename=dfl_dict.get('source_filename', None),
                      source_rect=dfl_dict.get('source_rect', None),
                      source_landmarks=dfl_dict.get('source_landmarks', None))

    filepath.unlink()
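
A minimal folder-level usage sketch for the converter above. It assumes the same DeepFaceLab helpers (Path_utils, io) are importable, and the directory path is purely illustrative:

from utils import Path_utils
from interact import interact as io

def convert_png_to_jpg_folder(input_path):
    # Convert every DFL png in the folder in place, with a progress bar.
    for filepath in io.progress_bar_generator(
            Path_utils.get_image_paths(input_path), "Converting"):
        convert_png_to_jpg_file(filepath)

# convert_png_to_jpg_folder("workspace/data_src/aligned")  # illustrative path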
Code example #2
def sort_by_face_yaw(input_path):
    io.log_info("Sorting by face yaw...")
    img_list = []
    trash_img_list = []
    for filepath in io.progress_bar_generator(
            Path_utils.get_image_paths(input_path), "Loading"):
        filepath = Path(filepath)

        if filepath.suffix == '.png':
            dflimg = DFLPNG.load(str(filepath))
        elif filepath.suffix == '.jpg':
            dflimg = DFLJPG.load(str(filepath))
        else:
            dflimg = None

        if dflimg is None:
            io.log_err("%s is not a dfl image file" % (filepath.name))
            trash_img_list.append([str(filepath)])
            continue

        pitch, yaw = LandmarksProcessor.estimate_pitch_yaw(
            dflimg.get_landmarks())

        img_list.append([str(filepath), yaw])

    io.log_info("Sorting...")
    img_list = sorted(img_list, key=operator.itemgetter(1), reverse=True)

    return img_list, trash_img_list
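
The sorter returns the ranked list plus a trash list of files that could not be read as DFL images. The sketch below shows one way the result might be applied; the renaming scheme is purely illustrative and is not DeepFaceLab's own post-sort step:

import shutil
from pathlib import Path

def apply_sort_result(input_path, img_list, trash_img_list):
    # Move unreadable files into a trash subfolder.
    trash_path = Path(input_path) / 'trash'
    trash_path.mkdir(exist_ok=True)
    for item in trash_img_list:
        src = Path(item[0])
        shutil.move(str(src), str(trash_path / src.name))

    # Rename the remaining images so filename order matches the sort order.
    for i, item in enumerate(img_list):
        src = Path(item[0])
        src.rename(src.parent / ('%05d_%s' % (i, src.name)))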
Code example #3
File: Util.py Project: zhd8757/DeepFaceLab
def add_landmarks_debug_images(input_path):
    io.log_info("Adding landmarks debug images...")

    for filepath in io.progress_bar_generator(
            Path_utils.get_image_paths(input_path), "Processing"):
        filepath = Path(filepath)

        img = cv2_imread(str(filepath))

        if filepath.suffix == '.png':
            dflimg = DFLPNG.load(str(filepath))
        elif filepath.suffix == '.jpg':
            dflimg = DFLJPG.load(str(filepath))
        else:
            dflimg = None

        if dflimg is None:
            io.log_err("%s is not a dfl image file" % (filepath.name))
            continue

        if img is not None:
            face_landmarks = dflimg.get_landmarks()
            LandmarksProcessor.draw_landmarks(img, face_landmarks)

            output_file = '{}{}'.format(
                str(Path(str(input_path)) / filepath.stem), '_debug.jpg')
            cv2_imwrite(output_file, img, [int(cv2.IMWRITE_JPEG_QUALITY), 50])
Code example #4
def denoise_image_sequence(input_dir, ext=None, factor=None):
    input_path = Path(input_dir)

    if not input_path.exists():
        io.log_err("input_dir not found.")
        return

    if ext is None:
        ext = io.input_str(
            "Input image format (extension)? ( default:png ) : ", "png")

    if factor is None:
        factor = np.clip(
            io.input_int("Denoise factor? (1-20 default:5) : ", 5), 1, 20)

    kwargs = {}
    if ext == 'jpg':
        kwargs.update({'q:v': '2'})

    job = (ffmpeg.input(str(input_path / ('%5d.' + ext))).filter(
        "hqdn3d", factor, factor, 5,
        5).output(str(input_path / ('%5d.' + ext)), **kwargs))

    try:
        job = job.run()
    except:
        io.log_err("ffmpeg fail, job commandline:" + str(job.compile()))
Code example #5
File: SampleHost.py Project: ngoduongkha/DeepFaceLab
    def load_face_samples(image_paths, silent=False):
        sample_list = []

        for filename in (image_paths if silent else io.progress_bar_generator(
                image_paths, "Loading")):
            filename_path = Path(filename)
            try:
                dflimg = DFLIMG.load(filename_path)

                if dflimg is None:
                    io.log_err(
                        "load_face_samples: %s is not a dfl image file required for training"
                        % (filename_path.name))
                    continue

                sample_list.append(
                    Sample(
                        filename=filename,
                        sample_type=SampleType.FACE,
                        face_type=FaceType.fromString(dflimg.get_face_type()),
                        shape=dflimg.get_shape(),
                        landmarks=dflimg.get_landmarks(),
                        ie_polys=dflimg.get_ie_polys(),
                        pitch_yaw_roll=dflimg.get_pitch_yaw_roll(),
                        eyebrows_expand_mod=dflimg.get_eyebrows_expand_mod(),
                        source_filename=dflimg.get_source_filename(),
                    ))
            except:
                io.log_err("Unable to load %s , error: %s" %
                           (filename, traceback.format_exc()))

        return sample_list
Code example #6
File: Sorter.py Project: zhd8757/DeepFaceLab
def sort_by_hist_dissim(input_path):
    io.log_info ("Sorting by histogram dissimilarity...")

    img_list = []
    trash_img_list = []
    for filepath in io.progress_bar_generator( Path_utils.get_image_paths(input_path), "Loading"):
        filepath = Path(filepath)
        
        if filepath.suffix == '.png':
            dflimg = DFLPNG.load( str(filepath) )
        elif filepath.suffix == '.jpg':
            dflimg = DFLJPG.load ( str(filepath) )
        else:
            dflimg = None
            
        if dflimg is None:
            io.log_err ("%s is not a dfl image file" % (filepath.name) )
            trash_img_list.append ([str(filepath)])
            continue
            
        image = cv2_imread(str(filepath))
        face_mask = LandmarksProcessor.get_image_hull_mask (image.shape, dflimg.get_landmarks())
        image = (image*face_mask).astype(np.uint8)

        img_list.append ([str(filepath), cv2.calcHist([cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)], [0], None, [256], [0, 256]), 0 ])

    img_list = HistDissimSubprocessor(img_list).run()
                         
    io.log_info ("Sorting...")
    img_list = sorted(img_list, key=operator.itemgetter(2), reverse=True)

    return img_list, trash_img_list
Code example #7
def sort_by_origname(input_path):
    io.log_info("Sort by original filename...")

    img_list = []
    trash_img_list = []
    for filepath in io.progress_bar_generator(
            Path_utils.get_image_paths(input_path), "Loading"):
        filepath = Path(filepath)

        if filepath.suffix == '.png':
            dflimg = DFLPNG.load(str(filepath))
        elif filepath.suffix == '.jpg':
            dflimg = DFLJPG.load(str(filepath))
        else:
            dflimg = None

        if dflimg is None:
            io.log_err("%s is not a dfl image file" % (filepath.name))
            trash_img_list.append([str(filepath)])
            continue

        img_list.append([str(filepath), dflimg.get_source_filename()])

    io.log_info("Sorting...")
    img_list = sorted(img_list, key=operator.itemgetter(1))
    return img_list, trash_img_list
Code example #8
File: Sorter.py Project: zym1599/DeepFaceLab-1
def sort_by_face_pitch(input_path):
    io.log_info("根据[pitch]排序... ")
    img_list = []
    trash_img_list = []
    for filepath in io.progress_bar_generator(
            Path_utils.get_image_paths(input_path), "Loading"):
        filepath = Path(filepath)

        if filepath.suffix == '.png':
            dflimg = DFLPNG.load(str(filepath))
        elif filepath.suffix == '.jpg':
            dflimg = DFLJPG.load(str(filepath))
        else:
            dflimg = None

        if dflimg is None:
            io.log_err("%s 不是DeepFaceLab的图片格式,请使用DeepFaceLab提取脸图" %
                       (filepath.name))
            trash_img_list.append([str(filepath)])
            continue

        pitch_yaw_roll = dflimg.get_pitch_yaw_roll()
        if pitch_yaw_roll is not None:
            pitch, yaw, roll = pitch_yaw_roll
        else:
            pitch, yaw, roll = LandmarksProcessor.estimate_pitch_yaw_roll(
                dflimg.get_landmarks())

        img_list.append([str(filepath), pitch])

    io.log_info("排序...")
    img_list = sorted(img_list, key=operator.itemgetter(1), reverse=True)

    return img_list, trash_img_list
Code example #9
def sort_by_face_pitch(input_path):
    io.log_info("Sorting by face pitch...")
    img_list = []
    trash_img_list = []
    for filepath in io.progress_bar_generator(
            Path_utils.get_image_paths(input_path), "Loading"):
        filepath = Path(filepath)

        dflimg = DFLIMG.load(filepath)

        if dflimg is None:
            io.log_err("%s is not a dfl image file" % (filepath.name))
            trash_img_list.append([str(filepath)])
            continue

        pitch_yaw_roll = dflimg.get_pitch_yaw_roll()
        if pitch_yaw_roll is not None:
            pitch, yaw, roll = pitch_yaw_roll
        else:
            pitch, yaw, roll = LandmarksProcessor.estimate_pitch_yaw_roll(
                dflimg.get_landmarks())

        img_list.append([str(filepath), pitch])

    io.log_info("Sorting...")
    img_list = sorted(img_list, key=operator.itemgetter(1), reverse=True)

    return img_list, trash_img_list
Code example #10
def sort_by_face_dissim(input_path):

    io.log_info("Sorting by face dissimilarity...")

    img_list = []
    trash_img_list = []
    for filepath in io.progress_bar_generator(
            Path_utils.get_image_paths(input_path), "Loading"):
        filepath = Path(filepath)

        dflimg = DFLIMG.load(filepath)

        if dflimg is None:
            io.log_err("%s is not a dfl image file" % (filepath.name))
            trash_img_list.append([str(filepath)])
            continue

        img_list.append([str(filepath), dflimg.get_landmarks(), 0])

    img_list_len = len(img_list)
    for i in io.progress_bar_generator(range(img_list_len - 1), "Sorting"):
        score_total = 0
        for j in range(i + 1, len(img_list)):
            if i == j:
                continue
            fl1 = img_list[i][1]
            fl2 = img_list[j][1]
            score_total += np.sum(np.absolute((fl2 - fl1).flatten()))

        img_list[i][2] = score_total

    io.log_info("Sorting...")
    img_list = sorted(img_list, key=operator.itemgetter(2), reverse=True)

    return img_list, trash_img_list
Code example #11
File: F.py Project: kirka1206/deep-learning-copy
def convert(workspace, skip=True, manual=False):
    import os
    for f in os.listdir(workspace):
        if not os.path.isdir(os.path.join(workspace,
                                          f)) or not f.startswith("data_dst_"):
            continue
        io.log_info(f)
        model_dir = os.path.join(workspace, "model")
        self_model_dir = os.path.join(workspace, f, "model")
        if os.path.exists(self_model_dir):
            io.log_info("Use Self Model")
            model_dir = self_model_dir
        data_dst = os.path.join(workspace, f)
        data_dst_merged = os.path.join(data_dst, "merged")
        data_dst_aligned = os.path.join(data_dst, "aligned")
        data_dst_video = os.path.join(data_dst, "video")
        refer_path = None
        for v in os.listdir(data_dst_video):
            if v.split(".")[-1] in ["mp4", "avi", "wmv", "mkv"]:
                refer_path = os.path.join(data_dst_video, v)
                break
        if not refer_path:
            io.log_err("No Refer File In " + data_dst_video)
            return
        # Recover original filenames
        need_recover = True
        for img in os.listdir(data_dst_aligned):
            if img.endswith("_0.jpg") or img.endswith("_0.png"):
                need_recover = False
        if need_recover:
            recover_filename(data_dst_aligned)
        # Extract if there are no face images in data_dst
        has_img = False
        for img in os.listdir(data_dst):
            if img.endswith(".jpg") or img.endswith(".png"):
                has_img = True
                break
        if not has_img:
            dfl.dfl_extract_video(refer_path, data_dst)
        # Convert
        dfl.dfl_convert(data_dst,
                        data_dst_merged,
                        data_dst_aligned,
                        model_dir,
                        enable_predef=not manual)
        # ConverterMasked.enable_predef = enable_predef
        # Drop frames without a face
        if skip:
            skip_no_face(data_dst)
        # Convert to mp4
        refer_name = ".".join(os.path.basename(refer_path).split(".")[:-1])
        result_path = os.path.join(
            workspace, "result_%s_%s.mp4" % (get_time_str(), refer_name))
        dfl.dfl_video_from_sequence(data_dst_merged, result_path, refer_path)
        # Move to trash
        trash_dir = os.path.join(workspace, "data_trash")
        import shutil
        shutil.move(data_dst, trash_dir)
Code example #12
    def embed_data(filename,
                   face_type=None,
                   landmarks=None,
                   ie_polys=None,
                   source_filename=None,
                   source_rect=None,
                   source_landmarks=None,
                   image_to_face_mat=None,
                   fanseg_mask=None,
                   pitch_yaw_roll=None,
                   eyebrows_expand_mod=None,
                   relighted=None,
                   **kwargs):

        if fanseg_mask is not None:
            fanseg_mask = np.clip((fanseg_mask * 255).astype(np.uint8), 0, 255)

            ret, buf = cv2.imencode('.jpg', fanseg_mask,
                                    [int(cv2.IMWRITE_JPEG_QUALITY), 85])

            if ret and len(buf) < 60000:
                fanseg_mask = buf
            else:
                io.log_err("Unable to encode fanseg_mask for %s" % (filename))
                fanseg_mask = None

        inst = DFLJPG.load_raw(filename)
        inst.setDFLDictData({
            'face_type': face_type,
            'landmarks': landmarks,
            'ie_polys': ie_polys.dump() if ie_polys is not None else None,
            'source_filename': source_filename,
            'source_rect': source_rect,
            'source_landmarks': source_landmarks,
            'image_to_face_mat': image_to_face_mat,
            'fanseg_mask': fanseg_mask,
            'pitch_yaw_roll': pitch_yaw_roll,
            'eyebrows_expand_mod': eyebrows_expand_mod,
            'relighted': relighted
        })

        try:
            with open(filename, "wb") as f:
                f.write(inst.dump())
        except:
            raise Exception('cannot save %s' % (filename))
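
A sketch of reading the embedded metadata back, assuming DFLJPG.load and getDFLDictData mirror the DFLPNG accessors used in example #1; the file path is illustrative:

from pathlib import Path

filepath = Path("workspace/data_dst/aligned/00001_0.jpg")  # illustrative path
dfljpg = DFLJPG.load(str(filepath))
if dfljpg is not None:
    dfl_dict = dfljpg.getDFLDictData()
    print(dfl_dict.get('face_type'), dfl_dict.get('source_filename'))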
Code example #13
def remove_ie_polys_file (filepath):
    filepath = Path(filepath)

    dflimg = DFLIMG.load (filepath)
    if dflimg is None:
        io.log_err ("%s is not a dfl image file" % (filepath.name) )
        return

    dflimg.remove_ie_polys()
    dflimg.embed_and_set( str(filepath) )
Code example #14
File: F.py Project: kirka1206/deep-learning-copy
def get_pitch_yaw_roll(input_path, r=0.05):
    import os
    import numpy as np
    import cv2
    from shutil import copyfile
    from pathlib import Path
    from utils import Path_utils
    from utils.DFLPNG import DFLPNG
    from utils.DFLJPG import DFLJPG
    from facelib import LandmarksProcessor
    from joblib import Subprocessor
    import multiprocessing
    from interact import interact as io
    from imagelib import estimate_sharpness
    io.log_info("Sorting by face yaw...")
    img_list = []
    trash_img_list = []
    for filepath in io.progress_bar_generator(
            Path_utils.get_image_paths(input_path), "Loading"):
        filepath = Path(filepath)
        if filepath.suffix == '.png':
            dflimg = DFLPNG.load(str(filepath))
        elif filepath.suffix == '.jpg':
            dflimg = DFLJPG.load(str(filepath))
        else:
            dflimg = None
        if dflimg is None:
            io.log_err("%s is not a dfl image file" % (filepath.name))
            trash_img_list.append([str(filepath)])
            continue
        pitch, yaw, roll = LandmarksProcessor.estimate_pitch_yaw_roll(
            dflimg.get_landmarks())
        img_list.append([str(filepath), pitch, yaw, roll])

    img_list.sort(key=lambda item: item[1])
    with open(os.path.join(input_path, "_pitch_yaw_roll.csv"), "w") as f:
        for i in img_list:
            f.write("%s,%f,%f,%f\n" %
                    (os.path.basename(i[0]), i[1], i[2], i[3]))

    import cv
    width = 800
    img = cv.cv_new((width, width))
    xs = [i[1] for i in img_list]
    ys = [i[2] for i in img_list]
    cs = [(128, 128, 128)] * len(xs)
    rs = [int(r * width / 2)] * len(xs)
    cv.cv_scatter(img, xs, ys, [-1, 1], [-1, 1], cs, rs)
    cs = [(0xcc, 0x66, 0x33)] * len(xs)
    rs = [2] * len(xs)
    cv.cv_scatter(img, xs, ys, [-1, 1], [-1, 1], cs, rs)
    cv.cv_save(img, os.path.join(input_path, "_pitch_yaw_roll.bmp"))
    return img_list
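
The function writes one row per image to _pitch_yaw_roll.csv. A small sketch of reading that file back with the standard library:

import csv
import os

def read_pitch_yaw_roll_csv(input_path):
    # Each row is: filename, pitch, yaw, roll (as written above).
    rows = []
    with open(os.path.join(input_path, "_pitch_yaw_roll.csv"), newline="") as f:
        for name, pitch, yaw, roll in csv.reader(f):
            rows.append((name, float(pitch), float(yaw), float(roll)))
    return rows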
Code example #15
def extract_video(input_file, output_dir, output_ext=None, fps=None):
    input_file_path = Path(input_file)
    output_path = Path(output_dir)

    if not output_path.exists():
        output_path.mkdir(exist_ok=True)

    if input_file_path.suffix == '.*':
        input_file_path = Path_utils.get_first_file_by_stem(
            input_file_path.parent, input_file_path.stem)
    else:
        if not input_file_path.exists():
            input_file_path = None

    if input_file_path is None:
        io.log_err("input_file not found.")
        return

    if fps is None:
        fps = io.input_int(
            "Enter FPS ( ?:help skip:fullfps ) : ",
            0,
            help_message=
            "How many frames of every second of the video will be extracted.")

    if output_ext is None:
        output_ext = io.input_str(
            "Output image format? ( jpg png ?:help skip:png ) : ",
            "png", ["png", "jpg"],
            help_message=
            "png is lossless, but extraction is x10 slower for HDD, requires x10 more disk space than jpg."
        )

    for filename in Path_utils.get_image_paths(output_path,
                                               ['.' + output_ext]):
        Path(filename).unlink()

    job = ffmpeg.input(str(input_file_path))

    kwargs = {'pix_fmt': 'rgb24'}
    if fps != 0:
        kwargs.update({'r': str(fps)})

    if output_ext == 'jpg':
        kwargs.update({'q:v': '2'})  #highest quality for jpg

    job = job.output(str(output_path / ('%5d.' + output_ext)), **kwargs)

    try:
        job = job.run()
    except:
        io.log_err("ffmpeg fail, job commandline:" + str(job.compile()))
Code example #16
File: dfl.py Project: kirka1206/deep-learning-copy
def dfl_load_img(path):
    from pathlib import Path
    from utils.DFLPNG import DFLPNG
    from utils.DFLJPG import DFLJPG
    filepath = Path(path)
    if filepath.suffix == '.png':
        dflimg = DFLPNG.load(str(filepath))
    elif filepath.suffix == '.jpg':
        dflimg = DFLJPG.load(str(filepath))
    else:
        dflimg = None
    if dflimg is None:
        io.log_err("%s is not a dfl image file" % (filepath.name))
    return dflimg
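
A usage sketch for the loader above; the aligned-face path is illustrative:

dflimg = dfl_load_img("workspace/data_src/aligned/00001_0.jpg")
if dflimg is not None:
    # Landmark count and the source frame the face was extracted from.
    print(len(dflimg.get_landmarks()), dflimg.get_source_filename())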
Code example #17
def cut_video(input_file,
              from_time=None,
              to_time=None,
              audio_track_id=None,
              bitrate=None):
    input_file_path = Path(input_file)
    if input_file_path is None:
        io.log_err("input_file not found.")
        return

    output_file_path = input_file_path.parent / (
        input_file_path.stem + "_cut" + input_file_path.suffix)

    if from_time is None:
        from_time = io.input_str("From time (skip: 00:00:00.000) : ",
                                 "00:00:00.000")

    if to_time is None:
        to_time = io.input_str("To time (skip: 00:00:00.000) : ",
                               "00:00:00.000")

    if audio_track_id is None:
        audio_track_id = io.input_int("Specify audio track id. ( skip:0 ) : ",
                                      0)

    if bitrate is None:
        bitrate = max(
            1,
            io.input_int("Bitrate of output file in MB/s ? (default:25) : ",
                         25))

    kwargs = {
        "c:v": "libx264",
        "b:v": "%dM" % (bitrate),
        "pix_fmt": "yuv420p",
    }

    job = ffmpeg.input(str(input_file_path), ss=from_time, to=to_time)

    job_v = job['v:0']
    job_a = job['a:' + str(audio_track_id) + '?']

    job = ffmpeg.output(job_v, job_a, str(output_file_path),
                        **kwargs).overwrite_output()

    try:
        job = job.run()
    except:
        io.log_err("ffmpeg fail, job commandline:" + str(job.compile()))
Code example #18
    def get_person_id_max_count(samples_path):
        samples = None
        try:
            samples = samplelib.PackedFaceset.load(samples_path)
        except:
            io.log_err(
                f"Error occurred while loading samplelib.PackedFaceset.load {str(samples_path)}, {traceback.format_exc()}"
            )

        if samples is None:
            raise ValueError("packed faceset not found.")
        persons_name_idxs = {}
        for sample in samples:
            persons_name_idxs[sample.person_name] = 0
        return len(list(persons_name_idxs.keys()))
Code example #19
File: VideoEd.py Project: zym1599/DeepFaceLab-1
def extract_video(input_file, output_dir, output_ext=None, fps=None):
    input_file_path = Path(input_file)
    output_path = Path(output_dir)

    if not output_path.exists():
        output_path.mkdir(exist_ok=True)

    if input_file_path.suffix == '.*':
        input_file_path = Path_utils.get_first_file_by_stem(
            input_file_path.parent, input_file_path.stem)
    else:
        if not input_file_path.exists():
            input_file_path = None

    if input_file_path is None:
        io.log_err("input_file not found.")
        return

    if fps is None:
        fps = io.input_int("输入帧率[FPS] ( ?:帮助 跳过:默认帧率 ) : ",
                           0,
                           help_message="FPS是指每秒多少张图片,一般视频为24,推荐输入12")

    if output_ext is None:
        output_ext = io.input_str(
            "Output image format? ( jpg or png ?:help default:png ) : ",
            "png", ["png", "jpg"],
            help_message="png is lossless, but is about 10x slower and needs about 10x more disk space than jpg; jpg is recommended.")

    for filename in Path_utils.get_image_paths(output_path,
                                               ['.' + output_ext]):
        Path(filename).unlink()

    job = ffmpeg.input(str(input_file_path))

    kwargs = {'pix_fmt': 'rgb24'}
    if fps != 0:
        kwargs.update({'r': str(fps)})

    if output_ext == 'jpg':
        kwargs.update({'q:v': '2'})  #highest quality for jpg

    job = job.output(str(output_path / ('%5d.' + output_ext)), **kwargs)

    try:
        job = job.run()
    except:
        io.log_err("ffmpeg 调用失败, 错误提示:" + str(job.compile()))
Code example #20
File: Util.py Project: zhuxiaofenga/DeepFaceLab
def remove_fanseg_file(filepath):
    filepath = Path(filepath)

    if filepath.suffix == '.png':
        dflimg = DFLPNG.load(str(filepath))
    elif filepath.suffix == '.jpg':
        dflimg = DFLJPG.load(str(filepath))
    else:
        return

    if dflimg is None:
        io.log_err("%s is not a dfl image file" % (filepath.name))
        return

    dflimg.remove_fanseg_mask()
    dflimg.embed_and_set(str(filepath))
Code example #21
File: F.py Project: wa407/YML
def mp4(workspace, skip=False):
    import os
    for f in os.listdir(workspace):
        if not os.path.isdir(os.path.join(workspace,
                                          f)) or not f.startswith("data_dst_"):
            continue
        io.log_info(f)
        data_dst = os.path.join(workspace, f)
        data_dst_merged = os.path.join(data_dst, "merged")
        data_dst_aligned = os.path.join(data_dst, "aligned")
        data_dst_video = os.path.join(data_dst, "video")
        refer_path = None
        for v in os.listdir(data_dst_video):
            if v.split(".")[-1] in ["mp4", "avi", "wmv", "mkv"]:
                refer_path = os.path.join(data_dst_video, v)
                break
        if not refer_path:
            io.log_err("No Refer File In " + data_dst_video)
            return
        io.log_info("Refer File " + refer_path)
        # Recover original filenames
        need_recover = True
        for img in os.listdir(data_dst_aligned):
            if img.endswith("_0.jpg") or img.endswith("_0.png"):
                need_recover = False
        if need_recover:
            recover_filename(data_dst_aligned)
        # Extract if there are no face images in data_dst
        has_img = False
        for img in os.listdir(data_dst):
            if img.endswith(".jpg") or img.endswith(".png"):
                has_img = True
                break
        if not has_img:
            dfl.dfl_extract_video(refer_path, data_dst)
        # Drop frames without a face
        # if skip:
        #     skip_no_face(data_dst)
        # Convert to mp4
        refer_name = ".".join(os.path.basename(refer_path).split(".")[:-1])
        result_path = os.path.join(
            workspace, "result_%s_%s.mp4" % (get_time_str(), refer_name))
        dfl.dfl_video_from_sequence(data_dst_merged, result_path, refer_path)
        # Move to trash
        trash_dir = os.path.join(workspace, "../trash_workspace")
        import shutil
        shutil.move(data_dst, trash_dir)
Code example #22
def delete_relighted(input_dir):
    input_path = Path(input_dir)
    image_paths = [Path(x) for x in Path_utils.get_image_paths(input_path)]

    files_to_delete = []
    for filepath in io.progress_bar_generator(image_paths, "Loading"):
        dflimg = DFLIMG.load(Path(filepath))

        if dflimg is None:
            io.log_err("%s is not a dfl image file" % (filepath.name))
            continue
        else:
            if dflimg.get_relighted():
                files_to_delete += [filepath]

    for file in io.progress_bar_generator(files_to_delete, "Deleting"):
        file.unlink()
Code example #23
File: VideoEd.py Project: coinsbarboss/dfs
def extract_video(input_file, output_dir, output_ext=None, fps=None):
    input_file_path = Path(input_file)
    output_path = Path(output_dir)

    if not output_path.exists():
        output_path.mkdir(exist_ok=True)

    if input_file_path.suffix == '.*':
        input_file_path = Path_utils.get_first_file_by_stem(
            input_file_path.parent, input_file_path.stem)
    else:
        if not input_file_path.exists():
            input_file_path = None

    if input_file_path is None:
        io.log_err("input_file not found.")
        return

    if output_ext is None:
        output_ext = io.input_str(
            "Output image format (extension)? ( default:png ) : ", "png")

    if fps is None:
        fps = io.input_int(
            "Enter FPS ( ?:help skip:fullfps ) : ",
            0,
            help_message=
            "How many frames of every second of the video will be extracted.")

    for filename in Path_utils.get_image_paths(output_path,
                                               ['.' + output_ext]):
        Path(filename).unlink()

    job = ffmpeg.input(str(input_file_path))

    kwargs = {}
    if fps != 0:
        kwargs.update({'r': str(fps)})

    job = job.output(str(output_path / ('%5d.' + output_ext)), **kwargs)

    try:
        job = job.run()
    except:
        io.log_err("ffmpeg fail, job commandline:" + str(job.compile()))
Code example #24
File: Sorter.py Project: zym1599/DeepFaceLab-1
def sort_by_face(input_path):
    io.log_info("根据相似度[similarity]排序...")

    img_list = []
    trash_img_list = []
    for filepath in io.progress_bar_generator(
            Path_utils.get_image_paths(input_path), "Loading"):
        filepath = Path(filepath)

        if filepath.suffix == '.png':
            dflimg = DFLPNG.load(str(filepath))
        elif filepath.suffix == '.jpg':
            dflimg = DFLJPG.load(str(filepath))
        else:
            dflimg = None

        if dflimg is None:
            io.log_err("%s 不是DeepFaceLab的图片格式,请使用DeepFaceLab提取脸图" %
                       (filepath.name))
            trash_img_list.append([str(filepath)])
            continue

        img_list.append([str(filepath), dflimg.get_landmarks()])

    img_list_len = len(img_list)
    for i in io.progress_bar_generator(range(0, img_list_len - 1), "Sorting"):
        min_score = float("inf")
        j_min_score = i + 1
        for j in range(i + 1, len(img_list)):

            fl1 = img_list[i][1]
            fl2 = img_list[j][1]
            score = np.sum(np.absolute((fl2 - fl1).flatten()))

            if score < min_score:
                min_score = score
                j_min_score = j
        img_list[i + 1], img_list[j_min_score] = img_list[j_min_score], img_list[i + 1]

    return img_list, trash_img_list
Code example #25
    def load(sample_type, samples_path):
        samples_cache = SampleHost.samples_cache

        if str(samples_path) not in samples_cache.keys():
            samples_cache[str(samples_path)] = [None] * SampleType.QTY

        samples = samples_cache[str(samples_path)]

        if sample_type == SampleType.IMAGE:
            if samples[sample_type] is None:
                samples[sample_type] = [
                    Sample(filename=filename)
                    for filename in io.progress_bar_generator(
                        Path_utils.get_image_paths(samples_path), "Loading")
                ]
        elif sample_type == SampleType.FACE:
            if samples[sample_type] is None:
                result = None
                try:
                    result = samplelib.PackedFaceset.load(samples_path)
                except:
                    io.log_err(
                        f"Error occurred while loading samplelib.PackedFaceset.load {str(samples_path)}, {traceback.format_exc()}"
                    )

                if result is not None:
                    io.log_info(
                        f"Loaded {len(result)} packed faces from {samples_path}"
                    )

                if result is None:
                    result = SampleHost.load_face_samples(
                        Path_utils.get_image_paths(samples_path))

                samples[sample_type] = result

        elif sample_type == SampleType.FACE_TEMPORAL_SORTED:
            if samples[sample_type] is None:
                samples[sample_type] = SampleHost.upgradeToFaceTemporalSortedSamples(
                    SampleHost.load(SampleType.FACE, samples_path))

        return samples[sample_type]
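
A usage sketch for the cache-backed loader above; the aligned-folder path is illustrative. It first tries the packed faceset and falls back to loading loose image files, as the code shows:

from pathlib import Path

samples = SampleHost.load(SampleType.FACE, Path("workspace/data_src/aligned"))
print(len(samples), "face samples loaded")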
Code example #26
    def embed_data(filename,
                   face_type=None,
                   landmarks=None,
                   ie_polys=None,
                   source_filename=None,
                   source_rect=None,
                   source_landmarks=None,
                   image_to_face_mat=None,
                   fanseg_mask=None,
                   eyebrows_expand_mod=None,
                   relighted=None,
                   **kwargs):

        if fanseg_mask is not None:
            fanseg_mask = np.clip((fanseg_mask * 255).astype(np.uint8), 0, 255)

            ret, buf = cv2.imencode('.jpg', fanseg_mask,
                                    [int(cv2.IMWRITE_JPEG_QUALITY), 85])

            if ret and len(buf) < 60000:
                fanseg_mask = buf
            else:
                io.log_err("Unable to encode fanseg_mask for %s" % (filename))
                fanseg_mask = None

        if ie_polys is not None:
            if not isinstance(ie_polys, list):
                ie_polys = ie_polys.dump()

        DFLJPG.embed_dfldict(
            filename, {
                'face_type': face_type,
                'landmarks': landmarks,
                'ie_polys': ie_polys,
                'source_filename': source_filename,
                'source_rect': source_rect,
                'source_landmarks': source_landmarks,
                'image_to_face_mat': image_to_face_mat,
                'fanseg_mask': fanseg_mask,
                'eyebrows_expand_mod': eyebrows_expand_mod,
                'relighted': relighted
            })
Code example #27
    def __init__(self,
                 resolution,
                 face_type_str,
                 load_weights=True,
                 weights_file_root=None,
                 training=False):
        exec(nnlib.import_all(), locals(), globals())

        self.model = FANSegmentator.BuildModel(resolution, ngf=64)

        if weights_file_root:
            weights_file_root = Path(weights_file_root)
        else:
            weights_file_root = Path(__file__).parent

        self.weights_path = weights_file_root / ('FANSeg_%d_%s.h5' %
                                                 (resolution, face_type_str))

        if load_weights:
            self.model.load_weights(str(self.weights_path))
        else:
            if training:
                try:
                    with open(
                            Path(__file__).parent / 'vgg11_enc_weights.npy',
                            'rb') as f:
                        d = pickle.loads(f.read())

                    for i in [0, 3, 6, 8, 11, 13, 16, 18]:
                        s = 'features.%d' % i

                        self.model.get_layer(s).set_weights(d[s])
                except:
                    io.log_err(
                        "Unable to load VGG11 pretrained weights from vgg11_enc_weights.npy"
                    )

        if training:
            #self.model.compile(loss='mse', optimizer=Adam(tf_cpu_mode=2))
            self.model.compile(loss='binary_crossentropy',
                               optimizer=Adam(tf_cpu_mode=2),
                               metrics=['accuracy'])
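
A construction sketch for the segmentator above, assuming a weights file named FANSeg_256_full_face.h5 sits next to the module; the resolution and face-type string are illustrative:

# Load a pretrained 256px full-face segmentator for inference.
fanseg = FANSegmentator(256, 'full_face', load_weights=True)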
Code example #28
def add_landmarks_debug_images(input_path):
    io.log_info ("Adding landmarks debug images...")

    for filepath in io.progress_bar_generator( Path_utils.get_image_paths(input_path), "Processing"):
        filepath = Path(filepath)

        img = cv2_imread(str(filepath))

        dflimg = DFLIMG.load (filepath)

        if dflimg is None:
            io.log_err ("%s is not a dfl image file" % (filepath.name) )
            continue

        if img is not None:
            face_landmarks = dflimg.get_landmarks()
            LandmarksProcessor.draw_landmarks(img, face_landmarks, transparent_mask=True, ie_polys=IEPolys.load(dflimg.get_ie_polys()) )

            output_file = '{}{}'.format( str(Path(str(input_path)) / filepath.stem),  '_debug.jpg')
            cv2_imwrite(output_file, img, [int(cv2.IMWRITE_JPEG_QUALITY), 50] )
Code example #29
File: SampleHost.py Project: rodri595/DeepFaceLab
    def load_face_samples(image_paths):
        sample_list = []

        for filename in io.progress_bar_generator(image_paths, desc="Loading"):
            dflimg = DFLIMG.load(Path(filename))
            if dflimg is None:
                io.log_err(f"{filename} is not a dfl image file.")
            else:
                sample_list.append(
                    Sample(
                        filename=filename,
                        sample_type=SampleType.FACE,
                        face_type=FaceType.fromString(dflimg.get_face_type()),
                        shape=dflimg.get_shape(),
                        landmarks=dflimg.get_landmarks(),
                        ie_polys=dflimg.get_ie_polys(),
                        eyebrows_expand_mod=dflimg.get_eyebrows_expand_mod(),
                        source_filename=dflimg.get_source_filename(),
                    ))
        return sample_list
Code example #30
File: Sorter.py Project: zym1599/DeepFaceLab-1
def sort_by_face_dissim(input_path):

    io.log_info("根据差异度[dissimilarity]排序...")

    img_list = []
    trash_img_list = []
    for filepath in io.progress_bar_generator(
            Path_utils.get_image_paths(input_path), "Loading"):
        filepath = Path(filepath)

        if filepath.suffix == '.png':
            dflimg = DFLPNG.load(str(filepath))
        elif filepath.suffix == '.jpg':
            dflimg = DFLJPG.load(str(filepath))
        else:
            dflimg = None

        if dflimg is None:
            io.log_err("%s 不是DeepFaceLab的图片格式,请使用DeepFaceLab提取脸图" %
                       (filepath.name))
            trash_img_list.append([str(filepath)])
            continue

        img_list.append([str(filepath), dflimg.get_landmarks(), 0])

    img_list_len = len(img_list)
    for i in io.progress_bar_generator(range(img_list_len - 1), "Sorting"):
        score_total = 0
        for j in range(i + 1, len(img_list)):
            if i == j:
                continue
            fl1 = img_list[i][1]
            fl2 = img_list[j][1]
            score_total += np.sum(np.absolute((fl2 - fl1).flatten()))

        img_list[i][2] = score_total

    io.log_info("排序...")
    img_list = sorted(img_list, key=operator.itemgetter(2), reverse=True)

    return img_list, trash_img_list