Example #1
    def load(sample_type, samples_path, subdirs=False):
        """
        Return MPSharedList of samples
        """
        samples_cache = SampleLoader.samples_cache
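        # module-level cache: maps str(samples_path) to a list with one slot per SampleType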

        if str(samples_path) not in samples_cache.keys():
            samples_cache[str(samples_path)] = [None]*SampleType.QTY

        samples = samples_cache[str(samples_path)]

        if sample_type == SampleType.IMAGE:
            if samples[sample_type] is None:
                samples[sample_type] = [ Sample(filename=filename) for filename in io.progress_bar_generator( pathex.get_image_paths(samples_path, subdirs=subdirs), "Loading") ]

        elif sample_type == SampleType.FACE:
            if samples[sample_type] is None:
                result = None
                try:
                    result = samplelib.PackedFaceset.load(samples_path)
                except:
                    io.log_err(f"Error occurred while loading samplelib.PackedFaceset.load {str(samples_path)}, {traceback.format_exc()}")

                if result is not None:
                    io.log_info (f"Loaded {len(result)} packed faces from {samples_path}")

                if result is None:
                    result = SampleLoader.load_face_samples( pathex.get_image_paths(samples_path, subdirs=subdirs) )

                samples[sample_type] = MPSharedList(result)
        elif sample_type == SampleType.FACE_TEMPORAL_SORTED:
            result = SampleLoader.load (SampleType.FACE, samples_path)
            result = SampleLoader.upgradeToFaceTemporalSortedSamples(result)
            samples[sample_type] = MPSharedList(result)

        return samples[sample_type]
Example #2
def add_landmarks_debug_images(input_path):
    io.log_info ("Adding landmarks debug images...")

    for filepath in io.progress_bar_generator( pathex.get_image_paths(input_path), "Processing"):
        filepath = Path(filepath)

        img = cv2_imread(str(filepath))

        dflimg = DFLIMG.load (filepath)

        if dflimg is None or not dflimg.has_data():
            io.log_err (f"{filepath.name} is not a dfl image file")
            continue
        
        if img is not None:
            face_landmarks = dflimg.get_landmarks()
            face_type = FaceType.fromString ( dflimg.get_face_type() )
            
            if face_type == FaceType.MARK_ONLY:
                rect = dflimg.get_source_rect()
                LandmarksProcessor.draw_rect_landmarks(img, rect, face_landmarks, FaceType.FULL )
            else:
                LandmarksProcessor.draw_landmarks(img, face_landmarks, transparent_mask=True )

            output_file = '{}{}'.format( str(Path(str(input_path)) / filepath.stem),  '_debug.jpg')
            cv2_imwrite(output_file, img, [int(cv2.IMWRITE_JPEG_QUALITY), 50] )
Example #3
def convert_png_to_jpg_file(filepath):
    filepath = Path(filepath)

    if filepath.suffix != '.png':
        return

    dflpng = DFLPNG.load(str(filepath))
    if dflpng is None:
        io.log_err("%s is not a dfl png image file" % (filepath.name))
        return

    dfl_dict = dflpng.getDFLDictData()

    img = cv2_imread(str(filepath))
    new_filepath = str(filepath.parent / (filepath.stem + '.jpg'))
    cv2_imwrite(new_filepath, img, [int(cv2.IMWRITE_JPEG_QUALITY), 100])

    DFLJPG.embed_data(new_filepath,
                      face_type=dfl_dict.get('face_type', None),
                      landmarks=dfl_dict.get('landmarks', None),
                      ie_polys=dfl_dict.get('ie_polys', None),
                      source_filename=dfl_dict.get('source_filename', None),
                      source_rect=dfl_dict.get('source_rect', None),
                      source_landmarks=dfl_dict.get('source_landmarks', None))

    filepath.unlink()
Example #4
def extract_video(input_file,
                  output_dir,
                  output_ext=None,
                  fps=None,
                  start_frame=None,
                  end_frame=None):
    input_file_path = Path(input_file)
    output_path = Path(output_dir)

    if not output_path.exists():
        output_path.mkdir(exist_ok=True)

    if input_file_path.suffix == '.*':
        input_file_path = pathex.get_first_file_by_stem(
            input_file_path.parent, input_file_path.stem)
    else:
        if not input_file_path.exists():
            input_file_path = None

    if input_file_path is None:
        io.log_err("input_file not found.")
        return

    if fps is None:
        fps = io.input_int(
            "Enter FPS",
            0,
            help_message=
            "How many frames of every second of the video will be extracted. 0 - full fps"
        )

    if output_ext is None:
        output_ext = io.input_str(
            "Output image format",
            "png", ["png", "jpg"],
            help_message=
            "png is lossless, but extraction is x10 slower for HDD, requires x10 more disk space than jpg."
        )

    for filename in pathex.get_image_paths(output_path, ['.' + output_ext]):
        Path(filename).unlink()

    job = ffmpeg.input(str(input_file_path))

    if start_frame is not None and end_frame is not None:
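        # keep only the requested frame range using ffmpeg's trim filter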
        job = job.trim(start_frame=start_frame, end_frame=end_frame)

    kwargs = {'pix_fmt': 'rgb24'}
    if fps != 0:
        kwargs.update({'r': str(fps)})

    if output_ext == 'jpg':
        kwargs.update({'q:v': '2'})  #highest quality for jpg

    job = job.output(str(output_path / ('%5d.' + output_ext)), **kwargs)

    try:
        job = job.run()
    except:
        io.log_err("ffmpeg fail, job commandline:" + str(job.compile()))
Example #5
def prepare2(workspace):
    import shutil
    dst = get_workspace_dst(workspace)
    aligned = os.path.join(dst, "aligned")
    merged = os.path.join(dst, "merged")
    if not os.path.exists(aligned):
        io.log_err("No Aligned Dir Exists")
        return
    if not os.path.exists(merged):
        io.log_err("No Merged Dir Exists")
        return
    aligned_nos = {}
    for f in os.listdir(aligned):
        if not f.endswith(".png") and not f.endswith(".jpg"):
            continue
        no = f.split("_")[0]
        aligned_nos[no] = True
    for f in os.listdir(merged):
        if not f.endswith(".png") and not f.endswith(".jpg"):
            continue
        no = f.split(".")[0]
        if no not in aligned_nos:
            os.remove(os.path.join(merged, f))
    aligned2 = os.path.join(dst, "aligned2")
    if os.path.exists(aligned2):
        shutil.rmtree(aligned2)
    shutil.move(aligned, aligned2)
    dfl.dfl_extract_faces(merged, aligned)
Example #6
def add_landmarks_debug_images(input_path):
    io.log_info("Adding landmarks debug images...")

    for filepath in io.progress_bar_generator(
            pathex.get_image_paths(input_path), "Processing"):
        filepath = Path(filepath)

        img = cv2_imread(str(filepath))

        dflimg = DFLIMG.load(filepath)

        if dflimg is None:
            io.log_err("%s is not a dfl image file" % (filepath.name))
            continue

        if img is not None:
            face_landmarks = dflimg.get_landmarks()
            LandmarksProcessor.draw_landmarks(img,
                                              face_landmarks,
                                              transparent_mask=True,
                                              ie_polys=IEPolys.load(
                                                  dflimg.get_ie_polys()))

            output_file = '{}{}'.format(
                str(Path(str(input_path)) / filepath.stem), '_debug.jpg')
            cv2_imwrite(output_file, img, [int(cv2.IMWRITE_JPEG_QUALITY), 50])
Example #7
    def load(filename, loader_func=None):
        try:
            inst = DFLJPG.load_raw(filename, loader_func=loader_func)
            inst.dfl_dict = {}

            for chunk in inst.chunks:
                if chunk['name'] == 'APP0':
                    d, c = chunk['data'], 0
                    c, id, _ = struct_unpack(d, c, "=4sB")

                    if id == b"JFIF":
                        c, ver_major, ver_minor, units, Xdensity, Ydensity, Xthumbnail, Ythumbnail = struct_unpack(
                            d, c, "=BBBHHBB")
                        #if units == 0:
                        #    inst.shape = (Ydensity, Xdensity, 3)
                    else:
                        raise Exception("Unknown jpeg ID: %s" % (id))
                elif chunk['name'] == 'SOF0' or chunk['name'] == 'SOF2':
                    d, c = chunk['data'], 0
                    c, precision, height, width = struct_unpack(d, c, ">BHH")
                    inst.shape = (height, width, 3)

                elif chunk['name'] == 'APP15':
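                    # the APP15 segment carries the pickled DFL metadata dict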
                    if type(chunk['data']) == bytes:
                        inst.dfl_dict = pickle.loads(chunk['data'])

            return inst
        except Exception as e:
            io.log_err(
                f'Exception occurred while DFLJPG.load : {traceback.format_exc()}'
            )
            return None
Example #8
def denoise_image_sequence(input_dir, ext=None, factor=None):
    input_path = Path(input_dir)

    if not input_path.exists():
        io.log_err("input_dir not found.")
        return

    if ext is None:
        ext = io.input_str("Input image format (extension)", "png")

    if factor is None:
        factor = np.clip(io.input_int("Denoise factor?", 5, add_info="1-20"),
                         1, 20)

    kwargs = {}
    if ext == 'jpg':
        kwargs.update({'q:v': '2'})

    job = (ffmpeg.input(str(input_path / ('%5d.' + ext))).filter(
        "hqdn3d", factor, factor, 5,
        5).output(str(input_path / ('%5d.' + ext)), **kwargs))

    try:
        job = job.run()
    except:
        io.log_err("ffmpeg fail, job commandline:" + str(job.compile()))
Example #9
def sort_by_face_source_rect_size(input_path):
    io.log_info("Sorting by face rect size...")
    img_list = []
    trash_img_list = []
    for filepath in io.progress_bar_generator(
            pathex.get_image_paths(input_path), "Loading"):
        filepath = Path(filepath)

        dflimg = DFLIMG.load(filepath)

        if dflimg is None or not dflimg.has_data():
            io.log_err(f"{filepath.name} is not a dfl image file")
            trash_img_list.append([str(filepath)])
            continue

        source_rect = dflimg.get_source_rect()
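        # area of the source rect, computed from its four corner points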
        rect_area = mathlib.polygon_area(
            np.array(source_rect[[0, 2, 2, 0]]).astype(np.float32),
            np.array(source_rect[[1, 1, 3, 3]]).astype(np.float32))

        img_list.append([str(filepath), rect_area])

    io.log_info("Sorting...")
    img_list = sorted(img_list, key=operator.itemgetter(1), reverse=True)

    return img_list, trash_img_list
Example #10
def remove_ie_polys_file(filepath):
    filepath = Path(filepath)

    dflimg = DFLIMG.load(filepath)
    if dflimg is None:
        io.log_err("%s is not a dfl image file" % (filepath.name))
        return

    dflimg.remove_ie_polys()
    dflimg.embed_and_set(str(filepath))
Example #11
def merge(input_dir):
    input_path = Path(input_dir)
    if not input_path.exists():
        raise ValueError('input_dir not found. Please ensure it exists.')

    images_paths = pathex.get_image_paths(input_path, return_Path_class=True)

    images_processed = 0
    for filepath in io.progress_bar_generator(images_paths, "Processing"):
        json_filepath = filepath.parent / (filepath.stem + '.json')
        if json_filepath.exists():
            dflimg = DFLIMG.load(filepath)
            if dflimg is not None and dflimg.has_data():
                try:
                    json_dict = json.loads(json_filepath.read_text())

                    seg_ie_polys = IEPolys()
                    total_points = 0

                    #include polys first
                    for shape in json_dict['shapes']:
                        if shape['shape_type'] == 'polygon' and \
                           shape['label'] != '0':
                            seg_ie_poly = seg_ie_polys.add(1)

                            for x, y in shape['points']:
                                seg_ie_poly.add(int(x), int(y))
                                total_points += 1

                    #exclude polys
                    for shape in json_dict['shapes']:
                        if shape['shape_type'] == 'polygon' and \
                           shape['label'] == '0':
                            seg_ie_poly = seg_ie_polys.add(0)

                            for x, y in shape['points']:
                                seg_ie_poly.add(int(x), int(y))
                                total_points += 1

                    if total_points == 0:
                        io.log_info(
                            f"No points found in {json_filepath}, skipping.")
                        continue

                    dflimg.set_seg_ie_polys(seg_ie_polys.dump())
                    dflimg.save()

                    json_filepath.unlink()

                    images_processed += 1
                except:
                    io.log_err(f"err {filepath}, {traceback.format_exc()}")
                    return

    io.log_info(f"Images processed: {images_processed}")
Example #12
def cv2_imread(filename, flags=cv2.IMREAD_UNCHANGED, loader_func=None):
    try:
        if loader_func is not None:
            bytes = bytearray(loader_func(filename))
        else:
            with open(filename, "rb") as stream:
                bytes = bytearray(stream.read())
        numpyarray = np.asarray(bytes, dtype=np.uint8)
        return cv2.imdecode(numpyarray, flags)
    except:
        io.log_err(f"Exception occurred in cv2_imread : {traceback.format_exc()}")
        return None
Example #13
def get_pitch_yaw_roll(input_path, r=0.05):
    import os
    import numpy as np
    import cv2
    from shutil import copyfile
    from pathlib import Path
    from utils import Path_utils
    from utils.DFLPNG import DFLPNG
    from utils.DFLJPG import DFLJPG
    from facelib import LandmarksProcessor
    from joblib import Subprocessor
    import multiprocessing
    from imagelib import estimate_sharpness
    io.log_info("Sorting by face yaw...")
    img_list = []
    trash_img_list = []
    for filepath in io.progress_bar_generator(
            Path_utils.get_image_paths(input_path), "Loading"):
        filepath = Path(filepath)
        if filepath.suffix == '.png':
            dflimg = DFLPNG.load(str(filepath))
        elif filepath.suffix == '.jpg':
            dflimg = DFLJPG.load(str(filepath))
        else:
            dflimg = None
        if dflimg is None:
            io.log_err("%s is not a dfl image file" % (filepath.name))
            trash_img_list.append([str(filepath)])
            continue
        pitch, yaw, roll = LandmarksProcessor.estimate_pitch_yaw_roll(
            dflimg.get_landmarks())
        img_list.append([str(filepath), pitch, yaw, roll])

    img_list.sort(key=lambda item: item[1])
    with open(os.path.join(input_path, "_pitch_yaw_roll.csv"), "w") as f:
        for i in img_list:
            f.write("%s,%f,%f,%f\n" %
                    (os.path.basename(i[0]), i[1], i[2], i[3]))

    import cv
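    # plot pitch (x) against yaw (y): grey circles of radius r mark each sample's neighbourhood, small orange dots mark the samples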
    width = 800
    img = cv.cv_new((width, width))
    xs = [i[1] for i in img_list]
    ys = [i[2] for i in img_list]
    cs = [(128, 128, 128)] * len(xs)
    rs = [int(r * width / 2)] * len(xs)
    cv.cv_scatter(img, xs, ys, [-1, 1], [-1, 1], cs, rs)
    cs = [(0xcc, 0x66, 0x33)] * len(xs)
    rs = [2] * len(xs)
    cv.cv_scatter(img, xs, ys, [-1, 1], [-1, 1], cs, rs)
    cv.cv_save(img, os.path.join(input_path, "_pitch_yaw_roll.bmp"))
    return img_list
Example #14
    def get_person_id_max_count(samples_path):
        samples = None
        try:
            samples = samplelib.PackedFaceset.load(samples_path)
        except:
            io.log_err(f"Error occurred while loading samplelib.PackedFaceset.load {str(samples_path)}, {traceback.format_exc()}")

        if samples is None:
            raise ValueError("packed faceset not found.")
        persons_name_idxs = {}
        for sample in samples:
            persons_name_idxs[sample.person_name] = 0
        return len(list(persons_name_idxs.keys()))
Example #15
def mp4(workspace):
    import os
    for f in os.listdir(workspace):
        if not os.path.isdir(os.path.join(workspace,
                                          f)) or not f.startswith("data_dst_"):
            continue
        io.log_info(f)
        data_dst = os.path.join(workspace, f)
        data_dst_merged = os.path.join(data_dst, "merged")
        data_dst_aligned = os.path.join(data_dst, "aligned")
        data_dst_video = os.path.join(data_dst, "video")
        refer_path = None
        for v in os.listdir(data_dst_video):
            # if v.split(".")[-1] in ["mp4", "avi", "wmv", "mkv"]:
            if testExt(v):
                refer_path = os.path.join(data_dst_video, v)
                break
        if not refer_path:
            io.log_err("No Refer File In " + data_dst_video)
            return
        io.log_info("Refer File " + refer_path)
        # restore the original filename ordering
        need_recover = True
        for img in os.listdir(data_dst_aligned):
            if img.endswith("_0.jpg") or img.endswith("_0.png"):
                need_recover = False
        if need_recover:
            recover_filename(data_dst_aligned)
        # if there are no faces in data_dst, extract
        has_img = False
        for img in os.listdir(data_dst):
            if img.endswith(".jpg") or img.endswith(".png"):
                has_img = True
                break
        if not has_img:
            dfl.dfl_extract_video(refer_path, data_dst)
        # drop frames without faces
        # if skip:
        #     skip_no_face(data_dst)
        # convert to mp4
        refer_name = ".".join(os.path.basename(refer_path).split(".")[:-1])
        result_path = os.path.join(
            workspace, "result_%s_%s.mp4" % (get_time_str(), refer_name))
        dfl.dfl_video_from_sequence(data_dst_merged, result_path, refer_path)
        # move to trash
        trash_dir = os.path.join(workspace, "../trash_workspace")
        import shutil
        shutil.move(data_dst, trash_dir)
Example #16
    def embed_data(filename,
                   face_type=None,
                   landmarks=None,
                   ie_polys=None,
                   seg_ie_polys=None,
                   source_filename=None,
                   source_rect=None,
                   source_landmarks=None,
                   image_to_face_mat=None,
                   fanseg_mask=None,
                   eyebrows_expand_mod=None,
                   relighted=None,
                   **kwargs):

        if fanseg_mask is not None:
            fanseg_mask = np.clip((fanseg_mask * 255).astype(np.uint8), 0, 255)

            ret, buf = cv2.imencode('.jpg', fanseg_mask,
                                    [int(cv2.IMWRITE_JPEG_QUALITY), 85])

            if ret and len(buf) < 60000:
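                # keep the encoded mask only if it fits in a JPEG APPn segment (~64 KB limit)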
                fanseg_mask = buf
            else:
                io.log_err("Unable to encode fanseg_mask for %s" % (filename))
                fanseg_mask = None

        if ie_polys is not None:
            if not isinstance(ie_polys, list):
                ie_polys = ie_polys.dump()

        if seg_ie_polys is not None:
            if not isinstance(seg_ie_polys, list):
                seg_ie_polys = seg_ie_polys.dump()

        DFLJPG.embed_dfldict(
            filename, {
                'face_type': face_type,
                'landmarks': landmarks,
                'ie_polys': ie_polys,
                'seg_ie_polys': seg_ie_polys,
                'source_filename': source_filename,
                'source_rect': source_rect,
                'source_landmarks': source_landmarks,
                'image_to_face_mat': image_to_face_mat,
                'fanseg_mask': fanseg_mask,
                'eyebrows_expand_mod': eyebrows_expand_mod,
                'relighted': relighted
            })
Example #17
def cut_video(input_file,
              from_time=None,
              to_time=None,
              audio_track_id=None,
              bitrate=None):
    input_file_path = Path(input_file)
    if input_file_path is None:
        io.log_err("input_file not found.")
        return

    output_file_path = input_file_path.parent / (
        input_file_path.stem + "_cut" + input_file_path.suffix)

    if from_time is None:
        from_time = io.input_str("From time", "00:00:00.000")

    if to_time is None:
        to_time = io.input_str("To time", "00:00:00.000")

    if audio_track_id is None:
        audio_track_id = io.input_int("Specify audio track id.", 0)

    if bitrate is None:
        bitrate = max(1, io.input_int("Bitrate of output file in MB/s", 25))

    kwargs = {
        "c:v": "libx264",
        "b:v": "%dM" % (bitrate),
        "pix_fmt": "yuv420p",
    }

    job = ffmpeg.input(str(input_file_path), ss=from_time, to=to_time)

    job_v = job['v:0']
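    # select the chosen audio track; the trailing '?' makes the mapping optional if the track is absent (ffmpeg -map syntax)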
    job_a = job['a:' + str(audio_track_id) + '?']

    job = ffmpeg.output(job_v, job_a, str(output_file_path),
                        **kwargs).overwrite_output()

    try:
        job = job.run()
    except:
        io.log_err("ffmpeg fail, job commandline:" + str(job.compile()))
Example #18
def convert(workspace, model="SAEHD", force_recover=False):
    import os
    for f in os.listdir(workspace):
        if not os.path.isdir(os.path.join(workspace,
                                          f)) or not f.startswith("data_dst_"):
            continue
        io.log_info(f)
        model_dir = os.path.join(workspace, "model")
        self_model_dir = os.path.join(workspace, f, "model")
        if os.path.exists(self_model_dir):
            io.log_info("Use Self Model")
            model_dir = self_model_dir
        data_dst = os.path.join(workspace, f)
        data_dst_merged = os.path.join(data_dst, "merged")
        data_dst_aligned = os.path.join(data_dst, "aligned")
        data_dst_video = os.path.join(data_dst, "video")
        refer_path = None
        for v in os.listdir(data_dst_video):
            # if v.split(".")[-1] in ["mp4", "avi", "wmv", "mkv"]:
            if testExt(v):
                refer_path = os.path.join(data_dst_video, v)
                break
        if not refer_path:
            io.log_err("No Refer File In " + data_dst_video)
            return
        # restore the original filename ordering
        need_recover = True
        for img in os.listdir(data_dst_aligned):
            if img.endswith("_0.jpg") or img.endswith("_0.png"):
                need_recover = False
            break
        if need_recover or force_recover:
            recover_filename(data_dst_aligned)
        # if there are no faces in data_dst, extract
        has_img = False
        for img in os.listdir(data_dst):
            if img.endswith(".jpg") or img.endswith(".png"):
                has_img = True
                break
        if not has_img:
            dfl.dfl_extract_video(refer_path, data_dst)
        # convert (merge)
        dfl.dfl_merge(data_dst, data_dst_merged, data_dst_aligned, model_dir,
                      model)
Example #19
    def cli_init_dispatcher(cli):
        while not cli.c2s.empty():
            obj = cli.c2s.get()
            op = obj.get('op', '')
            if op == 'init_ok':
                cli.state = 0
            elif op == 'log_info':
                io.log_info(obj['msg'])
            elif op == 'log_err':
                io.log_err(obj['msg'])
            elif op == 'error':
                err_msg = obj.get('err_msg', None)
                if err_msg is not None:
                    io.log_info(
                        f'Error while subprocess initialization: {err_msg}'
                    )
                cli.kill()
                self.clis.remove(cli)
                break
Example #20
def split(input_dir):
    input_path = Path(input_dir)
    if not input_path.exists():
        raise ValueError('input_dir not found. Please ensure it exists.')

    images_paths = pathex.get_image_paths(input_path, return_Path_class=True)

    images_processed = 0
    for filepath in io.progress_bar_generator(images_paths, "Processing"):
        json_filepath = filepath.parent / (filepath.stem+'.json')

        dflimg = DFLIMG.load(filepath)
        if dflimg is not None:
            try:
                seg_ie_polys = dflimg.get_seg_ie_polys()
                if seg_ie_polys is not None:                    
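                    # build a labelme-style JSON annotation next to the image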
                    json_dict = {}
                    json_dict['version'] = "4.2.9"
                    json_dict['flags'] = {}
                    json_dict['shapes'] = []
                    json_dict['imagePath'] = filepath.name
                    json_dict['imageData'] = None
                    
                    for poly_type, points_list in seg_ie_polys:
                        shape_dict = {}
                        shape_dict['label'] = str(poly_type)
                        shape_dict['points'] = points_list
                        shape_dict['group_id'] = None
                        shape_dict['shape_type'] = 'polygon'
                        shape_dict['flags'] = {}
                        json_dict['shapes'].append( shape_dict )

                    json_filepath.write_text( json.dumps (json_dict,indent=4) )

                    dflimg.remove_seg_ie_polys()
                    dflimg.embed_and_set (filepath)
                    images_processed += 1
            except:
                io.log_err(f"err {filepath}, {traceback.format_exc()}")
                return

    io.log_info(f"Images processed: {images_processed}")
Example #21
def restore_faceset_metadata_folder(input_path):
    input_path = Path(input_path)

    metadata_filepath = input_path / 'meta.dat'
    io.log_info(f"Restoring metadata from {str(metadata_filepath)}.\r\n")

    if not metadata_filepath.exists():
        io.log_err(f"Unable to find {str(metadata_filepath)}.")
        return

    try:
        with open(metadata_filepath, "rb") as f:
            d = pickle.loads(f.read())
    except:
        raise FileNotFoundError(str(metadata_filepath))

    for filepath in io.progress_bar_generator(
            pathex.get_image_paths(input_path,
                                   image_extensions=['.jpg'],
                                   return_Path_class=True), "Processing"):
        saved_data = d.get(filepath.name, None)
        if saved_data is None:
            io.log_info(f"No saved metadata for {filepath}")
            continue

        shape, dfl_dict = saved_data

        img = cv2_imread(filepath)
        if img.shape != shape:
            img = cv2.resize(img, (shape[1], shape[0]),
                             interpolation=cv2.INTER_LANCZOS4)

            cv2_imwrite(str(filepath), img,
                        [int(cv2.IMWRITE_JPEG_QUALITY), 100])

        if filepath.suffix == '.jpg':
            dflimg = DFLJPG.load(filepath)
            dflimg.set_dict(dfl_dict)
            dflimg.save()
        else:
            continue

    metadata_filepath.unlink()
Example #22
def sort_by_origname(input_path):
    io.log_info ("Sort by original filename...")

    img_list = []
    trash_img_list = []
    for filepath in io.progress_bar_generator( pathex.get_image_paths(input_path), "Loading"):
        filepath = Path(filepath)

        dflimg = DFLIMG.load (filepath)

        if dflimg is None:
            io.log_err ("%s is not a dfl image file" % (filepath.name) )
            trash_img_list.append( [str(filepath)] )
            continue

        img_list.append( [str(filepath), dflimg.get_source_filename()] )

    io.log_info ("Sorting...")
    img_list = sorted(img_list, key=operator.itemgetter(1))
    return img_list, trash_img_list
Example #23
def restore_faceset_metadata_folder(input_path):
    input_path = Path(input_path)

    metadata_filepath = input_path / 'meta.dat'
    io.log_info(f"Restoring metadata from {str(metadata_filepath)}.\r\n")

    if not metadata_filepath.exists():
        io.log_err(f"Unable to find {str(metadata_filepath)}.")
        return

    try:
        with open(metadata_filepath, "rb") as f:
            d = pickle.loads(f.read())
    except:
        raise FileNotFoundError(str(metadata_filepath))

    for filepath in io.progress_bar_generator(
            pathex.get_image_paths(input_path), "Processing"):
        filepath = Path(filepath)

        saved_data = d.get(filepath.name, None)
        if saved_data is None:
            io.log_info(f"No saved metadata for {filepath}")
            continue

        shape, dfl_dict = saved_data

        img = cv2_imread(str(filepath))
        if img.shape != shape:
            img = cv2.resize(img, (shape[1], shape[0]),
                             interpolation=cv2.INTER_LANCZOS4)

            if filepath.suffix == '.png':
                cv2_imwrite(str(filepath), img)
            elif filepath.suffix == '.jpg':
                cv2_imwrite(str(filepath), img,
                            [int(cv2.IMWRITE_JPEG_QUALITY), 100])

        if filepath.suffix == '.png':
            DFLPNG.embed_dfldict(str(filepath), dfl_dict)
        elif filepath.suffix == '.jpg':
            DFLJPG.embed_dfldict(str(filepath), dfl_dict)
        else:
            continue

    metadata_filepath.unlink()
Example #24
def sort_by_face_pitch(input_path):
    io.log_info ("Sorting by face pitch...")
    img_list = []
    trash_img_list = []
    for filepath in io.progress_bar_generator( pathex.get_image_paths(input_path), "Loading"):
        filepath = Path(filepath)

        dflimg = DFLIMG.load (filepath)

        if dflimg is None:
            io.log_err ("%s is not a dfl image file" % (filepath.name) )
            trash_img_list.append ( [str(filepath)] )
            continue

        pitch, yaw, roll = LandmarksProcessor.estimate_pitch_yaw_roll ( dflimg.get_landmarks(), size=dflimg.get_shape()[1] )

        img_list.append( [str(filepath), pitch ] )

    io.log_info ("Sorting...")
    img_list = sorted(img_list, key=operator.itemgetter(1), reverse=True)

    return img_list, trash_img_list
Example #25
        def load_weights(self, filename):
            """
            returns True if file exists
            """
            filepath = Path(filename)
            if filepath.exists():
                result = True
                d_dumped = filepath.read_bytes()
                d = pickle.loads(d_dumped)
            else:
                return False

            weights = self.get_weights()

            if self.name is None:
                raise Exception("name must be defined.")

            tuples = []
            for w in weights:
                w_name_split = w.name.split('/')
                if self.name != w_name_split[0]:
                    raise Exception("weight first name != Saveable.name")

                sub_w_name = "/".join(w_name_split[1:])

                w_val = d.get(sub_w_name, None)

                if w_val is None:
                    io.log_err(
                        f"Weight {w.name} was not loaded from file {filename}")
                    tuples.append((w, w.initializer))
                else:
                    # restore the variable's expected shape before assignment
                    w_val = np.reshape(w_val, w.shape.as_list())
                    tuples.append((w, w_val))

            nn.tf_batch_set_value(tuples)

            return True
Example #26
def sort_by_mouth(input_path):
    io.log_info ("Sorting by mouth openness...")
    img_list = []
    trash_img_list = []
    for filepath in io.progress_bar_generator( pathex.get_image_paths(input_path), "Loading"):
        filepath = Path(filepath)

        dflimg = DFLIMG.load (filepath)

        if dflimg is None or not dflimg.has_data():
            io.log_err (f"{filepath.name} is not a dfl image file")
            trash_img_list.append ( [str(filepath)] )
            continue

        landmarks = dflimg.get_landmarks()
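        # vertical gap between two inner-lip landmarks (68-point layout) serves as the mouth-openness score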
        distance = landmarks[67][1] - landmarks[63][1]

        img_list.append( [str(filepath), distance ] )

    io.log_info ("Sorting...")
    img_list = sorted(img_list, key=operator.itemgetter(1), reverse=True)

    return img_list, trash_img_list
Example #27
def recover_original_aligned_filename(input_path):
    io.log_info("Recovering original aligned filename...")

    files = []
    for filepath in io.progress_bar_generator(
            pathex.get_image_paths(input_path), "Processing"):
        filepath = Path(filepath)

        dflimg = DFLIMG.load(filepath)

        if dflimg is None:
            io.log_err("%s is not a dfl image file" % (filepath.name))
            continue

        files += [[filepath, None, dflimg.get_source_filename(), False]]

    files_len = len(files)
    for i in io.progress_bar_generator(range(files_len), "Sorting"):
        fp, _, sf, converted = files[i]

        if converted:
            continue

        sf_stem = Path(sf).stem

        files[i][1] = fp.parent / (sf_stem + '_0' + fp.suffix)
        files[i][3] = True
        c = 1

        for j in range(i + 1, files_len):
            fp_j, _, sf_j, converted_j = files[j]
            if converted_j:
                continue

            if sf_j == sf:
                files[j][1] = fp_j.parent / (sf_stem + ('_%d' %
                                                        (c)) + fp_j.suffix)
                files[j][3] = True
                c += 1

    for file in io.progress_bar_generator(files, "Renaming", leave=False):
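        # first pass: rename to temporary names so source/target collisions cannot occur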
        fs, _, _, _ = file
        dst = fs.parent / (fs.stem + '_tmp' + fs.suffix)
        try:
            fs.rename(dst)
        except:
            io.log_err('fail to rename %s' % (fs.name))

    for file in io.progress_bar_generator(files, "Renaming"):
        fs, fd, _, _ = file
        fs = fs.parent / (fs.stem + '_tmp' + fs.suffix)
        try:
            fs.rename(fd)
        except:
            io.log_err('fail to rename %s' % (fs.name))
Example #28
def denoise_image_sequence(input_dir, ext=None, factor=None):
    input_path = Path(input_dir)

    if not input_path.exists():
        io.log_err("input_dir not found.")
        return

    image_paths = [
        Path(filepath) for filepath in pathex.get_image_paths(input_path)
    ]

    # Check extension of all images
    image_paths_suffix = None
    for filepath in image_paths:
        if image_paths_suffix is None:
            image_paths_suffix = filepath.suffix
        else:
            if filepath.suffix != image_paths_suffix:
                io.log_err(
                    f"All images in {input_path.name} should have the same extension."
                )
                return
                return

    if factor is None:
        factor = np.clip(
            settings.Denoise_factor, 1,
            20)  #io.input_int ("Denoise factor?", 7, add_info="1-20"), 1, 20 )

    # Rename to temporary filenames
    for i, filepath in io.progress_bar_generator(enumerate(image_paths),
                                                 "Renaming",
                                                 leave=False):
        src = filepath
        dst = filepath.parent / (f'{i+1:06}_{filepath.name}')
        try:
            src.rename(dst)
        except:
            io.log_error('fail to rename %s' % (src.name))
            return

    # Rename to sequential filenames
    for i, filepath in io.progress_bar_generator(enumerate(image_paths),
                                                 "Renaming",
                                                 leave=False):

        src = filepath.parent / (f'{i+1:06}_{filepath.name}')
        dst = filepath.parent / (f'{i+1:06}{filepath.suffix}')
        try:
            src.rename(dst)
        except:
            io.log_error('fail to rename %s' % (src.name))
            return

    # Process image sequence in ffmpeg
    kwargs = {}
    if image_paths_suffix == '.jpg':
        kwargs.update({'q:v': '2'})

    job = (ffmpeg.input(str(input_path / ('%6d' + image_paths_suffix))).filter(
        "hqdn3d", factor, factor, 5,
        5).output(str(input_path / ('%6d' + image_paths_suffix)), **kwargs))

    try:
        job = job.run()
    except:
        io.log_err("ffmpeg fail, job commandline:" + str(job.compile()))

    # Rename to temporary filenames
    for i, filepath in io.progress_bar_generator(enumerate(image_paths),
                                                 "Renaming",
                                                 leave=False):
        src = filepath.parent / (f'{i+1:06}{filepath.suffix}')
        dst = filepath.parent / (f'{i+1:06}_{filepath.name}')
        try:
            src.rename(dst)
        except:
            io.log_error('fail to rename %s' % (src.name))
            return

    # Rename to initial filenames
    for i, filepath in io.progress_bar_generator(enumerate(image_paths),
                                                 "Renaming",
                                                 leave=False):
        src = filepath.parent / (f'{i+1:06}_{filepath.name}')
        dst = filepath

        try:
            src.rename(dst)
        except:
            io.log_error('fail to rename %s' % (src.name))
            return
Example #29
def video_from_sequence_(input_dir,
                         output_file,
                         reference_file=None,
                         ext=None,
                         fps=None,
                         bitrate=None,
                         include_audio=False,
                         lossless=None):
    input_path = Path(input_dir)
    output_file_path = Path(output_file)
    reference_file_path = Path(
        reference_file) if reference_file is not None else None

    if not input_path.exists():
        io.log_err("input_dir not found.")
        return

    if not output_file_path.parent.exists():
        output_file_path.parent.mkdir(parents=True, exist_ok=True)

    out_ext = output_file_path.suffix

    if ext is None:
        ext = io.input_str("Input image format (extension)", "png")

    if lossless is None:
        lossless = io.input_bool("Use lossless codec", False)

    video_id = None
    audio_id = None
    ref_in_a = None
    if reference_file_path is not None:
        if reference_file_path.suffix == '.*':
            reference_file_path = pathex.get_first_file_by_stem(
                reference_file_path.parent, reference_file_path.stem)
        else:
            if not reference_file_path.exists():
                reference_file_path = None

        if reference_file_path is None:
            io.log_err("reference_file not found.")
            return

        #probing reference file
        probe = ffmpeg.probe(str(reference_file_path))

        #getting first video and audio streams id with fps
        for stream in probe['streams']:
            if video_id is None and stream['codec_type'] == 'video':
                video_id = stream['index']
                fps = stream['r_frame_rate']

            if audio_id is None and stream['codec_type'] == 'audio':
                audio_id = stream['index']

        if audio_id is not None:
            #has audio track
            ref_in_a = ffmpeg.input(str(reference_file_path))[str(audio_id)]

    if fps is None:
        #if fps not specified and not overwritten by reference-file
        fps = max(1, io.input_int("Enter FPS", 25))

    if not lossless and bitrate is None:
        bitrate = 1  #max (1, settings.bitrate)#io.input_int ("Bitrate of output file in MB/s", 16)

    input_image_paths = pathex.get_image_paths(input_path)

    i_in = ffmpeg.input('pipe:', format='image2pipe', r=fps)
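    # frames will be written to ffmpeg's stdin as an image2pipe stream at the chosen fps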

    output_args = [i_in]

    if include_audio and ref_in_a is not None:
        output_args += [ref_in_a]

    output_args += [str(output_file_path)]

    output_kwargs = {}

    if lossless:
        output_kwargs.update({
            "c:v": "libx264",
            "crf": "0",
            "pix_fmt": "yuv420p",
        })
    else:
        output_kwargs.update({
            "c:v": "libx264",
            "b:v": "%dM" % (bitrate),
            "pix_fmt": "yuv420p",
        })

    if include_audio and ref_in_a is not None:
        output_kwargs.update({
            "c:a": "aac",
            "b:a": "192k",
            "ar": "48000",
            "strict": "experimental"
        })

    job = (ffmpeg.output(*output_args, **output_kwargs).overwrite_output())

    try:
        job_run = job.run_async(pipe_stdin=True)

        for image_path in input_image_paths:
            with open(image_path, "rb") as f:
                image_bytes = f.read()
                job_run.stdin.write(image_bytes)

        job_run.stdin.close()
        job_run.wait()
    except:
        io.log_err("ffmpeg fail, job commandline:" + str(job.compile()))
Example #30
    def batch_func(self, param ):
        pickled_samples, resolution, face_type, data_format = param

        samples = pickle.loads(pickled_samples)    
            
        shuffle_idxs = []
        idxs = [*range(len(samples))]
        
        random_flip = True
        rotation_range=[-10,10]
        scale_range=[-0.05, 0.05]
        tx_range=[-0.05, 0.05]
        ty_range=[-0.05, 0.05]

        random_bilinear_resize_chance, random_bilinear_resize_max_size_per = 25,75
        motion_blur_chance, motion_blur_mb_max_size = 25, 5
        gaussian_blur_chance, gaussian_blur_kernel_max_size = 25, 5

        bs = self.batch_size
        while True:
            batches = [ [], [] ]

            n_batch = 0
            while n_batch < bs:
                try:
                    if len(shuffle_idxs) == 0:
                        shuffle_idxs = idxs.copy()
                        np.random.shuffle(shuffle_idxs)
                    idx = shuffle_idxs.pop()
                    
                    sample = samples[idx] 

                    img = sample.load_bgr()
                    h,w,c = img.shape

                    mask = np.zeros ((h,w,1), dtype=np.float32)
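                    # rasterize the sample's include/exclude segmentation polygons into the mask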
                    sample.seg_ie_polys.overlay_mask(mask)

                    warp_params = imagelib.gen_warp_params(resolution, random_flip, rotation_range=rotation_range, scale_range=scale_range, tx_range=tx_range, ty_range=ty_range )

                    if face_type == sample.face_type:
                        if w != resolution:
                            img = cv2.resize( img, (resolution, resolution), interpolation=cv2.INTER_LANCZOS4 )
                            mask = cv2.resize( mask, (resolution, resolution), interpolation=cv2.INTER_LANCZOS4 )
                    else:
                        mat = LandmarksProcessor.get_transform_mat (sample.landmarks, resolution, face_type)
                        img  = cv2.warpAffine( img,  mat, (resolution,resolution), borderMode=cv2.BORDER_CONSTANT, flags=cv2.INTER_LANCZOS4 )
                        mask = cv2.warpAffine( mask, mat, (resolution,resolution), borderMode=cv2.BORDER_CONSTANT, flags=cv2.INTER_LANCZOS4 )

                    if len(mask.shape) == 2:
                        mask = mask[...,None]

                    img   = imagelib.warp_by_params (warp_params, img,  can_warp=True, can_transform=True, can_flip=True, border_replicate=False)
                    mask  = imagelib.warp_by_params (warp_params, mask, can_warp=True, can_transform=True, can_flip=True, border_replicate=False)

                    img = np.clip(img.astype(np.float32), 0, 1)
                    mask[mask < 0.5] = 0.0
                    mask[mask >= 0.5] = 1.0
                    mask = np.clip(mask, 0, 1)

                    if np.random.randint(2) == 0:
                        img = imagelib.apply_random_hsv_shift(img, mask=sd.random_circle_faded ([resolution,resolution]))
                    else:                    
                        img = imagelib.apply_random_rgb_levels(img, mask=sd.random_circle_faded ([resolution,resolution]))
                    
                    img = imagelib.apply_random_motion_blur( img, motion_blur_chance, motion_blur_mb_max_size, mask=sd.random_circle_faded ([resolution,resolution]))
                    img = imagelib.apply_random_gaussian_blur( img, gaussian_blur_chance, gaussian_blur_kernel_max_size, mask=sd.random_circle_faded ([resolution,resolution]))
                    img = imagelib.apply_random_bilinear_resize( img, random_bilinear_resize_chance, random_bilinear_resize_max_size_per, mask=sd.random_circle_faded ([resolution,resolution]))
                    
                    if data_format == "NCHW":
                        img = np.transpose(img, (2,0,1) )
                        mask = np.transpose(mask, (2,0,1) )

                    batches[0].append ( img )
                    batches[1].append ( mask )

                    n_batch += 1
                except:
                    io.log_err ( traceback.format_exc() )

            yield [ np.array(batch) for batch in batches]