Example #1
0
 def get_person_id_max_count(samples_path):
     """Return how many person-id subdirectories exist under samples_path."""
     # Each immediate subdirectory of samples_path corresponds to one person id,
     # so the count of directory names is the number of distinct ids.
     dir_names = Path_utils.get_all_dir_names(samples_path)
     return len(dir_names)
Example #2
0
def video_from_sequence( input_dir, output_file, reference_file=None, ext=None, fps=None, bitrate=None, lossless=None ):
    """Encode a numbered image sequence ('%5d.<ext>' files in input_dir) into a video via ffmpeg.

    input_dir      : directory containing the sequentially numbered frames
    output_file    : path of the resulting video file
    reference_file : optional video whose fps and first audio track are reused;
                     a '.*' suffix means "first file with this stem, any extension"
    ext            : frame image extension; prompted for if None (default 'png')
    fps            : output frame rate; overridden by reference_file, else prompted
    bitrate        : video bitrate in MB/s (ignored when lossless); prompted if needed
    lossless       : use lossless 'png' codec instead of libx264; prompted if None

    Logs an error and returns early when input_dir or reference_file is missing.
    """
    input_path = Path(input_dir)
    output_file_path = Path(output_file)
    reference_file_path = Path(reference_file) if reference_file is not None else None

    if not input_path.exists():
        io.log_err("input_dir not found.")
        return

    # Create the destination directory if needed and keep going.
    # (BUGFIX: previously the function returned right after mkdir, so the video
    #  was never written on the first run into a fresh output directory.)
    if not output_file_path.parent.exists():
        output_file_path.parent.mkdir(parents=True, exist_ok=True)

    if ext is None:
        ext = io.input_str ("Input image format (extension)? ( default:png ) : ", "png")

    if lossless is None:
        lossless = io.input_bool ("Use lossless codec ? ( default:no ) : ", False)

    video_id = None
    audio_id = None
    ref_in_a = None
    if reference_file_path is not None:
        if reference_file_path.suffix == '.*':
            # '.*' wildcard: take the first file matching the stem, any extension
            reference_file_path = Path_utils.get_first_file_by_stem (reference_file_path.parent, reference_file_path.stem)
        else:
            if not reference_file_path.exists():
                reference_file_path = None

        if reference_file_path is None:
            io.log_err("reference_file not found.")
            return

        #probing reference file
        probe = ffmpeg.probe (str(reference_file_path))

        #getting first video and audio streams id with fps
        for stream in probe['streams']:
            if video_id is None and stream['codec_type'] == 'video':
                video_id = stream['index']
                fps = stream['r_frame_rate']

            if audio_id is None and stream['codec_type'] == 'audio':
                audio_id = stream['index']

        if audio_id is not None:
            #has audio track: select it from the reference input by stream index
            ref_in_a = ffmpeg.input (str(reference_file_path))[str(audio_id)]

    if fps is None:
        #if fps not specified and not overwritten by reference-file
        fps = max (1, io.input_int ("FPS ? (default:25) : ", 25) )

    if not lossless and bitrate is None:
        bitrate = max (1, io.input_int ("Bitrate of output file in MB/s ? (default:16) : ", 16) )

    i_in = ffmpeg.input(str (input_path / ('%5d.'+ext)), r=fps)

    output_args = [i_in]

    if ref_in_a is not None:
        output_args += [ref_in_a]

    output_args += [str (output_file_path)]

    output_kwargs = {}

    if lossless:
        output_kwargs.update ({"c:v": "png"
                              })
    else:
        output_kwargs.update ({"c:v": "libx264",
                               "b:v": "%dM" %(bitrate),
                               "pix_fmt": "yuv420p",
                              })

    output_kwargs.update ({"c:a": "aac",
                           "b:a": "192k",
                           "ar" : "48000",
                           "strict": "experimental"
                          })

    job = ( ffmpeg.output(*output_args, **output_kwargs).overwrite_output() )
    try:
        job = job.run()
    except Exception:
        # narrow except: let KeyboardInterrupt/SystemExit propagate
        io.log_err ("ffmpeg fail, job commandline:" + str(job.compile()) )
Example #3
0
def mask_editor_main(input_dir, confirmed_dir=None, skipped_dir=None):
    """Interactive mask-editing loop over all DFL images in input_dir.

    Shows each image with its landmark hull mask and lets the user draw
    include/exclude polygons. Each image can be saved in place, saved and
    moved to confirmed_dir, skipped in place, or skipped and moved to
    skipped_dir; [q]/[z] steps back to the previous image.

    NOTE(review): confirmed_dir and skipped_dir default to None, but
    Path(None) below raises TypeError — callers apparently always pass
    both; confirm before relying on the defaults.
    """
    input_path = Path(input_dir)

    confirmed_path = Path(confirmed_dir)
    skipped_path = Path(skipped_dir)

    if not input_path.exists():
        raise ValueError('Input directory not found. Please ensure it exists.')

    if not confirmed_path.exists():
        confirmed_path.mkdir(parents=True)

    if not skipped_path.exists():
        skipped_path.mkdir(parents=True)

    wnd_name = "MaskEditor tool"
    io.named_window(wnd_name)
    io.capture_mouse(wnd_name)
    io.capture_keys(wnd_name)

    # filename -> normalized float image; kept only for the preview windows
    cached_images = {}

    image_paths = [Path(x) for x in Path_utils.get_image_paths(input_path)]
    done_paths = []
    # filename -> 0 (revisited), 1 (skipped), 2 (saved); drives preview coloring
    done_images_types = {}
    image_paths_total = len(image_paths)

    zoom_factor = 1.0
    preview_images_count = 9
    target_wh = 256

    # pending-action counters; shift-modified keys queue 10 actions at once
    do_prev_count = 0
    do_save_move_count = 0
    do_save_count = 0
    do_skip_move_count = 0
    do_skip_count = 0

    def jobs_count():
        # total queued actions; input is ignored while any are pending
        return do_prev_count + do_save_move_count + do_save_count + do_skip_move_count + do_skip_count

    is_exit = False
    while not is_exit:

        # take the next image; filepath is None once the queue is exhausted
        if len(image_paths) > 0:
            filepath = image_paths.pop(0)
        else:
            filepath = None

        next_image_paths = image_paths[0:preview_images_count]
        next_image_paths_names = [path.name for path in next_image_paths]
        prev_image_paths = done_paths[-preview_images_count:]
        prev_image_paths_names = [path.name for path in prev_image_paths]

        # evict cached images no longer visible in either preview strip
        for key in list(cached_images.keys()):
            if key not in prev_image_paths_names and \
               key not in next_image_paths_names:
                cached_images.pop(key)

        for paths in [prev_image_paths, next_image_paths]:
            for path in paths:
                if path.name not in cached_images:
                    cached_images[path.name] = cv2_imread(str(path)) / 255.0

        if filepath is not None:
            # load the embedded DFL metadata by extension
            if filepath.suffix == '.png':
                dflimg = DFLPNG.load(str(filepath))
            elif filepath.suffix == '.jpg':
                dflimg = DFLJPG.load(str(filepath))
            else:
                dflimg = None

            if dflimg is None:
                io.log_err("%s is not a dfl image file" % (filepath.name))
                continue
            else:
                lmrks = dflimg.get_landmarks()
                ie_polys = dflimg.get_ie_polys()

                if filepath.name in cached_images:
                    img = cached_images[filepath.name]
                else:
                    img = cached_images[filepath.name] = cv2_imread(
                        str(filepath)) / 255.0

                mask = LandmarksProcessor.get_image_hull_mask(img.shape, lmrks)
        else:
            # no images left: show a blank placeholder so the UI stays up
            img = np.zeros((target_wh, target_wh, 3))
            mask = np.ones((target_wh, target_wh, 3))
            ie_polys = None

        def get_status_lines_func():
            # help/progress text rendered by MaskEditor each frame
            return [
                'Progress: %d / %d . Current file: %s' %
                (len(done_paths), image_paths_total,
                 str(filepath.name) if filepath is not None else "end"),
                '[Left mouse button] - mark include mask.',
                '[Right mouse button] - mark exclude mask.',
                '[Middle mouse button] - finish current poly.',
                '[Mouse wheel] - undo/redo poly or point. [+ctrl] - undo to begin/redo to end',
                '[q] - prev image. [w] - skip and move to %s. [e] - save and move to %s. '
                % (skipped_path.name, confirmed_path.name),
                '[z] - prev image. [x] - skip. [c] - save. ',
                'hold [shift] - speed up the frame counter by 10.',
                '[-/+] - window zoom [esc] - quit',
            ]

        try:
            ed = MaskEditor(img,
                            [(done_images_types[name], cached_images[name])
                             for name in prev_image_paths_names],
                            [(0, cached_images[name])
                             for name in next_image_paths_names], mask,
                            ie_polys, get_status_lines_func)
        except Exception as e:
            print(e)
            continue

        # inner per-image event loop; 'next' flips when an action advances
        next = False
        while not next:
            io.process_messages(0.005)

            if jobs_count() == 0:
                for (x, y, ev, flags) in io.get_mouse_events(wnd_name):
                    # translate window coords back to image coords under zoom
                    x, y = int(x / zoom_factor), int(y / zoom_factor)
                    ed.set_mouse_pos(x, y)
                    if filepath is not None:
                        if ev == io.EVENT_LBUTTONDOWN:
                            ed.mask_point(1)
                        elif ev == io.EVENT_RBUTTONDOWN:
                            ed.mask_point(0)
                        elif ev == io.EVENT_MBUTTONDOWN:
                            ed.mask_finish()
                        elif ev == io.EVENT_MOUSEWHEEL:
                            # 0x80000000: wheel direction bit (down => undo);
                            # 0x8: presumably ctrl modifier — TODO confirm
                            if flags & 0x80000000 != 0:
                                if flags & 0x8 != 0:
                                    ed.undo_to_begin_point()
                                else:
                                    ed.undo_point()
                            else:
                                if flags & 0x8 != 0:
                                    ed.redo_to_end_point()
                                else:
                                    ed.redo_point()

                for key, chr_key, ctrl_pressed, alt_pressed, shift_pressed in io.get_key_events(
                        wnd_name):
                    if chr_key == 'q' or chr_key == 'z':
                        do_prev_count = 1 if not shift_pressed else 10
                    elif chr_key == '-':
                        zoom_factor = np.clip(zoom_factor - 0.1, 0.1, 4.0)
                        ed.set_screen_changed()
                    elif chr_key == '+':
                        zoom_factor = np.clip(zoom_factor + 0.1, 0.1, 4.0)
                        ed.set_screen_changed()
                    elif key == 27:  #esc
                        is_exit = True
                        next = True
                        break
                    elif filepath is not None:
                        if chr_key == 'e':
                            do_save_move_count = 1 if not shift_pressed else 10
                        elif chr_key == 'c':
                            do_save_count = 1 if not shift_pressed else 10
                        elif chr_key == 'w':
                            do_skip_move_count = 1 if not shift_pressed else 10
                        elif chr_key == 'x':
                            do_skip_count = 1 if not shift_pressed else 10

            if do_prev_count > 0:
                # step back: requeue current image, pull last done image back
                # into input_path (moving the file home if it was relocated)
                do_prev_count -= 1
                if len(done_paths) > 0:
                    if filepath is not None:
                        image_paths.insert(0, filepath)

                    filepath = done_paths.pop(-1)
                    done_images_types[filepath.name] = 0

                    if filepath.parent != input_path:
                        new_filename_path = input_path / filepath.name
                        filepath.rename(new_filename_path)
                        image_paths.insert(0, new_filename_path)
                    else:
                        image_paths.insert(0, filepath)

                    next = True
            elif filepath is not None:
                if do_save_move_count > 0:
                    # save polys into metadata, then move to confirmed_path
                    do_save_move_count -= 1

                    ed.mask_finish()
                    dflimg.embed_and_set(str(filepath),
                                         ie_polys=ed.get_ie_polys())

                    done_paths += [confirmed_path / filepath.name]
                    done_images_types[filepath.name] = 2
                    filepath.rename(done_paths[-1])

                    next = True
                elif do_save_count > 0:
                    # save polys into metadata, keep the file where it is
                    do_save_count -= 1

                    ed.mask_finish()
                    dflimg.embed_and_set(str(filepath),
                                         ie_polys=ed.get_ie_polys())

                    done_paths += [filepath]
                    done_images_types[filepath.name] = 2

                    next = True
                elif do_skip_move_count > 0:
                    # no save: move the file to skipped_path
                    do_skip_move_count -= 1

                    done_paths += [skipped_path / filepath.name]
                    done_images_types[filepath.name] = 1
                    filepath.rename(done_paths[-1])

                    next = True
                elif do_skip_count > 0:
                    # no save: mark as skipped in place
                    do_skip_count -= 1

                    done_paths += [filepath]
                    done_images_types[filepath.name] = 1

                    next = True
            else:
                # end of queue: drop any leftover queued actions
                do_save_move_count = do_save_count = do_skip_move_count = do_skip_count = 0

            if jobs_count() == 0:
                # redraw only when the editor reports a change
                if ed.switch_screen_changed():
                    screen = ed.make_screen()
                    if zoom_factor != 1.0:
                        h, w, c = screen.shape
                        screen = cv2.resize(
                            screen,
                            (int(w * zoom_factor), int(h * zoom_factor)))
                    io.show_image(wnd_name, screen)

        io.process_messages(0.005)

    io.destroy_all_windows()
Example #4
0
def sort_final(input_path):
    """Select a yaw-balanced, best-quality subset of ~target_count images.

    Buckets all images into 128 yaw bins, trims each bin first by blur
    (sharpness) then by pairwise histogram dissimilarity, and returns
    (final_img_list, trash_img_list).

    Each img record is indexable: [1] = blur score, [2] = histogram,
    [3] = yaw on input — later overwritten in place with the histogram
    dissimilarity score (the slot is reused).
    """
    io.log_info ("Performing final sort.")
    
    target_count = io.input_int ("Target number of images? (default:2000) : ", 2000)
    
    img_list, trash_img_list = FinalLoaderSubprocessor( Path_utils.get_image_paths(input_path) ).run()
    final_img_list = []

    grads = 128
    imgs_per_grad = round (target_count / grads)

    # bin edges spanning the normalized yaw range
    grads_space = np.linspace (-1.0,1.0,grads)
    
    yaws_sample_list = [None]*grads
    for g in io.progress_bar_generator ( range(grads), "Sort by yaw"):    
        yaw = grads_space[g]
        next_yaw = grads_space[g+1] if g < grads-1 else yaw
        
        yaw_samples = []
        for img in img_list:
            s_yaw = -img[3]
            # first bin catches everything below its upper edge, last bin
            # everything above its lower edge; middle bins are half-open
            if (g == 0          and s_yaw < next_yaw) or \
               (g < grads-1     and s_yaw >= yaw and s_yaw < next_yaw) or \
               (g == grads-1    and s_yaw >= yaw):
                yaw_samples += [ img ]
        if len(yaw_samples) > 0:
            yaws_sample_list[g] = yaw_samples
    
    # redistribute quota from under-filled bins across all bins
    total_lack = 0
    for g in io.progress_bar_generator ( range(grads), ""):
        img_list = yaws_sample_list[g]
        img_list_len = len(img_list) if img_list is not None else 0
        
        lack = imgs_per_grad - img_list_len
        total_lack += max(lack, 0)        

    imgs_per_grad += total_lack // grads
    # keep up to 10x quota of the sharpest images for the hist stage
    sharpned_imgs_per_grad = imgs_per_grad*10
    
    for g in io.progress_bar_generator ( range (grads), "Sort by blur"):
        img_list = yaws_sample_list[g]
        if img_list is None:
            continue

        # sort sharpest-first by blur score
        img_list = sorted(img_list, key=operator.itemgetter(1), reverse=True)    
 
        # oversized bins: discard the blurrier half outright
        if len(img_list) > imgs_per_grad*2:
            trash_img_list += img_list[len(img_list) // 2:]
            img_list = img_list[0: len(img_list) // 2]
        
        if len(img_list) > sharpned_imgs_per_grad:
            trash_img_list += img_list[sharpned_imgs_per_grad:]
            img_list = img_list[0:sharpned_imgs_per_grad]
            
        yaws_sample_list[g] = img_list
            
    for g in io.progress_bar_generator ( range (grads), "Sort by hist"):
        img_list = yaws_sample_list[g]
        if img_list is None:
            continue
            
        # O(n^2) pairwise histogram comparison within the bin; most
        # dissimilar (most distinctive) images rank first
        for i in range( len(img_list) ):
            score_total = 0
            for j in range( len(img_list) ):
                if i == j:
                    continue
                score_total += cv2.compareHist(img_list[i][2], img_list[j][2], cv2.HISTCMP_BHATTACHARYYA)
            img_list[i][3] = score_total
            
        yaws_sample_list[g] = sorted(img_list, key=operator.itemgetter(3), reverse=True)    

    for g in io.progress_bar_generator ( range (grads), "Fetching best"):
        img_list = yaws_sample_list[g]
        if img_list is None:
            continue
            
        final_img_list += img_list[0:imgs_per_grad]
        trash_img_list += img_list[imgs_per_grad:]

    return final_img_list, trash_img_list
Example #5
0
def main(input_dir,
         output_dir,
         debug,
         detector='mt',
         multi_gpu=True,
         cpu_only=False,
         manual_fix=False,
         manual_output_debug_fix=False,
         manual_window_size=1368,
         image_size=256,
         face_type='full_face'):
    """Run the face-extraction pipeline over all images in input_dir.

    Pass 1 detects face rects, pass 2 fits landmarks, pass 3 writes the
    aligned faces into output_dir. detector='manual' replaces passes 1-2
    with interactive marking; manual_fix re-runs images where no face was
    found; manual_output_debug_fix re-extracts only frames whose debug
    images were deleted from '<output_dir>_debug'.

    Prints counts of images found and faces detected at the end.
    """
    print("Running extractor.\r\n")

    input_path = Path(input_dir)
    output_path = Path(output_dir)
    face_type = FaceType.fromString(face_type)

    if not input_path.exists():
        print('Input directory not found. Please ensure it exists.')
        return

    if output_path.exists():
        # clear previous results, unless re-extracting deleted debug frames
        if not manual_output_debug_fix:
            for filename in Path_utils.get_image_paths(output_path):
                Path(filename).unlink()
    else:
        output_path.mkdir(parents=True, exist_ok=True)

    if manual_output_debug_fix:
        # this mode forces debug output and interactive extraction
        debug = True
        detector = 'manual'
        print(
            'Performing re-extract frames which were deleted from _debug directory.'
        )

    input_path_image_paths = Path_utils.get_image_unique_filestem_paths(
        input_path, verbose=True)

    if debug:
        debug_output_path = Path(str(output_path) + '_debug')

        if manual_output_debug_fix:
            if not debug_output_path.exists():
                print("%s not found " % (str(debug_output_path)))
                return

            # keep only source frames whose debug image was deleted
            input_path_image_paths = DeletedFilesSearcherSubprocessor(
                input_path_image_paths,
                Path_utils.get_image_paths(debug_output_path)).process()
            input_path_image_paths = sorted(input_path_image_paths)
        else:
            if debug_output_path.exists():
                for filename in Path_utils.get_image_paths(debug_output_path):
                    Path(filename).unlink()
            else:
                debug_output_path.mkdir(parents=True, exist_ok=True)

    images_found = len(input_path_image_paths)
    faces_detected = 0
    if images_found != 0:
        if detector == 'manual':
            print('Performing manual extract...')
            extracted_faces = ExtractSubprocessor(
                [(filename, []) for filename in input_path_image_paths],
                'landmarks',
                image_size,
                face_type,
                debug,
                cpu_only=cpu_only,
                manual=True,
                manual_window_size=manual_window_size).process()
        else:
            print('Performing 1st pass...')
            extracted_rects = ExtractSubprocessor(
                [(x, ) for x in input_path_image_paths],
                'rects',
                image_size,
                face_type,
                debug,
                multi_gpu=multi_gpu,
                cpu_only=cpu_only,
                manual=False,
                detector=detector).process()

            print('Performing 2nd pass...')
            extracted_faces = ExtractSubprocessor(extracted_rects,
                                                  'landmarks',
                                                  image_size,
                                                  face_type,
                                                  debug,
                                                  multi_gpu=multi_gpu,
                                                  cpu_only=cpu_only,
                                                  manual=False).process()

            if manual_fix:
                print('Performing manual fix...')

                # data[1] holds the detected faces for each image; only
                # re-run manually when some image has none
                if all(
                        np.array(
                            [len(data[1]) > 0
                             for data in extracted_faces]) == True):
                    print('All faces are detected, manual fix not needed.')
                else:
                    extracted_faces = ExtractSubprocessor(
                        extracted_faces,
                        'landmarks',
                        image_size,
                        face_type,
                        debug,
                        manual=True,
                        manual_window_size=manual_window_size).process()

        if len(extracted_faces) > 0:
            print('Performing 3rd pass...')
            final_imgs_paths = ExtractSubprocessor(
                extracted_faces,
                'final',
                image_size,
                face_type,
                debug,
                multi_gpu=multi_gpu,
                cpu_only=cpu_only,
                manual=False,
                output_path=output_path).process()
            faces_detected = len(final_imgs_paths)

    print('-------------------------')
    print('Images found:        %d' % (images_found))
    print('Faces detected:      %d' % (faces_detected))
    print('-------------------------')
Example #6
0
def main(input_dir,
         output_dir,
         debug_dir=None,
         detector='mt',
         manual_fix=False,
         manual_output_debug_fix=False,
         manual_window_size=1368,
         image_size=256,
         face_type='full_face',
         device_args={}):
    """Run the face-extraction pipeline over all images in input_dir.

    Pass 1 detects face rects, pass 2 fits landmarks, pass 3 writes the
    aligned faces into output_dir. detector='manual' replaces passes 1-2
    with interactive marking; manual_fix re-runs images where no face was
    found; manual_output_debug_fix re-extracts only frames whose debug
    images were deleted from debug_dir (required in that mode).

    device_args may carry 'multi_gpu' and 'cpu_only' flags.

    Raises ValueError when input_dir is missing, or when
    manual_output_debug_fix is set without debug_dir / with a missing
    debug_dir.

    NOTE(review): device_args={} is a mutable default — harmless here
    since it is only read, but a None default would be safer.
    """

    input_path = Path(input_dir)
    output_path = Path(output_dir)
    face_type = FaceType.fromString(face_type)

    multi_gpu = device_args.get('multi_gpu', False)
    cpu_only = device_args.get('cpu_only', False)

    if not input_path.exists():
        raise ValueError('Input directory not found. Please ensure it exists.')

    if output_path.exists():
        # clear previous results, unless re-extracting deleted debug frames
        # or extracting in place
        if not manual_output_debug_fix and input_path != output_path:
            for filename in Path_utils.get_image_paths(output_path):
                Path(filename).unlink()
    else:
        output_path.mkdir(parents=True, exist_ok=True)

    if manual_output_debug_fix:
        if debug_dir is None:
            raise ValueError('debug-dir must be specified')
        # this mode forces interactive extraction
        detector = 'manual'
        io.log_info(
            'Performing re-extract frames which were deleted from _debug directory.'
        )

    input_path_image_paths = Path_utils.get_image_unique_filestem_paths(
        input_path, verbose_print_func=io.log_info)
    if debug_dir is not None:
        debug_output_path = Path(debug_dir)

        if manual_output_debug_fix:
            if not debug_output_path.exists():
                raise ValueError("%s not found " % (str(debug_output_path)))

            # keep only source frames whose debug image was deleted
            input_path_image_paths = DeletedFilesSearcherSubprocessor(
                input_path_image_paths,
                Path_utils.get_image_paths(debug_output_path)).run()
            input_path_image_paths = sorted(input_path_image_paths)
        else:
            if debug_output_path.exists():
                for filename in Path_utils.get_image_paths(debug_output_path):
                    Path(filename).unlink()
            else:
                debug_output_path.mkdir(parents=True, exist_ok=True)

    images_found = len(input_path_image_paths)
    faces_detected = 0
    if images_found != 0:
        if detector == 'manual':
            io.log_info('Performing manual extract...')
            extracted_faces = ExtractSubprocessor(
                [(filename, []) for filename in input_path_image_paths],
                'landmarks',
                image_size,
                face_type,
                debug_dir,
                cpu_only=cpu_only,
                manual=True,
                manual_window_size=manual_window_size).run()
        else:
            io.log_info('Performing 1st pass...')
            extracted_rects = ExtractSubprocessor(
                [(x, ) for x in input_path_image_paths],
                'rects',
                image_size,
                face_type,
                debug_dir,
                multi_gpu=multi_gpu,
                cpu_only=cpu_only,
                manual=False,
                detector=detector).run()

            io.log_info('Performing 2nd pass...')
            extracted_faces = ExtractSubprocessor(extracted_rects,
                                                  'landmarks',
                                                  image_size,
                                                  face_type,
                                                  debug_dir,
                                                  multi_gpu=multi_gpu,
                                                  cpu_only=cpu_only,
                                                  manual=False).run()

            if manual_fix:
                io.log_info('Performing manual fix...')

                # data[1] holds the detected faces for each image; only
                # re-run manually when some image has none
                if all(
                        np.array(
                            [len(data[1]) > 0
                             for data in extracted_faces]) == True):
                    io.log_info(
                        'All faces are detected, manual fix not needed.')
                else:
                    extracted_faces = ExtractSubprocessor(
                        extracted_faces,
                        'landmarks',
                        image_size,
                        face_type,
                        debug_dir,
                        manual=True,
                        manual_window_size=manual_window_size).run()

        if len(extracted_faces) > 0:
            io.log_info('Performing 3rd pass...')
            final_imgs_paths = ExtractSubprocessor(
                extracted_faces,
                'final',
                image_size,
                face_type,
                debug_dir,
                multi_gpu=multi_gpu,
                cpu_only=cpu_only,
                manual=False,
                output_path=output_path).run()
            faces_detected = len(final_imgs_paths)

    io.log_info('-------------------------')
    io.log_info('Images found:        %d' % (images_found))
    io.log_info('Faces detected:      %d' % (faces_detected))
    io.log_info('-------------------------')
Example #7
0
def sort_final(input_path, include_by_blur=True):
    """Select a yaw-balanced, best-quality subset of ~target_count images.

    Buckets all images into 128 yaw bins, optionally trims each bin by
    blur (sharpness), ranks bins by histogram dissimilarity via a
    subprocessor, and returns (final_img_list, trash_img_list).

    Each img record is indexable: [1] = blur score, [3] = yaw.
    include_by_blur=False skips both blur loading and the blur trim.
    """
    io.log_info("运行终极[final]排序工具.")

    target_count = io.input_int("Target number of images? (default:2000) : ",
                                2000)

    img_list, trash_img_list = FinalLoaderSubprocessor(
        Path_utils.get_image_paths(input_path), include_by_blur).run()
    final_img_list = []

    grads = 128
    imgs_per_grad = round(target_count / grads)

    # bin edges spanning the normalized yaw range
    grads_space = np.linspace(-1.0, 1.0, grads)

    yaws_sample_list = [None] * grads
    for g in io.progress_bar_generator(range(grads), "Sort by yaw"):
        yaw = grads_space[g]
        next_yaw = grads_space[g + 1] if g < grads - 1 else yaw

        yaw_samples = []
        for img in img_list:
            s_yaw = -img[3]
            # first bin catches everything below its upper edge, last bin
            # everything above its lower edge; middle bins are half-open
            if (g == 0          and s_yaw < next_yaw) or \
               (g < grads-1     and s_yaw >= yaw and s_yaw < next_yaw) or \
               (g == grads-1    and s_yaw >= yaw):
                yaw_samples += [img]
        if len(yaw_samples) > 0:
            yaws_sample_list[g] = yaw_samples

    # redistribute quota from under-filled bins across all bins
    total_lack = 0
    for g in io.progress_bar_generator(range(grads), ""):
        img_list = yaws_sample_list[g]
        img_list_len = len(img_list) if img_list is not None else 0

        lack = imgs_per_grad - img_list_len
        total_lack += max(lack, 0)

    imgs_per_grad += total_lack // grads

    if include_by_blur:
        # keep up to 10x quota of the sharpest images for the hist stage
        sharpned_imgs_per_grad = imgs_per_grad * 10
        for g in io.progress_bar_generator(range(grads), "Sort by blur"):
            img_list = yaws_sample_list[g]
            if img_list is None:
                continue

            # sort sharpest-first by blur score
            img_list = sorted(img_list,
                              key=operator.itemgetter(1),
                              reverse=True)

            if len(img_list) > sharpned_imgs_per_grad:
                trash_img_list += img_list[sharpned_imgs_per_grad:]
                img_list = img_list[0:sharpned_imgs_per_grad]

            yaws_sample_list[g] = img_list

    # rank each bin by pairwise histogram dissimilarity (parallelized)
    yaws_sample_list = FinalHistDissimSubprocessor(yaws_sample_list).run()

    for g in io.progress_bar_generator(range(grads), "Fetching best"):
        img_list = yaws_sample_list[g]
        if img_list is None:
            continue

        final_img_list += img_list[0:imgs_per_grad]
        trash_img_list += img_list[imgs_per_grad:]

    return final_img_list, trash_img_list
Example #8
0
def main(input_dir,
         output_dir,
         debug_dir=None,
         detector='mt',
         manual_fix=False,
         manual_output_debug_fix=False,
         manual_window_size=1368,
         image_size=256,
         face_type='full_face',
         device_args={}):
    """Run the multi-pass face extraction pipeline over input_dir.

    Passes (for automatic detectors): 1) face rectangle detection,
    2) landmark fitting, 3) final alignment and writing of face crops to
    output_dir.  detector='manual' replaces passes 1-2 with interactive
    landmark placement.

    Args:
        input_dir: directory with source frame images.
        output_dir: destination for aligned faces; pre-existing images
            there are deleted (after a confirmation prompt).
        debug_dir: optional directory for per-frame debug images; with
            manual_output_debug_fix, frames deleted from it select which
            frames to re-extract.
        detector: rect detector name (default 'mt') or 'manual'.
        manual_fix: offer manual landmarking for frames where no face was
            detected by the automatic passes.
        manual_output_debug_fix: re-extract only frames missing from
            debug_dir; forces the manual detector.
        manual_window_size: window width (px) of the manual extractor UI.
        image_size: output face image size (px).
        face_type: face crop type string, parsed via FaceType.fromString.
        device_args: optional flags dict: 'multi_gpu', 'cpu_only'.
            NOTE: read-only here, so the mutable default is harmless.
    """
    input_path = Path(input_dir)
    output_path = Path(output_dir)
    face_type = FaceType.fromString(face_type)

    multi_gpu = device_args.get('multi_gpu', False)
    cpu_only = device_args.get('cpu_only', False)

    if not input_path.exists():
        raise ValueError('Input directory not found. Please ensure it exists.')

    if output_path.exists():
        # Existing output is wiped, except when re-extracting via
        # manual_output_debug_fix or extracting in-place.
        if not manual_output_debug_fix and input_path != output_path:
            output_images_paths = Path_utils.get_image_paths(output_path)
            if len(output_images_paths) > 0:
                io.input_bool(
                    "WARNING !!! \n %s contains files! \n They will be deleted. \n Press enter to continue."
                    % (str(output_path)), False)
                for filename in output_images_paths:
                    Path(filename).unlink()
    else:
        output_path.mkdir(parents=True, exist_ok=True)

    if manual_output_debug_fix:
        if debug_dir is None:
            raise ValueError('debug-dir must be specified')
        detector = 'manual'
        io.log_info(
            'Performing re-extract frames which were deleted from _debug directory.'
        )

    input_path_image_paths = Path_utils.get_image_unique_filestem_paths(
        input_path, verbose_print_func=io.log_info)
    if debug_dir is not None:
        debug_output_path = Path(debug_dir)

        if manual_output_debug_fix:
            if not debug_output_path.exists():
                raise ValueError("%s not found " % (str(debug_output_path)))

            # keep only the frames whose debug image was deleted by the user
            input_path_image_paths = DeletedFilesSearcherSubprocessor(
                input_path_image_paths,
                Path_utils.get_image_paths(debug_output_path)).run()
            input_path_image_paths = sorted(input_path_image_paths)
            io.log_info('Found %d images.' % (len(input_path_image_paths)))
        else:
            # fresh run: clear stale debug images from a previous run
            if debug_output_path.exists():
                for filename in Path_utils.get_image_paths(debug_output_path):
                    Path(filename).unlink()
            else:
                debug_output_path.mkdir(parents=True, exist_ok=True)

    images_found = len(input_path_image_paths)
    faces_detected = 0
    if images_found != 0:
        if detector == 'manual':
            io.log_info('Performing manual extract...')
            data = ExtractSubprocessor(
                [
                    ExtractSubprocessor.Data(filename)
                    for filename in input_path_image_paths
                ],
                'landmarks',
                image_size,
                face_type,
                debug_dir,
                cpu_only=cpu_only,
                manual=True,
                manual_window_size=manual_window_size).run()
        else:
            io.log_info('Performing 1st pass...')
            data = ExtractSubprocessor([
                ExtractSubprocessor.Data(filename)
                for filename in input_path_image_paths
            ],
                                       'rects-' + detector,
                                       image_size,
                                       face_type,
                                       debug_dir,
                                       multi_gpu=multi_gpu,
                                       cpu_only=cpu_only,
                                       manual=False).run()

            io.log_info('Performing 2nd pass...')
            data = ExtractSubprocessor(data,
                                       'landmarks',
                                       image_size,
                                       face_type,
                                       debug_dir,
                                       multi_gpu=multi_gpu,
                                       cpu_only=cpu_only,
                                       manual=False).run()

        io.log_info('Performing 3rd pass...')
        data = ExtractSubprocessor(data,
                                   'final',
                                   image_size,
                                   face_type,
                                   debug_dir,
                                   multi_gpu=multi_gpu,
                                   cpu_only=cpu_only,
                                   manual=False,
                                   final_output_path=output_path).run()
        faces_detected += sum([d.faces_detected for d in data])

        if manual_fix:
            if all(np.array([d.faces_detected > 0 for d in data]) == True):
                io.log_info('All faces are detected, manual fix not needed.')
            else:
                # re-run landmark + final passes manually on zero-face frames
                fix_data = [
                    ExtractSubprocessor.Data(d.filename) for d in data
                    if d.faces_detected == 0
                ]
                io.log_info('Performing manual fix for %d images...' %
                            (len(fix_data)))
                fix_data = ExtractSubprocessor(
                    fix_data,
                    'landmarks',
                    image_size,
                    face_type,
                    debug_dir,
                    manual=True,
                    manual_window_size=manual_window_size).run()
                fix_data = ExtractSubprocessor(
                    fix_data,
                    'final',
                    image_size,
                    face_type,
                    debug_dir,
                    multi_gpu=multi_gpu,
                    cpu_only=cpu_only,
                    manual=False,
                    final_output_path=output_path).run()
                faces_detected += sum([d.faces_detected for d in fix_data])

    io.log_info('-------------------------')
    io.log_info('Images found:        %d' % (images_found))
    io.log_info('Faces detected:      %d' % (faces_detected))
    io.log_info('-------------------------')
Example #9
0
    def __init__(self, model_path, training_data_src_path=None, training_data_dst_path=None,
                        batch_size=0,
                        write_preview_history = False,
                        debug = False, **in_options
                ):
        """Base model constructor: restores persisted state, configures the
        GPU/TF/Keras stack and, in training mode, wires up sample generators.

        Args:
            model_path: directory (Path) holding model files and 'data.dat'.
            training_data_src_path: src faceset dir; training mode requires
                both src and dst paths to be set.
            training_data_dst_path: dst faceset dir.
            batch_size: 0 means unset (forced to 1, as is debug mode).
            write_preview_history: save preview images to '<name>_history'.
            debug: debug mode; forces batch_size to 1.
            **in_options: extra options forwarded to GPUConfig and the
                subclass onInitialize() hook.
        """
        print ("Loading model...")
        self.model_path = model_path
        # persisted state (epoch, options, loss history, preview sample)
        self.model_data_path = Path( self.get_strpath_storage_for_file('data.dat') )

        self.training_data_src_path = training_data_src_path
        self.training_data_dst_path = training_data_dst_path

        self.src_images_paths = None
        self.dst_images_paths = None
        self.src_yaw_images_paths = None
        self.dst_yaw_images_paths = None
        self.src_data_generator = None
        self.dst_data_generator = None
        self.is_training_mode = (training_data_src_path is not None and training_data_dst_path is not None)
        self.batch_size = batch_size
        self.write_preview_history = write_preview_history
        self.debug = debug
        self.supress_std_once = ('TF_SUPPRESS_STD' in os.environ.keys() and os.environ['TF_SUPPRESS_STD'] == '1')

        if self.model_data_path.exists():
            # resume: restore the state saved by a previous run
            model_data = pickle.loads ( self.model_data_path.read_bytes() )
            self.epoch = model_data['epoch']
            self.options = model_data['options']
            # older data.dat files may lack these keys — default them
            self.loss_history = model_data['loss_history'] if 'loss_history' in model_data.keys() else []
            self.generator_dict_states = model_data['generator_dict_states'] if 'generator_dict_states' in model_data.keys() else None
            self.sample_for_preview = model_data['sample_for_preview']  if 'sample_for_preview' in model_data.keys() else None
        else:
            # first run: start from scratch
            self.epoch = 0
            self.options = {}
            self.loss_history = []
            self.generator_dict_states = None
            self.sample_for_preview = None

        if self.write_preview_history:
            self.preview_history_path = self.model_path / ( '%s_history' % (self.get_model_name()) )

            if not self.preview_history_path.exists():
                self.preview_history_path.mkdir(exist_ok=True)
            else:
                if self.epoch == 0:
                    # restarting from scratch — drop previews of the old run
                    for filename in Path_utils.get_image_paths(self.preview_history_path):
                        Path(filename).unlink()


        self.gpu_config = gpufmkmgr.GPUConfig(allow_growth=False, **in_options)
        self.gpu_total_vram_gb = self.gpu_config.gpu_total_vram_gb

        if self.epoch == 0:
            # first run: remember the VRAM the model was created with
            self.options['created_vram_gb'] = self.gpu_total_vram_gb
            self.created_vram_gb = self.gpu_total_vram_gb
        else:
            # not first run: prefer the recorded value, backfill if missing
            if 'created_vram_gb' in self.options.keys():
                self.created_vram_gb = self.options['created_vram_gb']
            else:
                self.options['created_vram_gb'] = self.gpu_total_vram_gb
                self.created_vram_gb = self.gpu_total_vram_gb

        self.tf = gpufmkmgr.import_tf( self.gpu_config )
        self.tf_sess = gpufmkmgr.get_tf_session()
        self.keras = gpufmkmgr.import_keras()
        self.keras_contrib = gpufmkmgr.import_keras_contrib()

        # subclass hook: builds the actual network / generator_list
        self.onInitialize(**in_options)

        if self.debug or self.batch_size == 0:
            self.batch_size = 1

        if self.is_training_mode:
            if self.generator_list is None:
                raise Exception( 'You didnt set_training_data_generators()')
            else:
                for i, generator in enumerate(self.generator_list):
                    if not isinstance(generator, TrainingDataGeneratorBase):
                        raise Exception('training data generator is not subclass of TrainingDataGeneratorBase')

                    # restore generator state saved alongside the model
                    if self.generator_dict_states is not None and i < len(self.generator_dict_states):
                        generator.set_dict_state ( self.generator_dict_states[i] )

            if self.sample_for_preview is None:
                self.sample_for_preview = self.generate_next_sample()

        print ("===== Model summary =====")
        print ("== Model name: " + self.get_model_name())
        print ("==")
        print ("== Current epoch: " + str(self.epoch) )
        print ("==")
        print ("== Options:")
        print ("== |== batch_size : %s " % (self.batch_size) )
        print ("== |== multi_gpu : %s " % (self.gpu_config.multi_gpu) )
        for key in self.options.keys():
            print ("== |== %s : %s" % (key, self.options[key]) )

        print ("== Running on:")
        for idx in self.gpu_config.gpu_idxs:
            print ("== |== [%d : %s]" % (idx, gpufmkmgr.getDeviceName(idx)) )

        if self.gpu_total_vram_gb == 2:
            print ("==")
            print ("== WARNING: You are using 2GB GPU. Result quality may be significantly decreased.")
            print ("== If training does not start, close all programs and try again.")
            print ("== Also you can disable Windows Aero Desktop to get extra free VRAM.")
            print ("==")

        print ("=========================")
Example #10
0
def sort_by_absdiff(input_path):
    """Order images by pairwise absolute pixel difference.

    Computes the full NxN matrix of summed absolute differences between
    every pair of images on the GPU (in batches), caches it in a temporary
    hdf5 file, then greedily chains images: starting from image 0, each
    next image is the most similar (is_sim) or most dissimilar unvisited
    one relative to the current image.

    Returns:
        ([(filename,), ...], []) — the ordered list plus an empty trash list.
    """
    io.log_info("Sorting by absolute difference...")

    is_sim = io.input_bool("Sort by similar? ( y/n ?:help skip:y ) : ",
                           True,
                           help_message="Otherwise sort by dissimilar.")

    from nnlib import nnlib
    # injects Input, K, etc. into scope (project convention)
    exec(nnlib.import_all(device_config=nnlib.device.Config()), locals(),
         globals())

    image_paths = Path_utils.get_image_paths(input_path)
    image_paths_len = len(image_paths)

    batch_size = 1024
    batch_size_remain = image_paths_len % batch_size

    # assumes images are 256x256x3 — TODO confirm against extractor output
    i_t = Input((256, 256, 3))
    j_t = Input((256, 256, 3))

    # one output per j-image: sum |i_batch - j| over H,W,C
    outputs = []
    for i in range(batch_size):
        outputs += [K.sum(K.abs(i_t - j_t[i]), axis=[1, 2, 3])]

    func_bs_full = K.function([i_t, j_t], outputs)

    # second function sized for the final partial j-batch
    outputs = []
    for i in range(batch_size_remain):
        outputs += [K.sum(K.abs(i_t - j_t[i]), axis=[1, 2, 3])]

    func_bs_remain = K.function([i_t, j_t], outputs)

    import h5py
    # NxN result matrix lives on disk — it does not fit in RAM for large sets
    db_file_path = Path(tempfile.gettempdir()) / 'sort_cache.hdf5'
    db_file = h5py.File(str(db_file_path), "w")
    db = db_file.create_dataset("results", (image_paths_len, image_paths_len),
                                compression="gzip")

    # progress total: number of (i >= j) batch pairs — upper triangle + diagonal
    pg_len = image_paths_len // batch_size
    if batch_size_remain != 0:
        pg_len += 1

    pg_len = int((pg_len * pg_len - pg_len) / 2 + pg_len)

    io.progress_bar("Computing", pg_len)
    j = 0
    while j < image_paths_len:
        j_images = [cv2_imread(x) for x in image_paths[j:j + batch_size]]
        j_images_len = len(j_images)

        func = func_bs_remain if image_paths_len - j < batch_size else func_bs_full

        i = 0
        while i < image_paths_len:
            # only the upper triangle is computed; lookups below read
            # db[:id, id] (column) + db[id, id:] (row) to cover both halves
            if i >= j:
                i_images = [
                    cv2_imread(x) for x in image_paths[i:i + batch_size]
                ]
                i_images_len = len(i_images)
                result = func([i_images, j_images])
                db[j:j + j_images_len, i:i + i_images_len] = np.array(result)
                io.progress_bar_inc(1)

            i += batch_size
        db_file.flush()
        j += batch_size

    io.progress_bar_close()

    next_id = 0
    sorted_ids = [next_id]  # renamed: previously shadowed the builtin sorted()
    for i in io.progress_bar_generator(range(image_paths_len - 1), "Sorting"):
        # distances from next_id to every image (column above + row right)
        id_ar = np.concatenate([db[:next_id, next_id], db[next_id, next_id:]])
        id_ar = np.argsort(id_ar)

        # assume_unique=True makes setdiff1d preserve id_ar's order, so
        # element 0 is the nearest and -1 the farthest unvisited image
        next_id = np.setdiff1d(id_ar, sorted_ids, True)[0 if is_sim else -1]
        sorted_ids += [next_id]
    db_file.close()
    db_file_path.unlink()

    img_list = [(image_paths[x], ) for x in sorted_ids]
    return img_list, []
Example #11
0
def extract_umd_csv(input_file_csv,
                    image_size=256,
                    face_type='full_face',
                    device_args={}):
    """Extract faces listed in a umdfaces.io csv with PITCH/YAW/ROLL columns.

    Builds one extraction entry per csv row (rows with any angle outside
    [-90, 90] are skipped), then runs the landmark and final extraction
    passes, writing aligned faces into an 'aligned_<csvname>' directory
    next to the csv file.
    """
    multi_gpu = device_args.get('multi_gpu', False)
    cpu_only = device_args.get('cpu_only', False)
    face_type = FaceType.fromString(face_type)

    csv_path = Path(input_file_csv)
    if not csv_path.exists():
        raise ValueError('input_file_csv not found. Please ensure it exists.')

    root_path = csv_path.parent
    output_path = csv_path.parent / ('aligned_' + csv_path.name)

    io.log_info("Output dir is %s." % (str(output_path)))

    if output_path.exists():
        output_images_paths = Path_utils.get_image_paths(output_path)
        if len(output_images_paths) > 0:
            io.input_bool(
                "WARNING !!! \n %s contains files! \n They will be deleted. \n Press enter to continue."
                % (str(output_path)), False)
            for filename in output_images_paths:
                Path(filename).unlink()
    else:
        output_path.mkdir(parents=True, exist_ok=True)

    try:
        with open(str(csv_path), 'r') as f:
            csv_text = f.read()
    except Exception as e:
        io.log_err("Unable to open or read file " + str(csv_path) +
                   ": " + str(e))
        return

    # hand-rolled csv parse: header row, then comma-separated value rows
    lines = csv_text.split('\n')
    header = lines[0].split(',')
    rows = []
    for line in lines[1:]:
        fields = line.split(',')
        if len(fields) != len(header):
            io.log_err("Wrong string in csv file, skipping.")
            continue
        rows.append(dict(zip(header, fields)))

    data = []
    for row in rows:
        filename = root_path / row['FILE']

        pitch = float(row['PITCH'])
        yaw = float(row['YAW'])
        roll = float(row['ROLL'])
        # discard rows with out-of-range angles
        if pitch < -90 or pitch > 90 or yaw < -90 or yaw > 90 or roll < -90 or roll > 90:
            continue

        pitch_yaw_roll = pitch / 90.0, yaw / 90.0, roll / 90.0

        x = float(row['FACE_X'])
        y = float(row['FACE_Y'])
        w = float(row['FACE_WIDTH'])
        h = float(row['FACE_HEIGHT'])

        data.append(
            ExtractSubprocessor.Data(filename=filename,
                                     rects=[[x, y, x + w, y + h]],
                                     pitch_yaw_roll=pitch_yaw_roll))

    images_found = len(data)
    faces_detected = 0
    if len(data) > 0:
        io.log_info("Performing 2nd pass from csv file...")
        data = ExtractSubprocessor(data,
                                   'landmarks',
                                   multi_gpu=multi_gpu,
                                   cpu_only=cpu_only).run()

        io.log_info('Performing 3rd pass...')
        data = ExtractSubprocessor(data,
                                   'final',
                                   image_size,
                                   face_type,
                                   None,
                                   multi_gpu=multi_gpu,
                                   cpu_only=cpu_only,
                                   manual=False,
                                   final_output_path=output_path).run()
        faces_detected += sum(d.faces_detected for d in data)

    io.log_info('-------------------------')
    io.log_info('Images found:        %d' % (images_found))
    io.log_info('Faces detected:      %d' % (faces_detected))
    io.log_info('-------------------------')
Example #12
0
def sort_final(input_path, include_by_blur=True):
    """Pick the 'best' ~target_count images, spread evenly over head pose.

    Buckets images into 128 yaw bins, optionally keeps only the sharpest
    candidates per bin, sub-buckets each yaw bin by pitch, ranks candidates
    by histogram dissimilarity, then round-robins over pitch bins so the
    final selection covers the pose space as evenly as possible.

    Returns:
        (final_img_list, trash_img_list) — kept and discarded image tuples.
    """
    io.log_info("Performing final sort.")

    target_count = io.input_int("Target number of images? (default:2000) : ",
                                2000)

    img_list, trash_img_list = FinalLoaderSubprocessor(
        Path_utils.get_image_paths(input_path), include_by_blur).run()
    final_img_list = []

    grads = 128  # number of yaw bins
    imgs_per_grad = round(target_count / grads)

    grads_space = np.linspace(-1.0, 1.0, grads)

    # bucket images by yaw; img[3] holds yaw, negated here —
    # presumably a sign-convention flip, TODO confirm against the loader
    yaws_sample_list = [None] * grads
    for g in io.progress_bar_generator(range(grads), "Sort by yaw"):
        yaw = grads_space[g]
        next_yaw = grads_space[g + 1] if g < grads - 1 else yaw

        yaw_samples = []
        for img in img_list:
            s_yaw = -img[3]
            # first bin catches everything below it, last bin everything above,
            # interior bins take the half-open interval [yaw, next_yaw)
            if (g == 0          and s_yaw < next_yaw) or \
               (g < grads-1     and s_yaw >= yaw and s_yaw < next_yaw) or \
               (g == grads-1    and s_yaw >= yaw):
                yaw_samples += [img]
        if len(yaw_samples) > 0:
            yaws_sample_list[g] = yaw_samples

    # under-filled bins raise the per-bin quota so the total stays near target
    total_lack = 0
    for g in io.progress_bar_generator(range(grads), ""):
        img_list = yaws_sample_list[g]
        img_list_len = len(img_list) if img_list is not None else 0

        lack = imgs_per_grad - img_list_len
        total_lack += max(lack, 0)

    imgs_per_grad += total_lack // grads

    if include_by_blur:
        # keep only the ~10x-quota sharpest images per yaw bin (img[1] = blur score)
        sharpned_imgs_per_grad = imgs_per_grad * 10
        for g in io.progress_bar_generator(range(grads), "Sort by blur"):
            img_list = yaws_sample_list[g]
            if img_list is None:
                continue

            img_list = sorted(img_list,
                              key=operator.itemgetter(1),
                              reverse=True)

            if len(img_list) > sharpned_imgs_per_grad:
                trash_img_list += img_list[sharpned_imgs_per_grad:]
                img_list = img_list[0:sharpned_imgs_per_grad]

            yaws_sample_list[g] = img_list

    # second level: split every yaw bin into pitch bins (img[4] = pitch)
    yaw_pitch_sample_list = [None] * grads
    pitch_grads = imgs_per_grad

    for g in io.progress_bar_generator(range(grads), "Sort by pitch"):
        img_list = yaws_sample_list[g]
        if img_list is None:
            continue

        pitch_sample_list = [None] * pitch_grads

        grads_space = np.linspace(-1.0, 1.0, pitch_grads)

        for pg in range(pitch_grads):

            pitch = grads_space[pg]
            next_pitch = grads_space[pg + 1] if pg < pitch_grads - 1 else pitch

            pitch_samples = []
            for img in img_list:
                s_pitch = img[4]
                # same edge-bin rules as the yaw bucketing above
                if (pg == 0                and s_pitch < next_pitch) or \
                   (pg < pitch_grads-1     and s_pitch >= pitch and s_pitch < next_pitch) or \
                   (pg == pitch_grads-1    and s_pitch >= pitch):
                    pitch_samples += [img]

            if len(pitch_samples) > 0:
                pitch_sample_list[pg] = pitch_samples
        yaw_pitch_sample_list[g] = pitch_sample_list

    # rank candidates within each pitch bin by histogram dissimilarity
    yaw_pitch_sample_list = FinalHistDissimSubprocessor(
        yaw_pitch_sample_list).run()

    for g in io.progress_bar_generator(range(grads), "Fetching the best"):
        pitch_sample_list = yaw_pitch_sample_list[g]
        if pitch_sample_list is None:
            continue

        n = imgs_per_grad

        # round-robin over pitch bins until the yaw-bin quota n is filled or
        # every pitch bin is exhausted (n_prev == n means no progress was made)
        while n > 0:
            n_prev = n
            for pg in range(pitch_grads):
                img_list = pitch_sample_list[pg]
                if img_list is None:
                    continue
                final_img_list += [img_list.pop(0)]
                if len(img_list) == 0:
                    pitch_sample_list[pg] = None
                n -= 1
                if n == 0:
                    break
            if n_prev == n:
                break

        # whatever remains in this yaw bin is discarded
        for pg in range(pitch_grads):
            img_list = pitch_sample_list[pg]
            if img_list is None:
                continue
            trash_img_list += img_list

    return final_img_list, trash_img_list
Example #13
0
def relight(input_dir, lighten=None, random_one=None):
    """Relight DFL face images in input_dir using DeepPortraitRelighting.

    Each not-yet-relighted dfl image gets one or more relit copies written
    next to it as '<stem>_relighted_<i><suffix>', which are then marked
    relighted=True so repeated runs skip them.

    Args:
        input_dir: directory of dfl face images.
        lighten: True to lighten instead of shadow; prompted if None.
        random_one: relight with a single random direction/intensity;
            prompted if None (only when directions are not chosen manually).
    """
    if lighten is None:
        lighten = io.input_bool(
            "Lighten the faces? ( y/n default:n ?:help ) : ",
            False,
            help_message=
            "Lighten the faces instead of shadow. May produce artifacts.")

    if io.is_colab():
        io.log_info(
            "In colab version you cannot choose light directions manually.")
        manual = False
    else:
        manual = io.input_bool(
            "Choose light directions manually? ( y/n default:y ) : ", True)

    if not manual:
        if random_one is None:
            random_one = io.input_bool(
                "Relight the faces only with one random direction and random intensity? ( y/n default:y ?:help) : ",
                True,
                help_message=
                "Otherwise faceset will be relighted with predefined 7 light directions but with random intensity."
            )

    # keep only dfl images that have not been relighted yet
    image_paths = [Path(x) for x in Path_utils.get_image_paths(input_dir)]
    filtered_image_paths = []
    for filepath in io.progress_bar_generator(image_paths,
                                              "Collecting fileinfo"):
        try:
            dflimg = DFLIMG.load(Path(filepath))

            if dflimg is None:
                io.log_err("%s is not a dfl image file" % (filepath.name))
            else:
                if not dflimg.get_relighted():
                    filtered_image_paths += [filepath]
        except Exception:
            # was a bare 'except:' — narrowed so Ctrl-C/SystemExit still abort
            io.log_err(
                f"Exception occured while processing file {filepath.name}. Error: {traceback.format_exc()}"
            )
    image_paths = filtered_image_paths

    if len(image_paths) == 0:
        io.log_info("No files to process.")
        return

    dpr = DeepPortraitRelighting()

    if manual:
        alt_azi_ar = RelightEditor(image_paths, dpr, lighten).run()

    for filepath in io.progress_bar_generator(image_paths, "Relighting"):
        try:
            dflimg = DFLIMG.load(Path(filepath))
            if dflimg is None:
                io.log_err("%s is not a dfl image file" % (filepath.name))
                continue
            else:
                if dflimg.get_relighted():
                    continue
                img = cv2_imread(str(filepath))

                if random_one:
                    # one random direction, random intensity in [0.3, 0.6)
                    alt = np.random.randint(-90, 91)
                    azi = np.random.randint(-90, 91)
                    inten = np.random.random() * 0.3 + 0.3
                    relighted_imgs = [
                        dpr.relight(img,
                                    alt=alt,
                                    azi=azi,
                                    intensity=inten,
                                    lighten=lighten)
                    ]
                else:
                    if not manual and not random_one:
                        # predefined direction set with one shared random intensity
                        inten = np.random.random() * 0.3 + 0.3
                        alt_azi_ar = [(60, 0, inten), (60, 60, inten),
                                      (0, 60, inten), (-60, 60, inten),
                                      (-60, 0, inten), (-60, -60, inten),
                                      (0, -60, inten), (60, -60, inten)]

                    relighted_imgs = [
                        dpr.relight(img,
                                    alt=alt,
                                    azi=azi,
                                    intensity=inten,
                                    lighten=lighten)
                        for (alt, azi, inten) in alt_azi_ar
                    ]

                for i, relighted_img in enumerate(relighted_imgs):
                    im_flags = []
                    if filepath.suffix == '.jpg':
                        # NOTE(review): im_flags is built but never passed to
                        # cv2_imwrite — probably intended as
                        # cv2_imwrite(path, img, im_flags); confirm before wiring in.
                        im_flags += [int(cv2.IMWRITE_JPEG_QUALITY), 100]

                    # find a free output filename, starting from this image's
                    # index (kept separate from the loop index i, which the
                    # original code mutated in place)
                    out_idx = i
                    while True:
                        relighted_filepath = filepath.parent / (
                            filepath.stem + f'_relighted_{out_idx}' +
                            filepath.suffix)
                        if not relighted_filepath.exists():
                            break
                        out_idx += 1

                    cv2_imwrite(relighted_filepath, relighted_img)

                    dflimg.remove_source_filename()
                    dflimg.embed_and_set(relighted_filepath, relighted=True)
        except Exception:
            io.log_err(
                f"Exception occured while processing file {filepath.name}. Error: {traceback.format_exc()}"
            )
Example #14
0
    def load(sample_type, samples_path, target_samples_path=None, person_id_mode=False, use_caching=False):
        """Load (and memoize) samples of the requested SampleType from samples_path.

        Results are cached per samples_path in SampleLoader.cache, so repeated
        calls for the same path/type reuse the first result.

        Args:
            sample_type: a SampleType enum value selecting how samples are built.
            samples_path: directory (Path) containing the sample images.
            target_samples_path: second directory, required only for
                FACE_YAW_SORTED_AS_TARGET.
            person_id_mode: if True, samples_path holds one sub-directory per
                person and the result is a list of per-person sample lists.
            use_caching: if True (FACE only), sample metadata is persisted to
                'samples.dat' inside samples_path and reused on later runs.
        """
        def _remap_filenames(all_samples, remap):
            # Apply remap() to every sample's filename, handling both the flat
            # list layout and the per-person nested layout. Replaces three
            # copy-pasted if/else blocks in the original.
            if person_id_mode:
                for samples in all_samples:
                    for sample in samples:
                        sample.filename = remap(sample.filename)
            else:
                for sample in all_samples:
                    sample.filename = remap(sample.filename)

        cache = SampleLoader.cache

        if str(samples_path) not in cache.keys():
            cache[str(samples_path)] = [None]*SampleType.QTY

        datas = cache[str(samples_path)]

        if            sample_type == SampleType.IMAGE:
            if  datas[sample_type] is None:
                datas[sample_type] = [ Sample(filename=filename) for filename in io.progress_bar_generator( Path_utils.get_image_paths(samples_path), "Loading") ]
        elif          sample_type == SampleType.FACE:
            if  datas[sample_type] is None:

                if not use_caching:
                    datas[sample_type] = SampleLoader.upgradeToFaceSamples( [ Sample(filename=filename) for filename in Path_utils.get_image_paths(samples_path) ] )
                else:
                    samples_dat = samples_path / 'samples.dat'
                    if samples_dat.exists():
                        io.log_info (f"Using saved samples info from '{samples_dat}' ")

                        all_samples = pickle.loads(samples_dat.read_bytes())
                        # stored filenames are relative — rebase onto samples_path
                        _remap_filenames(all_samples, lambda f: str( samples_path / Path(f) ))
                        datas[sample_type] = all_samples
                    else:
                        if person_id_mode:
                            dir_names = Path_utils.get_all_dir_names(samples_path)
                            all_samples = []
                            for i, dir_name in io.progress_bar_generator( [*enumerate(dir_names)] , "Loading"):
                                all_samples += [ SampleLoader.upgradeToFaceSamples( [ Sample(filename=filename, person_id=i) for filename in Path_utils.get_image_paths( samples_path / dir_name  ) ], silent=True ) ]
                            datas[sample_type] = all_samples
                        else:
                            datas[sample_type] = all_samples = SampleLoader.upgradeToFaceSamples( [ Sample(filename=filename) for filename in Path_utils.get_image_paths(samples_path) ] )

                        # persist with path-relative filenames so the cache file
                        # survives moving the dataset directory...
                        _remap_filenames(all_samples, lambda f: str(Path(f).relative_to(samples_path)))
                        samples_dat.write_bytes (pickle.dumps(all_samples))
                        # ...then restore absolute paths for in-memory use
                        _remap_filenames(all_samples, lambda f: str( samples_path / Path(f) ))

        elif          sample_type == SampleType.FACE_TEMPORAL_SORTED:
            if  datas[sample_type] is None:
                datas[sample_type] = SampleLoader.upgradeToFaceTemporalSortedSamples( SampleLoader.load(SampleType.FACE, samples_path) )

        elif          sample_type == SampleType.FACE_YAW_SORTED:
            if  datas[sample_type] is None:
                datas[sample_type] = SampleLoader.upgradeToFaceYawSortedSamples( SampleLoader.load(SampleType.FACE, samples_path) )

        elif          sample_type == SampleType.FACE_YAW_SORTED_AS_TARGET:
            if  datas[sample_type] is None:
                if target_samples_path is None:
                    raise Exception('target_samples_path is None for FACE_YAW_SORTED_AS_TARGET')
                datas[sample_type] = SampleLoader.upgradeToFaceYawSortedAsTargetSamples( SampleLoader.load(SampleType.FACE_YAW_SORTED, samples_path), SampleLoader.load(SampleType.FACE_YAW_SORTED, target_samples_path) )

        return datas[sample_type]
Example #15
0
    def __init__(self, is_interactive, converter_session_filepath,
                 predictor_func, predictor_input_shape, converter_config,
                 frames, output_path, model_iter):
        """Prepare the conversion subprocess pool and optionally resume a saved session.

        Args:
            is_interactive: when True, offer to resume the on-disk session file.
            converter_session_filepath: path of the pickled session-state file.
            predictor_func: model prediction callable; shared with the worker
                processes through SubprocessFunctionCaller.
            predictor_input_shape: input shape the predictor expects
                (forwarded to the worker CLIs).
            converter_config: ConverterConfig instance; a copy is placed on the
                first frame when no saved session is used.
            frames: non-empty list of Frame objects to convert.
            output_path: directory (Path-like) that receives one .png per frame.
            model_iter: current training iteration of the model; a mismatch
                with the saved session forces every frame to be recomputed.

        Raises:
            ValueError: if frames is empty.
        """
        if len(frames) == 0:
            raise ValueError("len (frames) == 0")

        # 24h per-client timeout while debugging, 60s otherwise.
        super().__init__('Converter',
                         ConvertSubprocessor.Cli,
                         86400 if CONVERTER_DEBUG else 60,
                         io_loop_sleep_time=0.001,
                         initialize_subprocesses_in_serial=False)

        self.is_interactive = is_interactive
        self.converter_session_filepath = Path(converter_session_filepath)
        self.converter_config = converter_config

        # Dummy predict and sleep: lets tensorflow cache its kernels.
        # If this warm-up is removed, conversion can sometimes run ~2x slower.
        predictor_func(dummy_predict=True)
        time.sleep(2)

        # Wrap the predictor so worker processes can invoke it in this host process.
        self.predictor_func_host, self.predictor_func = SubprocessFunctionCaller.make_pair(
            predictor_func)
        self.predictor_input_shape = predictor_input_shape

        # Super-resolution backends, created lazily on first use.
        # NOTE(review): self.dcscn is never assigned elsewhere in this block —
        # possibly vestigial; confirm before removing.
        self.dcscn = None
        self.ranksrgan = None

        def superres_func(mode, *args, **kwargs):
            # mode 1 -> RankSRGAN upscale; any other mode returns None.
            if mode == 1:
                if self.ranksrgan is None:
                    self.ranksrgan = imagelib.RankSRGAN()
                return self.ranksrgan.upscale(*args, **kwargs)

        self.dcscn_host, self.superres_func = SubprocessFunctionCaller.make_pair(
            superres_func)

        self.output_path = output_path
        self.model_iter = model_iter

        # Cap worker/prefetch count at 6 regardless of available CPU cores.
        self.prefetch_frame_count = self.process_count = min(
            6, multiprocessing.cpu_count())

        # --- optional restore of a previously saved interactive session ----
        session_data = None
        if self.is_interactive and self.converter_session_filepath.exists():

            if io.input_bool("Use saved session? (y/n skip:y) : ", True):
                try:
                    with open(str(self.converter_session_filepath), "rb") as f:
                        session_data = pickle.loads(f.read())
                except Exception:
                    # Best effort: a corrupt or unreadable session file is
                    # ignored and a fresh session is started instead.
                    pass

        self.frames = frames
        self.frames_idxs = [*range(len(self.frames))]
        self.frames_done_idxs = []

        if self.is_interactive and session_data is not None:
            s_frames = session_data.get('frames', None)
            s_frames_idxs = session_data.get('frames_idxs', None)
            s_frames_done_idxs = session_data.get('frames_done_idxs', None)
            s_model_iter = session_data.get('model_iter', None)

            # The saved session is only usable if it is complete and covers
            # the same number of frames...
            frames_equal = (s_frames is not None) and \
                           (s_frames_idxs is not None) and \
                           (s_frames_done_idxs is not None) and \
                           (s_model_iter is not None) and \
                           (len(frames) == len(s_frames))

            if frames_equal:
                # ...with the same filenames in the same order.
                for i in range(len(frames)):
                    frame = frames[i]
                    s_frame = s_frames[i]
                    if frame.frame_info.filename != s_frame.frame_info.filename:
                        frames_equal = False
                    if not frames_equal:
                        break

            if frames_equal:
                io.log_info(
                    'Using saved session from ' +
                    '/'.join(self.converter_session_filepath.parts[-2:]))

                for frame in s_frames:
                    if frame.cfg is not None:
                        # Recreate the ConverterConfig via its constructor with
                        # get_config() as dict params, so that if any new param
                        # was added since the session was saved, the old
                        # session still works properly.
                        frame.cfg = frame.cfg.__class__(
                            **frame.cfg.get_config())

                self.frames = s_frames
                self.frames_idxs = s_frames_idxs
                self.frames_done_idxs = s_frames_done_idxs

                if self.model_iter != s_model_iter:
                    # Model is more trained than when the session was saved:
                    # mark every frame for recomputation.
                    for frame in self.frames:
                        frame.is_done = False

                if self.model_iter != s_model_iter or \
                    len(self.frames_idxs) == 0:
                    # Rewind to the beginning if the model is more trained or
                    # all frames are already done.

                    while len(self.frames_done_idxs) > 0:
                        prev_frame = self.frames[self.frames_done_idxs.pop()]
                        self.frames_idxs.insert(0, prev_frame.idx)

                if len(self.frames_idxs) != 0:
                    # Force the current frame to be re-shown in the UI.
                    cur_frame = self.frames[self.frames_idxs[0]]
                    cur_frame.is_shown = False

            if not frames_equal:
                session_data = None

        if session_data is None:
            # Fresh session: remove all images previously written to output_path.
            for filename in Path_utils.get_image_paths(
                    self.output_path):  #remove all images in output_path
                Path(filename).unlink()

            # Seed the first frame with the configured converter settings.
            frames[0].cfg = self.converter_config.copy()

        # (Re)assign stable indices and per-frame output filenames.
        for i in range(len(self.frames)):
            frame = self.frames[i]
            frame.idx = i
            frame.output_filename = self.output_path / (
                Path(frame.frame_info.filename).stem + '.png')
Example #16
0
    def __init__(self,
                 model_path,
                 training_data_src_path=None,
                 training_data_dst_path=None,
                 debug=False,
                 device_args=None,
                 ask_write_preview_history=True,
                 ask_target_iter=True,
                 ask_batch_size=True,
                 ask_sort_by_yaw=True,
                 ask_random_flip=True,
                 ask_src_scale_mod=True):
        """Load (or first-time initialize) model state and interactively gather options.

        Args:
            model_path: directory holding the model's storage files.
            training_data_src_path / training_data_dst_path: training facesets;
                training mode is enabled only when both are given.
            debug: when True, batch size is forced to 1.
            device_args: optional dict with 'force_gpu_idx' and 'cpu_only' keys.
            ask_*: flags controlling which options are interactively prompted
                for on first run (or on override).
        """
        # Guard the None default: the original code subscripted device_args
        # directly, which raised TypeError whenever the default was used.
        if device_args is None:
            device_args = {}
        device_args['force_gpu_idx'] = device_args.get('force_gpu_idx', -1)
        device_args['cpu_only'] = device_args.get('cpu_only', False)

        # Let the user pick a GPU when several are available and none was forced.
        if device_args['force_gpu_idx'] == -1 and not device_args['cpu_only']:
            idxs_names_list = nnlib.device.getValidDevicesIdxsWithNamesList()
            if len(idxs_names_list) > 1:
                io.log_info("You have multi GPUs in a system: ")
                for idx, name in idxs_names_list:
                    io.log_info("[%d] : %s" % (idx, name))

                device_args['force_gpu_idx'] = io.input_int(
                    "Which GPU idx to choose? ( skip: best GPU ) : ", -1,
                    [x[0] for x in idxs_names_list])
        self.device_args = device_args

        self.device_config = nnlib.DeviceConfig(allow_growth=False,
                                                **self.device_args)

        io.log_info("Loading model...")

        self.model_path = model_path
        self.model_data_path = Path(
            self.get_strpath_storage_for_file('data.dat'))

        self.training_data_src_path = training_data_src_path
        self.training_data_dst_path = training_data_dst_path

        self.src_images_paths = None
        self.dst_images_paths = None
        self.src_yaw_images_paths = None
        self.dst_yaw_images_paths = None
        self.src_data_generator = None
        self.dst_data_generator = None
        self.debug = debug
        self.is_training_mode = (training_data_src_path is not None
                                 and training_data_dst_path is not None)

        self.iter = 0
        self.options = {}
        self.loss_history = []
        self.sample_for_preview = None

        # --- load previously saved model state, if any ---------------------
        model_data = {}
        if self.model_data_path.exists():
            model_data = pickle.loads(self.model_data_path.read_bytes())
            # Legacy checkpoints stored 'epoch' instead of 'iter'.
            self.iter = max(model_data.get('iter', 0),
                            model_data.get('epoch', 0))
            # NOTE(review): self.options is still {} at this point, so this
            # legacy-key cleanup can never fire; it was likely meant to run
            # after self.options is loaded below — confirm before relying on it.
            if 'epoch' in self.options:
                self.options.pop('epoch')
            if self.iter != 0:
                self.options = model_data['options']
                self.loss_history = model_data[
                    'loss_history'] if 'loss_history' in model_data.keys(
                    ) else []
                self.sample_for_preview = model_data[
                    'sample_for_preview'] if 'sample_for_preview' in model_data.keys(
                    ) else None

        # On a resumed training run, give the user a 2s window to re-enter options.
        ask_override = self.is_training_mode and self.iter != 0 and io.input_in_time(
            "Press enter in 2 seconds to override model settings.", 2)

        yn_str = {True: 'y', False: 'n'}

        if self.iter == 0:
            io.log_info(
                "\nModel first run. Enter model options as default for each run."
            )

        # --- interactive option prompts ------------------------------------
        if ask_write_preview_history and (self.iter == 0 or ask_override):
            default_write_preview_history = False if self.iter == 0 else self.options.get(
                'write_preview_history', False)
            self.options['write_preview_history'] = io.input_bool(
                "Write preview history? (y/n ?:help skip:%s) : " %
                (yn_str[default_write_preview_history]),
                default_write_preview_history,
                help_message=
                "Preview history will be writed to <ModelName>_history folder."
            )
        else:
            self.options['write_preview_history'] = self.options.get(
                'write_preview_history', False)

        if ask_target_iter:
            if (self.iter == 0 or ask_override):
                self.options['target_iter'] = max(
                    0,
                    io.input_int(
                        "Target iteration (skip:unlimited/default) : ", 0))
            else:
                # Migrate legacy 'target_epoch' into 'target_iter'.
                self.options['target_iter'] = max(
                    model_data.get('target_iter', 0),
                    self.options.get('target_epoch', 0))
                if 'target_epoch' in self.options:
                    self.options.pop('target_epoch')

        if ask_batch_size and (self.iter == 0 or ask_override):
            default_batch_size = 0 if self.iter == 0 else self.options.get(
                'batch_size', 0)
            self.options['batch_size'] = max(
                0,
                io.input_int(
                    "Batch_size (?:help skip:%d) : " % (default_batch_size),
                    default_batch_size,
                    help_message=
                    "Larger batch size is always better for NN's generalization, but it can cause Out of Memory error. Tune this value for your videocard manually."
                ))
        else:
            self.options['batch_size'] = self.options.get('batch_size', 0)

        # The next three options are only asked on the very first run.
        if ask_sort_by_yaw:
            if (self.iter == 0):
                self.options['sort_by_yaw'] = io.input_bool(
                    "Feed faces to network sorted by yaw? (y/n ?:help skip:n) : ",
                    False,
                    help_message=
                    "NN will not learn src face directions that don't match dst face directions. Do not use if the dst face has hair that covers the jaw."
                )
            else:
                self.options['sort_by_yaw'] = self.options.get(
                    'sort_by_yaw', False)

        if ask_random_flip:
            if (self.iter == 0):
                self.options['random_flip'] = io.input_bool(
                    "Flip faces randomly? (y/n ?:help skip:y) : ",
                    True,
                    help_message=
                    "Predicted face will look more naturally without this option, but src faceset should cover all face directions as dst faceset."
                )
            else:
                self.options['random_flip'] = self.options.get(
                    'random_flip', True)

        if ask_src_scale_mod:
            if (self.iter == 0):
                self.options['src_scale_mod'] = np.clip(
                    io.input_int(
                        "Src face scale modifier % ( -30...30, ?:help skip:0) : ",
                        0,
                        help_message=
                        "If src face shape is wider than dst, try to decrease this value to get a better result."
                    ), -30, 30)
            else:
                self.options['src_scale_mod'] = self.options.get(
                    'src_scale_mod', 0)

        # --- cache options as attributes; drop default-valued keys so they
        # --- are re-asked / recomputed on the next run ----------------------
        self.write_preview_history = self.options.get('write_preview_history',
                                                      False)
        if not self.write_preview_history and 'write_preview_history' in self.options:
            self.options.pop('write_preview_history')

        self.target_iter = self.options.get('target_iter', 0)
        if self.target_iter == 0 and 'target_iter' in self.options:
            self.options.pop('target_iter')

        self.batch_size = self.options.get('batch_size', 0)
        self.sort_by_yaw = self.options.get('sort_by_yaw', False)
        self.random_flip = self.options.get('random_flip', True)

        self.src_scale_mod = self.options.get('src_scale_mod', 0)
        if self.src_scale_mod == 0 and 'src_scale_mod' in self.options:
            self.options.pop('src_scale_mod')

        # Subclass hook: ask model-specific options.
        self.onInitializeOptions(self.iter == 0, ask_override)

        # Import the backend only after the device config is final.
        nnlib.import_all(self.device_config)
        self.keras = nnlib.keras
        self.K = nnlib.keras.backend

        # Subclass hook: build the actual network.
        self.onInitialize()

        self.options['batch_size'] = self.batch_size

        if self.debug or self.batch_size == 0:
            self.batch_size = 1

        if self.is_training_mode:
            # Preview-history folder name includes the GPU idx when one was forced.
            if self.device_args['force_gpu_idx'] == -1:
                self.preview_history_path = self.model_path / (
                    '%s_history' % (self.get_model_name()))
            else:
                self.preview_history_path = self.model_path / (
                    '%d_%s_history' %
                    (self.device_args['force_gpu_idx'], self.get_model_name()))

            if self.write_preview_history or io.is_colab():
                if not self.preview_history_path.exists():
                    self.preview_history_path.mkdir(exist_ok=True)
                else:
                    # Fresh model: clear stale previews from earlier runs.
                    if self.iter == 0:
                        for filename in Path_utils.get_image_paths(
                                self.preview_history_path):
                            Path(filename).unlink()

            if self.generator_list is None:
                raise ValueError('You didnt set_training_data_generators()')
            else:
                for i, generator in enumerate(self.generator_list):
                    if not isinstance(generator, SampleGeneratorBase):
                        raise ValueError(
                            'training data generator is not subclass of SampleGeneratorBase'
                        )

            if (self.sample_for_preview is None) or (self.iter == 0):
                self.sample_for_preview = self.generate_next_sample()

        # --- human-readable summary ----------------------------------------
        model_summary_text = []

        model_summary_text += ["===== Model summary ====="]
        model_summary_text += ["== Model name: " + self.get_model_name()]
        model_summary_text += ["=="]
        model_summary_text += ["== Current iteration: " + str(self.iter)]
        model_summary_text += ["=="]
        model_summary_text += ["== Model options:"]
        for key in self.options.keys():
            model_summary_text += ["== |== %s : %s" % (key, self.options[key])]

        if self.device_config.multi_gpu:
            model_summary_text += ["== |== multi_gpu : True "]

        model_summary_text += ["== Running on:"]
        if self.device_config.cpu_only:
            model_summary_text += ["== |== [CPU]"]
        else:
            for idx in self.device_config.gpu_idxs:
                model_summary_text += [
                    "== |== [%d : %s]" % (idx, nnlib.device.getDeviceName(idx))
                ]

        if not self.device_config.cpu_only and self.device_config.gpu_vram_gb[
                0] == 2:
            model_summary_text += ["=="]
            model_summary_text += [
                "== WARNING: You are using 2GB GPU. Result quality may be significantly decreased."
            ]
            model_summary_text += [
                "== If training does not start, close all programs and try again."
            ]
            model_summary_text += [
                "== Also you can disable Windows Aero Desktop to get extra free VRAM."
            ]
            model_summary_text += ["=="]

        model_summary_text += ["========================="]
        model_summary_text = "\r\n".join(model_summary_text)
        self.model_summary_text = model_summary_text
        io.log_info(model_summary_text)
Example #17
0
def main(args, device_args):
    """Converter entry point: load the model, collect per-frame alignment info
    and motion vectors, then run the ConvertSubprocessor over all input frames.

    Args:
        args: dict with 'input_dir', 'output_dir', 'model_dir', 'model_name'
            and optional 'training_data_src_dir' / 'aligned_dir' keys.
        device_args: device selection dict, forwarded to the model constructor.

    All errors are caught, printed with a traceback, and swallowed.
    """
    io.log_info("Running converter.\r\n")

    training_data_src_dir = args.get('training_data_src_dir', None)
    training_data_src_path = Path(
        training_data_src_dir) if training_data_src_dir is not None else None
    aligned_dir = args.get('aligned_dir', None)

    try:
        input_path = Path(args['input_dir'])
        output_path = Path(args['output_dir'])
        model_path = Path(args['model_dir'])

        if not input_path.exists():
            io.log_err('Input directory not found. Please ensure it exists.')
            return

        if not output_path.exists():
            output_path.mkdir(parents=True, exist_ok=True)

        if not model_path.exists():
            io.log_err('Model directory not found. Please ensure it exists.')
            return

        # Colab has no interactive window, so force non-interactive there.
        is_interactive = io.input_bool(
            "Use interactive converter? (y/n skip:y) : ",
            True) if not io.is_colab() else False

        import models
        model = models.import_model(args['model_name'])(
            model_path,
            device_args=device_args,
            training_data_src_path=training_data_src_path)
        converter_session_filepath = model.get_strpath_storage_for_file(
            'converter_session.dat')
        predictor_func, predictor_input_shape, cfg = model.get_ConverterConfig(
        )

        if not is_interactive:
            cfg.ask_settings()

        input_path_image_paths = Path_utils.get_image_paths(input_path)

        # Guard: if neither converter-type branch below runs, frames would
        # otherwise be unbound at the final length check (NameError).
        frames = []

        if cfg.type == ConverterConfig.TYPE_MASKED:
            if aligned_dir is None:
                io.log_err(
                    'Aligned directory not found. Please ensure it exists.')
                return

            aligned_path = Path(aligned_dir)
            if not aligned_path.exists():
                io.log_err(
                    'Aligned directory not found. Please ensure it exists.')
                return

            # Map source-frame stem -> list of landmark sets extracted from
            # the aligned face images.
            alignments = {}
            multiple_faces_detected = False
            aligned_path_image_paths = Path_utils.get_image_paths(aligned_path)
            for filepath in io.progress_bar_generator(aligned_path_image_paths,
                                                      "Collecting alignments"):
                filepath = Path(filepath)

                if filepath.suffix == '.png':
                    dflimg = DFLPNG.load(str(filepath))
                elif filepath.suffix == '.jpg':
                    dflimg = DFLJPG.load(str(filepath))
                else:
                    dflimg = None

                if dflimg is None:
                    io.log_err("%s is not a dfl image file" % (filepath.name))
                    continue

                source_filename = dflimg.get_source_filename()
                if source_filename is None or source_filename == "_":
                    continue

                source_filename = Path(source_filename)
                source_filename_stem = source_filename.stem

                if source_filename_stem not in alignments.keys():
                    alignments[source_filename_stem] = []

                alignments_ar = alignments[source_filename_stem]
                alignments_ar.append(dflimg.get_source_landmarks())
                if len(alignments_ar) > 1:
                    multiple_faces_detected = True

            if multiple_faces_detected:
                io.log_info(
                    "Warning: multiple faces detected. Strongly recommended to process them separately."
                )

            frames = [
                ConvertSubprocessor.Frame(frame_info=FrameInfo(
                    filename=p,
                    landmarks_list=alignments.get(Path(p).stem, None)))
                for p in input_path_image_paths
            ]

            if multiple_faces_detected:
                io.log_info(
                    "Warning: multiple faces detected. Motion blur will not be used."
                )
            else:
                # Estimate per-frame motion from the displacement of a fixed
                # face-space reference point between previous and next frames.
                s = 256
                local_pts = [(s // 2 - 1, s // 2 - 1),
                             (s // 2 - 1, 0)]  #center+up
                frames_len = len(frames)
                for i in io.progress_bar_generator(range(len(frames)),
                                                   "Computing motion vectors"):
                    # Neighbor indices are clamped at sequence boundaries.
                    fi_prev = frames[max(0, i - 1)].frame_info
                    fi = frames[i].frame_info
                    fi_next = frames[min(i + 1, frames_len - 1)].frame_info
                    if len(fi_prev.landmarks_list) == 0 or \
                       len(fi.landmarks_list) == 0 or \
                       len(fi_next.landmarks_list) == 0:
                        continue

                    mat_prev = LandmarksProcessor.get_transform_mat(
                        fi_prev.landmarks_list[0], s, face_type=FaceType.FULL)
                    mat = LandmarksProcessor.get_transform_mat(
                        fi.landmarks_list[0], s, face_type=FaceType.FULL)
                    mat_next = LandmarksProcessor.get_transform_mat(
                        fi_next.landmarks_list[0], s, face_type=FaceType.FULL)

                    pts_prev = LandmarksProcessor.transform_points(
                        local_pts, mat_prev, True)
                    pts = LandmarksProcessor.transform_points(
                        local_pts, mat, True)
                    pts_next = LandmarksProcessor.transform_points(
                        local_pts, mat_next, True)

                    # (Dead locals prev_vector/next_vector removed: only the
                    # prev->next displacement is actually used.)
                    motion_vector = pts_next[0] - pts_prev[0]
                    fi.motion_power = npla.norm(motion_vector)

                    motion_vector = motion_vector / fi.motion_power if fi.motion_power != 0 else np.array(
                        [0, 0], dtype=np.float32)

                    fi.motion_deg = -math.atan2(
                        motion_vector[1], motion_vector[0]) * 180 / math.pi

        elif cfg.type == ConverterConfig.TYPE_FACE_AVATAR:
            # Collect (FrameInfo, source_filename) for every dfl image, then
            # sort by source filename to restore temporal order.
            filesdata = []
            for filepath in io.progress_bar_generator(input_path_image_paths,
                                                      "Collecting info"):
                filepath = Path(filepath)

                if filepath.suffix == '.png':
                    dflimg = DFLPNG.load(str(filepath))
                elif filepath.suffix == '.jpg':
                    dflimg = DFLJPG.load(str(filepath))
                else:
                    dflimg = None

                if dflimg is None:
                    io.log_err("%s is not a dfl image file" % (filepath.name))
                    continue
                filesdata += [
                    (FrameInfo(filename=str(filepath),
                               landmarks_list=[dflimg.get_landmarks()]),
                     dflimg.get_source_filename())
                ]

            filesdata = sorted(filesdata,
                               key=operator.itemgetter(1))  #sort by filename
            frames = []
            filesdata_len = len(filesdata)
            for i in range(len(filesdata)):
                frame_info = filesdata[i][0]

                # Build temporal context windows, clamped at the boundaries.
                prev_temporal_frame_infos = []
                next_temporal_frame_infos = []

                for t in range(cfg.temporal_face_count):
                    prev_frame_info = filesdata[max(i - t, 0)][0]
                    next_frame_info = filesdata[min(i + t,
                                                    filesdata_len - 1)][0]

                    prev_temporal_frame_infos.insert(0, prev_frame_info)
                    next_temporal_frame_infos.append(next_frame_info)

                frames.append(
                    ConvertSubprocessor.Frame(
                        prev_temporal_frame_infos=prev_temporal_frame_infos,
                        frame_info=frame_info,
                        next_temporal_frame_infos=next_temporal_frame_infos))

        if len(frames) == 0:
            io.log_info("No frames to convert in input_dir.")
        else:
            ConvertSubprocessor(
                is_interactive=is_interactive,
                converter_session_filepath=converter_session_filepath,
                predictor_func=predictor_func,
                predictor_input_shape=predictor_input_shape,
                converter_config=cfg,
                frames=frames,
                output_path=output_path,
                model_iter=model.get_iter()).run()

        model.finalize()

    except Exception as e:
        print('Error: %s' % (str(e)))
        traceback.print_exc()
Example #18
0
def main(input_dir, output_dir):
    """Experimental labeling tool: for every dfl image in input_dir, build a
    GrabCut trimask from the face landmarks, run cv2.grabCut, and display the
    intermediate/resulting masks side by side in an interactive window.

    Args:
        input_dir: directory of dfl .png/.jpg face images.
        output_dir: created if missing (nothing is currently written to it).

    Raises:
        ValueError: if input_dir does not exist.
    """
    input_path = Path(input_dir)
    output_path = Path(output_dir)

    if not input_path.exists():
        raise ValueError('Input directory not found. Please ensure it exists.')

    if not output_path.exists():
        output_path.mkdir(parents=True)

    wnd_name = "Labeling tool"
    io.named_window(wnd_name)
    io.capture_mouse(wnd_name)
    io.capture_keys(wnd_name)

    #for filename in io.progress_bar_generator (Path_utils.get_image_paths(input_path), desc="Labeling"):
    for filename in Path_utils.get_image_paths(input_path):
        filepath = Path(filename)

        if filepath.suffix == '.png':
            dflimg = DFLPNG.load(str(filepath))
        elif filepath.suffix == '.jpg':
            dflimg = DFLJPG.load(str(filepath))
        else:
            dflimg = None

        if dflimg is None:
            io.log_err("%s is not a dfl image file" % (filepath.name))
            continue

        lmrks = dflimg.get_landmarks()
        lmrks_list = lmrks.tolist()
        orig_img = cv2_imread(str(filepath))
        h, w, c = orig_img.shape

        # Convex hull of the landmarks, eroded/dilated to get conservative
        # foreground / generous background estimates.
        mask_orig = LandmarksProcessor.get_image_hull_mask(
            orig_img.shape, lmrks).astype(np.uint8)[:, :, 0]
        ero_dil_rate = w // 8
        mask_ero = cv2.erode(
            mask_orig,
            cv2.getStructuringElement(cv2.MORPH_ELLIPSE,
                                      (ero_dil_rate, ero_dil_rate)),
            iterations=1)
        mask_dil = cv2.dilate(mask_orig,
                              cv2.getStructuringElement(
                                  cv2.MORPH_ELLIPSE,
                                  (ero_dil_rate, ero_dil_rate)),
                              iterations=1)

        # GrabCut layers: definite bg/fg and probable bg/fg.
        #mask_bg = np.zeros(orig_img.shape[:2],np.uint8)
        mask_bg = 1 - mask_dil
        mask_bgp = np.ones(orig_img.shape[:2],
                           np.uint8)  #default - all background possible
        mask_fg = np.zeros(orig_img.shape[:2], np.uint8)
        mask_fgp = np.zeros(orig_img.shape[:2], np.uint8)

        img = orig_img.copy()

        l_thick = 2

        def draw_4_lines(masks_out, pts, thickness=1):
            # Draw the polyline pts into all four trimask layers at once,
            # offset perpendicular to each segment.
            # NOTE(review): currently unused in this function (draw_lines is
            # used instead); kept for experimentation.
            fgp, fg, bg, bgp = masks_out
            h, w = fg.shape

            fgp_pts = []
            fg_pts = np.array([pts[i:i + 2] for i in range(len(pts) - 1)])
            bg_pts = []
            bgp_pts = []

            for i in range(len(fg_pts)):
                a, b = line = fg_pts[i]

                ba = b - a
                v = ba / npl.norm(ba)

                # Perpendiculars on both sides of the segment direction v.
                ccpv = np.array([v[1], -v[0]])
                cpv = np.array([-v[1], v[0]])
                step = 1 / max(np.abs(cpv))

                # np.int was removed in NumPy >= 1.24; builtin int is identical.
                fgp_pts.append(
                    np.clip(line + ccpv * step * thickness, 0,
                            w - 1).astype(int))
                bg_pts.append(
                    np.clip(line + cpv * step * thickness, 0,
                            w - 1).astype(int))
                bgp_pts.append(
                    np.clip(line + cpv * step * thickness * 2, 0,
                            w - 1).astype(int))

            fgp_pts = np.array(fgp_pts)
            bg_pts = np.array(bg_pts)
            bgp_pts = np.array(bgp_pts)

            cv2.polylines(fgp, fgp_pts, False, (1, ), thickness=thickness)
            cv2.polylines(fg, fg_pts, False, (1, ), thickness=thickness)
            cv2.polylines(bg, bg_pts, False, (1, ), thickness=thickness)
            cv2.polylines(bgp, bgp_pts, False, (1, ), thickness=thickness)

        def draw_lines(masks_steps, pts, thickness=1):
            # Draw the polyline pts into each mask, shifted perpendicular to
            # each segment by (thickness * step) pixels; step 0 = on the line,
            # negative/positive = either side.
            lines = np.array([pts[i:i + 2] for i in range(len(pts) - 1)])

            for mask, step in masks_steps:
                h, w = mask.shape

                mask_lines = []
                for i in range(len(lines)):
                    a, b = line = lines[i]
                    ba = b - a
                    ba_len = npl.norm(ba)
                    if ba_len != 0:
                        v = ba / ba_len
                        pv = np.array([-v[1], v[0]])
                        pv_inv_max = 1 / max(np.abs(pv))
                        # np.int removed in NumPy >= 1.24; int is equivalent.
                        mask_lines.append(
                            np.clip(line + pv * pv_inv_max * thickness * step,
                                    0, w - 1).astype(int))
                    else:
                        # Degenerate zero-length segment: draw it unshifted.
                        mask_lines.append(np.array(line, dtype=int))
                cv2.polylines(mask,
                              mask_lines,
                              False, (1, ),
                              thickness=thickness)

        def draw_fill_convex(mask_out, pts, scale=1.0):
            # Fill the convex hull of pts into mask_out, optionally scaled
            # about the hull's centroid.
            hull = cv2.convexHull(np.array(pts))

            if scale != 1.0:
                pts_count = hull.shape[0]

                sum_x = np.sum(hull[:, 0, 0])
                sum_y = np.sum(hull[:, 0, 1])

                hull_center = np.array([sum_x / pts_count, sum_y / pts_count])
                hull = hull_center + (hull - hull_center) * scale
                hull = hull.astype(pts.dtype)
            cv2.fillConvexPoly(mask_out, hull, (1, ))

        def get_gc_mask_bgr(gc_mask):
            # Visualize a GrabCut mask: bg=black, fg=white, probable-bg=red,
            # probable-fg=green.
            h, w = gc_mask.shape
            bgr = np.zeros((h, w, 3), dtype=np.uint8)

            bgr[gc_mask == 0] = (0, 0, 0)
            bgr[gc_mask == 1] = (255, 255, 255)
            bgr[gc_mask == 2] = (0, 0, 255)  #RED
            bgr[gc_mask == 3] = (0, 255, 0)  #GREEN
            return bgr

        def get_gc_mask_result(gc_mask):
            # Binary result: definite-fg (1) and probable-fg (3) count as face.
            # np.int removed in NumPy >= 1.24; int is equivalent.
            return np.where((gc_mask == 1) + (gc_mask == 3), 1,
                            0).astype(int)

        #convex inner of right chin to end of right eyebrow
        #draw_fill_convex ( mask_fgp, lmrks_list[8:17]+lmrks_list[26:27] )

        #convex inner of start right chin to right eyebrow
        #draw_fill_convex ( mask_fgp, lmrks_list[8:9]+lmrks_list[22:27] )

        #convex inner of nose
        draw_fill_convex(mask_fgp, lmrks[27:36])

        #convex inner of nose half
        draw_fill_convex(mask_fg, lmrks[27:36], scale=0.5)

        #left corner of mouth to left corner of nose
        #draw_lines ( [ (mask_fg,0),   ], lmrks_list[49:50]+lmrks_list[32:33], l_thick)

        #convex inner: right corner of nose to centers of eyebrows
        #draw_fill_convex ( mask_fgp, lmrks_list[35:36]+lmrks_list[19:20]+lmrks_list[24:25])

        #right corner of mouth to right corner of nose
        #draw_lines ( [ (mask_fg,0),   ], lmrks_list[54:55]+lmrks_list[35:36], l_thick)

        #left eye
        #draw_fill_convex ( mask_fg, lmrks_list[36:40] )
        #right eye
        #draw_fill_convex ( mask_fg, lmrks_list[42:48] )

        #right chin
        draw_lines([
            (mask_bg, 0),
            (mask_fg, -1),
        ], lmrks[8:17], l_thick)

        #left eyebrow center to right eyeprow center
        draw_lines([
            (mask_bg, -1),
            (mask_fg, 0),
        ], lmrks_list[19:20] + lmrks_list[24:25], l_thick)
        #        #draw_lines ( [ (mask_bg,-1), (mask_fg,0),   ], lmrks_list[24:25] + lmrks_list[19:17:-1], l_thick)

        #half right eyebrow to end of right chin
        draw_lines([
            (mask_bg, -1),
            (mask_fg, 0),
        ], lmrks_list[24:27] + lmrks_list[16:17], l_thick)

        #import code
        #code.interact(local=dict(globals(), **locals()))

        # Compose the mask layers into a single GrabCut trimask; later
        # assignments take precedence (definite labels overwrite probables).
        gc_mask = np.zeros(orig_img.shape[:2], np.uint8)
        gc_mask[mask_bgp == 1] = 2
        gc_mask[mask_fgp == 1] = 3
        gc_mask[mask_bg == 1] = 0
        gc_mask[mask_fg == 1] = 1

        gc_bgr_before = get_gc_mask_bgr(gc_mask)

        #io.show_image (wnd_name, gc_mask )

        ##points, hierarcy = cv2.findContours(original_mask,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
        ##gc_mask = ( (1-erode_mask)*2 + erode_mask )# * dilate_mask
        #gc_mask = (1-erode_mask)*2 + erode_mask
        #cv2.addWeighted(
        #gc_mask = mask_0_27 + (1-mask_0_27)*2
        #
        ##import code
        ##code.interact(local=dict(globals(), **locals()))
        #
        #rect = (1,1,img.shape[1]-2,img.shape[0]-2)
        #
        #
        # Refine the trimask in place with 5 GrabCut iterations.
        cv2.grabCut(img, gc_mask, None, np.zeros((1, 65), np.float64),
                    np.zeros((1, 65), np.float64), 5, cv2.GC_INIT_WITH_MASK)

        gc_bgr = get_gc_mask_bgr(gc_mask)
        gc_mask_result = get_gc_mask_result(gc_mask)
        gc_mask_result_1 = gc_mask_result[:, :, np.newaxis]

        #import code
        #code.interact(local=dict(globals(), **locals()))
        orig_img_gc_layers_masked = (0.5 * orig_img + 0.5 * gc_bgr).astype(
            np.uint8)
        orig_img_gc_before_layers_masked = (0.5 * orig_img +
                                            0.5 * gc_bgr_before).astype(
                                                np.uint8)

        pink_bg = np.full(orig_img.shape, (255, 0, 255), dtype=np.uint8)

        orig_img_result = orig_img * gc_mask_result_1
        orig_img_result_pinked = orig_img_result + pink_bg * (1 -
                                                              gc_mask_result_1)

        #io.show_image (wnd_name, blended_img)

        ##gc_mask, bgdModel, fgdModel =
        #
        #mask2 = np.where((gc_mask==1) + (gc_mask==3),255,0).astype('uint8')[:,:,np.newaxis]
        #mask2 = np.repeat(mask2, (3,), -1)
        #
        ##mask2 = np.where(gc_mask!=0,255,0).astype('uint8')
        #blended_img = orig_img #-\
        #              #0.3 * np.full(original_img.shape, (50,50,50)) * (1-mask_0_27)[:,:,np.newaxis]
        #              #0.3 * np.full(original_img.shape, (50,50,50)) * (1-dilate_mask)[:,:,np.newaxis] +\
        #              #0.3 * np.full(original_img.shape, (50,50,50)) * (erode_mask)[:,:,np.newaxis]
        #blended_img = np.clip(blended_img, 0, 255).astype(np.uint8)
        ##import code
        ##code.interact(local=dict(globals(), **locals()))
        orig_img_lmrked = orig_img.copy()
        LandmarksProcessor.draw_landmarks(orig_img_lmrked,
                                          lmrks,
                                          transparent_mask=True)

        # Side-by-side debug view: before/after GrabCut overlays, original,
        # landmarked, pink-matted result, raw result.
        screen = np.concatenate([
            orig_img_gc_before_layers_masked,
            orig_img_gc_layers_masked,
            orig_img,
            orig_img_lmrked,
            orig_img_result_pinked,
            orig_img_result,
        ],
                                axis=1)

        io.show_image(wnd_name, screen.astype(np.uint8))

        # Block on this image until SPACE is pressed; keys 1/2/3 reserved.
        while True:
            io.process_messages()

            for (x, y, ev, flags) in io.get_mouse_events(wnd_name):
                pass
                #print (x,y,ev,flags)

            key_events = [ev for ev, in io.get_key_events(wnd_name)]
            for key in key_events:
                if key == ord('1'):
                    pass
                if key == ord('2'):
                    pass
                if key == ord('3'):
                    pass

            if ord(' ') in key_events:
                break

    # NOTE(review): debug leftover — drops into an interactive REPL after the
    # loop; remove once this experimental tool stabilizes.
    import code
    code.interact(local=dict(globals(), **locals()))


#original_mask = np.ones(original_img.shape[:2],np.uint8)*2
#cv2.drawContours(original_mask, points, -1, (1,), 1)
Example #19
0
def sort_by_hist(input_path):
    """Return images in *input_path* ordered by histogram similarity.

    Delegates the pairwise comparison work to HistSsimSubprocessor.
    """
    io.log_info("根据[histogram similarity]排序...")
    image_paths = Path_utils.get_image_paths(input_path)
    return HistSsimSubprocessor(image_paths).run()
Example #20
0
def main(args, device_args):
    """Run the face converter over a directory of input frames.

    args : dict with 'input_dir', 'output_dir', 'model_dir', 'model_name'
           and optionally 'aligned_dir', 'debug'.
    device_args : dict of device options forwarded to the model constructor.
    """
    io.log_info("Running converter.\r\n")

    aligned_dir = args.get('aligned_dir', None)

    try:
        input_path = Path(args['input_dir'])
        output_path = Path(args['output_dir'])
        model_path = Path(args['model_dir'])

        if not input_path.exists():
            io.log_err('Input directory not found. Please ensure it exists.')
            return

        # Start from a clean output directory: remove stale frames, or create it.
        if output_path.exists():
            for filename in Path_utils.get_image_paths(output_path):
                Path(filename).unlink()
        else:
            output_path.mkdir(parents=True, exist_ok=True)

        if not model_path.exists():
            io.log_err('Model directory not found. Please ensure it exists.')
            return

        # Imported lazily: pulling in models loads the heavy NN stack.
        import models
        model = models.import_model(args['model_name'])(
            model_path, device_args=device_args)
        converter = model.get_converter()
        converter.dummy_predict()

        alignments = None

        if converter.type == Converter.TYPE_FACE:
            # Face conversion requires per-frame landmarks from the aligned set.
            if aligned_dir is None:
                io.log_err(
                    'Aligned directory not found. Please ensure it exists.')
                return

            aligned_path = Path(aligned_dir)
            if not aligned_path.exists():
                io.log_err(
                    'Aligned directory not found. Please ensure it exists.')
                return

            alignments = {}

            aligned_path_image_paths = Path_utils.get_image_paths(aligned_path)
            for filepath in io.progress_bar_generator(aligned_path_image_paths,
                                                      "Collecting alignments"):
                filepath = Path(filepath)

                # Case-insensitive dispatch; also accept the '.jpeg' spelling
                # (the original only matched lowercase '.png' / '.jpg').
                suffix = filepath.suffix.lower()
                if suffix == '.png':
                    dflimg = DFLPNG.load(str(filepath))
                elif suffix in ('.jpg', '.jpeg'):
                    dflimg = DFLJPG.load(str(filepath))
                else:
                    dflimg = None

                if dflimg is None:
                    io.log_err("%s is not a dfl image file" % (filepath.name))
                    continue

                # Group landmark sets by source-frame stem: a single frame may
                # contain several faces, hence a list per stem.
                source_filename_stem = Path(dflimg.get_source_filename()).stem
                if source_filename_stem not in alignments:
                    alignments[source_filename_stem] = []

                alignments[source_filename_stem].append(
                    dflimg.get_source_landmarks())

        files_processed, faces_processed = ConvertSubprocessor(
            converter=converter,
            input_path_image_paths=Path_utils.get_image_paths(input_path),
            output_path=output_path,
            alignments=alignments,
            debug=args.get('debug', False)).run()

        model.finalize()

    except Exception as e:
        # Route through io for consistency with the rest of this function
        # (the original used a bare print here).
        io.log_err('Error: %s' % (str(e)))
        traceback.print_exc()
Example #21
0
def sort_by_hue(input_path):
    """Return [path, mean_hue] pairs for every image, highest hue first.

    Hue is the H channel of the image converted BGR -> HSV.
    """
    io.log_info ("Sorting by hue...")
    paths = Path_utils.get_image_paths(input_path)
    img_list = []
    for path in io.progress_bar_generator(paths, "Loading"):
        hue_channel = cv2.cvtColor(cv2_imread(path), cv2.COLOR_BGR2HSV)[..., 0]
        img_list.append([path, np.mean(hue_channel.flatten())])
    io.log_info ("Sorting...")
    img_list.sort(key=operator.itemgetter(1), reverse=True)
    return img_list
Example #22
0
    def __init__(self, model_path, training_data_src_path=None, training_data_dst_path=None, pretraining_data_path=None, is_training=False, debug = False, device_args = None,
                 ask_enable_autobackup=True,
                 ask_write_preview_history=True,
                 ask_target_iter=True,
                 ask_batch_size=True,
                 ask_random_flip=True, **kwargs):
        """Base model constructor.

        Loads any previously saved model state, interactively asks the user
        for training options (on first run or on explicit override), sets up
        the device configuration, preview/autobackup folders and prints a
        formatted summary of the resulting configuration.

        model_path : directory holding model weights and 'data.dat'.
        training_data_src_path / training_data_dst_path / pretraining_data_path :
            sample directories; may be None when not training.
        is_training : enables training-mode setup (generators, previews, backups).
        debug : forces cpu_only and batch_size 1.
        device_args : optional dict of device options ('force_gpu_idx', 'cpu_only').
        ask_* : whether to prompt for the corresponding option.
        """
        # device_args is mutated below; the original crashed with TypeError
        # when callers relied on the documented default of None.
        if device_args is None:
            device_args = {}
        device_args['force_gpu_idx'] = device_args.get('force_gpu_idx',-1)
        device_args['cpu_only'] = True if debug else device_args.get('cpu_only',False)

        if device_args['force_gpu_idx'] == -1 and not device_args['cpu_only']:
            # No GPU was forced: if several are available, let the user pick.
            idxs_names_list = nnlib.device.getValidDevicesIdxsWithNamesList()
            if len(idxs_names_list) > 1:
                io.log_info ("You have multi GPUs in a system: ")
                for idx, name in idxs_names_list:
                    io.log_info ("[%d] : %s" % (idx, name) )

                device_args['force_gpu_idx'] = io.input_int("Which GPU idx to choose? ( skip: best GPU ) : ", -1, [ x[0] for x in idxs_names_list] )
        self.device_args = device_args

        self.device_config = nnlib.DeviceConfig(allow_growth=True, **self.device_args)

        io.log_info ("Loading model...")

        self.model_path = model_path
        self.model_data_path = Path( self.get_strpath_storage_for_file('data.dat') )

        self.training_data_src_path = training_data_src_path
        self.training_data_dst_path = training_data_dst_path
        self.pretraining_data_path = pretraining_data_path

        self.debug = debug
        self.is_training_mode = is_training

        self.iter = 0
        self.options = {}
        self.loss_history = []
        self.sample_for_preview = None

        model_data = {}
        if self.model_data_path.exists():
            # Restore state saved by a previous run. 'epoch' is the legacy
            # name for 'iter' in older model files.
            model_data = pickle.loads ( self.model_data_path.read_bytes() )
            self.iter = max( model_data.get('iter',0), model_data.get('epoch',0) )
            # NOTE(review): self.options is still {} here, so this legacy
            # cleanup can never fire; it may have been meant to run after
            # the options assignment below — confirm intent.
            if 'epoch' in self.options:
                self.options.pop('epoch')
            if self.iter != 0:
                self.options = model_data['options']
                self.loss_history = model_data.get('loss_history', [])
                self.sample_for_preview = model_data.get('sample_for_preview', None)

        # Give the user a short window to re-enter settings on an existing model.
        ask_override = self.is_training_mode and self.iter != 0 and io.input_in_time ("Press enter in 2 seconds to override model settings.", 5 if io.is_colab() else 2 )

        yn_str = {True:'y',False:'n'}

        if self.iter == 0:
            io.log_info ("\nModel first run.")

        if ask_enable_autobackup and (self.iter == 0 or ask_override):
            default_autobackup = False if self.iter == 0 else self.options.get('autobackup',False)
            self.options['autobackup'] = io.input_bool("Enable autobackup? (y/n ?:help skip:%s) : " % (yn_str[default_autobackup]) , default_autobackup, help_message="Autobackup model files with preview every hour for last 15 hours. Latest backup located in model/<>_autobackups/01")
        else:
            self.options['autobackup'] = self.options.get('autobackup', False)

        if ask_write_preview_history and (self.iter == 0 or ask_override):
            default_write_preview_history = False if self.iter == 0 else self.options.get('write_preview_history',False)
            self.options['write_preview_history'] = io.input_bool("Write preview history? (y/n ?:help skip:%s) : " % (yn_str[default_write_preview_history]) , default_write_preview_history, help_message="Preview history will be writed to <ModelName>_history folder.")
        else:
            self.options['write_preview_history'] = self.options.get('write_preview_history', False)

        # Manual preview-image choice is only offered where a window can be
        # shown (Windows) or randomized (Colab).
        if (self.iter == 0 or ask_override) and self.options['write_preview_history'] and io.is_support_windows():
            choose_preview_history = io.input_bool("Choose image for the preview history? (y/n skip:%s) : " % (yn_str[False]) , False)
        elif (self.iter == 0 or ask_override) and self.options['write_preview_history'] and io.is_colab():
            choose_preview_history = io.input_bool("Randomly choose new image for preview history? (y/n ?:help skip:%s) : " % (yn_str[False]), False, help_message="Preview image history will stay stuck with old faces if you reuse the same model on different celebs. Choose no unless you are changing src/dst to a new person")
        else:
            choose_preview_history = False

        if ask_target_iter:
            if (self.iter == 0 or ask_override):
                self.options['target_iter'] = max(0, io.input_int("Target iteration (skip:unlimited/default) : ", 0))
            else:
                # 'target_epoch' is the legacy key for 'target_iter'.
                self.options['target_iter'] = max(model_data.get('target_iter',0), self.options.get('target_epoch',0))
                if 'target_epoch' in self.options:
                    self.options.pop('target_epoch')

        if ask_batch_size and (self.iter == 0 or ask_override):
            default_batch_size = 0 if self.iter == 0 else self.options.get('batch_size',0)
            self.batch_size = max(0, io.input_int("Batch_size (?:help skip:%d) : " % (default_batch_size), default_batch_size, help_message="Larger batch size is better for NN's generalization, but it can cause Out of Memory error. Tune this value for your videocard manually."))
        else:
            self.batch_size = self.options.get('batch_size', 0)

        if ask_random_flip:
            default_random_flip = self.options.get('random_flip', True)
            if (self.iter == 0 or ask_override):
                self.options['random_flip'] = io.input_bool(f"Flip faces randomly? (y/n ?:help skip:{yn_str[default_random_flip]}) : ", default_random_flip, help_message="Predicted face will look more naturally without this option, but src faceset should cover all face directions as dst faceset.")
            else:
                self.options['random_flip'] = self.options.get('random_flip', default_random_flip)

        # Options equal to their defaults are dropped so they are not
        # persisted in data.dat.
        self.autobackup = self.options.get('autobackup', False)
        if not self.autobackup and 'autobackup' in self.options:
            self.options.pop('autobackup')

        self.write_preview_history = self.options.get('write_preview_history', False)
        if not self.write_preview_history and 'write_preview_history' in self.options:
            self.options.pop('write_preview_history')

        self.target_iter = self.options.get('target_iter',0)
        if self.target_iter == 0 and 'target_iter' in self.options:
            self.options.pop('target_iter')

        #self.batch_size = self.options.get('batch_size',0)
        self.sort_by_yaw = self.options.get('sort_by_yaw',False)
        self.random_flip = self.options.get('random_flip',True)

        # Subclass hook: ask/restore model-specific options.
        self.onInitializeOptions(self.iter == 0, ask_override)

        nnlib.import_all(self.device_config)
        self.keras = nnlib.keras
        self.K = nnlib.keras.backend

        # Subclass hook: build the network(s).
        self.onInitialize()

        self.options['batch_size'] = self.batch_size

        if self.debug or self.batch_size == 0:
            self.batch_size = 1

        if self.is_training_mode:
            # Per-GPU folder names when a specific GPU index was forced.
            if self.device_args['force_gpu_idx'] == -1:
                self.preview_history_path = self.model_path / ( '%s_history' % (self.get_model_name()) )
                self.autobackups_path = self.model_path / ( '%s_autobackups' % (self.get_model_name()) )
            else:
                self.preview_history_path = self.model_path / ( '%d_%s_history' % (self.device_args['force_gpu_idx'], self.get_model_name()) )
                self.autobackups_path = self.model_path / ( '%d_%s_autobackups' % (self.device_args['force_gpu_idx'], self.get_model_name()) )

            if self.autobackup:
                self.autobackup_current_hour = time.localtime().tm_hour

                if not self.autobackups_path.exists():
                    self.autobackups_path.mkdir(exist_ok=True)

            if self.write_preview_history or io.is_colab():
                if not self.preview_history_path.exists():
                    self.preview_history_path.mkdir(exist_ok=True)
                else:
                    if self.iter == 0:
                        for filename in Path_utils.get_image_paths(self.preview_history_path):
                            Path(filename).unlink()

            if self.generator_list is None:
                raise ValueError( 'You didnt set_training_data_generators()')
            else:
                for i, generator in enumerate(self.generator_list):
                    if not isinstance(generator, SampleGeneratorBase):
                        raise ValueError('training data generator is not subclass of SampleGeneratorBase')

            if self.sample_for_preview is None or choose_preview_history:
                if choose_preview_history and io.is_support_windows():
                    # Interactive loop: [p] shows the next candidate sample,
                    # [enter] confirms the current one.
                    io.log_info ("Choose image for the preview history. [p] - next. [enter] - confirm.")
                    wnd_name = "[p] - next. [enter] - confirm."
                    io.named_window(wnd_name)
                    io.capture_keys(wnd_name)
                    choosed = False
                    while not choosed:
                        self.sample_for_preview = self.generate_next_sample()
                        preview = self.get_static_preview()
                        io.show_image( wnd_name, (preview*255).astype(np.uint8) )

                        while True:
                            key_events = io.get_key_events(wnd_name)
                            key, chr_key, ctrl_pressed, alt_pressed, shift_pressed = key_events[-1] if len(key_events) > 0 else (0,0,False,False,False)
                            if key == ord('\n') or key == ord('\r'):
                                choosed = True
                                break
                            elif key == ord('p'):
                                break

                            try:
                                io.process_messages(0.1)
                            except KeyboardInterrupt:
                                choosed = True

                    io.destroy_window(wnd_name)
                else:
                    self.sample_for_preview = self.generate_next_sample()

            # A restored sample may be incompatible with the current model;
            # fall back to a freshly generated one.
            try:
                self.get_static_preview()
            except:
                self.sample_for_preview = self.generate_next_sample()

            self.last_sample = self.sample_for_preview

        ###Generate text summary of model hyperparameters
        #Find the longest key name and value string. Used as column widths.
        width_name = max([len(k) for k in self.options.keys()] + [17]) + 1 # Single space buffer to left edge. Minimum of 17, the length of the longest static string used "Current iteration"
        width_value = max([len(str(x)) for x in self.options.values()] + [len(str(self.iter)), len(self.get_model_name())]) + 1 # Single space buffer to right edge
        if not self.device_config.cpu_only: #Check length of GPU names
            width_value = max([len(nnlib.device.getDeviceName(idx))+1 for idx in self.device_config.gpu_idxs] + [width_value])
        width_total = width_name + width_value + 2 #Plus 2 for ": "

        model_summary_text = []
        model_summary_text += [f'=={" Model Summary ":=^{width_total}}=='] # Model/status summary
        model_summary_text += [f'=={" "*width_total}==']
        model_summary_text += [f'=={"Model name": >{width_name}}: {self.get_model_name(): <{width_value}}=='] # Name
        model_summary_text += [f'=={" "*width_total}==']
        model_summary_text += [f'=={"Current iteration": >{width_name}}: {str(self.iter): <{width_value}}=='] # Iter
        model_summary_text += [f'=={" "*width_total}==']

        model_summary_text += [f'=={" Model Options ":-^{width_total}}=='] # Model options
        model_summary_text += [f'=={" "*width_total}==']
        for key in self.options.keys():
            model_summary_text += [f'=={key: >{width_name}}: {str(self.options[key]): <{width_value}}=='] # self.options key/value pairs
        model_summary_text += [f'=={" "*width_total}==']

        model_summary_text += [f'=={" Running On ":-^{width_total}}=='] # Training hardware info
        model_summary_text += [f'=={" "*width_total}==']
        if self.device_config.multi_gpu:
            model_summary_text += [f'=={"Using multi_gpu": >{width_name}}: {"True": <{width_value}}=='] # multi_gpu
            model_summary_text += [f'=={" "*width_total}==']
        if self.device_config.cpu_only:
            model_summary_text += [f'=={"Using device": >{width_name}}: {"CPU": <{width_value}}=='] # cpu_only
        else:
            for idx in self.device_config.gpu_idxs:
                model_summary_text += [f'=={"Device index": >{width_name}}: {idx: <{width_value}}=='] # GPU hardware device index
                model_summary_text += [f'=={"Name": >{width_name}}: {nnlib.device.getDeviceName(idx): <{width_value}}=='] # GPU name
                vram_str = f'{nnlib.device.getDeviceVRAMTotalGb(idx):.2f}GB' # GPU VRAM - Formated as #.## (or ##.##)
                model_summary_text += [f'=={"VRAM": >{width_name}}: {vram_str: <{width_value}}==']
        model_summary_text += [f'=={" "*width_total}==']
        model_summary_text += [f'=={"="*width_total}==']

        if not self.device_config.cpu_only and self.device_config.gpu_vram_gb[0] <= 2: # Low VRAM warning
            model_summary_text += ["/!\\"]
            model_summary_text += ["/!\\ WARNING:"]
            model_summary_text += ["/!\\ You are using a GPU with 2GB or less VRAM. This may significantly reduce the quality of your result!"]
            model_summary_text += ["/!\\ If training does not start, close all programs and try again."]
            model_summary_text += ["/!\\ Also you can disable Windows Aero Desktop to increase available VRAM."]
            model_summary_text += ["/!\\"]

        model_summary_text = "\n".join (model_summary_text)
        self.model_summary_text = model_summary_text
        io.log_info(model_summary_text)
Example #23
0
def main(input_dir,
         output_dir,
         debug,
         detector='mt',
         multi_gpu=True,
         manual_fix=False,
         image_size=256,
         face_type='full_face'):
    """Run the multi-pass face extractor over a directory of images.

    Pass 1 finds face rectangles, pass 2 fits landmarks, pass 3 writes the
    final aligned images to output_dir. 'manual' detector replaces passes
    1-2 with an interactive pass; manual_fix re-runs it on frames where no
    face was detected.
    """
    print("Running extractor.\r\n")

    input_path = Path(input_dir)
    output_path = Path(output_dir)

    if not input_path.exists():
        print('Input directory not found. Please ensure it exists.')
        return

    # Start from a clean output directory.
    if output_path.exists():
        for filename in Path_utils.get_image_paths(output_path):
            Path(filename).unlink()
    else:
        output_path.mkdir(parents=True, exist_ok=True)

    if debug:
        debug_output_path = Path(str(output_path) + '_debug')
        if debug_output_path.exists():
            for filename in Path_utils.get_image_paths(debug_output_path):
                Path(filename).unlink()
        else:
            debug_output_path.mkdir(parents=True, exist_ok=True)

    input_path_image_paths = Path_utils.get_image_paths(input_path)
    images_found = len(input_path_image_paths)
    # Initialized up front: the original left this unbound (NameError at the
    # summary print) when images were found but extraction yielded no faces.
    faces_detected = 0

    if images_found != 0:

        if detector == 'manual':
            print('Performing manual extract...')
            extracted_faces = [(filename, [])
                               for filename in input_path_image_paths]
            extracted_faces = manual_pass(extracted_faces, image_size,
                                          face_type)
        else:
            print('Performing 1st pass...')
            extracted_rects = extract_pass([(x, )
                                            for x in input_path_image_paths],
                                           'rects',
                                           image_size,
                                           face_type,
                                           debug,
                                           multi_gpu,
                                           detector=detector)

            print('Performing 2nd pass...')
            extracted_faces = extract_pass(extracted_rects, 'landmarks',
                                           image_size, face_type, debug,
                                           multi_gpu)

            if manual_fix:
                print('Performing manual fix...')
                # Only drop into the manual pass when some frame has no face.
                if all(len(data[1]) > 0 for data in extracted_faces):
                    print('All faces are detected, manual fix not needed.')
                else:
                    extracted_faces = manual_pass(extracted_faces, image_size,
                                                  face_type)

        if len(extracted_faces) > 0:
            print('Performing 3rd pass...')
            # NOTE(review): the original passed image_size twice here (the
            # second occurrence landed in the positional slot that the
            # sibling calls use for `detector`); the duplicate is removed
            # to match the other extract_pass calls.
            final_imgs_paths = extract_pass(extracted_faces,
                                            'final',
                                            image_size,
                                            face_type,
                                            debug,
                                            multi_gpu,
                                            output_path=output_path)
            faces_detected = len(final_imgs_paths)

    print('-------------------------')
    print('Images found:        %d' % (images_found))
    print('Faces detected:      %d' % (faces_detected))
    print('-------------------------')
Example #24
0
def main(input_dir, output_dir, aligned_dir, model_dir, model_name,
         **in_options):
    """Run the legacy converter: spawn the model in a child process, collect
    alignments from the aligned set, then convert every input frame.

    input_dir / output_dir / aligned_dir / model_dir : directory paths.
    model_name : model class name to import in the child process.
    in_options : extra options; must contain 'debug'.
    """
    print("Running converter.\r\n")

    debug = in_options['debug']

    try:
        input_path = Path(input_dir)
        output_path = Path(output_dir)
        aligned_path = Path(aligned_dir)
        model_path = Path(model_dir)

        if not input_path.exists():
            print('Input directory not found. Please ensure it exists.')
            return

        # Start from a clean output directory.
        if output_path.exists():
            for filename in Path_utils.get_image_paths(output_path):
                Path(filename).unlink()
        else:
            output_path.mkdir(parents=True, exist_ok=True)

        if not aligned_path.exists():
            print('Aligned directory not found. Please ensure it exists.')
            return

        if not model_path.exists():
            print('Model directory not found. Please ensure it exists.')
            return

        # The model runs in a separate process; queues carry requests and
        # responses, the lock serializes predictor access.
        model_sq = multiprocessing.Queue()
        model_cq = multiprocessing.Queue()
        model_lock = multiprocessing.Lock()
        model_p = multiprocessing.Process(target=model_process,
                                          args=(model_name, model_dir,
                                                in_options, model_sq,
                                                model_cq))
        model_p.start()

        # Wait for the child to announce it is initialized. Blocking get()
        # replaces the original busy-wait polling loop (same behavior,
        # no CPU spin).
        while True:
            obj = model_cq.get()
            if obj['op'] == 'init':
                converter = obj['converter']
                break

        alignments = {}
        if converter.get_mode() == ConverterBase.MODE_FACE:
            aligned_path_image_paths = Path_utils.get_image_paths(aligned_path)
            for filename in tqdm(aligned_path_image_paths,
                                 desc="Collecting alignments"):
                a_png = AlignedPNG.load(str(filename))
                if a_png is None:
                    print("%s - no embedded data found." % (filename))
                    continue
                d = a_png.getFaceswapDictData()
                if d is None or d['source_filename'] is None or d[
                        'source_rect'] is None or d['source_landmarks'] is None:
                    print("%s - no embedded data found." % (filename))
                    continue

                # Group landmark sets by source-frame stem; a frame may hold
                # several faces, hence a list per stem.
                source_filename_stem = Path(d['source_filename']).stem
                if source_filename_stem not in alignments:
                    alignments[source_filename_stem] = []

                alignments[source_filename_stem].append(
                    np.array(d['source_landmarks']))

        files_processed, faces_processed = ConvertSubprocessor(
            converter=converter.copy_and_set_predictor(
                model_process_predictor(model_sq, model_cq, model_lock)),
            input_path_image_paths=Path_utils.get_image_paths(input_path),
            output_path=output_path,
            alignments=alignments,
            **in_options).process()

        # Tell the child to shut down, then wait for it.
        model_sq.put({'op': 'close'})
        model_p.join()
        '''            
        if model_name == 'AVATAR':
            output_path_image_paths = Path_utils.get_image_paths(output_path)
            
            last_ok_frame = -1
            for filename in output_path_image_paths:
                filename_path = Path(filename)
                stem = Path(filename).stem
                try:
                    frame = int(stem)
                except:
                    raise Exception ('Aligned avatars must be created from indexed sequence files.')
                    
                if frame-last_ok_frame > 1:
                    start = last_ok_frame + 1
                    end = frame - 1
                    
                    print ("Filling gaps: [%d...%d]" % (start, end) )
                    for i in range (start, end+1):                    
                        shutil.copy ( str(filename), str( output_path / ('%.5d%s' % (i, filename_path.suffix ))  ) )
                    
                last_ok_frame = frame
        '''

    except Exception as e:
        print('Error: %s' % (str(e)))
        traceback.print_exc()
Example #25
0
    def __init__(self,
                 model_path,
                 training_data_src_path=None,
                 training_data_dst_path=None,
                 debug=False,
                 force_gpu_idx=-1,
                 **in_options):

        if force_gpu_idx == -1:
            idxs_names_list = nnlib.device.getAllDevicesIdxsWithNamesList()
            if len(idxs_names_list) > 1:
                print("You have multi GPUs in a system: ")
                for idx, name in idxs_names_list:
                    print("[%d] : %s" % (idx, name))

                force_gpu_idx = input_int(
                    "Which GPU idx to choose? ( skip: best GPU ) : ", -1,
                    [x[0] for x in idxs_names_list])
        self.force_gpu_idx = force_gpu_idx

        print("Loading model...")
        self.model_path = model_path
        self.model_data_path = Path(
            self.get_strpath_storage_for_file('data.dat'))

        self.training_data_src_path = training_data_src_path
        self.training_data_dst_path = training_data_dst_path

        self.src_images_paths = None
        self.dst_images_paths = None
        self.src_yaw_images_paths = None
        self.dst_yaw_images_paths = None
        self.src_data_generator = None
        self.dst_data_generator = None
        self.debug = debug
        self.is_training_mode = (training_data_src_path is not None
                                 and training_data_dst_path is not None)

        self.supress_std_once = os.environ.get('TF_SUPPRESS_STD', '0') == '1'

        self.epoch = 0
        self.options = {}
        self.loss_history = []
        self.sample_for_preview = None
        if self.model_data_path.exists():
            model_data = pickle.loads(self.model_data_path.read_bytes())
            self.epoch = model_data['epoch']
            if self.epoch != 0:
                self.options = model_data['options']
                self.loss_history = model_data[
                    'loss_history'] if 'loss_history' in model_data.keys(
                    ) else []
                self.sample_for_preview = model_data[
                    'sample_for_preview'] if 'sample_for_preview' in model_data.keys(
                    ) else None

        ask_override = self.is_training_mode and self.epoch != 0 and input_in_time(
            "Press enter in 2 seconds to override some model settings.", 2)

        if self.epoch == 0:
            print(
                "\nModel first run. Enter model options as default for each run."
            )

        if self.epoch == 0 or ask_override:
            default_write_preview_history = False if self.epoch == 0 else self.options.get(
                'write_preview_history', False)
            self.options['write_preview_history'] = input_bool(
                "Write preview history? (y/n ?:help skip:n/default) : ",
                default_write_preview_history,
                help_message=
                "Preview history will be writed to <ModelName>_history folder."
            )
        else:
            self.options['write_preview_history'] = self.options.get(
                'write_preview_history', False)

        if self.epoch == 0 or ask_override:
            self.options['target_epoch'] = max(
                0, input_int("Target epoch (skip:unlimited/default) : ", 0))
        else:
            self.options['target_epoch'] = self.options.get('target_epoch', 0)

        if self.epoch == 0 or ask_override:
            default_batch_size = 0 if self.epoch == 0 else self.options.get(
                'batch_size', 0)
            self.options['batch_size'] = max(
                0,
                input_int(
                    "Batch_size (?:help skip:0/default) : ",
                    default_batch_size,
                    help_message=
                    "Larger batch size is always better for NN's generalization, but it can cause Out of Memory error. Tune this value for your videocard manually."
                ))
        else:
            self.options['batch_size'] = self.options.get('batch_size', 0)

        if self.epoch == 0:
            self.options['sort_by_yaw'] = input_bool(
                "Feed faces to network sorted by yaw? (y/n ?:help skip:n) : ",
                False,
                help_message=
                "NN will not learn src face directions that don't match dst face directions."
            )
        else:
            self.options['sort_by_yaw'] = self.options.get(
                'sort_by_yaw', False)

        if self.epoch == 0:
            self.options['random_flip'] = input_bool(
                "Flip faces randomly? (y/n ?:help skip:y) : ",
                True,
                help_message=
                "Predicted face will look more naturally without this option, but src faceset should cover all face directions as dst faceset."
            )
        else:
            self.options['random_flip'] = self.options.get('random_flip', True)

        if self.epoch == 0:
            self.options['src_scale_mod'] = np.clip(
                input_int(
                    "Src face scale modifier % ( -30...30, ?:help skip:0) : ",
                    0,
                    help_message=
                    "If src face shape is wider than dst, try to decrease this value to get a better result."
                ), -30, 30)
        else:
            self.options['src_scale_mod'] = self.options.get(
                'src_scale_mod', 0)

        self.drive = GoogleDriveSync(
            model_dir=self.model_path,
            key=input_str("Google Drive OAuth key (default: None) : ", None))

        self.write_preview_history = self.options['write_preview_history']
        if not self.options['write_preview_history']:
            self.options.pop('write_preview_history')

        self.target_epoch = self.options['target_epoch']
        if self.options['target_epoch'] == 0:
            self.options.pop('target_epoch')

        self.batch_size = self.options['batch_size']

        self.sort_by_yaw = self.options['sort_by_yaw']
        if not self.sort_by_yaw:
            self.options.pop('sort_by_yaw')

        self.random_flip = self.options['random_flip']
        if self.random_flip:
            self.options.pop('random_flip')

        self.src_scale_mod = self.options['src_scale_mod']
        if self.src_scale_mod == 0:
            self.options.pop('src_scale_mod')

        self.onInitializeOptions(self.epoch == 0, ask_override)

        nnlib.import_all(
            nnlib.DeviceConfig(allow_growth=False,
                               force_gpu_idx=self.force_gpu_idx,
                               **in_options))
        self.device_config = nnlib.active_DeviceConfig

        self.onInitialize(**in_options)

        self.options['batch_size'] = self.batch_size

        if self.debug or self.batch_size == 0:
            self.batch_size = 1

        if self.is_training_mode:
            if self.write_preview_history:
                if self.force_gpu_idx == -1:
                    self.preview_history_path = self.model_path / (
                        '%s_history' % (self.get_model_name()))
                else:
                    self.preview_history_path = self.model_path / (
                        '%d_%s_history' %
                        (self.force_gpu_idx, self.get_model_name()))

                if not self.preview_history_path.exists():
                    self.preview_history_path.mkdir(exist_ok=True)
                else:
                    if self.epoch == 0:
                        for filename in Path_utils.get_image_paths(
                                self.preview_history_path):
                            Path(filename).unlink()

            if self.generator_list is None:
                raise Exception('You didnt set_training_data_generators()')
            else:
                for i, generator in enumerate(self.generator_list):
                    if not isinstance(generator, SampleGeneratorBase):
                        raise Exception(
                            'training data generator is not subclass of SampleGeneratorBase'
                        )

            if (self.sample_for_preview is None) or (self.epoch == 0):
                self.sample_for_preview = self.generate_next_sample()

        print("===== Model summary =====")
        print("== Model name: " + self.get_model_name())
        print("==")
        print("== Current epoch: " + str(self.epoch))
        print("==")
        print("== Model options:")
        for key in self.options.keys():
            print("== |== %s : %s" % (key, self.options[key]))

        if self.device_config.multi_gpu:
            print("== |== multi_gpu : True ")

        print("== Running on:")
        if self.device_config.cpu_only:
            print("== |== [CPU]")
        else:
            for idx in self.device_config.gpu_idxs:
                print("== |== [%d : %s]" %
                      (idx, nnlib.device.getDeviceName(idx)))

        if not self.device_config.cpu_only and self.device_config.gpu_vram_gb[
                0] == 2:
            print("==")
            print(
                "== WARNING: You are using 2GB GPU. Result quality may be significantly decreased."
            )
            print(
                "== If training does not start, close all programs and try again."
            )
            print(
                "== Also you can disable Windows Aero Desktop to get extra free VRAM."
            )
            print("==")

        print("=========================")
Example #26
0
                'no_preview'             : arguments.no_preview,
                'debug'                  : arguments.debug,
                'execute_programs'       : [ [int(x[0]), x[1] ] for x in arguments.execute_program ]
                }
        device_args = {'cpu_only'  : arguments.cpu_only,
                       'force_gpu_idx' : arguments.force_gpu_idx,
                       }
        from mainscripts import Trainer
        Trainer.main(args, device_args)

    # "train" subcommand: wires CLI flags through to mainscripts.Trainer.
    p = subparsers.add_parser("train", help="Trainer")
    p.add_argument('--training-data-src-dir',
                   required=True,
                   action=fixPathAction,
                   dest="training_data_src_dir",
                   help="Dir of extracted SRC faceset.")
    p.add_argument('--training-data-dst-dir',
                   required=True,
                   action=fixPathAction,
                   dest="training_data_dst_dir",
                   help="Dir of extracted DST faceset.")
    p.add_argument('--pretraining-data-dir',
                   action=fixPathAction,
                   dest="pretraining_data_dir",
                   default=None,
                   help="Optional dir of extracted faceset that will be used in pretraining mode.")
    p.add_argument('--model-dir',
                   required=True,
                   action=fixPathAction,
                   dest="model_dir",
                   help="Model dir.")
    # --model choices are discovered on disk: any 'Model_*' directory
    # under models/ next to this file.
    p.add_argument('--model',
                   required=True,
                   dest="model_name",
                   choices=Path_utils.get_all_dir_names_startswith(
                       Path(__file__).parent / 'models', 'Model_'),
                   help="Type of model")
    p.add_argument('--no-preview',
                   action="store_true",
                   dest="no_preview",
                   default=False,
                   help="Disable preview window.")
    p.add_argument('--debug',
                   action="store_true",
                   dest="debug",
                   default=False,
                   help="Debug samples.")
    p.add_argument('--cpu-only',
                   action="store_true",
                   dest="cpu_only",
                   default=False,
                   help="Train on CPU.")
    p.add_argument('--force-gpu-idx',
                   type=int,
                   dest="force_gpu_idx",
                   default=-1,
                   help="Force to choose this GPU idx.")
    p.add_argument('--execute-program',
                   dest="execute_program",
                   default=[],
                   action='append',
                   nargs='+')
    p.set_defaults(func=process_train)

    def process_convert(arguments):
        os_utils.set_process_lowest_prio()
        args = {'input_dir'   : arguments.input_dir,
                'output_dir'  : arguments.output_dir,
                'aligned_dir' : arguments.aligned_dir,
                'avaperator_aligned_dir' : arguments.avaperator_aligned_dir,
                'model_dir'   : arguments.model_dir,
                'model_name'  : arguments.model_name,
Example #27
0
     '--pretraining-data-dir',
     action=fixPathAction,
     dest="pretraining_data_dir",
     default=None,
     help=
     "Optional dir of extracted faceset that will be used in pretraining mode."
 )
 # CLI options for the training entry point (multi-line formatted variant).
 p.add_argument('--model-dir',
                required=True,
                action=fixPathAction,
                dest="model_dir",
                help="Model dir.")
 # --model choices are discovered on disk: any 'Model_*' directory under
 # models/ next to this file is offered as a value.
 p.add_argument('--model',
                required=True,
                dest="model_name",
                choices=Path_utils.get_all_dir_names_startswith(
                    Path(__file__).parent / 'models', 'Model_'),
                help="Type of model")
 p.add_argument('--no-preview',
                action="store_true",
                dest="no_preview",
                default=False,
                help="Disable preview window.")
 p.add_argument('--debug',
                action="store_true",
                dest="debug",
                default=False,
                help="Debug samples.")
 p.add_argument('--cpu-only',
                action="store_true",
                dest="cpu_only",
                default=False,
Example #28
0
def video_from_sequence(
    input_dir,
    output_file,
    reference_file=None,
    ext=None,
    fps=None,
    bitrate=None,
    lossless=None,
):
    """Encode the image sequence in ``input_dir`` into a video file.

    Frames are streamed to ffmpeg over stdin as an image2pipe sequence.

    Args:
        input_dir: directory containing the source image frames.
        output_file: path of the video to write; missing parent
            directories are created.
        reference_file: optional video whose frame rate and (first) audio
            track are reused; a ``.*`` suffix selects the first file with
            that stem in the parent directory.
        ext: input image extension, defaults to 'png'.
            NOTE(review): beyond defaulting, ``ext`` is never used --
            frames come from Path_utils.get_image_paths regardless;
            confirm whether filtering by extension was intended.
        fps: output frame rate; overridden by the reference file's rate
            when one is given, else defaults to 25.
        bitrate: video bitrate in Mbit/s for lossy output (default 16).
        lossless: True -> lossless 'png' video codec; False/None ->
            libx264 yuv420p (default).
    """
    input_path = Path(input_dir)
    output_file_path = Path(output_file)
    reference_file_path = Path(
        reference_file) if reference_file is not None else None

    if not input_path.exists():
        logger.error("input_dir not found.")
        return

    # Ensure the output directory exists.  BUGFIX: the previous version
    # returned immediately after creating the directory, silently skipping
    # the whole encode on the first run.
    if not output_file_path.parent.exists():
        output_file_path.parent.mkdir(parents=True, exist_ok=True)

    if ext is None:
        ext = "png"

    if lossless is None:
        lossless = False

    video_id = None
    audio_id = None
    ref_in_a = None
    if reference_file_path is not None:
        if reference_file_path.suffix == ".*":
            # '.*' wildcard: pick the first file whose stem matches.
            reference_file_path = Path_utils.get_first_file_by_stem(
                reference_file_path.parent, reference_file_path.stem)
        else:
            if not reference_file_path.exists():
                reference_file_path = None

        if reference_file_path is None:
            logger.error("reference_file not found.")
            return

        # Probe the reference file for its stream layout.
        probe = ffmpeg.probe(str(reference_file_path))

        # Take the first video stream's frame rate, and remember the first
        # audio stream index so its track can be muxed into the output.
        for stream in probe["streams"]:
            if video_id is None and stream["codec_type"] == "video":
                video_id = stream["index"]
                fps = stream["r_frame_rate"]

            if audio_id is None and stream["codec_type"] == "audio":
                audio_id = stream["index"]

        if audio_id is not None:
            # Reference has an audio track.
            ref_in_a = ffmpeg.input(str(reference_file_path))[str(audio_id)]

    if fps is None:
        # fps neither specified nor taken from a reference file.
        fps = 25

    if not lossless and bitrate is None:
        bitrate = 16

    input_image_paths = Path_utils.get_image_paths(input_path)

    i_in = ffmpeg.input("pipe:", format="image2pipe", r=fps)

    output_args = [i_in]

    if ref_in_a is not None:
        output_args += [ref_in_a]

    output_args += [str(output_file_path)]

    output_kwargs = {}

    if lossless:
        output_kwargs.update({"c:v": "png"})
    else:
        output_kwargs.update({
            "c:v": "libx264",
            "b:v": "%dM" % (bitrate),
            "pix_fmt": "yuv420p"
        })

    output_kwargs.update({"c:a": "aac", "b:a": "192k", "ar": "48000"})

    job = ffmpeg.output(*output_args, **output_kwargs).overwrite_output()

    try:
        job_run = job.run_async(pipe_stdin=True)
        try:
            for image_path in input_image_paths:
                with open(image_path, "rb") as f:
                    job_run.stdin.write(f.read())
        finally:
            # Always close the pipe so ffmpeg can finalize (or abort)
            # cleanly even when a frame read/write raised.
            job_run.stdin.close()
        job_run.wait()
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed.
        logger.error("ffmpeg fail, job commandline:" + str(job.compile()))
Example #29
0
def sort_by_hist(input_path):
    """Sort the images under *input_path* by histogram similarity.

    Delegates the work to HistSsimSubprocessor and returns its result.
    """
    print("Sorting by histogram similarity...")
    return HistSsimSubprocessor(
        Path_utils.get_image_paths(input_path)).process()
Example #30
0
    def save(self):
        """Persist the model to disk.

        Writes, in order: the current options (batch size, paddle), the
        human-readable summary text, the subclass weights via onSave(),
        and the pickled model_data blob (iter, options, loss history,
        preview sample).  When autobackup is enabled, rotates up to 15
        backup slots (01 = newest, 15 = oldest/purged) at most once per
        wall-clock hour and snapshots the model files plus rendered
        previews into slot 01.
        """
        self.options['batch_size'] = self.batch_size
        self.options['paddle'] = self.ping_pong_options.paddle
        summary_path = self.get_strpath_storage_for_file('summary.txt')
        Path(summary_path).write_text(self.model_summary_text)
        # Let the concrete model subclass persist its own state first.
        self.onSave()

        model_data = {
            'iter': self.iter,
            'options': self.options,
            'loss_history': self.loss_history,
            'sample_for_preview': self.sample_for_preview
        }
        self.model_data_path.write_bytes(pickle.dumps(model_data))

        # Files copied into a backup slot: every model storage file plus
        # the summary text and the pickled model data.
        bckp_filename_list = [
            self.get_strpath_storage_for_file(filename)
            for _, filename in self.get_model_filename_list()
        ]
        bckp_filename_list += [str(summary_path), str(self.model_data_path)]

        if self.autobackup:
            # At most one backup per wall-clock hour.
            current_hour = time.localtime().tm_hour
            if self.autobackup_current_hour != current_hour:
                self.autobackup_current_hour = current_hour

                # Shift slots upward: 15 is purged, 14->15, ..., 01->02,
                # leaving slot 01 free for the fresh backup.
                for i in range(15, 0, -1):
                    idx_str = '%.2d' % i
                    next_idx_str = '%.2d' % (i + 1)

                    idx_backup_path = self.autobackups_path / idx_str
                    next_idx_packup_path = self.autobackups_path / next_idx_str

                    if idx_backup_path.exists():
                        if i == 15:
                            Path_utils.delete_all_files(idx_backup_path)
                        else:
                            next_idx_packup_path.mkdir(exist_ok=True)
                            Path_utils.move_all_files(idx_backup_path,
                                                      next_idx_packup_path)

                    if i == 1:
                        # Freshly vacated slot 01 receives the new backup.
                        idx_backup_path.mkdir(exist_ok=True)
                        for filename in bckp_filename_list:
                            shutil.copy(
                                str(filename),
                                str(idx_backup_path / Path(filename).name))

                        # Render each preview with a loss-history strip
                        # stacked on top and store it alongside the backup.
                        # NOTE(review): the loops below rebind `i`; harmless
                        # only because i == 1 is the outer loop's final
                        # iteration -- renaming would be clearer.
                        previews = self.get_previews()
                        plist = []
                        for i in range(len(previews)):
                            name, bgr = previews[i]
                            plist += [(bgr, idx_backup_path /
                                       (('preview_%s.jpg') % (name)))]

                        for preview, filepath in plist:
                            preview_lh = ModelBase.get_loss_history_preview(
                                self.loss_history, self.iter, self.batch_size,
                                preview.shape[1], preview.shape[2])
                            img = (
                                np.concatenate([preview_lh, preview], axis=0) *
                                255).astype(np.uint8)
                            cv2_imwrite(filepath, img)