Example #1
def match_by_pitch(data_src_path, data_dst_path):
    r = 0.05
    mn = 1
    mx = 3
    import cv      # project-local drawing helpers (cv_new / cv_scatter / cv_save), not OpenCV
    import shutil
    # os, np (numpy), io and get_pitch_yaw_roll are assumed module-level imports
    # Prepare the various paths
    src_aligned_store = os.path.join(data_src_path, "aligned_store")
    if not os.path.exists(src_aligned_store):
        raise Exception("No Src Aligned Store")
    src_aligned = os.path.join(data_dst_path, "src")
    if os.path.exists(src_aligned):
        shutil.rmtree(src_aligned)
    os.mkdir(src_aligned)
    dst_aligned = os.path.join(data_dst_path, "aligned")
    dst_aligned_trash = os.path.join(data_dst_path, "aligned_trash")
    if not os.path.exists(dst_aligned_trash):
        os.mkdir(dst_aligned_trash)
    # Read the pitch/yaw angle info
    src_img_list = get_pitch_yaw_roll(src_aligned_store)
    dst_img_list = get_pitch_yaw_roll(dst_aligned)
    src_pitch = [i[1] for i in src_img_list]
    src_yaw = [i[2] for i in src_img_list]
    dst_pitch = [i[1] for i in dst_img_list]
    dst_yaw = [i[2] for i in dst_img_list]
    src_ps = np.array(list(zip(src_pitch, src_yaw)), "float")
    dst_ps = np.array(list(zip(dst_pitch, dst_yaw)), "float")

    # Find the nearest n src points for each dst point
    src_match = set()
    dst_match = set()
    for p, i in io.progress_bar_generator(zip(dst_ps, range(len(dst_ps))),
                                          "Calculating"):
        ds = np.linalg.norm(src_ps - p, axis=1, keepdims=True)
        idxs = np.argsort(ds, axis=0)
        min_idx = idxs[mn - 1][0]
        # Edge case: not even the closest point is within the radius
        if ds[min_idx] > r:
            continue
        # At least one src point is within the radius, so this dst point is kept
        dst_match.add(i)
        # Add every src point within the radius (up to mx of them) to src_match
        for idx in idxs[:mx]:
            idx = idx[0]
            if ds[idx] > r:
                break
            src_match.add(idx)
    io.log_info("%s, %s, %s, %s" %
                ("Src Match", len(src_match), "Src All", len(src_img_list)))
    io.log_info("%s, %s, %s, %s" %
                ("Dst Match", len(dst_match), "Dst All", len(dst_img_list)))

    # Plot the result
    width = 800
    xycr = []
    for idx in range(len(src_img_list)):
        t = src_img_list[idx]
        if idx in src_match:
            xycr.append([t[1], t[2], (128, 128, 128),
                         int(r * width / 2)])  # matched src, drawn with the match radius
            shutil.copy(t[0], src_aligned)
        else:
            xycr.append([t[1], t[2], (128, 128, 128), 2])  # grey: not matched
    for idx in range(len(dst_img_list)):
        t = dst_img_list[idx]
        if idx in dst_match:
            xycr.append([t[1], t[2], (0, 255, 0), 2])  # green: kept
        else:
            xycr.append([t[1], t[2], (0, 0, 255), 2])  # red: deleted (moved to trash)
            shutil.move(t[0], dst_aligned_trash)
    img = cv.cv_new((width, width))
    xs = [i[0] for i in xycr]
    ys = [i[1] for i in xycr]
    cs = [i[2] for i in xycr]
    rs = [i[3] for i in xycr]
    cv.cv_scatter(img, xs, ys, [-1, 1], [-1, 1], cs, rs)
    cv.cv_save(img, os.path.join(dst_aligned, "_match_by_pitch.bmp"))

    # Add the base images
    base_dir = os.path.join(data_src_path, "aligned_base")
    if os.path.exists(base_dir):
        for img in os.listdir(base_dir):
            if img.endswith(".jpg") or img.endswith(".png"):
                img_path = os.path.join(base_dir, img)
                shutil.copy(img_path, src_aligned)
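
The heart of the function above is a radius-limited nearest-neighbour match in (pitch, yaw) space. A minimal self-contained sketch of just that step, with synthetic points and the same r/mn/mx semantics (values here are illustrative, not taken from the original):

import numpy as np

r, mn, mx = 0.05, 1, 3                       # radius, min and max matches per dst point
src_ps = np.random.rand(100, 2) * 2 - 1      # synthetic (pitch, yaw) pairs in [-1, 1]
dst_ps = np.random.rand(20, 2) * 2 - 1

src_match, dst_match = set(), set()
for i, p in enumerate(dst_ps):
    ds = np.linalg.norm(src_ps - p, axis=1)  # distance from p to every src point
    idxs = np.argsort(ds)
    if ds[idxs[mn - 1]] > r:                 # even the mn-th nearest point is outside the radius
        continue
    dst_match.add(i)                         # this dst point survives
    for idx in idxs[:mx]:                    # collect up to mx src points inside the radius
        if ds[idx] > r:
            break
        src_match.add(int(idx))
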
Example #2
def sort_best(input_path, faster=False):
    target_count = io.input_int ("Target number of faces?", 2000)

    io.log_info ("Performing sort by best faces.")
    if faster:
        io.log_info("Using faster algorithm. Faces will be sorted by source-rect-area instead of blur.")

    img_list, trash_img_list = FinalLoaderSubprocessor( pathex.get_image_paths(input_path), faster ).run()
    final_img_list = []

    grads = 128
    imgs_per_grad = round (target_count / grads)

    # Instead of ±math.pi/2, use [-1.2, +1.2]: the actual maximum yaw for 2DFAN landmarks is about ±1.2
    grads_space = np.linspace (-1.2, 1.2,grads)

    yaws_sample_list = [None]*grads
    for g in io.progress_bar_generator ( range(grads), "Sort by yaw"):
        yaw = grads_space[g]
        next_yaw = grads_space[g+1] if g < grads-1 else yaw

        yaw_samples = []
        for img in img_list:
            s_yaw = -img[3]
            if (g == 0          and s_yaw < next_yaw) or \
               (g < grads-1     and s_yaw >= yaw and s_yaw < next_yaw) or \
               (g == grads-1    and s_yaw >= yaw):
                yaw_samples += [ img ]
        if len(yaw_samples) > 0:
            yaws_sample_list[g] = yaw_samples

    total_lack = 0
    for g in io.progress_bar_generator ( range(grads), ""):
        img_list = yaws_sample_list[g]
        img_list_len = len(img_list) if img_list is not None else 0

        lack = imgs_per_grad - img_list_len
        total_lack += max(lack, 0)

    imgs_per_grad += total_lack // grads


    sharpened_imgs_per_grad = imgs_per_grad * 10
    for g in io.progress_bar_generator ( range (grads), "Sort by blur"):
        img_list = yaws_sample_list[g]
        if img_list is None:
            continue

        img_list = sorted(img_list, key=operator.itemgetter(1), reverse=True)

        if len(img_list) > sharpened_imgs_per_grad:
            trash_img_list += img_list[sharpened_imgs_per_grad:]
            img_list = img_list[0:sharpened_imgs_per_grad]

        yaws_sample_list[g] = img_list


    yaw_pitch_sample_list = [None]*grads
    pitch_grads = imgs_per_grad

    for g in io.progress_bar_generator ( range (grads), "Sort by pitch"):
        img_list = yaws_sample_list[g]
        if img_list is None:
            continue

        pitch_sample_list = [None]*pitch_grads

        grads_space = np.linspace (-math.pi / 2,math.pi / 2, pitch_grads )

        for pg in range (pitch_grads):

            pitch = grads_space[pg]
            next_pitch = grads_space[pg+1] if pg < pitch_grads-1 else pitch

            pitch_samples = []
            for img in img_list:
                s_pitch = img[4]
                if (pg == 0                and s_pitch < next_pitch) or \
                   (pg < pitch_grads-1     and s_pitch >= pitch and s_pitch < next_pitch) or \
                   (pg == pitch_grads-1    and s_pitch >= pitch):
                    pitch_samples += [ img ]

            if len(pitch_samples) > 0:
                pitch_sample_list[pg] = pitch_samples
        yaw_pitch_sample_list[g] = pitch_sample_list

    yaw_pitch_sample_list = FinalHistDissimSubprocessor(yaw_pitch_sample_list).run()

    for g in io.progress_bar_generator (range (grads), "Fetching the best"):
        pitch_sample_list = yaw_pitch_sample_list[g]
        if pitch_sample_list is None:
            continue

        n = imgs_per_grad

        while n > 0:
            n_prev = n
            for pg in range(pitch_grads):
                img_list = pitch_sample_list[pg]
                if img_list is None:
                    continue
                final_img_list += [ img_list.pop(0) ]
                if len(img_list) == 0:
                    pitch_sample_list[pg] = None
                n -= 1
                if n == 0:
                    break
            if n_prev == n:
                break

        for pg in range(pitch_grads):
            img_list = pitch_sample_list[pg]
            if img_list is None:
                continue
            trash_img_list += img_list

    return final_img_list, trash_img_list
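
The yaw-binning loop above assigns each sample to one of grads intervals over [-1.2, 1.2], with the first and last bins absorbing the tails; np.digitize expresses the same assignment more compactly (a sketch with synthetic yaw values in place of the original img_list tuples):

import numpy as np

grads = 128
grads_space = np.linspace(-1.2, 1.2, grads)
yaws = np.random.uniform(-1.5, 1.5, size=1000)    # stand-ins for -img[3]

# bin g covers [grads_space[g], grads_space[g+1]); clip pushes the tails into bins 0 and grads-1
bin_idx = np.clip(np.digitize(yaws, grads_space) - 1, 0, grads - 1)

yaws_sample_list = [None] * grads
for g in range(grads):
    samples = np.flatnonzero(bin_idx == g).tolist()
    if len(samples) > 0:
        yaws_sample_list[g] = samples
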
Example #3
    def __init__(self, is_training=False,
                       saved_models_path=None,
                       training_data_src_path=None,
                       training_data_dst_path=None,
                       pretraining_data_path=None,
                       pretrained_model_path=None,
                       no_preview=False,
                       force_model_name=None,
                       force_gpu_idxs=None,
                       cpu_only=False,
                       debug=False,
                       force_model_class_name=None,
                       silent_start=False,
                       **kwargs):
        self.is_training = is_training
        self.saved_models_path = saved_models_path
        self.training_data_src_path = training_data_src_path
        self.training_data_dst_path = training_data_dst_path
        self.pretraining_data_path = pretraining_data_path
        self.pretrained_model_path = pretrained_model_path
        self.no_preview = no_preview
        self.debug = debug

        self.model_class_name = model_class_name = Path(inspect.getmodule(self).__file__).parent.name.rsplit("_", 1)[1]

        if force_model_class_name is None:
            if force_model_name is not None:
                self.model_name = force_model_name
            else:
                while True:
                    # gather all model dat files
                    saved_models_names = []
                    for filepath in pathex.get_file_paths(saved_models_path):
                        filepath_name = filepath.name
                        if filepath_name.endswith(f'{model_class_name}_data.dat'):
                            saved_models_names += [ (filepath_name.split('_')[0], os.path.getmtime(filepath)) ]

                    # sort by modified datetime
                    saved_models_names = sorted(saved_models_names, key=operator.itemgetter(1), reverse=True )
                    saved_models_names = [ x[0] for x in saved_models_names ]


                    if len(saved_models_names) != 0:
                        if silent_start:
                            self.model_name = saved_models_names[0]
                            io.log_info(f'Silent start: chose model "{self.model_name}"')
                        else:
                            io.log_info ("Choose one of saved models, or enter a name to create a new model.")
                            io.log_info ("[r] : rename")
                            io.log_info ("[d] : delete")
                            io.log_info ("")
                            for i, model_name in enumerate(saved_models_names):
                                s = f"[{i}] : {model_name} "
                                if i == 0:
                                    s += "- latest"
                                io.log_info (s)

                            inp = io.input_str(f"", "0", show_default_value=False )
                            model_idx = -1
                            try:
                                model_idx = np.clip ( int(inp), 0, len(saved_models_names)-1 )
                            except:
                                pass

                            if model_idx == -1:
                                if len(inp) == 1:
                                    is_rename = inp[0] == 'r'
                                    is_delete = inp[0] == 'd'

                                    if is_rename or is_delete:
                                        if len(saved_models_names) != 0:

                                            if is_rename:
                                                name = io.input_str(f"Enter the name of the model you want to rename")
                                            elif is_delete:
                                                name = io.input_str(f"Enter the name of the model you want to delete")

                                            if name in saved_models_names:

                                                if is_rename:
                                                    new_model_name = io.input_str(f"Enter new name of the model")

                                                for filepath in pathex.get_paths(saved_models_path):
                                                    filepath_name = filepath.name

                                                    model_filename, remain_filename = filepath_name.split('_', 1)
                                                    if model_filename == name:

                                                        if is_rename:
                                                            new_filepath = filepath.parent / ( new_model_name + '_' + remain_filename )
                                                            filepath.rename (new_filepath)
                                                        elif is_delete:
                                                            filepath.unlink()
                                        continue

                                self.model_name = inp
                            else:
                                self.model_name = saved_models_names[model_idx]

                    else:
                        self.model_name = io.input_str(f"No saved models found. Enter a name of a new model", "new")
                        self.model_name = self.model_name.replace('_', ' ')
                    break


            self.model_name = self.model_name + '_' + self.model_class_name
        else:
            self.model_name = force_model_class_name

        self.iter = 0
        self.options = {}
        self.options_show_override = {}
        self.loss_history = []
        self.sample_for_preview = None
        self.choosed_gpu_indexes = None

        model_data = {}
        self.model_data_path = Path( self.get_strpath_storage_for_file('data.dat') )
        if self.model_data_path.exists():
            io.log_info (f"Loading {self.model_name} model...")
            model_data = pickle.loads ( self.model_data_path.read_bytes() )
            self.iter = model_data.get('iter',0)
            if self.iter != 0:
                self.options = model_data['options']
                self.loss_history = model_data.get('loss_history', [])
                self.sample_for_preview = model_data.get('sample_for_preview', None)
                self.choosed_gpu_indexes = model_data.get('choosed_gpu_indexes', None)

        if self.is_first_run():
            io.log_info ("\nModel first run.")

        if silent_start:
            self.device_config = nn.DeviceConfig.BestGPU()
            io.log_info (f"Silent start: choosed device {'CPU' if self.device_config.cpu_only else self.device_config.devices[0].name}")
        else:
            self.device_config = nn.DeviceConfig.GPUIndexes( force_gpu_idxs or nn.ask_choose_device_idxs(suggest_best_multi_gpu=True)) \
                                if not cpu_only else nn.DeviceConfig.CPU()

        nn.initialize(self.device_config)

        ####
        self.default_options_path = saved_models_path / f'{self.model_class_name}_default_options.dat'
        self.default_options = {}
        if self.default_options_path.exists():
            try:
                self.default_options = pickle.loads ( self.default_options_path.read_bytes() )
            except:
                pass

        self.choose_preview_history = False
        self.batch_size = self.load_or_def_option('batch_size', 1)
        #####

        io.input_skip_pending()
        self.on_initialize_options()

        if self.is_first_run():
            # save as default options only for first run model initialize
            self.default_options_path.write_bytes( pickle.dumps (self.options) )

        self.autobackup_hour = self.options.get('autobackup_hour', 0)
        self.write_preview_history = self.options.get('write_preview_history', False)
        self.target_iter = self.options.get('target_iter',0)
        self.random_flip = self.options.get('random_flip',True)

        self.on_initialize()
        self.options['batch_size'] = self.batch_size

        self.preview_history_writer = None
        if self.is_training:
            self.preview_history_path = self.saved_models_path / ( f'{self.get_model_name()}_history' )
            self.autobackups_path     = self.saved_models_path / ( f'{self.get_model_name()}_autobackups' )

            if self.write_preview_history or io.is_colab():
                if not self.preview_history_path.exists():
                    self.preview_history_path.mkdir(exist_ok=True)
                else:
                    if self.iter == 0:
                        for filename in pathex.get_image_paths(self.preview_history_path):
                            Path(filename).unlink()

            if self.generator_list is None:
                raise ValueError('You did not call set_training_data_generators()')
            else:
                for i, generator in enumerate(self.generator_list):
                    if not isinstance(generator, SampleGeneratorBase):
                        raise ValueError('training data generator is not a subclass of SampleGeneratorBase')

            self.update_sample_for_preview(choose_preview_history=self.choose_preview_history)

            if self.autobackup_hour != 0:
                self.autobackup_start_time = time.time()

                if not self.autobackups_path.exists():
                    self.autobackups_path.mkdir(exist_ok=True)

        io.log_info( self.get_summary_text() )
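
For reference, the data.dat blob loaded above is a plain pickled dict, and the filename follows the <name>_<class>_data.dat pattern used by the model scan. A minimal sketch of writing a compatible one (field names taken from the loader above, all values placeholders):

import pickle
from pathlib import Path

model_data = {
    'iter': 1000,                  # the loader only restores options etc. when iter != 0
    'options': {'batch_size': 8},
    'loss_history': [],
    'sample_for_preview': None,
    'choosed_gpu_indexes': None,   # key name kept exactly as the loader expects it
}
Path('new_SAEHD_data.dat').write_bytes(pickle.dumps(model_data))  # hypothetical name/class
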
Example #4
    def process_info_generator(self):
        cpu_count = len(self.img_chunks_list)
        io.log_info(f'Running on {cpu_count} threads')
        for i in range(cpu_count):
            yield 'CPU%d' % (i), {'i': i}, {}
Example #5
def sort_by_hue(input_path):
    io.log_info ("Sorting by hue...")
    img_list = []
    for x in io.progress_bar_generator(pathex.get_image_paths(input_path), "Loading"):
        hue = np.mean(cv2.cvtColor(cv2_imread(x), cv2.COLOR_BGR2HSV)[..., 0].flatten())
        img_list.append([x, hue])
    io.log_info ("Sorting...")
    img_list = sorted(img_list, key=operator.itemgetter(1), reverse=True)
    return img_list, []
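
The per-image score is simply the mean of the HSV hue channel; a standalone sketch using plain OpenCV (cv2.imread in place of the project's cv2_imread helper):

import cv2
import numpy as np

def mean_hue(path):
    img = cv2.imread(path)                       # BGR, uint8
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)   # hue is channel 0 (0..179 in OpenCV)
    return float(np.mean(hsv[..., 0]))
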
Example #6
def main(
    detector=None,
    input_path=None,
    output_path=None,
    output_debug=None,
    manual_fix=False,
    manual_output_debug_fix=False,
    manual_window_size=1368,
    image_size=256,
    face_type='full_face',
    max_faces_from_image=0,
    cpu_only=False,
    force_gpu_idxs=None,
):
    face_type = FaceType.fromString(face_type)

    if not input_path.exists():
        io.log_err('Input directory not found. Please ensure it exists.')
        return

    if detector is None:
        io.log_info("Choose detector type.")
        io.log_info("[0] S3FD")
        io.log_info("[1] manual")
        detector = {0: 's3fd', 1: 'manual'}[io.input_int("", 0, [0, 1])]

    device_config = nn.DeviceConfig.GPUIndexes( force_gpu_idxs or nn.ask_choose_device_idxs(choose_only_one=detector=='manual', suggest_all_gpu=True) ) \
                    if not cpu_only else nn.DeviceConfig.CPU()

    output_debug_path = output_path.parent / (output_path.name + '_debug')

    if output_debug is None:
        output_debug = io.input_bool(
            f"Write debug images to {output_debug_path.name}?", False)

    if output_path.exists():
        if not manual_output_debug_fix and input_path != output_path:
            output_images_paths = pathex.get_image_paths(output_path)
            if len(output_images_paths) > 0:
                io.input(
                    f"WARNING !!! \n {output_path} contains files! \n They will be deleted. \n Press enter to continue."
                )
                for filename in output_images_paths:
                    Path(filename).unlink()
    else:
        output_path.mkdir(parents=True, exist_ok=True)

    input_path_image_paths = pathex.get_image_unique_filestem_paths(
        input_path, verbose_print_func=io.log_info)

    if manual_output_debug_fix:
        if not output_debug_path.exists():
            io.log_err(
                f'{output_debug_path} not found. Re-extract faces with "Write debug images" option.'
            )
            return
        else:
            detector = 'manual'
            io.log_info(
                'Performing re-extract frames which were deleted from _debug directory.'
            )

            input_path_image_paths = DeletedFilesSearcherSubprocessor(
                input_path_image_paths,
                pathex.get_image_paths(output_debug_path)).run()
            input_path_image_paths = sorted(input_path_image_paths)
            io.log_info('Found %d images.' % (len(input_path_image_paths)))
    else:
        if output_debug_path.exists():
            for filename in pathex.get_image_paths(output_debug_path):
                Path(filename).unlink()
        else:
            output_debug_path.mkdir(parents=True, exist_ok=True)

    images_found = len(input_path_image_paths)
    faces_detected = 0
    if images_found != 0:
        if detector == 'manual':
            io.log_info('Performing manual extract...')
            data = ExtractSubprocessor(
                [
                    ExtractSubprocessor.Data(Path(filename))
                    for filename in input_path_image_paths
                ],
                'landmarks-manual',
                image_size,
                face_type,
                output_debug_path if output_debug else None,
                manual_window_size=manual_window_size,
                device_config=device_config).run()

            io.log_info('Performing 3rd pass...')
            data = ExtractSubprocessor(
                data,
                'final',
                image_size,
                face_type,
                output_debug_path if output_debug else None,
                final_output_path=output_path,
                device_config=device_config).run()

        else:
            io.log_info('Extracting faces...')
            data = ExtractSubprocessor(
                [
                    ExtractSubprocessor.Data(Path(filename))
                    for filename in input_path_image_paths
                ],
                'all',
                image_size,
                face_type,
                output_debug_path if output_debug else None,
                max_faces_from_image=max_faces_from_image,
                final_output_path=output_path,
                device_config=device_config).run()

        faces_detected += sum([d.faces_detected for d in data])

        if manual_fix:
            if all(d.faces_detected > 0 for d in data):
                io.log_info('All faces are detected, manual fix not needed.')
            else:
                fix_data = [
                    ExtractSubprocessor.Data(d.filepath) for d in data
                    if d.faces_detected == 0
                ]
                io.log_info('Performing manual fix for %d images...' %
                            (len(fix_data)))
                fix_data = ExtractSubprocessor(
                    fix_data,
                    'landmarks-manual',
                    image_size,
                    face_type,
                    output_debug_path if output_debug else None,
                    manual_window_size=manual_window_size,
                    device_config=device_config).run()
                fix_data = ExtractSubprocessor(
                    fix_data,
                    'final',
                    image_size,
                    face_type,
                    output_debug_path if output_debug else None,
                    final_output_path=output_path,
                    device_config=device_config).run()
                faces_detected += sum([d.faces_detected for d in fix_data])

    io.log_info('-------------------------')
    io.log_info('Images found:        %d' % (images_found))
    io.log_info('Faces detected:      %d' % (faces_detected))
    io.log_info('-------------------------')
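
Note that main expects input_path and output_path as Path objects (see the .exists()/.parent calls above). A hypothetical invocation, with made-up workspace paths:

from pathlib import Path

main(detector='s3fd',
     input_path=Path('workspace/data_dst'),           # hypothetical paths
     output_path=Path('workspace/data_dst/aligned'),
     output_debug=False,
     image_size=256,
     face_type='full_face')
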
Example #7
    def summary(self):
        layers = self.get_layers()
        layers_names = []
        layers_params = []

        max_len_str = 0
        max_len_param_str = 0
        delim_str = "-"

        total_params = 0

        # Get layer names and the maximum name length for the delimiter
        for l in layers:
            if len(str(l))>max_len_str:
                max_len_str = len(str(l))
            layers_names+=[str(l).capitalize()]

        # Get the parameter count for each layer
        layers_params = [ int(np.sum([np.prod(w.shape) for w in l.get_weights()])) for l in layers ]
        total_params = np.sum(layers_params)

        # Get the maximum param-string length for the delimiter
        for p in layers_params:
            if len(str(p))>max_len_param_str:
                max_len_param_str=len(str(p))

        # Build the delimiter
        for i in range(max_len_str+max_len_param_str+3):
            delim_str += "-"

        output = "\n"+delim_str+"\n"

        # Format the model name row
        model_name_str = "| "+self.name.capitalize()
        len_model_name_str = len(model_name_str)
        for i in range(len(delim_str)-len_model_name_str):
            model_name_str+= " " if i!=(len(delim_str)-len_model_name_str-2) else " |"

        output += model_name_str +"\n"
        output += delim_str +"\n"


        # Format the layers table
        for i in range(len(layers_names)):
            output += delim_str + "\n"

            l_name = layers_names[i].ljust(max_len_str)
            l_param_str = str(layers_params[i]).rjust(max_len_param_str)

            output += "| " + l_name + "|" + l_param_str + "| \n"

        output += delim_str +"\n"

        # Format the total parameter count row
        total_params_str = "| Total params count: "+str(total_params)
        len_total_params_str = len(total_params_str)
        for i in range(len(delim_str)-len_total_params_str):
            total_params_str+= " " if i!=(len(delim_str)-len_total_params_str-2) else " |"

        output += total_params_str +"\n"
        output += delim_str +"\n"

        io.log_info(output)
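
The per-layer parameter count is the product of each weight tensor's shape, summed over the layer's weights; a standalone sketch with a fake layer (not the leras API):

import numpy as np

class FakeLayer:
    def get_weights(self):
        # e.g. a 3x3 conv with 64 input / 128 output channels, plus a bias
        return [np.zeros((3, 3, 64, 128)), np.zeros((128,))]

params = int(np.sum([np.prod(w.shape) for w in FakeLayer().get_weights()]))
print(params)  # 3*3*64*128 + 128 = 73856
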
Example #8
    def tick(self):
        for cli in self.clis[:]:
            while not cli.c2s.empty():
                obj = cli.c2s.get()
                op = obj.get('op', '')
                if op == 'success':
                    # data processed successfully; pass data and result to on_result
                    self.on_result(cli.host_dict, obj['data'], obj['result'])
                    self.sent_data = None
                    cli.state = 0
                elif op == 'error':
                    # an error occurred while processing the data; return the chunk via on_data_return
                    if 'data' in obj.keys():
                        self.on_data_return(cli.host_dict, obj['data'])
                    # and kill the process
                    cli.kill()
                    self.clis.remove(cli)
                elif op == 'log_info':
                    io.log_info(obj['msg'])
                elif op == 'log_err':
                    io.log_err(obj['msg'])
                elif op == 'progress_bar_inc':
                    io.progress_bar_inc(obj['c'])

        for cli in self.clis[:]:
            if cli.state == 1:
                if cli.sent_time != 0 and self.no_response_time_sec != 0 and (
                        time.time() -
                        cli.sent_time) > self.no_response_time_sec:
                    # subprocess has been busy for too long
                    io.log_info('%s is not responding, terminating it.' %
                                (cli.name))
                    self.on_data_return(cli.host_dict, cli.sent_data)
                    cli.kill()
                    self.clis.remove(cli)

        for cli in self.clis[:]:
            if cli.state == 0:
                # subprocess is idle; fetch some data via get_data
                data = self.get_data(cli.host_dict)
                if data is not None:
                    # and send it to the subprocess
                    cli.s2c.put({'op': 'data', 'data': data})
                    cli.sent_time = time.time()
                    cli.sent_data = data
                    cli.state = 1

        if all([cli.state == 0 for cli in self.clis]):
            # gracefully terminate the subprocesses
            for cli in self.clis[:]:
                cli.s2c.put({'op': 'close'})
                cli.sent_time = time.time()

            while True:
                for cli in self.clis[:]:
                    terminate_it = False
                    while not cli.c2s.empty():
                        obj = cli.c2s.get()
                        obj_op = obj['op']
                        if obj_op == 'finalized':
                            terminate_it = True
                            break

                    if (time.time() - cli.sent_time) > 30:
                        terminate_it = True

                    if terminate_it:
                        cli.state = 2
                        cli.kill()

                if all([cli.state == 2 for cli in self.clis]):
                    break

            # finalize host-side logic
            self.q_timer.stop()
            self.q_timer = None
            self.on_clients_finalized()
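
The loop above implements a small op-tagged dict protocol over the s2c/c2s queues; the message shapes it handles look roughly like this (inferred from the handler, payloads are placeholders):

chunk, result = {'i': 0}, 42  # placeholder payloads

# host -> client (s2c)
host_msgs = [
    {'op': 'data', 'data': chunk},  # process this chunk
    {'op': 'close'},                # finish up; client answers with 'finalized'
]

# client -> host (c2s)
client_msgs = [
    {'op': 'success', 'data': chunk, 'result': result},
    {'op': 'error', 'data': chunk},            # 'data' key is optional
    {'op': 'log_info', 'msg': 'message'},
    {'op': 'log_err', 'msg': 'error message'},
    {'op': 'progress_bar_inc', 'c': 1},
    {'op': 'finalized'},
]
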
Example #9
    def pack(samples_path):
        samples_dat_path = samples_path / packed_faceset_filename

        if samples_dat_path.exists():
            io.log_info(f"{samples_dat_path} : file already exists !")
            io.input("Press enter to continue and overwrite.")

        as_person_faceset = False
        dir_names = pathex.get_all_dir_names(samples_path)
        if len(dir_names) != 0:
            as_person_faceset = io.input_bool(
                f"{len(dir_names)} subdirectories found, process as person faceset?",
                True)

        if as_person_faceset:
            image_paths = []

            for dir_name in dir_names:
                image_paths += pathex.get_image_paths(samples_path / dir_name)
        else:
            image_paths = pathex.get_image_paths(samples_path)

        samples = samplelib.SampleLoader.load_face_samples(image_paths)
        samples_len = len(samples)

        samples_configs = []
        for sample in io.progress_bar_generator(samples, "Processing"):
            sample_filepath = Path(sample.filename)
            sample.filename = sample_filepath.name

            if as_person_faceset:
                sample.person_name = sample_filepath.parent.name
            samples_configs.append(sample.get_config())
        samples_bytes = pickle.dumps(samples_configs, 4)

        of = open(samples_dat_path, "wb")
        of.write(struct.pack("Q", PackedFaceset.VERSION))
        of.write(struct.pack("Q", len(samples_bytes)))
        of.write(samples_bytes)

        del samples_bytes  #just free mem
        del samples_configs

        sample_data_table_offset = of.tell()
        of.write(bytes(8 * (samples_len + 1)))  # placeholder for the sample offset table, filled in below

        data_start_offset = of.tell()
        offsets = []

        for sample in io.progress_bar_generator(samples, "Packing"):
            try:
                if sample.person_name is not None:
                    sample_path = samples_path / sample.person_name / sample.filename
                else:
                    sample_path = samples_path / sample.filename

                with open(sample_path, "rb") as f:
                    b = f.read()

                offsets.append(of.tell() - data_start_offset)
                of.write(b)
            except:
                raise Exception(f"error while processing sample {sample_path}")

        offsets.append(of.tell())

        of.seek(sample_data_table_offset, 0)
        for offset in offsets:
            of.write(struct.pack("Q", offset))
        of.seek(0, 2)
        of.close()

        if io.input_bool(f"Delete original files?", True):
            for filename in io.progress_bar_generator(image_paths,
                                                      "Deleting files"):
                Path(filename).unlink()

            if as_person_faceset:
                for dir_name in io.progress_bar_generator(
                        dir_names, "Deleting dirs"):
                    dir_path = samples_path / dir_name
                    try:
                        shutil.rmtree(dir_path)
                    except:
                        io.log_info(f"unable to remove: {dir_path} ")
Example #10
def video_from_sequence(input_dir,
                        output_file,
                        reference_file=None,
                        ext=None,
                        fps=None,
                        bitrate=None,
                        include_audio=False,
                        lossless=None):
    input_path = Path(input_dir)
    output_file_path = Path(output_file)
    reference_file_path = Path(
        reference_file) if reference_file is not None else None

    if not input_path.exists():
        io.log_err("input_dir not found.")
        return

    if not output_file_path.parent.exists():
        output_file_path.parent.mkdir(parents=True, exist_ok=True)

    out_ext = output_file_path.suffix

    if ext is None:
        ext = io.input_str("Input image format (extension)", "png")

    if lossless is None:
        lossless = io.input_bool("Use lossless codec", False)

    video_id = None
    audio_id = None
    ref_in_a = None
    if reference_file_path is not None:
        if reference_file_path.suffix == '.*':
            reference_file_path = pathex.get_first_file_by_stem(
                reference_file_path.parent, reference_file_path.stem)
        else:
            if not reference_file_path.exists():
                reference_file_path = None

        if reference_file_path is None:
            io.log_err("reference_file not found.")
            return

        # probe the reference file
        probe = ffmpeg.probe(str(reference_file_path),
                             cmd=UIParamReflect.GlobalConfig.ffprobe_cmd_path)

        # get the first video and audio stream ids, and the fps from the video stream
        for stream in probe['streams']:
            if video_id is None and stream['codec_type'] == 'video':
                video_id = stream['index']
                fps = stream['r_frame_rate']

            if audio_id is None and stream['codec_type'] == 'audio':
                audio_id = stream['index']

        if audio_id is not None:
            # the reference has an audio track
            ref_in_a = ffmpeg.input(str(reference_file_path))[str(audio_id)]

    if fps is None:
        # fps not specified and not taken from a reference file
        #fps = max (1, io.input_int ("Enter FPS", 25) )
        fps = UIParamReflect.UIParam2Config.default_fps
        InfoNotifier.InfoNotifier.g_progress_info.append(f"Default output fps: {fps}")

    if not lossless and bitrate is None:
        # bitrate = max (1, io.input_int ("Bitrate of output file in MB/s", 16) )
        bitrate = UIParamReflect.UIParam2Config.bit_rate
        io.log_info("Bitrate of output file in MB/s " + str(bitrate))

    input_image_paths = pathex.get_image_paths(input_path)

    i_in = ffmpeg.input('pipe:', format='image2pipe', r=fps)

    output_args = [i_in]

    if include_audio and ref_in_a is not None:
        output_args += [ref_in_a]

    output_args += [str(output_file_path)]

    output_kwargs = {}

    if lossless:
        output_kwargs.update({
            "c:v": "libx264",
            "crf": "0",
            "pix_fmt": "yuv420p",
        })
    else:
        output_kwargs.update({
            "c:v": "libx264",
            "b:v": "%dM" % (bitrate),
            "pix_fmt": "yuv420p",
        })

    if include_audio and ref_in_a is not None:
        output_kwargs.update({"c:a": "aac", "b:a": "192k", "ar": "48000"})

    job = (ffmpeg.output(*output_args, **output_kwargs).overwrite_output())

    try:
        job_run = job.run_async(
            pipe_stdin=True, cmd=UIParamReflect.GlobalConfig.ffmpeg_cmd_path)

        for image_path in input_image_paths:
            with open(image_path, "rb") as f:
                image_bytes = f.read()
                job_run.stdin.write(image_bytes)

        job_run.stdin.close()
        job_run.wait()
    except:
        io.log_err("ffmpeg fail, job commandline:" + str(job.compile()))
Example #11
    def __init__(self,
                 name,
                 SubprocessorCli_class,
                 no_response_time_sec=0,
                 io_loop_sleep_time=0.005):
        if not issubclass(SubprocessorCli_class, QSubprocessor.Cli):
            raise ValueError(
                "SubprocessorCli_class must be subclass of QSubprocessor.Cli")

        self.name = name
        self.SubprocessorCli_class = SubprocessorCli_class
        self.no_response_time_sec = no_response_time_sec
        self.io_loop_sleep_time = io_loop_sleep_time

        self.clis = []

        # get each subprocess's name plus host/client dicts, and spawn it
        for name, host_dict, client_dict in self.process_info_generator():
            try:
                cli = self.SubprocessorCli_class(client_dict)
                cli.state = 1
                cli.sent_time = 0
                cli.sent_data = None
                cli.name = name
                cli.host_dict = host_dict

                self.clis.append(cli)
            except:
                raise Exception(
                    f"Unable to start subprocess {name}. Error: {traceback.format_exc()}"
                )

        if len(self.clis) == 0:
            raise Exception("Unable to start QSubprocessor '%s' " %
                            (self.name))

        # wait for the subprocesses to finish (or fail) their initialization
        while True:
            for cli in self.clis[:]:
                while not cli.c2s.empty():
                    obj = cli.c2s.get()
                    op = obj.get('op', '')
                    if op == 'init_ok':
                        cli.state = 0
                    elif op == 'log_info':
                        io.log_info(obj['msg'])
                    elif op == 'log_err':
                        io.log_err(obj['msg'])
                    elif op == 'error':
                        cli.kill()
                        self.clis.remove(cli)
                        break
            if all([cli.state == 0 for cli in self.clis]):
                break
            io.process_messages(0.005)

        if len(self.clis) == 0:
            raise Exception("Unable to start subprocesses.")

        # some processes survived; initialize the host-side logic
        self.on_clients_initialized()

        self.q_timer = QTimer()
        self.q_timer.timeout.connect(self.tick)
        self.q_timer.start(5)
Example #12
def save_faceset_metadata_folder(input_path):
    input_path = Path(input_path)

    metadata_filepath = input_path / 'meta.dat'

    io.log_info(f"Saving metadata to {str(metadata_filepath)}\r\n")

    d = {}
    for filepath in io.progress_bar_generator(
            pathex.get_image_paths(input_path), "Processing"):
        filepath = Path(filepath)
        dflimg = DFLIMG.load(filepath)
        if dflimg is None or not dflimg.has_data():
            io.log_info(f"{filepath} is not a dfl image file")
            continue

        dfl_dict = dflimg.get_dict()
        d[filepath.name] = (dflimg.get_shape(), dfl_dict)

    try:
        with open(metadata_filepath, "wb") as f:
            f.write(pickle.dumps(d))
    except:
        raise Exception('cannot save %s' % (metadata_filepath))

    io.log_info("Now you can edit images.")
    io.log_info("!!! Keep same filenames in the folder.")
    io.log_info(
        "You can change size of images, restoring process will downscale back to original size."
    )
    io.log_info("After that, use restore metadata.")
Example #13
    def initialize(device_config=None, floatx="float32", data_format="NHWC"):

        if nn.tf is None:
            if device_config is None:
                device_config = nn.getCurrentDeviceConfig()
            nn.setCurrentDeviceConfig(device_config)

            # Manipulate environment variables before importing tensorflow
            
            if 'CUDA_VISIBLE_DEVICES' in os.environ.keys():
                os.environ.pop('CUDA_VISIBLE_DEVICES')

            first_run = False
            if len(device_config.devices) != 0:
                if sys.platform[0:3] == 'win':
                    # Windows specific env vars
                    if all( [ x.name == device_config.devices[0].name for x in device_config.devices ] ):
                        devices_str = "_" + device_config.devices[0].name.replace(' ','_')
                    else:
                        devices_str = ""
                        for device in device_config.devices:
                            devices_str += "_" + device.name.replace(' ','_')

                    compute_cache_path = Path(os.environ['APPDATA']) / 'NVIDIA' / ('ComputeCache' + devices_str)
                    if not compute_cache_path.exists():
                        first_run = True
                    os.environ['CUDA_CACHE_PATH'] = str(compute_cache_path)

            os.environ['CUDA_CACHE_MAXSIZE'] = '536870912' # 512 MB (default is 32 MB)
            os.environ['TF_MIN_GPU_MULTIPROCESSOR_COUNT'] = '2'
            os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # tf log errors only

            if first_run:
                io.log_info("Caching GPU kernels...")

            import tensorflow as tf
            nn.tf = tf
            
            import logging
            # Disable tensorflow warnings
            logging.getLogger('tensorflow').setLevel(logging.ERROR)

            # Initialize framework
            import core.leras.ops
            import core.leras.layers
            import core.leras.initializers
            import core.leras.optimizers
            import core.leras.models
            import core.leras.archis
            
            # Configure tensorflow session-config
            if len(device_config.devices) == 0:
                nn.tf_default_device = "/CPU:0"
                config = tf.ConfigProto(device_count={'GPU': 0})
            else:
                nn.tf_default_device = "/GPU:0"
                config = tf.ConfigProto()
                config.gpu_options.visible_device_list = ','.join([str(device.index) for device in device_config.devices])

            config.gpu_options.force_gpu_compatible = True
            config.gpu_options.allow_growth = True
            nn.tf_sess_config = config
            
        if nn.tf_sess is None:
            nn.tf_sess = nn.tf.Session(config=nn.tf_sess_config)

        if floatx == "float32":
            floatx = nn.tf.float32
        elif floatx == "float16":
            floatx = nn.tf.float16
        else:
            raise ValueError(f"unsupported floatx {floatx}")
        nn.set_floatx(floatx)
        nn.set_data_format(data_format)
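
Typical use, matching how ModelBase above picks a device config first and then initializes the framework; a short sketch (assuming the DFL package layout, where this nn namespace lives in core.leras):

from core.leras import nn

device_config = nn.DeviceConfig.BestGPU()  # or nn.DeviceConfig.CPU()
nn.initialize(device_config, floatx="float32", data_format="NHWC")

tf = nn.tf                   # tensorflow, imported lazily by initialize()
print(nn.tf_default_device)  # "/GPU:0" or "/CPU:0"
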
Example #14
    def ask_choose_device_idxs(choose_only_one=False, allow_cpu=True, suggest_best_multi_gpu=False, suggest_all_gpu=False):
        devices = Devices.getDevices()
        if len(devices) == 0:
            return []

        all_devices_indexes = [device.index for device in devices]

        if choose_only_one:
            suggest_best_multi_gpu = False
            suggest_all_gpu = False

        if suggest_all_gpu:
            best_device_indexes = all_devices_indexes
        elif suggest_best_multi_gpu:
            best_device_indexes = [device.index for device in devices.get_equal_devices(devices.get_best_device()) ]
        else:
            best_device_indexes = [ devices.get_best_device().index ]
        best_device_indexes = ",".join([str(x) for x in best_device_indexes])

        io.log_info ("")
        if choose_only_one:
            io.log_info ("Choose one GPU idx.")
        else:
            io.log_info ("Choose one or several GPU idxs (separated by comma).")
        io.log_info ("")

        if allow_cpu:
            io.log_info ("[CPU] : CPU")
        for device in devices:
            io.log_info (f"  [{device.index}] : {device.name}")

        io.log_info ("")

        while True:
            try:
                if choose_only_one:
                    choosed_idxs = io.input_str("Which GPU index to choose?", best_device_indexes)
                else:
                    choosed_idxs = io.input_str("Which GPU indexes to choose?", best_device_indexes)

                if allow_cpu and choosed_idxs.lower() == "cpu":
                    choosed_idxs = []
                    break

                choosed_idxs = [ int(x) for x in choosed_idxs.split(',') ]

                if choose_only_one:
                    if len(choosed_idxs) == 1:
                        break
                else:
                    if all( [idx in all_devices_indexes for idx in choosed_idxs] ):
                        break
            except:
                pass
        io.log_info ("")

        return choosed_idxs
Example #15
    def __init__(self, is_interactive, merger_session_filepath, predictor_func, predictor_input_shape, face_enhancer_func, xseg_256_extract_func, merger_config, frames, frames_root_path, output_path, output_mask_path, model_iter):
        if len (frames) == 0:
            raise ValueError ("len (frames) == 0")

        super().__init__('Merger', InteractiveMergerSubprocessor.Cli, io_loop_sleep_time=0.001)

        self.is_interactive = is_interactive
        self.merger_session_filepath = Path(merger_session_filepath)
        self.merger_config = merger_config

        self.predictor_func = predictor_func
        self.predictor_input_shape = predictor_input_shape

        self.face_enhancer_func = face_enhancer_func
        self.xseg_256_extract_func = xseg_256_extract_func

        self.frames_root_path = frames_root_path
        self.output_path = output_path
        self.output_mask_path = output_mask_path
        self.model_iter = model_iter

        self.prefetch_frame_count = self.process_count = multiprocessing.cpu_count()

        session_data = None
        if self.is_interactive and self.merger_session_filepath.exists():
            io.input_skip_pending()
            if io.input_bool ("Use saved session?", True):
                try:
                    with open( str(self.merger_session_filepath), "rb") as f:
                        session_data = pickle.loads(f.read())

                except Exception as e:
                    pass

        rewind_to_frame_idx = None
        self.frames = frames
        self.frames_idxs = [ *range(len(self.frames)) ]
        self.frames_done_idxs = []

        if self.is_interactive and session_data is not None:
            # Loaded session data, check it
            s_frames = session_data.get('frames', None)
            s_frames_idxs = session_data.get('frames_idxs', None)
            s_frames_done_idxs = session_data.get('frames_done_idxs', None)
            s_model_iter = session_data.get('model_iter', None)

            frames_equal = (s_frames is not None) and \
                           (s_frames_idxs is not None) and \
                           (s_frames_done_idxs is not None) and \
                           (s_model_iter is not None) and \
                           (len(frames) == len(s_frames)) # frames count must match

            if frames_equal:
                for i in range(len(frames)):
                    frame = frames[i]
                    s_frame = s_frames[i]
                    # frames filenames must match
                    if frame.frame_info.filepath.name != s_frame.frame_info.filepath.name:
                        frames_equal = False
                    if not frames_equal:
                        break

            if frames_equal:
                io.log_info ('Using saved session from ' + '/'.join (self.merger_session_filepath.parts[-2:]) )

                for frame in s_frames:
                    if frame.cfg is not None:
                        # recreate the MergerConfig via its constructor, passing get_config() as dict params,
                        # so that if any new param is added, old merger sessions keep working
                        frame.cfg = frame.cfg.__class__( **frame.cfg.get_config() )

                self.frames = s_frames
                self.frames_idxs = s_frames_idxs
                self.frames_done_idxs = s_frames_done_idxs

                if self.model_iter != s_model_iter:
                    # the model has been trained further; recompute all frames
                    rewind_to_frame_idx = -1
                    for frame in self.frames:
                        frame.is_done = False
                elif len(self.frames_idxs) == 0:
                    # all frames are already done; rewind to the beginning
                    rewind_to_frame_idx = -1

                if len(self.frames_idxs) != 0:
                    cur_frame = self.frames[self.frames_idxs[0]]
                    cur_frame.is_shown = False

            if not frames_equal:
                session_data = None

        if session_data is None:
            for filename in pathex.get_image_paths(self.output_path): #remove all images in output_path
                Path(filename).unlink()

            for filename in pathex.get_image_paths(self.output_mask_path): #remove all images in output_mask_path
                Path(filename).unlink()


            frames[0].cfg = self.merger_config.copy()

        for i in range( len(self.frames) ):
            frame = self.frames[i]
            frame.idx = i
            frame.output_filepath      = self.output_path      / ( frame.frame_info.filepath.stem + '.png' )
            frame.output_mask_filepath = self.output_mask_path / ( frame.frame_info.filepath.stem + '.png' )

            if not frame.output_filepath.exists() or \
               not frame.output_mask_filepath.exists():
                # if some frame does not exist, recompute and rewind
                frame.is_done = False
                frame.is_shown = False

                if rewind_to_frame_idx is None:
                    rewind_to_frame_idx = i-1
                else:
                    rewind_to_frame_idx = min(rewind_to_frame_idx, i-1)

        if rewind_to_frame_idx is not None:
            while len(self.frames_done_idxs) > 0:
                if self.frames_done_idxs[-1] > rewind_to_frame_idx:
                    prev_frame = self.frames[self.frames_done_idxs.pop()]
                    self.frames_idxs.insert(0, prev_frame.idx)
                else:
                    break
Example #16
    def ask_settings(self):
        s = """Choose mode: \n"""
        for key in mode_dict.keys():
            s += f"""({key}) {mode_dict[key]}\n"""
        io.log_info(s)
        mode = io.input_int("", mode_str_dict.get(self.default_mode, 1))

        self.mode = mode_dict.get(mode, self.default_mode)

        if 'raw' not in self.mode:
            if self.mode == 'hist-match':
                self.masked_hist_match = io.input_bool("Masked hist match?",
                                                       True)

            if self.mode == 'hist-match' or self.mode == 'seamless-hist-match':
                self.hist_match_threshold = np.clip(
                    io.input_int("Hist match threshold",
                                 255,
                                 add_info="0..255"), 0, 255)

        s = """Choose mask mode: \n"""
        for key in mask_mode_dict.keys():
            s += f"""({key}) {mask_mode_dict[key]}\n"""
        io.log_info(s)
        self.mask_mode = io.input_int("", 1, valid_list=mask_mode_dict.keys())

        if 'raw' not in self.mode:
            self.erode_mask_modifier = np.clip(
                io.input_int("Choose erode mask modifier",
                             0,
                             add_info="-400..400"), -400, 400)
            self.blur_mask_modifier = np.clip(
                io.input_int("Choose blur mask modifier", 0,
                             add_info="0..400"), 0, 400)
            self.motion_blur_power = np.clip(
                io.input_int("Choose motion blur power", 0, add_info="0..100"),
                0, 100)

        self.output_face_scale = np.clip(
            io.input_int("Choose output face scale modifier",
                         0,
                         add_info="-50..50"), -50, 50)

        if 'raw' not in self.mode:
            self.color_transfer_mode = io.input_str(
                "Color transfer to predicted face",
                None,
                valid_list=list(ctm_str_dict.keys())[1:])
            self.color_transfer_mode = ctm_str_dict[self.color_transfer_mode]

        super().ask_settings()

        self.super_resolution_power = np.clip(
            io.input_int(
                "Choose super resolution power",
                0,
                add_info="0..100",
                help_message=
                "Enhance details by applying superresolution network."), 0,
            100)

        if 'raw' not in self.mode:
            self.image_denoise_power = np.clip(
                io.input_int("Choose image degrade by denoise power",
                             0,
                             add_info="0..500"), 0, 500)
            self.bicubic_degrade_power = np.clip(
                io.input_int("Choose image degrade by bicubic rescale power",
                             0,
                             add_info="0..100"), 0, 100)
            self.color_degrade_power = np.clip(
                io.input_int("Degrade color power of final image",
                             0,
                             add_info="0..100"), 0, 100)

        io.log_info("")
Example #17
    def on_tick(self):
        io.process_messages()

        go_prev_frame = False
        go_first_frame = False
        go_prev_frame_overriding_cfg = False
        go_first_frame_overriding_cfg = False

        go_next_frame = self.process_remain_frames
        go_next_frame_overriding_cfg = False
        go_last_frame_overriding_cfg = False

        cur_frame = None
        if len(self.frames_idxs) != 0:
            cur_frame = self.frames[self.frames_idxs[0]]

        if self.is_interactive:

            screen_image = None if self.process_remain_frames else \
                                   self.main_screen.get_image()

            self.main_screen.set_waiting_icon( self.process_remain_frames or \
                                               self.is_interactive_quitting )

            if cur_frame is not None and not self.is_interactive_quitting:

                if not self.process_remain_frames:
                    if cur_frame.is_done:
                        if not cur_frame.is_shown:
                            if cur_frame.image is None:
                                image      = cv2_imread (cur_frame.output_filepath, verbose=False)
                                image_mask = cv2_imread (cur_frame.output_mask_filepath, verbose=False)
                                if image is None or image_mask is None:
                                    # unable to read? recompute then
                                    cur_frame.is_done = False
                                else:
                                    image = imagelib.normalize_channels(image, 3)
                                    image_mask = imagelib.normalize_channels(image_mask, 1)
                                    cur_frame.image = np.concatenate([image, image_mask], -1)

                            if cur_frame.is_done:
                                io.log_info (cur_frame.cfg.to_string( cur_frame.frame_info.filepath.name) )
                                cur_frame.is_shown = True
                                screen_image = cur_frame.image
                    else:
                        self.main_screen.set_waiting_icon(True)

            self.main_screen.set_image(screen_image)
            self.screen_manager.show_current()

            key_events = self.screen_manager.get_key_events()
            key, chr_key, ctrl_pressed, alt_pressed, shift_pressed = key_events[-1] if len(key_events) > 0 else (0,0,False,False,False)

            if key == 9: #tab
                self.screen_manager.switch_screens()
            else:
                if key == 27: #esc
                    self.is_interactive_quitting = True
                elif self.screen_manager.get_current() is self.main_screen:

                    if self.merger_config.type == MergerConfig.TYPE_MASKED and chr_key in self.masked_keys:
                        self.process_remain_frames = False

                        if cur_frame is not None:
                            cfg = cur_frame.cfg
                            prev_cfg = cfg.copy()

                            if cfg.type == MergerConfig.TYPE_MASKED:
                                self.masked_keys_funcs[chr_key](cfg, shift_pressed)

                            if prev_cfg != cfg:
                                io.log_info ( cfg.to_string(cur_frame.frame_info.filepath.name) )
                                cur_frame.is_done = False
                                cur_frame.is_shown = False
                    else:

                        if chr_key == ',' or chr_key == 'm':
                            self.process_remain_frames = False
                            go_prev_frame = True

                            if chr_key == ',':
                                if shift_pressed:
                                    go_first_frame = True

                            elif chr_key == 'm':
                                if not shift_pressed:
                                    go_prev_frame_overriding_cfg = True
                                else:
                                    go_first_frame_overriding_cfg = True

                        elif chr_key == '.' or chr_key == '/':
                            self.process_remain_frames = False
                            go_next_frame = True

                            if chr_key == '.':
                                if shift_pressed:
                                    self.process_remain_frames = True  # shift+'.' resumes processing the remaining frames

                            elif chr_key == '/':
                                if not shift_pressed:
                                    go_next_frame_overriding_cfg = True
                                else:
                                    go_last_frame_overriding_cfg = True

                        elif chr_key == '-':
                            self.screen_manager.get_current().diff_scale(-0.1)
                        elif chr_key == '=':
                            self.screen_manager.get_current().diff_scale(0.1)
                        elif chr_key == 'v':
                            self.screen_manager.get_current().toggle_show_checker_board()

        if go_prev_frame:
            if cur_frame is None or cur_frame.is_done:
                if cur_frame is not None:
                    cur_frame.image = None

                while True:
                    if len(self.frames_done_idxs) > 0:
                        prev_frame = self.frames[self.frames_done_idxs.pop()]
                        self.frames_idxs.insert(0, prev_frame.idx)
                        prev_frame.is_shown = False
                        io.progress_bar_inc(-1)

                        if cur_frame is not None and (go_prev_frame_overriding_cfg or go_first_frame_overriding_cfg):
                            if prev_frame.cfg != cur_frame.cfg:
                                prev_frame.cfg = cur_frame.cfg.copy()
                                prev_frame.is_done = False

                        cur_frame = prev_frame

                    if go_first_frame_overriding_cfg or go_first_frame:
                        if len(self.frames_done_idxs) > 0:
                            continue
                    break

        elif go_next_frame:
            if cur_frame is not None and cur_frame.is_done:
                cur_frame.image = None
                cur_frame.is_shown = True
                self.frames_done_idxs.append(cur_frame.idx)
                self.frames_idxs.pop(0)
                io.progress_bar_inc(1)

                f = self.frames

                if len(self.frames_idxs) != 0:
                    next_frame = f[ self.frames_idxs[0] ]
                    next_frame.is_shown = False

                    if go_next_frame_overriding_cfg or go_last_frame_overriding_cfg:

                        if go_next_frame_overriding_cfg:
                            to_frames = next_frame.idx+1
                        else:
                            to_frames = len(f)

                        for i in range( next_frame.idx, to_frames ):
                            f[i].cfg = None

                    for i in range( min(len(self.frames_idxs), self.prefetch_frame_count) ):
                        frame = f[ self.frames_idxs[i] ]
                        if frame.cfg is None:
                            if i == 0:
                                frame.cfg = cur_frame.cfg.copy()
                            else:
                                frame.cfg = f[ self.frames_idxs[i-1] ].cfg.copy()

                            frame.is_done = False #initiate solve again
                            frame.is_shown = False

            if len(self.frames_idxs) == 0:
                self.process_remain_frames = False

        return (self.is_interactive and self.is_interactive_quitting) or \
               (not self.is_interactive and not self.process_remain_frames)
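For reference, the navigation hotkeys this handler dispatches, reconstructed from the branches above:

#   Tab       switch screens                  Esc      request quit
#   , / .     previous / next frame
#   m / /     previous / next frame, overriding the target frame's cfg
#   shift+,   back to first frame             shift+m  first frame, overriding cfgs
#   shift+.   resume processing the remaining frames
#   shift+/   process to the last frame, overriding cfgs
#   - / =     zoom preview out / in           v        toggle checker board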
Beispiel #18
0
def dev_test_68(input_dir):
    # process 68 landmarks dataset with .pts files
    input_path = Path(input_dir)
    if not input_path.exists():
        raise ValueError('input_dir not found. Please ensure it exists.')

    output_path = input_path.parent / (input_path.name + '_aligned')

    io.log_info(f'Output dir is {output_path}')

    if output_path.exists():
        output_images_paths = pathex.get_image_paths(output_path)
        if len(output_images_paths) > 0:
            io.input_bool(
                "WARNING !!! \n %s contains files! \n They will be deleted. \n Press enter to continue."
                % (str(output_path)), False)
            for filename in output_images_paths:
                Path(filename).unlink()
    else:
        output_path.mkdir(parents=True, exist_ok=True)

    images_paths = pathex.get_image_paths(input_path)

    for filepath in io.progress_bar_generator(images_paths, "Processing"):
        filepath = Path(filepath)

        pts_filepath = filepath.parent / (filepath.stem + '.pts')
        if pts_filepath.exists():
            pts = pts_filepath.read_text()
            pts_lines = pts.split('\n')

            lmrk_lines = None
            for pts_line in pts_lines:
                if pts_line == '{':
                    lmrk_lines = []
                elif pts_line == '}':
                    break
                else:
                    if lmrk_lines is not None:
                        lmrk_lines.append(pts_line)

            if lmrk_lines is not None and len(lmrk_lines) == 68:
                try:
                    lmrks = [
                        np.array(lmrk_line.strip().split(' ')).astype(
                            np.float32).tolist() for lmrk_line in lmrk_lines
                    ]
                except Exception as e:
                    print(e)
                    print(filepath)
                    continue

                rect = LandmarksProcessor.get_rect_from_landmarks(lmrks)

                output_filepath = output_path / (filepath.stem + '.jpg')

                img = cv2_imread(filepath)
                img = imagelib.normalize_channels(img, 3)
                cv2_imwrite(output_filepath, img,
                            [int(cv2.IMWRITE_JPEG_QUALITY), 95])

                raise Exception("unimplemented")
                #DFLJPG.x(output_filepath, face_type=FaceType.toString(FaceType.MARK_ONLY),
                #                                landmarks=lmrks,
                #                                source_filename=filepath.name,
                #                                source_rect=rect,
                #                                source_landmarks=lmrks
                #                    )

    io.log_info("Done.")
Beispiel #19
0
    def run(self):
        if not self.on_check_run():
            return self.get_result()

        self.clis = []

        # get each subprocess' name, host dict and client dict, then spawn the clients
        for name, host_dict, client_dict in self.process_info_generator():
            try:
                cli = self.SubprocessorCli_class(client_dict)
                cli.state = 1
                cli.sent_time = 0
                cli.sent_data = None
                cli.name = name
                cli.host_dict = host_dict

                self.clis.append (cli)

                if self.initialize_subprocesses_in_serial:
                    while True:
                        while not cli.c2s.empty():
                            obj = cli.c2s.get()
                            op = obj.get('op','')
                            if op == 'init_ok':
                                cli.state = 0
                            elif op == 'log_info':
                                io.log_info(obj['msg'])
                            elif op == 'log_err':
                                io.log_err(obj['msg'])
                            elif op == 'error':
                                cli.kill()
                                self.clis.remove(cli)
                                break
                        if cli.state == 0:
                            break
                        io.process_messages(0.005)
            except:
                raise Exception (f"Unable to start subprocess {name}. Error: {traceback.format_exc()}")

        if len(self.clis) == 0:
            raise Exception ("Unable to start Subprocessor '%s' " % (self.name))

        # wait for the subprocesses to report success (or failure) of their initialization
        while True:
            for cli in self.clis[:]:
                while not cli.c2s.empty():
                    obj = cli.c2s.get()
                    op = obj.get('op','')
                    if op == 'init_ok':
                        cli.state = 0
                    elif op == 'log_info':
                        io.log_info(obj['msg'])
                    elif op == 'log_err':
                        io.log_err(obj['msg'])
                    elif op == 'error':
                        cli.kill()
                        self.clis.remove(cli)
                        break
            if all ([cli.state == 0 for cli in self.clis]):
                break
            io.process_messages(0.005)

        if len(self.clis) == 0:
            raise Exception ( "Unable to start subprocesses." )

        #ok some processes survived, initialize host logic

        self.on_clients_initialized()

        #main loop of data processing
        while True:
            for cli in self.clis[:]:
                while not cli.c2s.empty():
                    obj = cli.c2s.get()
                    op = obj.get('op','')
                    if op == 'success':
                        # data processed successfully, pass it and the result to on_result
                        self.on_result (cli.host_dict, obj['data'], obj['result'])
                        cli.sent_data = None
                        cli.state = 0
                    elif op == 'error':
                        # an error occurred while processing the data, return the chunk to on_data_return
                        if 'data' in obj.keys():
                            self.on_data_return (cli.host_dict, obj['data'] )
                        #and killing process
                        cli.kill()
                        self.clis.remove(cli)
                    elif op == 'log_info':
                        io.log_info(obj['msg'])
                    elif op == 'log_err':
                        io.log_err(obj['msg'])
                    elif op == 'progress_bar_inc':
                        io.progress_bar_inc(obj['c'])

            for cli in self.clis[:]:
                if cli.state == 1:
                    if cli.sent_time != 0 and self.no_response_time_sec != 0 and (time.time() - cli.sent_time) > self.no_response_time_sec:
                        #subprocess busy too long
                        print ( '%s does not respond, terminating it.' % (cli.name) )
                        self.on_data_return (cli.host_dict, cli.sent_data )
                        cli.kill()
                        self.clis.remove(cli)

            for cli in self.clis[:]:
                if cli.state == 0:
                    # subprocess is free, fetch a chunk of work from get_data
                    data = self.get_data(cli.host_dict)
                    if data is not None:
                        #and send it to subprocess
                        cli.s2c.put ( {'op': 'data', 'data' : data} )
                        cli.sent_time = time.time()
                        cli.sent_data = data
                        cli.state = 1

            if self.io_loop_sleep_time != 0:
                io.process_messages(self.io_loop_sleep_time)

            if self.on_tick() and all ([cli.state == 0 for cli in self.clis]):
                #all subprocesses free and no more data available to process, ending loop
                break



        #gracefully terminating subprocesses
        for cli in self.clis[:]:
            cli.s2c.put ( {'op': 'close'} )
            cli.sent_time = time.time()

        while True:
            for cli in self.clis[:]:
                terminate_it = False
                while not cli.c2s.empty():
                    obj = cli.c2s.get()
                    obj_op = obj['op']
                    if obj_op == 'finalized':
                        terminate_it = True
                        break

                if (time.time() - cli.sent_time) > 30:
                    terminate_it = True

                if terminate_it:
                    cli.state = 2
                    cli.kill()

            if all ([cli.state == 2 for cli in self.clis]):
                break

        #finalizing host logic and return result
        self.on_clients_finalized()

        return self.get_result()
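The run loop above drives its workers through a small op-based protocol over per-client queues. A minimal sketch of the client side it implies, assuming s2c/c2s are the multiprocessing queues wrapped by SubprocessorCli_class and process_data is an illustrative callback:

def client_loop(s2c, c2s, process_data):
    c2s.put({'op': 'init_ok'})  # host flips cli.state to 0
    while True:
        msg = s2c.get()
        if msg['op'] == 'data':
            try:
                result = process_data(msg['data'])
                c2s.put({'op': 'success', 'data': msg['data'], 'result': result})
            except Exception:
                # the host requeues the chunk via on_data_return and kills this cli
                c2s.put({'op': 'error', 'data': msg['data']})
        elif msg['op'] == 'close':
            c2s.put({'op': 'finalized'})  # lets the host mark this cli as state 2
            break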
Beispiel #20
0
def extract_umd_csv(input_file_csv, face_type='full_face', device_args={}):

    # extract faces from the umdfaces.io dataset csv file with pitch/yaw/roll info
    multi_gpu = device_args.get('multi_gpu', False)
    cpu_only = device_args.get('cpu_only', False)
    face_type = FaceType.fromString(face_type)

    input_file_csv_path = Path(input_file_csv)
    if not input_file_csv_path.exists():
        raise ValueError('input_file_csv not found. Please ensure it exists.')

    input_file_csv_root_path = input_file_csv_path.parent
    output_path = input_file_csv_path.parent / ('aligned_' +
                                                input_file_csv_path.name)

    io.log_info("Output dir is %s." % (str(output_path)))

    if output_path.exists():
        output_images_paths = pathex.get_image_paths(output_path)
        if len(output_images_paths) > 0:
            io.input_bool(
                "WARNING !!! \n %s contains files! \n They will be deleted. \n Press enter to continue."
                % (str(output_path)), False)
            for filename in output_images_paths:
                Path(filename).unlink()
    else:
        output_path.mkdir(parents=True, exist_ok=True)

    try:
        with open(str(input_file_csv_path), 'r') as f:
            csv_file = f.read()
    except Exception as e:
        io.log_err("Unable to open or read file " + str(input_file_csv_path) +
                   ": " + str(e))
        return

    strings = csv_file.split('\n')
    keys = strings[0].split(',')
    keys_len = len(keys)
    csv_data = []
    for i in range(1, len(strings)):
        values = strings[i].split(',')
        if keys_len != len(values):
            io.log_err("Wrong string in csv file, skipping.")
            continue

        csv_data += [{keys[n]: values[n] for n in range(keys_len)}]

    data = []
    for d in csv_data:
        filename = input_file_csv_root_path / d['FILE']

        x, y, w, h = float(d['FACE_X']), float(d['FACE_Y']), float(
            d['FACE_WIDTH']), float(d['FACE_HEIGHT'])

        data += [
            ExtractSubprocessor.Data(filename=filename,
                                     rects=[[x, y, x + w, y + h]])
        ]

    images_found = len(data)
    faces_detected = 0
    if len(data) > 0:
        io.log_info("Performing 2nd pass from csv file...")
        data = ExtractSubprocessor(data,
                                   'landmarks',
                                   multi_gpu=multi_gpu,
                                   cpu_only=cpu_only).run()

        io.log_info('Performing 3rd pass...')
        data = ExtractSubprocessor(data,
                                   'final',
                                   face_type,
                                   None,
                                   multi_gpu=multi_gpu,
                                   cpu_only=cpu_only,
                                   manual=False,
                                   final_output_path=output_path).run()
        faces_detected += sum([d.faces_detected for d in data])

    io.log_info('-------------------------')
    io.log_info('Images found:        %d' % (images_found))
    io.log_info('Faces detected:      %d' % (faces_detected))
    io.log_info('-------------------------')
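A hedged sketch of the CSV rows extract_umd_csv parses; the column names come from the code above, the values are made up. FILE paths resolve relative to the csv's directory and each box becomes a [x, y, x+w, y+h] rect:

# umdfaces_batch1.csv
#   FILE,FACE_X,FACE_Y,FACE_WIDTH,FACE_HEIGHT
#   person_0001/img_001.jpg,110.0,85.0,220.0,260.0
extract_umd_csv('umdfaces_batch1.csv', face_type='full_face')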
Beispiel #21
0
def main(
    detector=None,
    input_path=None,
    output_path=None,
    output_debug=None,
    manual_fix=False,
    manual_output_debug_fix=False,
    manual_window_size=1368,
    face_type='full_face',
    max_faces_from_image=None,
    image_size=None,
    jpeg_quality=None,
    cpu_only=False,
    force_gpu_idxs=None,
):

    if not input_path.exists():
        io.log_err('Input directory not found. Please ensure it exists.')
        return

    if not output_path.exists():
        output_path.mkdir(parents=True, exist_ok=True)

    if face_type is not None:
        face_type = FaceType.fromString(face_type)

    if face_type is None:
        if manual_output_debug_fix:
            files = pathex.get_image_paths(output_path)
            if len(files) != 0:
                dflimg = DFLIMG.load(Path(files[0]))
                if dflimg is not None and dflimg.has_data():
                    face_type = FaceType.fromString(dflimg.get_face_type())

    input_image_paths = pathex.get_image_unique_filestem_paths(
        input_path, verbose_print_func=io.log_info)
    output_images_paths = pathex.get_image_paths(output_path)
    output_debug_path = output_path.parent / (output_path.name + '_debug')

    continue_extraction = False
    if not manual_output_debug_fix and len(output_images_paths) > 0:
        if len(output_images_paths) > 128:
            continue_extraction = io.input_bool(
                "继续提取?",
                True,
                help_message=
                "Extraction can be continued, but you must specify the same options again."
            )

        if len(output_images_paths) > 128 and continue_extraction:
            try:
                input_image_paths = input_image_paths[
                    [Path(x).stem for x in input_image_paths].
                    index(Path(output_images_paths[-128]).stem.split('_')[0]):]
            except:
                io.log_err(
                    "Error in fetching the last index. Extraction cannot be continued."
                )
                return
        elif input_path != output_path:
            io.input(
                f"\n WARNING !!! \n {output_path} contains files! \n They will be deleted. \n Press enter to continue.\n"
            )
            for filename in output_images_paths:
                Path(filename).unlink()

    device_config = nn.DeviceConfig.GPUIndexes( force_gpu_idxs or nn.ask_choose_device_idxs(choose_only_one=detector=='manual', suggest_all_gpu=True) ) \
                    if not cpu_only else nn.DeviceConfig.CPU()

    if face_type is None:
        face_type = io.input_str(
            "脸类型 Face type",
            'wf', ['f', 'wf', 'head'],
            help_message=
            "Full face / whole face / head. 'Whole face' covers full area of face include forehead. 'head' covers full head, but requires XSeg for src and dst faceset."
        ).lower()
        face_type = {
            'f': FaceType.FULL,
            'wf': FaceType.WHOLE_FACE,
            'head': FaceType.HEAD
        }[face_type]

    if max_faces_from_image is None:
        max_faces_from_image = io.input_int(
            f"单张图中提取人脸数量上限 Max number of faces from image",
            0,
            help_message=
            "If you extract a src faceset that has frames with a large number of faces, it is advisable to set max faces to 3 to speed up extraction. 0 - unlimited"
        )

    if image_size is None:
        image_size = io.input_int(
            f"图片大小 Image size",
            512 if face_type < FaceType.HEAD else 768,
            valid_range=[256, 2048],
            help_message=
            "Output image size. The higher image size, the worse face-enhancer works. Use higher than 512 value only if the source image is sharp enough and the face does not need to be enhanced."
        )

    if jpeg_quality is None:
        jpeg_quality = io.input_int(
            f"图片质量 Jpeg quality",
            90,
            valid_range=[1, 100],
            help_message=
            "Jpeg quality. The higher jpeg quality the larger the output file size."
        )

    if detector is None:
        io.log_info("选择提取算法 Choose detector type.")
        io.log_info("[0] S3FD")
        io.log_info("[1] manual")
        detector = {0: 's3fd', 1: 'manual'}[io.input_int("", 0, [0, 1])]

    if output_debug is None:
        output_debug = io.input_bool(
            f"保存调试图片 Write debug images to {output_debug_path.name}?", False)

    if output_debug:
        output_debug_path.mkdir(parents=True, exist_ok=True)

    if manual_output_debug_fix:
        if not output_debug_path.exists():
            io.log_err(
                f'{output_debug_path} not found. Re-extract faces with "Write debug images" option.'
            )
            return
        else:
            detector = 'manual'
            io.log_info(
                'Performing re-extract frames which were deleted from _debug directory.'
            )

            input_image_paths = DeletedFilesSearcherSubprocessor(
                input_image_paths,
                pathex.get_image_paths(output_debug_path)).run()
            input_image_paths = sorted(input_image_paths)
            io.log_info('Found %d images.' % (len(input_image_paths)))
    else:
        if not continue_extraction and output_debug_path.exists():
            for filename in pathex.get_image_paths(output_debug_path):
                Path(filename).unlink()

    images_found = len(input_image_paths)
    faces_detected = 0
    if images_found != 0:
        if detector == 'manual':
            io.log_info('Performing manual extract...')
            data = ExtractSubprocessor(
                [
                    ExtractSubprocessor.Data(Path(filename))
                    for filename in input_image_paths
                ],
                'landmarks-manual',
                image_size,
                jpeg_quality,
                face_type,
                output_debug_path if output_debug else None,
                manual_window_size=manual_window_size,
                device_config=device_config).run()

            io.log_info('Performing 3rd pass...')
            data = ExtractSubprocessor(
                data,
                'final',
                image_size,
                jpeg_quality,
                face_type,
                output_debug_path if output_debug else None,
                final_output_path=output_path,
                device_config=device_config).run()

        else:
            io.log_info('Extracting faces...')
            data = ExtractSubprocessor(
                [
                    ExtractSubprocessor.Data(Path(filename))
                    for filename in input_image_paths
                ],
                'all',
                image_size,
                jpeg_quality,
                face_type,
                output_debug_path if output_debug else None,
                max_faces_from_image=max_faces_from_image,
                final_output_path=output_path,
                device_config=device_config).run()

        faces_detected += sum([d.faces_detected for d in data])

        if manual_fix:
            if all(d.faces_detected > 0 for d in data):
                io.log_info('All faces are detected, manual fix not needed.')
            else:
                fix_data = [
                    ExtractSubprocessor.Data(d.filepath) for d in data
                    if d.faces_detected == 0
                ]
                io.log_info('Performing manual fix for %d images...' %
                            (len(fix_data)))
                fix_data = ExtractSubprocessor(
                    fix_data,
                    'landmarks-manual',
                    image_size,
                    jpeg_quality,
                    face_type,
                    output_debug_path if output_debug else None,
                    manual_window_size=manual_window_size,
                    device_config=device_config).run()
                fix_data = ExtractSubprocessor(
                    fix_data,
                    'final',
                    image_size,
                    jpeg_quality,
                    face_type,
                    output_debug_path if output_debug else None,
                    final_output_path=output_path,
                    device_config=device_config).run()
                faces_detected += sum([d.faces_detected for d in fix_data])

    io.log_info('-------------------------')
    io.log_info('Images found:        %d' % (images_found))
    io.log_info('Faces detected:      %d' % (faces_detected))
    io.log_info('-------------------------')
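A hypothetical invocation of this extractor entry point; the paths are illustrative, and input_path/output_path must already be Path objects since .exists() is called on them directly:

from pathlib import Path

main(detector='s3fd',
     input_path=Path('workspace/data_src'),
     output_path=Path('workspace/data_src/aligned'),
     output_debug=False,
     face_type='whole_face',
     max_faces_from_image=0,
     image_size=512,
     jpeg_quality=90)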
Beispiel #22
0
def trainerThread(s2c,
                  c2s,
                  e,
                  model_class_name=None,
                  saved_models_path=None,
                  training_data_src_path=None,
                  training_data_dst_path=None,
                  pretraining_data_path=None,
                  pretrained_model_path=None,
                  no_preview=False,
                  force_model_name=None,
                  force_gpu_idxs=None,
                  cpu_only=None,
                  silent_start=False,
                  execute_programs=None,
                  debug=False,
                  dump_ckpt=False,
                  **kwargs):
    while True:
        try:
            start_time = time.time()

            save_interval_min = 25

            if not training_data_src_path.exists():
                training_data_src_path.mkdir(exist_ok=True, parents=True)

            if not training_data_dst_path.exists():
                training_data_dst_path.mkdir(exist_ok=True, parents=True)

            if not saved_models_path.exists():
                saved_models_path.mkdir(exist_ok=True, parents=True)

            if dump_ckpt:
                cpu_only = True

            model = models.import_model(model_class_name)(
                is_training=not dump_ckpt,
                saved_models_path=saved_models_path,
                training_data_src_path=training_data_src_path,
                training_data_dst_path=training_data_dst_path,
                pretraining_data_path=pretraining_data_path,
                pretrained_model_path=pretrained_model_path,
                no_preview=no_preview,
                force_model_name=force_model_name,
                force_gpu_idxs=force_gpu_idxs,
                cpu_only=cpu_only,
                silent_start=silent_start,
                debug=debug)

            if dump_ckpt:
                e.set()
                model.dump_ckpt()
                break

            is_reached_goal = model.is_reached_iter_goal()

            shared_state = {'after_save': False}
            loss_string = ""
            save_iter = model.get_iter()

            def model_save():
                if not debug and not is_reached_goal:
                    io.log_info("Saving....", end='\r')
                    model.save()
                    shared_state['after_save'] = True

            def model_backup():
                if not debug and not is_reached_goal:
                    model.create_backup()

            def send_preview():
                if not debug:
                    previews = model.get_previews()
                    c2s.put({
                        'op': 'show',
                        'previews': previews,
                        'iter': model.get_iter(),
                        'loss_history': model.get_loss_history().copy()
                    })
                else:
                    previews = [('debug, press update for new',
                                 model.debug_one_iter())]
                    c2s.put({'op': 'show', 'previews': previews})
                e.set()  #Set the GUI Thread as Ready

            if model.get_target_iter() != 0:
                if is_reached_goal:
                    io.log_info(
                        'Model already trained to target iteration. You can use preview.'
                    )
                else:
                    io.log_info(
                        'Starting. Target iteration: %d. Press "Enter" to stop training and save model.'
                        % (model.get_target_iter()))
            else:
                io.log_info(
                    'Starting. Press "Enter" to stop training and save model.')

            last_save_time = time.time()

            execute_programs = [[x[0], x[1], time.time()]
                                for x in (execute_programs or [])]

            for i in itertools.count(0, 1):
                if not debug:
                    cur_time = time.time()

                    for x in execute_programs:
                        prog_time, prog, last_time = x
                        exec_prog = False
                        if prog_time > 0 and (cur_time -
                                              start_time) >= prog_time:
                            x[0] = 0
                            exec_prog = True
                        elif prog_time < 0 and (cur_time -
                                                last_time) >= -prog_time:
                            x[2] = cur_time
                            exec_prog = True

                        if exec_prog:
                            try:
                                exec(prog)
                            except Exception:
                                # note: binding the exception as 'e' here would shadow,
                                # then delete, the threading.Event argument of the same name
                                print("Unable to execute program: %s" % (prog))

                    if not is_reached_goal:

                        if model.get_iter() == 0:
                            io.log_info("")
                            io.log_info(
                                "Trying to do the first iteration. If an error occurs, reduce the model parameters."
                            )
                            io.log_info("")

                            if sys.platform[0:3] == 'win':
                                io.log_info("!!!")
                                io.log_info(
                                    "Windows 10 users IMPORTANT notice. You should set this setting in order to work correctly."
                                )
                                io.log_info("https://i.imgur.com/B7cmDCB.jpg")
                                io.log_info("!!!")

                        iter, iter_time = model.train_one_iter()

                        loss_history = model.get_loss_history()
                        time_str = time.strftime("[%H:%M:%S]")
                        if iter_time >= 10:
                            loss_string = "{0}[#{1:06d}][{2:.5s}s]".format(
                                time_str, iter, '{:0.4f}'.format(iter_time))
                        else:
                            loss_string = "{0}[#{1:06d}][{2:04d}ms]".format(
                                time_str, iter, int(iter_time * 1000))

                        if shared_state['after_save']:
                            shared_state['after_save'] = False

                            mean_loss = np.mean(loss_history[save_iter:iter],
                                                axis=0)

                            for loss_value in mean_loss:
                                loss_string += "[%.4f]" % (loss_value)

                            io.log_info(loss_string)

                            save_iter = iter
                        else:
                            for loss_value in loss_history[-1]:
                                loss_string += "[%.4f]" % (loss_value)

                            if io.is_colab():
                                io.log_info('\r' + loss_string, end='')
                            else:
                                io.log_info(loss_string, end='\r')

                        if model.get_iter() == 1:
                            model_save()

                        if model.get_target_iter() != 0 and model.is_reached_iter_goal():
                            io.log_info('Reached target iteration.')
                            model_save()
                            is_reached_goal = True
                            io.log_info('You can use preview now.')

                if not is_reached_goal and (time.time() - last_save_time
                                            ) >= save_interval_min * 60:
                    last_save_time += save_interval_min * 60
                    model_save()
                    send_preview()

                if i == 0:
                    if is_reached_goal:
                        model.pass_one_iter()
                    send_preview()

                if debug:
                    time.sleep(0.005)

                while not s2c.empty():
                    input = s2c.get()
                    op = input['op']
                    if op == 'save':
                        model_save()
                    elif op == 'backup':
                        model_backup()
                    elif op == 'preview':
                        if is_reached_goal:
                            model.pass_one_iter()
                        send_preview()
                    elif op == 'close':
                        model_save()
                        i = -1
                        break

                if i == -1:
                    break

            model.finalize()

        except Exception as e:
            print('Error: %s' % (str(e)))
            traceback.print_exc()
        break
    c2s.put({'op': 'close'})
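The execute_programs entries consumed above are [delay_seconds, python_source] pairs: per the loop logic, a positive delay runs the snippet once after that much total runtime, while a negative delay re-runs it every abs(delay) seconds. An illustrative value:

execute_programs = [
    [3600, "io.log_info('one hour of training elapsed')"],  # once, after 1 hour
    [-600, "open('heartbeat.txt', 'w').close()"],           # every 10 minutes
]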
Beispiel #23
0
def sort_by_hist(input_path):
    io.log_info ("Sorting by histogram similarity...")
    img_list = HistSsimSubprocessor(pathex.get_image_paths(input_path)).run()
    return img_list, []
Beispiel #24
0
def main(**kwargs):
    io.log_info("Running trainer.\r\n")

    no_preview = kwargs.get('no_preview', False)

    s2c = queue.Queue()
    c2s = queue.Queue()

    e = threading.Event()
    thread = threading.Thread(target=trainerThread,
                              args=(s2c, c2s, e),
                              kwargs=kwargs)
    thread.start()

    e.wait()  # wait for the initial load to occur

    if no_preview:
        while True:
            if not c2s.empty():
                input = c2s.get()
                op = input.get('op', '')
                if op == 'close':
                    break
            try:
                io.process_messages(0.1)
            except KeyboardInterrupt:
                s2c.put({'op': 'close'})
    else:
        wnd_name = "Training preview"
        io.named_window(wnd_name)
        io.capture_keys(wnd_name)

        previews = None
        loss_history = None
        selected_preview = 0
        update_preview = False
        is_showing = False
        is_waiting_preview = False
        show_last_history_iters_count = 0
        iter = 0
        while True:
            if not c2s.empty():
                input = c2s.get()
                op = input['op']
                if op == 'show':
                    is_waiting_preview = False
                    loss_history = input.get('loss_history', None)
                    previews = input.get('previews', None)
                    iter = input.get('iter', 0)
                    if previews is not None:
                        max_w = 0
                        max_h = 0
                        for (preview_name, preview_rgb) in previews:
                            (h, w, c) = preview_rgb.shape
                            max_h = max(max_h, h)
                            max_w = max(max_w, w)

                        max_size = 800
                        if max_h > max_size:
                            max_w = int(max_w / (max_h / max_size))
                            max_h = max_size

                        #make all previews size equal
                        for preview in previews[:]:
                            (preview_name, preview_rgb) = preview
                            (h, w, c) = preview_rgb.shape
                            if h != max_h or w != max_w:
                                previews.remove(preview)
                                previews.append(
                                    (preview_name,
                                     cv2.resize(preview_rgb, (max_w, max_h))))
                        selected_preview = selected_preview % len(previews)
                        update_preview = True
                elif op == 'close':
                    break

            if update_preview:
                update_preview = False

                selected_preview_name = previews[selected_preview][0]
                selected_preview_rgb = previews[selected_preview][1]
                (h, w, c) = selected_preview_rgb.shape

                # header: hotkey help lines drawn above the preview
                head_lines = [
                    '[s]:save [b]:backup [enter]:exit',
                    '[p]:update [space]:next preview [l]:change history range',
                    'Preview: "%s" [%d/%d]' %
                    (selected_preview_name, selected_preview + 1,
                     len(previews))
                ]
                head_line_height = 15
                head_height = len(head_lines) * head_line_height
                head = np.ones((head_height, w, c)) * 0.1

                for i in range(0, len(head_lines)):
                    t = i * head_line_height
                    b = (i + 1) * head_line_height
                    head[t:b, 0:w] += imagelib.get_text_image(
                        (head_line_height, w, c),
                        head_lines[i],
                        color=[0.8] * c)

                final = head

                if loss_history is not None:
                    if show_last_history_iters_count == 0:
                        loss_history_to_show = loss_history
                    else:
                        loss_history_to_show = loss_history[
                            -show_last_history_iters_count:]

                    lh_img = models.ModelBase.get_loss_history_preview(
                        loss_history_to_show, iter, w, c)
                    final = np.concatenate([final, lh_img], axis=0)

                final = np.concatenate([final, selected_preview_rgb], axis=0)
                final = np.clip(final, 0, 1)

                io.show_image(wnd_name, (final * 255).astype(np.uint8))
                is_showing = True

            key_events = io.get_key_events(wnd_name)
            key, chr_key, ctrl_pressed, alt_pressed, shift_pressed = key_events[
                -1] if len(key_events) > 0 else (0, 0, False, False, False)

            if key == ord('\n') or key == ord('\r'):
                s2c.put({'op': 'close'})
            elif key == ord('s'):
                s2c.put({'op': 'save'})
            elif key == ord('b'):
                s2c.put({'op': 'backup'})
            elif key == ord('p'):
                if not is_waiting_preview:
                    is_waiting_preview = True
                    s2c.put({'op': 'preview'})
            elif key == ord('l'):
                if show_last_history_iters_count == 0:
                    show_last_history_iters_count = 5000
                elif show_last_history_iters_count == 5000:
                    show_last_history_iters_count = 10000
                elif show_last_history_iters_count == 10000:
                    show_last_history_iters_count = 50000
                elif show_last_history_iters_count == 50000:
                    show_last_history_iters_count = 100000
                elif show_last_history_iters_count == 100000:
                    show_last_history_iters_count = 0
                update_preview = True
            elif key == ord(' '):
                selected_preview = (selected_preview + 1) % len(previews)
                update_preview = True

            try:
                io.process_messages(0.1)
            except KeyboardInterrupt:
                s2c.put({'op': 'close'})

        io.destroy_all_windows()
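A hypothetical launch of the trainer through this wrapper; the model name and paths are illustrative, and every kwarg is forwarded to trainerThread:

from pathlib import Path

main(model_class_name='SAEHD',
     saved_models_path=Path('workspace/model'),
     training_data_src_path=Path('workspace/data_src/aligned'),
     training_data_dst_path=Path('workspace/data_dst/aligned'),
     no_preview=False,
     execute_programs=[])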
Beispiel #25
0
    def process_info_generator(self):
        cpu_count = multiprocessing.cpu_count()
        io.log_info(f'Running on {cpu_count} CPUs')

        for i in range(cpu_count):
            yield 'CPU%d' % (i), {}, {}
Beispiel #26
0
def model_save():
    if not debug and not is_reached_goal:
        io.log_info("Saving....", end='\r')
        model.save()
        shared_state['after_save'] = True
Beispiel #27
0
def sort_by_absdiff(input_path):
    io.log_info ("Sorting by absolute difference...")

    is_sim = io.input_bool ("Sort by similar?", True, help_message="Otherwise sort by dissimilar.")

    from core.leras import nn

    device_config = nn.DeviceConfig.ask_choose_device(choose_only_one=True)
    nn.initialize( device_config=device_config, data_format="NHWC" )
    tf = nn.tf

    image_paths = pathex.get_image_paths(input_path)
    image_paths_len = len(image_paths)

    batch_size = 512
    batch_size_remain = image_paths_len % batch_size

    i_t = tf.placeholder (tf.float32, (None,None,None,None) )
    j_t = tf.placeholder (tf.float32, (None,None,None,None) )

    outputs_full = []
    outputs_remain = []

    for i in range(batch_size):
        diff_t = tf.reduce_sum( tf.abs(i_t-j_t[i]), axis=[1,2,3] )
        outputs_full.append(diff_t)
        if i < batch_size_remain:
            outputs_remain.append(diff_t)

    def func_bs_full(i,j):
        return nn.tf_sess.run (outputs_full, feed_dict={i_t:i,j_t:j})

    def func_bs_remain(i,j):
        return nn.tf_sess.run (outputs_remain, feed_dict={i_t:i,j_t:j})

    import h5py
    db_file_path = Path(tempfile.gettempdir()) / 'sort_cache.hdf5'
    db_file = h5py.File( str(db_file_path), "w")
    db = db_file.create_dataset("results", (image_paths_len,image_paths_len), compression="gzip")

    pg_len = image_paths_len // batch_size
    if batch_size_remain != 0:
        pg_len += 1

    pg_len = int( (  pg_len*pg_len - pg_len ) / 2 + pg_len )

    io.progress_bar ("Computing", pg_len)
    j=0
    while j < image_paths_len:
        j_images = [ cv2_imread(x) for x in image_paths[j:j+batch_size] ]
        j_images_len = len(j_images)

        func = func_bs_remain if image_paths_len-j < batch_size else func_bs_full

        i=0
        while i < image_paths_len:
            if i >= j:
                i_images = [ cv2_imread(x) for x in image_paths[i:i+batch_size] ]
                i_images_len = len(i_images)
                result = func (i_images,j_images)
                db[j:j+j_images_len,i:i+i_images_len] = np.array(result)
                io.progress_bar_inc(1)

            i += batch_size
        db_file.flush()
        j += batch_size

    io.progress_bar_close()

    next_id = 0
    sorted_order = [next_id]
    for i in io.progress_bar_generator ( range(image_paths_len-1), "Sorting" ):
        id_ar = np.concatenate ( [ db[:next_id,next_id], db[next_id,next_id:] ] )
        id_ar = np.argsort(id_ar)

        next_id = np.setdiff1d(id_ar, sorted_order, True)[ 0 if is_sim else -1]
        sorted_order += [next_id]
    db_file.close()
    db_file_path.unlink()

    img_list = [ (image_paths[x],) for x in sorted_order]
    return img_list, []
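A worked check of the progress-bar length computed above: it counts the upper-triangular batch-block pairs, diagonal included (the numbers are illustrative):

# 1280 images with batch_size 512 -> 3 blocks (512 + 512 + 256 images).
# Upper-triangle block pairs incl. the diagonal: (3*3 - 3)/2 + 3 = 6.
n_blocks = 1280 // 512 + (1 if 1280 % 512 != 0 else 0)
assert int((n_blocks * n_blocks - n_blocks) / 2 + n_blocks) == 6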
Beispiel #28
0
def main(model_class_name=None,
         saved_models_path=None,
         training_data_src_path=None,
         force_model_name=None,
         input_path=None,
         output_path=None,
         output_mask_path=None,
         aligned_path=None,
         force_gpu_idxs=None,
         cpu_only=None,
         src_src=False):
    io.log_info("Running merger.\r\n")

    try:
        if not input_path.exists():
            io.log_err('Input directory not found. Please ensure it exists.')
            return

        if not output_path.exists():
            output_path.mkdir(parents=True, exist_ok=True)

        if not output_mask_path.exists():
            output_mask_path.mkdir(parents=True, exist_ok=True)

        if not saved_models_path.exists():
            io.log_err('Model directory not found. Please ensure it exists.')
            return

        # Initialize model
        import models
        model = models.import_model(model_class_name)(
            is_training=False,
            src_src=src_src,
            saved_models_path=saved_models_path,
            force_gpu_idxs=force_gpu_idxs,
            cpu_only=cpu_only)

        predictor_func, predictor_input_shape, cfg = model.get_MergerConfig()

        # Preparing MP functions
        predictor_func = MPFunc(predictor_func)

        run_on_cpu = len(nn.getCurrentDeviceConfig().devices) == 0
        xseg_256_extract_func = MPClassFuncOnDemand(
            XSegNet,
            'extract',
            name='XSeg',
            resolution=256,
            weights_file_root=saved_models_path,
            place_model_on_cpu=True,
            run_on_cpu=run_on_cpu)

        face_enhancer_func = MPClassFuncOnDemand(FaceEnhancer,
                                                 'enhance',
                                                 place_model_on_cpu=True,
                                                 run_on_cpu=run_on_cpu)

        if src_src:
            is_interactive = False
        else:
            is_interactive = io.input_bool(
                "Use interactive merger?",
                True) if not io.is_colab() else False

        if not is_interactive:
            cfg.ask_settings()

        subprocess_count = io.input_int(
            "Number of workers?",
            max(8, multiprocessing.cpu_count()),
            valid_range=[1, multiprocessing.cpu_count()],
            help_message=
            "Specify the number of threads to process. A low value may affect performance. A high value may result in memory error. The value may not be greater than CPU cores."
        )

        input_path_image_paths = pathex.get_image_paths(input_path)

        if cfg.type == MergerConfig.TYPE_MASKED:
            if not aligned_path.exists():
                io.log_err(
                    'Aligned directory not found. Please ensure it exists.')
                return

            packed_samples = None
            try:
                packed_samples = samplelib.PackedFaceset.load(aligned_path)
            except:
                io.log_err(
                    f"Error occured while loading samplelib.PackedFaceset.load {str(aligned_path)}, {traceback.format_exc()}"
                )

            if packed_samples is not None:
                io.log_info("Using packed faceset.")

                def generator():
                    for sample in io.progress_bar_generator(
                            packed_samples, "Collecting alignments"):
                        filepath = Path(sample.filename)
                        yield filepath, DFLIMG.load(
                            filepath,
                            loader_func=lambda x: sample.read_raw_file())
            else:

                def generator():
                    for filepath in io.progress_bar_generator(
                            pathex.get_image_paths(aligned_path),
                            "Collecting alignments"):
                        filepath = Path(filepath)
                        yield filepath, DFLIMG.load(filepath)

            alignments = {}
            multiple_faces_detected = False

            for filepath, dflimg in generator():
                if dflimg is None or not dflimg.has_data():
                    io.log_err(f"{filepath.name} is not a dfl image file")
                    continue

                source_filename = filepath.name if src_src else dflimg.get_source_filename()
                if source_filename is None:
                    continue

                source_filepath = Path(source_filename)
                source_filename_stem = source_filepath.stem

                if source_filename_stem not in alignments.keys():
                    alignments[source_filename_stem] = []

                alignments_ar = alignments[source_filename_stem]
                alignments_ar.append(
                    (dflimg.get_source_landmarks(), filepath, source_filepath,
                     dflimg.get_image_to_face_mat(), dflimg.get_shape()[0],
                     dflimg.get_face_type()))

                if len(alignments_ar) > 1:
                    multiple_faces_detected = True

            if multiple_faces_detected:
                io.log_info("")
                io.log_info(
                    "Warning: multiple faces detected. Only one alignment file should refer one source file."
                )
                io.log_info("")

            for a_key in list(alignments.keys()):
                a_ar = alignments[a_key]
                if len(a_ar) > 1:
                    for _, filepath, source_filepath, _, _, _ in a_ar:
                        io.log_info(
                            f"alignment {filepath.name} refers to {source_filepath.name} "
                        )
                    io.log_info("")
                alignments[a_key] = [(a[0], a[3], a[4], a[5]) for a in a_ar]

            if multiple_faces_detected:
                io.log_info(
                    "It is strongly recommended to process the faces separatelly."
                )
                io.log_info(
                    "Use 'recover original filename' to determine the exact duplicates."
                )
                io.log_info("")

            frames = []
            for p in input_path_image_paths:
                alignment = alignments.get(Path(p).stem, None)
                landmarks_list = None
                image_to_face_mat = None
                aligned_size = None
                aligned_face_type = None
                if alignment is not None:
                    landmarks_list, image_to_face_mat, aligned_size, aligned_face_type = alignment[0]
                    landmarks_list = [landmarks_list]
                frame_info = FrameInfo(filepath=Path(p),
                                       landmarks_list=landmarks_list,
                                       image_to_face_mat=image_to_face_mat,
                                       aligned_size=aligned_size,
                                       face_type=aligned_face_type)
                frame = InteractiveMergerSubprocessor.Frame(
                    frame_info=frame_info)
                frames.append(frame)

            if multiple_faces_detected:
                io.log_info(
                    "Warning: multiple faces detected. Motion blur will not be used."
                )
                io.log_info("")
            elif src_src:
                io.log_info(
                    "SRC-SRC mode configured, skipping motion blur calculation..."
                )
                io.log_info("")
            else:
                s = 256
                local_pts = [(s // 2 - 1, s // 2 - 1),
                             (s // 2 - 1, 0)]  #center+up
                frames_len = len(frames)
                for i in io.progress_bar_generator(range(len(frames)),
                                                   "Computing motion vectors"):
                    fi_prev = frames[max(0, i - 1)].frame_info
                    fi = frames[i].frame_info
                    fi_next = frames[min(i + 1, frames_len - 1)].frame_info
                    if len(fi_prev.landmarks_list) == 0 or \
                       len(fi.landmarks_list) == 0 or \
                       len(fi_next.landmarks_list) == 0:
                        continue

                    mat_prev = LandmarksProcessor.get_transform_mat(
                        fi_prev.landmarks_list[0], s, face_type=FaceType.FULL)
                    mat = LandmarksProcessor.get_transform_mat(
                        fi.landmarks_list[0], s, face_type=FaceType.FULL)
                    mat_next = LandmarksProcessor.get_transform_mat(
                        fi_next.landmarks_list[0], s, face_type=FaceType.FULL)

                    pts_prev = LandmarksProcessor.transform_points(
                        local_pts, mat_prev, True)
                    pts = LandmarksProcessor.transform_points(
                        local_pts, mat, True)
                    pts_next = LandmarksProcessor.transform_points(
                        local_pts, mat_next, True)

                    motion_vector = pts_next[0] - pts_prev[0]
                    fi.motion_power = npla.norm(motion_vector)

                    motion_vector = motion_vector / fi.motion_power if fi.motion_power != 0 else np.array(
                        [0, 0], dtype=np.float32)

                    fi.motion_deg = -math.atan2(
                        motion_vector[1], motion_vector[0]) * 180 / math.pi

        if len(frames) == 0:
            io.log_info("No frames to merge in input_dir.")
        else:
            InteractiveMergerSubprocessor(
                is_interactive=is_interactive,
                merger_session_filepath=model.get_strpath_storage_for_file('merger_session.dat'),
                predictor_func=predictor_func,
                predictor_input_shape=predictor_input_shape,
                face_enhancer_func=face_enhancer_func,
                xseg_256_extract_func=xseg_256_extract_func,
                merger_config=cfg,
                frames=frames,
                frames_root_path=input_path,
                output_path=output_path,
                output_mask_path=output_mask_path,
                model_iter=model.get_iter(),
                subprocess_count=subprocess_count,
                src_src=src_src,
            ).run()

        model.finalize()

    except Exception:
        print(traceback.format_exc())
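A worked instance of the motion fields computed in the merger above, with hypothetical face-center points; y grows downward in image coordinates, hence the sign flip on the angle:

import math
import numpy as np

pts_prev0 = np.array([100.0, 100.0])  # face center in the previous frame
pts_next0 = np.array([104.0, 97.0])   # face center in the next frame

motion_vector = pts_next0 - pts_prev0            # [4, -3]
motion_power = np.linalg.norm(motion_vector)     # 5.0
motion_vector = motion_vector / motion_power     # [0.8, -0.6]
motion_deg = -math.atan2(motion_vector[1], motion_vector[0]) * 180 / math.pi
# -> ~36.87 degrees: the face is moving right and up on screen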
Beispiel #29
0
def extract_vggface2_dataset(input_dir, device_args={}):
    multi_gpu = device_args.get('multi_gpu', False)
    cpu_only = device_args.get('cpu_only', False)

    input_path = Path(input_dir)
    if not input_path.exists():
        raise ValueError('Input directory not found. Please ensure it exists.')

    bb_csv = input_path / 'loose_bb_train.csv'
    if not bb_csv.exists():
        raise ValueError('loose_bb_train.csv not found. Please ensure it exists.')

    bb_lines = bb_csv.read_text().split('\n')
    bb_lines.pop(0)

    bb_dict = {}
    for line in bb_lines:
        parts = line.split(',')
        if len(parts) != 5:
            continue  # skip the trailing empty line and malformed rows
        name, l, t, w, h = parts
        name = name[1:-1]
        l, t, w, h = [int(x) for x in (l, t, w, h)]
        bb_dict[name] = (l, t, w, h)

    output_path = input_path.parent / (input_path.name + '_out')

    dir_names = pathex.get_all_dir_names(input_path)

    if not output_path.exists():
        output_path.mkdir(parents=True, exist_ok=True)

    data = []
    for dir_name in io.progress_bar_generator(dir_names, "Collecting"):
        cur_input_path = input_path / dir_name
        cur_output_path = output_path / dir_name

        if not cur_output_path.exists():
            cur_output_path.mkdir(parents=True, exist_ok=True)

        input_path_image_paths = pathex.get_image_paths(cur_input_path)

        for filename in input_path_image_paths:
            filename_path = Path(filename)

            name = filename_path.parent.name + '/' + filename_path.stem
            if name not in bb_dict:
                continue

            l, t, w, h = bb_dict[name]
            if min(w, h) < 128:
                continue

            data += [
                ExtractSubprocessor.Data(filename=filename,
                                         rects=[(l, t, l + w, t + h)],
                                         landmarks_accurate=False,
                                         force_output_path=cur_output_path)
            ]

    face_type = FaceType.fromString('full_face')

    io.log_info('Performing 2nd pass...')
    data = ExtractSubprocessor(data,
                               'landmarks',
                               256,
                               face_type,
                               debug_dir=None,
                               multi_gpu=multi_gpu,
                               cpu_only=cpu_only,
                               manual=False).run()

    io.log_info('Performing 3rd pass...')
    ExtractSubprocessor(data,
                        'final',
                        256,
                        face_type,
                        debug_dir=None,
                        multi_gpu=multi_gpu,
                        cpu_only=cpu_only,
                        manual=False,
                        final_output_path=None).run()
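A hedged sketch of the loose_bb_train.csv rows the collector above expects (the values are illustrative): the name field is quoted, hence the name[1:-1] slice, and boxes under 128 px on a side are skipped:

line = '"n000002/0001_01",110,95,180,230'
name, l, t, w, h = line.split(',')
name = name[1:-1]                            # -> n000002/0001_01
l, t, w, h = [int(x) for x in (l, t, w, h)]  # -> (110, 95, 180, 230)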
Beispiel #30
0
def extract_src():
    import os
    import shutil

    root_dir = get_root_path()
    extract_workspace = os.path.join(root_dir, "extract_workspace")
    target_dir = os.path.join(extract_workspace, "aligned_")

    valid_exts = [".mp4", ".avi", ".wmv", ".mkv", ".ts"]

    fps = io.input_int(
        "Enter FPS ( ?:help skip:fullfps ) : ",
        0,
        help_message=
        "How many frames of every second of the video will be extracted.")

    def file_filter(file):
        if os.path.isdir(os.path.join(extract_workspace, file)):
            return False
        ext = os.path.splitext(file)[-1]
        if ext not in valid_exts:
            return False
        return True

    files = list(filter(file_filter, os.listdir(extract_workspace)))
    files.sort()
    pos = 0
    for file in files:
        pos += 1
        io.log_info("@@@@@  Start Process %s, %d / %d" %
                    (file, pos, len(files)))
        # extract frames from the video
        input_file = os.path.join(extract_workspace, file)
        output_dir = os.path.join(extract_workspace, "extract_images")
        if not os.path.exists(output_dir):
            os.mkdir(output_dir)
        for f in os.listdir(output_dir):
            os.remove(os.path.join(output_dir, f))
        dfl.dfl_extract_video(input_file, output_dir, fps)
        io.log_info("@@@@@  Start Extract %s, %d / %d" %
                    (file, pos, len(files)))
        # extract faces from the frames
        input_dir = output_dir
        output_dir = os.path.join(extract_workspace, "_current")
        dfl.dfl_extract_faces(input_dir, output_dir, output_debug=True)
        # move the results into the target set
        io.log_info("@@@@@  Start Move %s, %d / %d" % (file, pos, len(files)))
        if not os.path.exists(target_dir):
            os.mkdir(target_dir)
        ts = get_time_str()
        for f in os.listdir(output_dir):
            src = os.path.join(output_dir, f)
            dst = os.path.join(target_dir, "%s_%s" % (ts, f))
            shutil.move(src, dst)
        # everything done, delete the source video
        io.log_info("@@@@@  Finish %s, %d / %d" % (file, pos, len(files)))
        os.remove(os.path.join(extract_workspace, file))
        os.rmdir(output_dir)
    # sort the results when all videos are done
    io.log_info("@@@@@  Sort By Hist")
    dfl.dfl_sort_by_hist(target_dir)
    beep()
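extract_src assumes a fixed workspace layout under the repository root, inferred from the paths above (get_root_path, the dfl.* wrappers and beep are project helpers):

# <root>/extract_workspace/
#     some_video.mp4       any of .mp4/.avi/.wmv/.mkv/.ts, deleted once processed
#     extract_images/      frames of the current video, cleared per video
#     _current/            freshly extracted faces for the current video
#     aligned_/            accumulated results, prefixed with a timestamp and
#                          finally sorted by histogram similarity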