Example #1
    def run(self) -> None:
        os.environ["CUDA_DEVICES_ORDER"] = "PCI_BUS_ID"
        os.environ["CUDA_VISIBLE_DEVICES"] = str(self.gpu_id)

        device = torch.device("cuda:0")

        processor = Preprocessor(cfg=self.opt,
                                 proc_size=self.opt.image_size,
                                 device=device)

        while self.is_run and not self.queue.empty():
            try:
                meta_proc, is_ref = self.queue.get()

                processed_info = ProcessInfo(meta_proc)
                processed_info.deserialize()

                if is_ref:
                    processor.execute(
                        processed_info,
                        crop_size=self.opt.image_size,
                        num_workers=self.opt.num_workers,
                        estimate_boxes_first=self.opt.Preprocess.estimate_boxes_first,
                        factor=self.opt.Preprocess.Cropper.ref_crop_factor,
                        use_simplify=self.opt.Preprocess.use_smplify,
                        temporal=True,
                        filter_invalid=True,
                        inpaintor=False,
                        parser=False,
                        find_front=False,
                        visual=False)
                else:
                    processor.execute(
                        processed_info,
                        crop_size=self.opt.image_size,
                        num_workers=self.opt.num_workers,
                        estimate_boxes_first=self.opt.Preprocess.estimate_boxes_first,
                        factor=self.opt.Preprocess.Cropper.src_crop_factor,
                        use_simplify=self.opt.Preprocess.use_smplify,
                        temporal=False,
                        filter_invalid=True,
                        find_front=True,
                        parser=True,
                        num_candidate=self.opt.Preprocess.FrontInfo.NUM_CANDIDATE,
                        render_size=self.opt.Preprocess.FrontInfo.RENDER_SIZE,
                        inpaintor=True,
                        dilate_kernel_size=self.opt.Preprocess.BackgroundInpaintor.dilate_kernel_size,
                        dilate_iter_num=self.opt.Preprocess.BackgroundInpaintor.dilate_iter_num,
                        bg_replace=self.opt.Preprocess.BackgroundInpaintor.bg_replace,
                        visual=True,
                    )

            except Exception("model error!") as e:
                print(e.message)
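
A minimal launch sketch for such a consumer, assuming it is a multiprocessing.Process subclass (here called HumanEstimateConsumer, a hypothetical name) whose constructor takes the queue, a GPU id, and the options; the pattern mirrors digital_deform in Example #4:

from multiprocessing import Queue

def launch_consumers(opt, meta_proc_list):
    # fill the queue with (meta_proc, is_ref) work items, as consumed by run()
    que = Queue()
    for meta_proc in meta_proc_list:
        que.put((meta_proc, False))

    # one consumer per GPU; each pins its device via CUDA_VISIBLE_DEVICES in run().
    # HumanEstimateConsumer is a hypothetical Process subclass with the run() above.
    consumers = [HumanEstimateConsumer(que, gpu_id, opt) for gpu_id in opt.gpu_ids]
    for consumer in consumers:
        consumer.start()
    for consumer in consumers:
        consumer.join()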
Example #2
    def test_01_deserialize(self):
        meta_input = MetaInputInfo(
            path="/p300/tpami/datasets/iPER/images_HD/004/1/2", name="004/1/2")

        meta_process = MetaProcess(
            meta_input,
            root_primitives_dir="/p300/tpami/datasets/iPER/primitives")
        proc_info = ProcessInfo(meta_process)
        proc_info.deserialize()

        src_infos = proc_info.convert_to_src_info(num_source=8)
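
A hedged continuation of this test, assuming convert_to_src_info returns a dict with a "length" entry (the key used in Example #6 below):

        # hypothetical assertions; the "length" key is assumed from Example #6
        self.assertIsInstance(src_infos, dict)
        self.assertGreater(src_infos["length"], 0)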
Example #3
def post_update_opt(opt):
    """
    Post update the configurations based on the results of preprocessing.
    Args:
        opt:

    Returns:

    """

    meta_src_proc = opt.meta_data["meta_src"]
    valid_meta_src_proc = []

    cur_num_source = 1
    for meta_proc in meta_src_proc:
        process_info = ProcessInfo(meta_proc)
        process_info.deserialize()

        # check it has been processed successfully
        if process_info.check_has_been_processed(process_info.vid_infos,
                                                 verbose=False):
            valid_meta_src_proc.append(meta_proc)
            num_source = process_info.num_sources()
            cur_num_source = max(cur_num_source, num_source)
        else:
            # otherwise, clean up this input
            process_info.declare()

    meta_ref_proc = opt.meta_data["meta_ref"]
    valid_meta_ref_proc = []
    for meta_proc in meta_ref_proc:
        if meta_proc.check_has_been_processed(verbose=False):
            valid_meta_ref_proc.append(meta_proc)

    # 3.1 update the personalization.txt
    checkpoints_dir = opt.meta_data["checkpoints_dir"]
    with open(os.path.join(checkpoints_dir, "personalization.txt"),
              "w") as writer:
        for meta_src in valid_meta_src_proc:
            writer.write(meta_src.primitives_dir + "\n")

    # update the number of sources
    print(f"the current number of sources is {cur_num_source}, "
          f"while the pre-defined number of sources is {opt.num_source}.")
    opt.num_source = min(cur_num_source, opt.num_source)

    # update the source information
    opt.meta_data["meta_src"] = valid_meta_src_proc

    # update the reference information
    opt.meta_data["meta_ref"] = valid_meta_ref_proc

    return opt
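
For illustration, the num_source update above keeps the largest number of sources actually available, capped by the pre-defined value; a standalone sketch of that rule with assumed numbers:

# hypothetical values, for illustration of the num_source update rule
available = [4, 2]                       # num_sources() of each valid source
cur_num_source = max([1] + available)    # -> 4
opt_num_source = min(cur_num_source, 8)  # pre-defined 8 -> updated to 4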
Example #4
def digital_deform(opt) -> None:
    """
        Digitalizing the source images.
    Args:
        opt:

    Returns:
        None
    """

    print("\t\tPre-processing: digital deformation start...")

    que = Queue()
    need_to_process = 0

    meta_src_proc = opt.meta_data["meta_src"]

    for i, meta_proc in enumerate(meta_src_proc):

        processed_info = ProcessInfo(meta_proc)
        processed_info.deserialize()

        if not HumanDigitalDeformConsumer.check_has_been_deformed(
                processed_info):
            que.put(processed_info)
            need_to_process += 1

    if need_to_process > 0:
        MAX_PER_GPU_PROCESS = opt.Preprocess.MAX_PER_GPU_PROCESS
        per_gpu_process = int(np.ceil(need_to_process / len(opt.gpu_ids)))
        candidate_gpu_process = opt.gpu_ids * min(MAX_PER_GPU_PROCESS,
                                                  per_gpu_process)
        num_gpu_process = min(len(candidate_gpu_process), need_to_process)

        consumers = []
        for gpu_process_id in range(num_gpu_process):
            gpu_id = candidate_gpu_process[gpu_process_id]
            consumer = HumanDigitalDeformConsumer(que, gpu_id, opt)
            consumers.append(consumer)

        # all processors start
        for consumer in consumers:
            consumer.start()

        # all processors join
        for consumer in consumers:
            consumer.join()

    print("\t\tPre-processing: digital deformation completed...")
Example #5
def process_data():
    global args, iPER_train_txt, iPER_val_txt

    # 1. dump videos to frames

    # 1. preprocess
    vid_names = get_video_dirs(iPER_train_txt) + get_video_dirs(iPER_val_txt)
    src_paths = prepare_src_path(vid_names)

    args.src_path = "|".join(src_paths)

    # print(args.src_path)

    # set this as empty when preprocessing the training dataset.
    args.ref_path = ""

    cfg = setup(args)

    # 1. human estimation, including 2D pose, tracking, 3D pose, parsing, and front estimation.
    human_estimate(opt=cfg)

    # 2. digital deformation.
    digital_deform(opt=cfg)

    # 3. check
    meta_src_proc = cfg.meta_data["meta_src"]
    invalid_meta_process = []
    print(f"Check all videos have been processed...")
    for meta_proc in tqdm(meta_src_proc):
        process_info = ProcessInfo(meta_proc)
        process_info.deserialize()

        # check it has been processed successfully
        if not process_info.check_has_been_processed(process_info.vid_infos,
                                                     verbose=False):
            invalid_meta_process.append(meta_proc)

    num_invalid = len(invalid_meta_process)
    if num_invalid > 0:
        for meta_proc in invalid_meta_process:
            print(f"invalid meta proc {meta_proc}")

    else:
        print(f"process successfully.")
Example #6
    def _read_vids_info(self):
        vid_infos_list = []

        for meta_process in self._meta_process_list:
            # print(vid_dir)
            process_info = ProcessInfo(meta_process)
            process_info.deserialize()
            vid_info = process_info.convert_to_src_info(self._opt.num_source)

            length = vid_info["length"]

            if length == 0:
                continue

            vid_info["probs"] = self._sample_probs(vid_info)

            vid_infos_list.append(vid_info)
            self._num_videos += 1
            self._dataset_size += vid_info["length"]

        self._vids_info = vid_infos_list
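
_sample_probs is not shown in this example; a minimal sketch assuming uniform per-frame sampling probabilities (a hypothetical reconstruction, not the actual implementation):

import numpy as np

def _sample_probs(self, vid_info):
    # uniform probability over the frames of the video (assumption; the
    # real implementation may weight frames, e.g. by pose diversity)
    length = vid_info["length"]
    return np.full(length, 1.0 / length, dtype=np.float32)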
Example #7
def process_data():
    # 1. preprocess
    src_paths = prepare_src_path()

    args.src_path = "|".join(src_paths)

    print(args.src_path)

    # set this as empty when preprocessing the training dataset.
    args.ref_path = ""

    cfg = setup(args)

    # 1. human estimation, including 2D pose, tracking, 3D pose, parsing, and front estimation.
    human_estimate(opt=cfg)

    # 2. digital deformation.
    digital_deform(opt=cfg)

    # 3. check
    meta_src_proc = cfg.meta_data["meta_src"]
    invalid_meta_process = []
    for meta_proc in meta_src_proc:
        process_info = ProcessInfo(meta_proc)
        process_info.deserialize()

        # check it has been processed successfully
        if not process_info.check_has_been_processed(process_info.vid_infos,
                                                     verbose=False):
            invalid_meta_process.append(meta_proc)

    num_invalid = len(invalid_meta_process)
    if num_invalid > 0:
        for meta_proc in invalid_meta_process:
            print(f"invalid meta proc {meta_proc}")

    else:
        print(f"process successfully.")
Example #8
def merge_all_source_processed_info(opt, meta_src_proc):
    # merge all source processed information
    vid_info_list = []
    for i, meta_src in enumerate(meta_src_proc):
        """
        meta_input:
                path: /p300/tpami/neuralAvatar/sources/fange_1/fange_1_ns=2
                bg_path: /p300/tpami/neuralAvatar/sources/fange_1/IMG_7225.JPG
                name: fange_1
        primitives_dir: ../tests/debug/primitives/fange_1
        processed_dir: ../tests/debug/primitives/fange_1/processed
        vid_info_path: ../tests/debug/primitives/fange_1/processed/vid_info.pkl
        """
        src_proc_info = ProcessInfo(meta_src)
        src_proc_info.deserialize()

        src_info = src_proc_info.convert_to_src_info(num_source=src_proc_info.num_sources())
        vid_info_list.append(src_info)

    src_info_for_inference = get_src_info_for_swapper_inference(opt, vid_info_list)

    return src_info_for_inference
Example #9
def imitate(opt):
    """

    Args:
        opt:

    Returns:
        all_meta_outputs (list of MetaOutput):

    """

    print("Step 3: running imitator.")

    if opt.ip:
        from iPERCore.tools.utils.visualizers.visdom_visualizer import VisdomVisualizer
        visualizer = VisdomVisualizer(env=opt.model_id,
                                      ip=opt.ip,
                                      port=opt.port)
    else:
        visualizer = None

    # set imitator
    imitator = ModelsFactory.get_by_name("imitator", opt)

    meta_src_proc = opt.meta_data["meta_src"]
    meta_ref_proc = opt.meta_data["meta_ref"]

    all_meta_outputs = []
    for i, meta_src in enumerate(meta_src_proc):
        """
        meta_input:
                path: /p300/tpami/neuralAvatar/sources/fange_1/fange_1_ns=2
                bg_path: /p300/tpami/neuralAvatar/sources/fange_1/IMG_7225.JPG
                name: fange_1
        primitives_dir: ../tests/debug/primitives/fange_1
        processed_dir: ../tests/debug/primitives/fange_1/processed
        vid_info_path: ../tests/debug/primitives/fange_1/processed/vid_info.pkl
        """
        src_proc_info = ProcessInfo(meta_src)
        src_proc_info.deserialize()

        src_info = src_proc_info.convert_to_src_info(num_source=opt.num_source)
        src_info_for_inference = get_src_info_for_inference(opt, src_info)

        # source setup
        imitator.source_setup(src_path=src_info_for_inference["paths"],
                              src_smpl=src_info_for_inference["smpls"],
                              masks=src_info_for_inference["masks"],
                              bg_img=src_info_for_inference["bg"],
                              offsets=src_info_for_inference["offsets"],
                              links_ids=src_info_for_inference["links"],
                              visualizer=visualizer)

        for j, meta_ref in enumerate(meta_ref_proc):
            """
            meta_input:
                path: /p300/tpami/neuralAvatar/references/videos/bantangzhuyi_1.mp4
                bg_path: 
                name: bantangzhuyi_1
                audio: /p300/tpami/neuralAvatar/references/videos/bantangzhuyi_1.mp3
                fps: 30.02
                pose_fc: 400.0
                cam_fc: 150.0
            primitives_dir: ../tests/debug/primitives/bantangzhuyi_1
            processed_dir: ../tests/debug/primitives/bantangzhuyi_1/processed
            vid_info_path: ../tests/debug/primitives/bantangzhuyi_1/processed/vid_info.pkl
            """
            meta_output = MetaImitateOutput(meta_src, meta_ref)

            ref_proc_info = ProcessInfo(meta_ref)
            ref_proc_info.deserialize()

            ref_info = ref_proc_info.convert_to_ref_info()

            results_dict = call_imitator_inference(
                opt,
                imitator,
                meta_output,
                ref_paths=ref_info["images"],
                ref_smpls=ref_info["smpls"],
                visualizer=visualizer)

            # save to video
            fuse_src_ref_multi_outputs(meta_output.out_mp4,
                                       src_info_for_inference["paths"],
                                       results_dict["ref_imgs_paths"],
                                       results_dict["outputs"],
                                       audio_path=meta_output.audio,
                                       fps=meta_output.fps,
                                       image_size=opt.image_size,
                                       pool_size=opt.num_workers)

            all_meta_outputs.append(meta_output)

    for meta_output in all_meta_outputs:
        print(meta_output)

    print("Step 3: running imitator done.")
    return all_meta_outputs
Example #10
def swap(opt):
    """

    Args:
        opt:

    Returns:
        all_meta_outputs (list of MetaOutput):

    """

    print("Step 3: running swapper.")

    if opt.ip:
        from iPERCore.tools.utils.visualizers.visdom_visualizer import VisdomVisualizer
        visualizer = VisdomVisualizer(env=opt.model_id, ip=opt.ip, port=opt.port)
    else:
        visualizer = None

    # set swapper
    swapper = ModelsFactory.get_by_name("swapper", opt)

    # merge all sources
    meta_src_proc = opt.meta_data["meta_src"]
    src_info_for_inference = merge_all_source_processed_info(opt, meta_src_proc)

    # update number source
    opt.num_source = sum(src_info_for_inference["num_source"])
    print(f"update the number of sources {src_info_for_inference['num_source']} = {opt.num_source}")

    # source setup
    swapper.swap_source_setup(
        src_path_list=src_info_for_inference["paths"],
        src_smpl_list=src_info_for_inference["smpls"],
        masks_list=src_info_for_inference["masks"],
        bg_img_list=src_info_for_inference["bg"],
        offsets_list=src_info_for_inference["offsets"],
        links_ids_list=src_info_for_inference["links"],
        swap_parts=src_info_for_inference["swap_parts"],
        visualizer=visualizer,
        swap_masks=None
    )

    # call swap
    all_meta_outputs = []

    # check whether it has reference or not
    meta_ref_proc = opt.meta_data["meta_ref"]

    for j, meta_ref in enumerate(meta_ref_proc):
        """
        meta_input:
            path: /p300/tpami/neuralAvatar/references/videos/bantangzhuyi_1.mp4
            bg_path: 
            name: bantangzhuyi_1
            audio: /p300/tpami/neuralAvatar/references/videos/bantangzhuyi_1.mp3
            fps: 30.02
            pose_fc: 400.0
            cam_fc: 150.0
        primitives_dir: ../tests/debug/primitives/bantangzhuyi_1
        processed_dir: ../tests/debug/primitives/bantangzhuyi_1/processed
        vid_info_path: ../tests/debug/primitives/bantangzhuyi_1/processed/vid_info.pkl
        """
        meta_output = MetaSwapImitateOutput(meta_src_proc, meta_ref)

        ref_proc_info = ProcessInfo(meta_ref)
        ref_proc_info.deserialize()

        ref_info = ref_proc_info.convert_to_ref_info()

        results_dict = call_imitator_inference(
            opt, swapper, meta_output,
            ref_paths=ref_info["images"],
            ref_smpls=ref_info["smpls"],
            visualizer=visualizer
        )

        # save to video
        fuse_src_ref_multi_outputs(
            meta_output.out_mp4, src_info_for_inference["src_paths"],
            results_dict["ref_imgs_paths"], results_dict["outputs"],
            audio_path=meta_output.audio, fps=meta_output.fps,
            image_size=opt.image_size, pool_size=opt.num_workers
        )

        all_meta_outputs.append(meta_output)

    for meta_output in all_meta_outputs:
        print(meta_output)

    print("Step 3: running swapper done.")
    return all_meta_outputs
Example #11
def novel_view(opt):
    """

    Args:
        opt:

    Returns:
        all_meta_outputs (list of MetaOutput):

    """

    print("Step 3: running novel viewer.")

    if opt.ip:
        from iPERCore.tools.utils.visualizers.visdom_visualizer import VisdomVisualizer
        visualizer = VisdomVisualizer(env=opt.model_id,
                                      ip=opt.ip,
                                      port=opt.port)
    else:
        visualizer = None

    # set viewer
    viewer = ModelsFactory.get_by_name("viewer", opt)

    meta_src_proc = opt.meta_data["meta_src"]

    all_meta_outputs = []
    for i, meta_src in enumerate(meta_src_proc):
        """
        meta_input:
                path: /p300/tpami/neuralAvatar/sources/fange_1/fange_1_ns=2
                bg_path: /p300/tpami/neuralAvatar/sources/fange_1/IMG_7225.JPG
                name: fange_1
        primitives_dir: ../tests/debug/primitives/fange_1
        processed_dir: ../tests/debug/primitives/fange_1/processed
        vid_info_path: ../tests/debug/primitives/fange_1/processed/vid_info.pkl
        """
        src_proc_info = ProcessInfo(meta_src)
        src_proc_info.deserialize()

        src_info = src_proc_info.convert_to_src_info(num_source=opt.num_source)
        src_info_for_inference = get_src_info_for_inference(opt, src_info)

        # source setup
        viewer.source_setup(src_path=src_info_for_inference["paths"],
                            src_smpl=src_info_for_inference["smpls"],
                            masks=src_info_for_inference["masks"],
                            bg_img=src_info_for_inference["bg"],
                            offsets=src_info_for_inference["offsets"],
                            links_ids=src_info_for_inference["links"],
                            visualizer=visualizer)

        novel_smpls = create_T_pose_novel_view_smpl(length=180)
        novel_smpls[:, -10:] = src_info_for_inference["smpls"][0, -10:]

        if not opt.T_pose:
            novel_smpls[:, 6:-10] = src_info_for_inference["smpls"][0, 6:-10]

        novel_smpls = add_hands_params_to_smpl(novel_smpls,
                                               viewer.body_rec.np_hands_mean)
        meta_output = MetaNovelViewOutput(meta_src)

        out_imgs_dir = clear_dir(meta_output.out_img_dir)
        outputs = viewer.inference(tgt_smpls=novel_smpls,
                                   cam_strategy="smooth",
                                   output_dir=out_imgs_dir,
                                   visualizer=visualizer,
                                   verbose=True)

        fuse_source_output(meta_output.out_mp4,
                           src_info_for_inference["paths"],
                           outputs,
                           audio_path=None,
                           fps=25,
                           image_size=opt.image_size,
                           pool_size=opt.num_workers)

        all_meta_outputs.append(meta_output)

    for meta_output in all_meta_outputs:
        print(meta_output)

    print("Step 3: running novel viewer done.")
    return all_meta_outputs
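
create_T_pose_novel_view_smpl is not shown; a minimal sketch assuming the 85-dim SMPL vector layout [cam (3), global rotation (3), body pose (69), shape (10)] implied by the slicing above (hypothetical reconstruction):

import numpy as np

def create_T_pose_novel_view_smpl(length=180):
    # zero body pose (T-pose) and zero shape; spin the global orientation
    # around the vertical axis over `length` frames (assumption)
    smpls = np.zeros((length, 85), dtype=np.float32)
    yaws = np.linspace(0, 2 * np.pi, length, endpoint=False)
    smpls[:, 4] = yaws  # axis-angle (0, yaw, 0): rotation about the y-axis
    return smpls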
Example #12
def imitate(opt):
    """

    Args:
        opt:

    Returns:
        all_meta_outputs (list of MetaOutput):

    """

    print("Step 3: running imitator.")

    if opt.ip:
        from iPERCore.tools.utils.visualizers.visdom_visualizer import VisdomVisualizer
        visualizer = VisdomVisualizer(env=opt.model_id,
                                      ip=opt.ip,
                                      port=opt.port)
    else:
        visualizer = None

    # set imitator
    imitator = ModelsFactory.get_by_name("imitator", opt)

    meta_src_proc = opt.meta_data["meta_src"]
    meta_ref_proc = opt.meta_data["meta_ref"]

    all_meta_outputs = []
    for i, meta_src in enumerate(meta_src_proc):
        """
        meta_input:
                path: /p300/tpami/neuralAvatar/sources/fange_1/fange_1_ns=2
                bg_path: /p300/tpami/neuralAvatar/sources/fange_1/IMG_7225.JPG
                name: fange_1
        primitives_dir: ../tests/debug/primitives/fange_1
        processed_dir: ../tests/debug/primitives/fange_1/processed
        vid_info_path: ../tests/debug/primitives/fange_1/processed/vid_info.pkl
        """
        src_proc_info = ProcessInfo(meta_src)
        src_proc_info.deserialize()

        src_info = src_proc_info.convert_to_src_info(num_source=opt.num_source)
        src_info_for_inference = get_src_info_for_inference(opt, src_info)

        # 1. personalization
        imitator.source_setup(src_path=src_info_for_inference["paths"],
                              src_smpl=src_info_for_inference["smpls"],
                              masks=src_info_for_inference["masks"],
                              bg_img=src_info_for_inference["bg"],
                              offsets=src_info_for_inference["offsets"],
                              links_ids=src_info_for_inference["links"],
                              visualizer=visualizer)

        for j, meta_ref in enumerate(meta_ref_proc):
            """
            meta_input:
                path: /p300/tpami/neuralAvatar/references/videos/bantangzhuyi_1.mp4
                bg_path: 
                name: bantangzhuyi_1
                audio: /p300/tpami/neuralAvatar/references/videos/bantangzhuyi_1.mp3
                fps: 30.02
                pose_fc: 400.0
                cam_fc: 150.0
            primitives_dir: ../tests/debug/primitives/bantangzhuyi_1
            processed_dir: ../tests/debug/primitives/bantangzhuyi_1/processed
            vid_info_path: ../tests/debug/primitives/bantangzhuyi_1/processed/vid_info.pkl
            """

            ref_proc_info = ProcessInfo(meta_ref)
            ref_proc_info.deserialize()

            ref_info = ref_proc_info.convert_to_ref_info()
            ref_imgs_paths = ref_info["images"]
            ref_smpls = ref_info["smpls"]
            ref_smpls = add_hands_params_to_smpl(
                ref_smpls, imitator.body_rec.np_hands_mean)

            meta_output = MetaOutput(meta_src, meta_ref)

            # if there are more than 10 frames, apply temporal smoothing to the SMPL parameters
            if len(ref_smpls) > 10:
                ref_smpls = temporal_smooth_smpls(ref_smpls,
                                                  pose_fc=meta_output.pose_fc,
                                                  cam_fc=meta_output.cam_fc)

            out_imgs_dir = clear_dir(meta_output.imitation_dir)

            outputs = imitator.inference(tgt_paths=ref_imgs_paths,
                                         tgt_smpls=ref_smpls,
                                         cam_strategy=opt.cam_strategy,
                                         output_dir=out_imgs_dir,
                                         visualizer=visualizer,
                                         verbose=True)

            fuse_source_reference_output(
                meta_output.imitation_mp4,
                src_info_for_inference["paths"],
                ref_imgs_paths,
                outputs,
                audio_path=meta_output.audio,
                fps=meta_output.fps,
                image_size=opt.image_size,
                pool_size=opt.num_workers)

            all_meta_outputs.append(meta_output)

    for meta_output in all_meta_outputs:
        print(meta_output)

    print("Step 3: running imitator done.")
    return all_meta_outputs
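
A minimal end-to-end sketch combining the pieces shown in this section, assuming the setup, human_estimate, digital_deform, and post_update_opt helpers from the earlier examples:

# sketch: full pipeline from raw inputs to imitation videos
cfg = setup(args)                # parse the inputs and build the configurations
human_estimate(opt=cfg)          # Step 1: 2D/3D pose, tracking, parsing, front estimation
digital_deform(opt=cfg)          # Step 2: digital deformation of the source images
cfg = post_update_opt(cfg)       # keep only successfully processed sources/references
all_meta_outputs = imitate(cfg)  # Step 3: run the imitator and fuse the output videos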