Example #1
def wrap_3d_photo_inpainting(config_path, *,
                             depth_handler: Optional[FileChoose] = None,
                             bar_total: Optional[ComplexProgressBar] = None,
                             bar_current: Optional[ComplexProgressBar] = None,
                             just_depth: bool = False
                             ):
    # NOTE: despite the Optional hints, both progress bars must be supplied;
    # they are used unconditionally throughout this function.
    bar_current.reset()
    config = yaml.safe_load(open(config_path, 'r'))
    if config['offscreen_rendering'] is True:
        vispy.use(app='egl')
    init_fs(config)
    sample_list = get_MiDaS_samples(config['src_folder'], config['depth_folder'], config, config['specific'])
    normal_canvas, all_canvas = None, None

    if isinstance(config["gpu_ids"], int) and (config["gpu_ids"] >= 0):
        device = config["gpu_ids"]
    else:
        device = "cpu"

    bar_current.add(bar_current.max)
    bar_total.add(bar_total.max * 2 / 100)

    print(f(_("running on device {device}")))

    for idx in tqdm(range(len(sample_list))):
        bar_current.reset()
        depth = None
        sample = sample_list[idx]
        print(f(_("Current Source ==> {sample['src_pair_name']}")))
        mesh_fi = os.path.join(config['mesh_folder'], sample['src_pair_name'] +'.ply')
        image = imageio.imread(sample['ref_img_fi'])

        print(f(_("Running depth extraction at {datetime.now():%Y-%m-%d %H:%M:%S.%f}")))
        if just_depth or config['require_midas'] is True:
            run_depth([sample['ref_img_fi']], config['src_folder'], config['depth_folder'],
                      config['MiDaS_model_ckpt'], MonoDepthNet, MiDaS_utils, target_w=640)

            update_image_handler(image_handler=depth_handler,
                                 path=Path(f"{config['depth_folder']}/{sample['src_pair_name']}.png"))

        if just_depth:
            bar_total.reset()
            bar_total.value = bar_total.max
            bar_current.reset()
            bar_current.value = bar_current.max
            return

        bar_current.add(bar_current.max)
        bar_total.add(bar_total.max * (2 / len(sample_list)) / 100)

        bar_current.reset()

        image = prepare_config_and_image(config=config, sample=sample, image=image)

        bar_current.add(bar_current.max)
        bar_total.add(bar_total.max * (2 / len(sample_list)) / 100)

        bar_current.reset()

        image = cv2.resize(image, (config['output_w'], config['output_h']), interpolation=cv2.INTER_AREA)
        depth = read_MiDaS_depth(sample['depth_fi'], 3.0, config['output_h'], config['output_w'])
        mean_loc_depth = depth[depth.shape[0]//2, depth.shape[1]//2]

        bar_current.add(bar_current.max)
        bar_total.add(bar_total.max * (2 / len(sample_list)) / 100)

        bar_current.reset()

        if not(config['load_ply'] is True and os.path.exists(mesh_fi)):
            vis_photos, vis_depths = sparse_bilateral_filtering(depth.copy(), image.copy(), config, num_iter=config['sparse_iter'], spdb=False)
            depth = vis_depths[-1]
            model = None
            torch.cuda.empty_cache()
            print(_("Start Running 3D_Photo ..."))

            depth_edge_model = load_edge_model(device=device, depth_edge_model_ckpt=config['depth_edge_model_ckpt'])
            depth_edge_model.eval()

            depth_feat_model = load_depth_model(device=device, depth_feat_model_ckpt=config['depth_feat_model_ckpt'])

            rgb_model = load_rgb_model(device=device, rgb_feat_model_ckpt=config['rgb_feat_model_ckpt'])
            graph = None

            def up_bars(dt=None):
                bar_current.add(bar_current.max * 1.5 / 100)
                bar_total.add(bar_total.max * (1 / len(sample_list)) / 100)

            # increase the bars every 5 sec, up to 5 min
            event = schedule_interval(up_bars, 5, 60 * 5)

            print(f(_("Writing depth ply (and basically doing everything) at {datetime.now():%Y-%m-%d %H:%M:%S.%f}")))
            rt_info = write_ply(image,
                                  depth,
                                  sample['int_mtx'],
                                  mesh_fi,
                                  config,
                                  rgb_model,
                                  depth_edge_model,
                                  depth_edge_model,
                                  depth_feat_model)

            if rt_info is False:
                continue
            rgb_model = None
            color_feat_model = None
            depth_edge_model = None
            depth_feat_model = None
            torch.cuda.empty_cache()

            event.cancel()

        bar_current.add(bar_current.max)
        bar_total.value_normalized = 75 / 100

        bar_current.reset()

        props = read_ply(mesh_fi) if config['save_ply'] is True or config['load_ply'] is True else rt_info
        make_video(
            sample=sample, config=config, props=props,
            depth=depth, normal_canvas=normal_canvas, all_canvas=all_canvas,
        )

        bar_current.value_normalized = 1
        bar_total.value_normalized = 1
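
A minimal calling sketch for the wrapper above. ComplexProgressBar and FileChoose are the host application's own widgets, assumed here to follow a Kivy-style interface (a max attribute plus the reset()/add() methods used in the body); none of this is part of the 3d-photo-inpainting project itself:

# Hypothetical usage -- the widget classes and 'argument.yml' are assumptions.
bar_total = ComplexProgressBar(max=100)
bar_current = ComplexProgressBar(max=100)
wrap_3d_photo_inpainting('argument.yml',
                         depth_handler=None,   # or a FileChoose instance
                         bar_total=bar_total,
                         bar_current=bar_current,
                         just_depth=False)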
Example #2
from MiDaS.monodepth_net import MonoDepthNet
import MiDaS.MiDaS_utils as MiDaS_utils

parser = argparse.ArgumentParser()
parser.add_argument('--config',
                    type=str,
                    default='argument.yml',
                    help='Configuration file for post-processing')
args = parser.parse_args()
config = yaml.safe_load(open(args.config, 'r'))
if config['offscreen_rendering'] is True:
    vispy.use(app='egl')
os.makedirs(config['mesh_folder'], exist_ok=True)
os.makedirs(config['video_folder'], exist_ok=True)
os.makedirs(config['depth_folder'], exist_ok=True)
sample_list = get_MiDaS_samples(config['src_folder'], config['depth_folder'],
                                config, config['specific'])
normal_canvas, all_canvas = None, None

if isinstance(config["gpu_ids"], int) and (config["gpu_ids"] >= 0):
    device = config["gpu_ids"]
else:
    device = "cpu"

for idx in tqdm(range(len(sample_list))):
    depth = None
    sample = sample_list[idx]
    print("Current Source ==> ", sample['src_pair_name'])
    mesh_fi = os.path.join(config['mesh_folder'],
                           sample['src_pair_name'] + '.ply')
    image = imageio.imread(sample['ref_img_fi'])
    run_depth([sample['ref_img_fi']],
              config['src_folder'],
              config['depth_folder'],
              config['MiDaS_model_ckpt'],
              MonoDepthNet,
              MiDaS_utils,
              target_w=640)
Example #3
from MiDaS import MiDaS_utils
from bilateral_filtering import sparse_bilateral_filtering

parser = argparse.ArgumentParser()
parser.add_argument(
    "--config", type=str, default="argument.yml",
    help="Configuration file for post-processing"
)
args = parser.parse_args()
config = yaml.safe_load(open(args.config, "r"))
if config["offscreen_rendering"] is True:
    vispy.use(app="egl")
os.makedirs(config["mesh_folder"], exist_ok=True)
os.makedirs(config["video_folder"], exist_ok=True)
os.makedirs(config["depth_folder"], exist_ok=True)
sample_list = get_MiDaS_samples(
    config["src_folder"], config["depth_folder"], config, config["specific"]
)
normal_canvas, all_canvas = None, None

if isinstance(config["gpu_ids"], int) and (config["gpu_ids"] >= 0):
    device = config["gpu_ids"]
else:
    device = "cpu"

print(f"running on device {device}")

for idx in tqdm(range(len(sample_list))):
    depth = None
    sample = sample_list[idx]
    print("Current Source ==> ", sample["src_pair_name"])
    mesh_fi = os.path.join(config["mesh_folder"], sample["src_pair_name"] + ".ply")
Example #4
def make_video(params_file):
    print("Making Video...")
    with open(params_file) as f:
        params = json.load(f)

    os.makedirs(params['mesh_folder'], exist_ok=True)
    os.makedirs(params['video_folder'], exist_ok=True)
    os.makedirs(params['depth_folder'], exist_ok=True)
    sample_list = get_MiDaS_samples(params, params['specific'])
    normal_canvas, all_canvas = None, None

    device = "cuda"

    depth = None
    try:
        sample = sample_list[0]
    except Exception:  # sample_list may be a single sample rather than a list
        sample = sample_list
    # print("Current Source ==> ", sample['src_pair_name'])
    mesh_fi = os.path.join(params['mesh_folder'],
                           sample['src_pair_name'] + '.ply')
    image = imageio.imread(sample['ref_img_fi'])

    # print(f"Running depth extraction at {time.time()}")
    if params['require_midas'] is True:
        run_depth([sample['ref_img_fi']],
                  params['src_dir'],
                  params['depth_folder'],
                  params['MiDaS_model_ckpt'],
                  MonoDepthNet,
                  MiDaS_utils,
                  target_w=640)
    if 'npy' in params['depth_format']:
        params['output_h'], params['output_w'] = np.load(
            sample['depth_fi']).shape[:2]
    else:
        params['output_h'], params['output_w'] = imageio.imread(
            sample['depth_fi']).shape[:2]
    frac = params['longer_side_len'] / max(params['output_h'],
                                           params['output_w'])
    params['output_h'], params['output_w'] = int(
        params['output_h'] * frac), int(params['output_w'] * frac)
    params['original_h'], params['original_w'] = params['output_h'], params[
        'output_w']
    if image.ndim == 2:
        image = image[..., None].repeat(3, -1)
    if np.sum(np.abs(image[..., 0] - image[..., 1])) == 0 and np.sum(
            np.abs(image[..., 1] - image[..., 2])) == 0:
        params['gray_image'] = True
    else:
        params['gray_image'] = False
    image = cv2.resize(image, (params['output_w'], params['output_h']),
                       interpolation=cv2.INTER_AREA)
    depth = read_MiDaS_depth(sample['depth_fi'], 3.0, params['output_h'],
                             params['output_w'])
    mean_loc_depth = depth[depth.shape[0] // 2, depth.shape[1] // 2]
    if not (params['load_ply'] is True and os.path.exists(mesh_fi)):
        vis_photos, vis_depths = sparse_bilateral_filtering(
            depth.copy(),
            image.copy(),
            params,
            num_iter=params['sparse_iter'],
            spdb=False)
        depth = vis_depths[-1]
        model = None
        torch.cuda.empty_cache()

        depth_edge_model = Inpaint_Edge_Net(init_weights=True)
        depth_edge_weight = torch.load(params['depth_edge_model_ckpt'],
                                       map_location=torch.device(device))
        depth_edge_model.load_state_dict(depth_edge_weight)
        depth_edge_model = depth_edge_model.to(device)
        depth_edge_model.eval()

        depth_feat_model = Inpaint_Depth_Net()
        depth_feat_weight = torch.load(params['depth_feat_model_ckpt'],
                                       map_location=torch.device(device))
        depth_feat_model.load_state_dict(depth_feat_weight, strict=True)
        depth_feat_model = depth_feat_model.to(device)
        depth_feat_model.eval()

        rgb_model = Inpaint_Color_Net()
        rgb_feat_weight = torch.load(params['rgb_feat_model_ckpt'],
                                     map_location=torch.device(device))
        rgb_model.load_state_dict(rgb_feat_weight)
        rgb_model.eval()
        rgb_model = rgb_model.to(device)
        graph = None

        rt_info = write_ply("", image, depth, sample['int_mtx'], mesh_fi,
                            params, rgb_model, depth_edge_model,
                            depth_edge_model, depth_feat_model)

        if rt_info is False:
            return
        rgb_model = None
        color_feat_model = None
        depth_edge_model = None
        depth_feat_model = None
        torch.cuda.empty_cache()
    if params['save_ply'] is True or params['load_ply'] is True:
        verts, colors, faces, Height, Width, hFov, vFov = read_ply(mesh_fi)
    else:
        verts, colors, faces, Height, Width, hFov, vFov = rt_info

    videos_poses, video_basename = copy.deepcopy(
        sample['tgts_poses']), sample['tgt_name']
    top = (params.get('original_h') // 2 -
           sample['int_mtx'][1, 2] * params['output_h'])
    left = (params.get('original_w') // 2 -
            sample['int_mtx'][0, 2] * params['output_w'])
    down, right = top + params['output_h'], left + params['output_w']
    border = [int(xx) for xx in [top, down, left, right]]
    normal_canvas, all_canvas = output_3d_photo(
        verts.copy(),
        colors.copy(),
        faces.copy(),
        copy.deepcopy(Height),
        copy.deepcopy(Width),
        copy.deepcopy(hFov),
        copy.deepcopy(vFov),
        copy.deepcopy(sample['tgt_pose']),
        sample['video_postfix'],
        copy.deepcopy(sample['ref_pose']),
        copy.deepcopy(params['video_folder']),
        image.copy(),
        copy.deepcopy(sample['int_mtx']),
        params,
        image,
        videos_poses,
        video_basename,
        params.get('original_h'),
        params.get('original_w'),
        border=border,
        depth=depth,
        normal_canvas=normal_canvas,
        all_canvas=all_canvas,
        mean_loc_depth=mean_loc_depth)
    print("Done!")
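
A sketch of how make_video might be driven. It reads its configuration from a JSON file, so a caller only needs to dump a dict carrying the keys the function body reads; every value below is illustrative, and the checkpoint paths merely mirror the project's default layout:

# Hypothetical driver for make_video(); params.json must hold every key the
# function reads (folders, src_dir, checkpoints, depth_format, ...).
import json

params = {
    'mesh_folder': 'mesh', 'video_folder': 'video', 'depth_folder': 'depth',
    'src_dir': 'image', 'specific': '', 'require_midas': True,
    'MiDaS_model_ckpt': 'MiDaS/model.pt',
    'depth_format': '.npy', 'longer_side_len': 960,
    'load_ply': False, 'save_ply': True, 'sparse_iter': 5,
    'depth_edge_model_ckpt': 'checkpoints/edge-model.pth',
    'depth_feat_model_ckpt': 'checkpoints/depth-model.pth',
    'rgb_feat_model_ckpt': 'checkpoints/color-model.pth',
}
with open('params.json', 'w') as fp:
    json.dump(params, fp)

make_video('params.json')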
Example #5
def Main(config_dict):
    config = config_dict
    print(config)
    if config['offscreen_rendering'] is True:
        vispy.use(app='egl')
    os.makedirs(config['mesh_folder'], exist_ok=True)
    os.makedirs(config['video_folder'], exist_ok=True)
    os.makedirs(config['depth_folder'], exist_ok=True)
    sample_list = get_MiDaS_samples(config['src_folder'], config['depth_folder'], config, config['specific'])
    normal_canvas, all_canvas = None, None

    if isinstance(config["gpu_ids"], int) and (config["gpu_ids"] >= 0):
        device = config["gpu_ids"]
    else:
        device = "cpu"

    print(f"running on device {device}")

    for idx in tqdm(range(len(sample_list))):
        depth = None
        sample = sample_list[idx]
        print("Current Source ==> ", sample['src_pair_name'])
        mesh_fi = os.path.join(config['mesh_folder'], sample['src_pair_name'] +'.ply')
        image = imageio.imread(sample['ref_img_fi'])

        print(f"Running depth extraction at {time.time()}")
        if config['require_midas'] is True:
            run_depth([sample['ref_img_fi']], config['src_folder'], config['depth_folder'],
                    config['MiDaS_model_ckpt'], MonoDepthNet, MiDaS_utils, target_w=640)
        if 'npy' in config['depth_format']:
            config['output_h'], config['output_w'] = np.load(sample['depth_fi']).shape[:2]
        else:
            config['output_h'], config['output_w'] = imageio.imread(sample['depth_fi']).shape[:2]
        frac = config['longer_side_len'] / max(config['output_h'], config['output_w'])
        config['output_h'], config['output_w'] = int(config['output_h'] * frac), int(config['output_w'] * frac)
        config['original_h'], config['original_w'] = config['output_h'], config['output_w']
        if image.ndim == 2:
            image = image[..., None].repeat(3, -1)
        if np.sum(np.abs(image[..., 0] - image[..., 1])) == 0 and np.sum(np.abs(image[..., 1] - image[..., 2])) == 0:
            config['gray_image'] = True
        else:
            config['gray_image'] = False
        image = cv2.resize(image, (config['output_w'], config['output_h']), interpolation=cv2.INTER_AREA)
        depth = read_MiDaS_depth(sample['depth_fi'], 3.0, config['output_h'], config['output_w'])
        mean_loc_depth = depth[depth.shape[0]//2, depth.shape[1]//2]
        if not(config['load_ply'] is True and os.path.exists(mesh_fi)):
            vis_photos, vis_depths = sparse_bilateral_filtering(depth.copy(), image.copy(), config, num_iter=config['sparse_iter'], spdb=False)
            depth = vis_depths[-1]
            model = None
            torch.cuda.empty_cache()
            print("Start Running 3D_Photo ...")
            print(f"Loading edge model at {time.time()}")
            depth_edge_model = Inpaint_Edge_Net(init_weights=True)
            depth_edge_weight = torch.load(config['depth_edge_model_ckpt'],
                                        map_location=torch.device(device))
            depth_edge_model.load_state_dict(depth_edge_weight)
            depth_edge_model = depth_edge_model.to(device)
            depth_edge_model.eval()

            print(f"Loading depth model at {time.time()}")
            depth_feat_model = Inpaint_Depth_Net()
            depth_feat_weight = torch.load(config['depth_feat_model_ckpt'],
                                        map_location=torch.device(device))
            depth_feat_model.load_state_dict(depth_feat_weight, strict=True)
            depth_feat_model = depth_feat_model.to(device)
            depth_feat_model.eval()
            print(f"Loading rgb model at {time.time()}")
            rgb_model = Inpaint_Color_Net()
            rgb_feat_weight = torch.load(config['rgb_feat_model_ckpt'],
                                        map_location=torch.device(device))
            rgb_model.load_state_dict(rgb_feat_weight)
            rgb_model.eval()
            rgb_model = rgb_model.to(device)
            graph = None


            print(f"Writing depth ply (and basically doing everything) at {time.time()}")
            rt_info = write_ply(image,
                                depth,
                                sample['int_mtx'],
                                mesh_fi,
                                config,
                                rgb_model,
                                depth_edge_model,
                                depth_edge_model,
                                depth_feat_model)

            if rt_info is False:
                continue
            rgb_model = None
            color_feat_model = None
            depth_edge_model = None
            depth_feat_model = None
            torch.cuda.empty_cache()
        if config['save_ply'] is True or config['load_ply'] is True:
            verts, colors, faces, Height, Width, hFov, vFov = read_ply(mesh_fi)
        else:
            verts, colors, faces, Height, Width, hFov, vFov = rt_info


        print(f"Making video at {time.time()}")
        videos_poses, video_basename = copy.deepcopy(sample['tgts_poses']), sample['tgt_name']
        top = (config.get('original_h') // 2 - sample['int_mtx'][1, 2] * config['output_h'])
        left = (config.get('original_w') // 2 - sample['int_mtx'][0, 2] * config['output_w'])
        down, right = top + config['output_h'], left + config['output_w']
        border = [int(xx) for xx in [top, down, left, right]]
        normal_canvas, all_canvas = output_3d_photo(verts.copy(), colors.copy(), faces.copy(), copy.deepcopy(Height), copy.deepcopy(Width), copy.deepcopy(hFov), copy.deepcopy(vFov),
                            copy.deepcopy(sample['tgt_pose']), sample['video_postfix'], copy.deepcopy(sample['ref_pose']), copy.deepcopy(config['video_folder']),
                            image.copy(), copy.deepcopy(sample['int_mtx']), config, image,
                            videos_poses, video_basename, config.get('original_h'), config.get('original_w'), border=border, depth=depth, normal_canvas=normal_canvas, all_canvas=all_canvas,
                            mean_loc_depth=mean_loc_depth)
    
    move_to_dir("video", "static")
    return 0
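
Because Main takes the configuration as a plain dict, the YAML round-trip can also be done once up front and tweaked programmatically; a minimal sketch, assuming the project's stock argument.yml is present:

# Hypothetical direct call: load the stock config, then override fields.
with open('argument.yml') as fp:
    config = yaml.safe_load(fp)
config['offscreen_rendering'] = False
config['gpu_ids'] = 0   # any value < 0 (or a non-int) falls back to CPU
Main(config)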
Example #6
parser = argparse.ArgumentParser()
parser.add_argument('--config',
                    type=str,
                    default='argument.yml',
                    help='Configuration file for post-processing')
args = parser.parse_args()
config = yaml.safe_load(open(args.config, 'r'))
if config['offscreen_rendering'] is True:
    vispy.use(app='egl')

# create some directories
os.makedirs(config['mesh_folder'], exist_ok=True)
os.makedirs(config['video_folder'], exist_ok=True)
os.makedirs(config['depth_folder'], exist_ok=True)
sample_list = get_MiDaS_samples(config['src_folder'], config['depth_folder'],
                                config,
                                config['specific'])  # dict of important stuffs
normal_canvas, all_canvas = None, None

# find device
if isinstance(config["gpu_ids"], int) and (config["gpu_ids"] >= 0):
    device = config["gpu_ids"]
else:
    device = "cpu"

print(f"running on device {device}")

# iterate over each image.
for idx in tqdm(range(len(sample_list))):
    depth = None
    sample = sample_list[idx]  # select image
Example #7
def render_mp4(input_dir, output_dir, filer_type):

    print("invoked render start")
    parser = argparse.ArgumentParser()
    parser.add_argument("--config",
                        type=str,
                        default="argument.yml",
                        help="Configuration file for post-processing")
    args = parser.parse_args()
    config = yaml.safe_load(open(args.config, "r"))
    if config["offscreen_rendering"] is True:
        vispy.use(app="egl")
    os.makedirs(config["mesh_folder"], exist_ok=True)
    os.makedirs(config["video_folder"], exist_ok=True)
    os.makedirs(config["depth_folder"], exist_ok=True)

    sample_list = get_MiDaS_samples(input_dir, config["depth_folder"], config,
                                    filer_type, config["specific"])
    normal_canvas, all_canvas = None, None

    if isinstance(config["gpu_ids"], int) and (config["gpu_ids"] >= 0):
        device = config["gpu_ids"]
    else:
        device = "cpu"

    print(f"running on device {device}")

    for idx in tqdm(range(len(sample_list))):
        depth = None
        sample = sample_list[idx]
        print("Current Source ==> ", sample["src_pair_name"])
        mesh_fi = os.path.join(config["mesh_folder"],
                               sample["src_pair_name"] + ".ply")
        image = imageio.imread(sample["ref_img_fi"])

        print(f"Running depth extraction at {time.time()}")
        if config["require_midas"] is True:
            run_depth(
                [sample["ref_img_fi"]],
                config["src_folder"],
                config["depth_folder"],
                config["MiDaS_model_ckpt"],
                MonoDepthNet,
                MiDaS_utils,
                target_w=640,
            )
        if "npy" in config["depth_format"]:
            config["output_h"], config["output_w"] = np.load(
                sample["depth_fi"]).shape[:2]
        else:
            config["output_h"], config["output_w"] = imageio.imread(
                sample["depth_fi"]).shape[:2]
        frac = config["longer_side_len"] / max(config["output_h"],
                                               config["output_w"])
        config["output_h"], config["output_w"] = int(
            config["output_h"] * frac), int(config["output_w"] * frac)
        config["original_h"], config["original_w"] = config[
            "output_h"], config["output_w"]
        if image.ndim == 2:
            image = image[..., None].repeat(3, -1)
        if np.sum(np.abs(image[..., 0] - image[..., 1])) == 0 and np.sum(
                np.abs(image[..., 1] - image[..., 2])) == 0:
            config["gray_image"] = True
        else:
            config["gray_image"] = False
        image = cv2.resize(image, (config["output_w"], config["output_h"]),
                           interpolation=cv2.INTER_AREA)
        depth = read_MiDaS_depth(sample["depth_fi"], 3.0, config["output_h"],
                                 config["output_w"])
        mean_loc_depth = depth[depth.shape[0] // 2, depth.shape[1] // 2]
        if not (config["load_ply"] is True and os.path.exists(mesh_fi)):
            vis_photos, vis_depths = sparse_bilateral_filtering(
                depth.copy(),
                image.copy(),
                config,
                num_iter=config["sparse_iter"],
                spdb=False)
            depth = vis_depths[-1]
            model = None
            torch.cuda.empty_cache()
            print("Start Running 3D_Photo ...")
            print(f"Loading edge model at {time.time()}")
            depth_edge_model = Inpaint_Edge_Net(init_weights=True)
            depth_edge_weight = torch.load(config["depth_edge_model_ckpt"],
                                           map_location=torch.device(device))
            depth_edge_model.load_state_dict(depth_edge_weight)
            depth_edge_model = depth_edge_model.to(device)
            depth_edge_model.eval()

            print(f"Loading depth model at {time.time()}")
            depth_feat_model = Inpaint_Depth_Net()
            depth_feat_weight = torch.load(config["depth_feat_model_ckpt"],
                                           map_location=torch.device(device))
            depth_feat_model.load_state_dict(depth_feat_weight, strict=True)
            depth_feat_model = depth_feat_model.to(device)
            depth_feat_model.eval()
            print(f"Loading rgb model at {time.time()}")
            rgb_model = Inpaint_Color_Net()
            rgb_feat_weight = torch.load(config["rgb_feat_model_ckpt"],
                                         map_location=torch.device(device))
            rgb_model.load_state_dict(rgb_feat_weight)
            rgb_model.eval()
            rgb_model = rgb_model.to(device)
            graph = None

            print(
                f"Writing depth ply (and basically doing everything) at {time.time()}"
            )
            rt_info = write_ply(
                image,
                depth,
                sample["int_mtx"],
                mesh_fi,
                config,
                rgb_model,
                depth_edge_model,
                depth_edge_model,
                depth_feat_model,
            )

            if rt_info is False:
                continue
            rgb_model = None
            color_feat_model = None
            depth_edge_model = None
            depth_feat_model = None
            torch.cuda.empty_cache()
        if config["save_ply"] is True or config["load_ply"] is True:
            verts, colors, faces, Height, Width, hFov, vFov = read_ply(mesh_fi)
        else:
            verts, colors, faces, Height, Width, hFov, vFov = rt_info

        print(f"Making video at {time.time()}")
        videos_poses, video_basename = copy.deepcopy(
            sample["tgts_poses"]), sample["tgt_name"]
        top = config.get("original_h") // 2 - sample["int_mtx"][
            1, 2] * config["output_h"]
        left = config.get("original_w") // 2 - sample["int_mtx"][
            0, 2] * config["output_w"]
        down, right = top + config["output_h"], left + config["output_w"]
        border = [int(xx) for xx in [top, down, left, right]]
        normal_canvas, all_canvas = output_3d_photo(
            verts.copy(),
            colors.copy(),
            faces.copy(),
            copy.deepcopy(Height),
            copy.deepcopy(Width),
            copy.deepcopy(hFov),
            copy.deepcopy(vFov),
            copy.deepcopy(sample["tgt_pose"]),
            sample["video_postfix"],
            copy.deepcopy(sample["ref_pose"]),
            copy.deepcopy(output_dir),
            image.copy(),
            copy.deepcopy(sample["int_mtx"]),
            config,
            image,
            videos_poses,
            video_basename,
            config.get("original_h"),
            config.get("original_w"),
            border=border,
            depth=depth,
            normal_canvas=normal_canvas,
            all_canvas=all_canvas,
            mean_loc_depth=mean_loc_depth,
        )
    print("invoked render end")
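
A matching invocation sketch for render_mp4. The function still parses --config internally, so argument.yml has to exist where the script runs; input_dir stands in for the config's src_folder, output_dir replaces the video folder, and filer_type is forwarded to this project's modified get_MiDaS_samples (the value below is purely illustrative):

# Hypothetical invocation -- all three arguments are caller-chosen.
render_mp4(input_dir='image', output_dir='video', filer_type='dolly-zoom-in')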