Example #1
def meta_imitate(opt, imitator, prior_tgt_path, save_imgs=True, visualizer=None):
    src_path = opt.src_path

    # sample target frames from the prior directory at a fixed interval
    all_tgt_paths = scan_tgt_paths(prior_tgt_path, itv=40)
    output_dir = opt.output_dir

    out_img_dir, out_pair_dir = mkdirs([os.path.join(output_dir, 'imgs'), os.path.join(output_dir, 'pairs')])

    img_pair_list = []

    for t in tqdm(range(len(all_tgt_paths))):
        tgt_path = all_tgt_paths[t]
        # run the imitator on the current target frame
        preds = imitator.inference([tgt_path], visualizer=visualizer, cam_strategy=opt.cam_strategy, verbose=False)

        tgt_name = os.path.split(tgt_path)[-1]
        out_path = os.path.join(out_img_dir, 'pred_' + tgt_name)

        if save_imgs:
            # save the predicted image and the per-frame pair information
            cv_utils.save_cv2_img(preds[0], out_path, normalize=True)
            write_pair_info(imitator.src_info, imitator.tsf_info,
                            os.path.join(out_pair_dir, '{:0>8}.pkl'.format(t)), imitator=imitator,
                            only_vis=opt.only_vis)

            img_pair_list.append((src_path, tgt_path))

    if save_imgs:
        # dump the collected (source, target) image path pairs as metadata
        write_pickle_file(os.path.join(output_dir, 'pairs_meta.pkl'), img_pair_list)
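
The examples on this page all route their outputs through write_pickle_file, whose implementation is not shown here. A minimal sketch of such a helper, assuming it is only a thin wrapper around pickle.dump that creates the parent directory when needed (an assumption, not the project's actual code), could look like this:

import os
import pickle


def write_pickle_file(pkl_path, data_dict):
    """Sketch only: serialize data_dict to pkl_path; the real helper may differ."""
    out_dir = os.path.dirname(pkl_path)
    if out_dir and not os.path.exists(out_dir):
        os.makedirs(out_dir)
    with open(pkl_path, 'wb') as fp:
        pickle.dump(data_dict, fp, protocol=pickle.HIGHEST_PROTOCOL)
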
Example #2
def write_pair_info(src_info, tsf_info, out_file, imitator, only_vis):
    """
    Collect source/target correspondence data and dump it to a pickle file.

    Args:
        src_info (dict): source frame info ('fim', 'wim', 'theta', 'j2d', ...).
        tsf_info (dict): transferred frame info ('fim', 'T', 'tsf_img', 'theta',
            'j2d', 'cam', 'verts', ...).
        out_file (str): path of the output pickle file.
        imitator: model whose renderer (imitator.render) computes the cycle transform.
        only_vis: unused in the body shown.

    Returns:
        None
    """
    pair_data = dict()

    # face index maps, transformation 'T', warped image, SMPL parameters ('theta')
    # and 2D joints for the source and the transferred (target) frame
    pair_data['from_face_index_map'] = src_info['fim'][0][:, :, None].cpu().numpy()
    pair_data['to_face_index_map'] = tsf_info['fim'][0][:, :, None].cpu().numpy()
    pair_data['T'] = tsf_info['T'][0].cpu().numpy()
    pair_data['warp'] = tsf_info['tsf_img'][0].cpu().numpy()
    pair_data['smpls'] = torch.cat([src_info['theta'], tsf_info['theta']], dim=0).cpu().numpy()
    pair_data['j2d'] = torch.cat([src_info['j2d'], tsf_info['j2d']], dim=0).cpu().numpy()

    # re-render the face index / weight maps for the target pose and compute the
    # reverse (cycle) transform against the source maps
    tsf_f2verts, tsf_fim, tsf_wim = imitator.render.render_fim_wim(tsf_info['cam'], tsf_info['verts'])
    tsf_p2verts = tsf_f2verts[:, :, :, 0:2]
    tsf_p2verts[:, :, :, 1] *= -1  # flip the y axis of the projected vertices

    T_cycle = imitator.render.cal_bc_transform(tsf_p2verts, src_info['fim'], src_info['wim'])
    pair_data['T_cycle'] = T_cycle[0].cpu().numpy()

    # back_face_ids = mesh.get_part_face_ids(part_type='head_back')
    # tsf_p2verts[:, back_face_ids] = -2
    # T_cycle_vis = imitator.render.cal_bc_transform(tsf_p2verts, src_info['fim'], src_info['wim'])
    # pair_data['T_cycle_vis'] = T_cycle_vis[0].cpu().numpy()

    # for key, val in pair_data.items():
    #     print(key, val.shape)

    write_pickle_file(out_file, pair_data)
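
Since write_pair_info stores only NumPy arrays, a saved pair file can be read back with pickle alone; the path below is purely illustrative, following the '{:0>8}.pkl' naming used in Example #1:

import pickle

# hypothetical path to one of the pair files written above
with open('outputs/pairs/00000000.pkl', 'rb') as fp:
    pair_data = pickle.load(fp)

# mirror the commented-out debug loop above: print each key and its array shape
for key, val in pair_data.items():
    print(key, val.shape)
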
Example #3
				if len(hull.vertices) == 3:
					pt_normal[idx] = f_n * (-1)
	
	return pt_position, pt_normal

if __name__ == '__main__':
	data_root = 'data/Future-3D-Reconstruction'
	model_dir = os.path.join(data_root, 'train', 'model')
	verts_normals_dir = os.path.join(data_root, 'train', 'verts_normals')
	if not os.path.exists(verts_normals_dir):
		os.mkdir(verts_normals_dir)

	model_IDs = os.listdir(model_dir)
	model_IDs = [ID for ID in model_IDs if ID.endswith('.obj')]
	print('There are {} models'.format(len(model_IDs)))

	widgets = ['Progress: ', Percentage(), ' ', Bar('#'), ' ', Timer(), ' ', ETA()]
	total = len(model_IDs)
	pbar = ProgressBar(widgets=widgets, maxval=10*total).start()
	# for each .obj model, compute per-vertex normals and pickle them
	for i, model_ID in enumerate(model_IDs):
		model_path = os.path.join(model_dir, model_ID)
		verts_normals_path = os.path.join(verts_normals_dir, model_ID.replace('.obj', '.pth'))
		face_pts, face_axis = readFaceInfo(model_path)
		#print(face_pts.shape, face_axis.shape)
		verts = get_obj_vertex_ali(model_path)
		#print(verts.shape)
		verts, normals = generate_normal(verts, face_pts, face_axis)
		#print(verts.shape, normals.shape)
		write_pickle_file(verts_normals_path, {'verts': verts, 'normals': normals})
		pbar.update(10 * i + 1)
	pbar.finish()
Example #4
if __name__ == '__main__':
    data_root = 'data/Future-3D-Reconstruction'
    camera_dir = os.path.join(data_root, 'train', 'cameras')
    os.makedirs(camera_dir, exist_ok=True)  # make sure the output directory exists
    with open(os.path.join(data_root, 'train', 'data_info',
                           'train_set.json')) as f:
        train_set_info = json.load(f)
    total = len(train_set_info)
    for i in range(total):
        model_ID = train_set_info[i]['model']
        img = Image.open(
            os.path.join(data_root, 'train', 'image',
                         train_set_info[i]['image'])).convert('RGB')
        trans = np.array(train_set_info[i]['pose']['translation'])
        rot = np.array(train_set_info[i]['pose']['rotation'])
        # build a Blender camera with the annotated field of view and read back
        # its intrinsic matrix K for this image resolution
        init_blender(img.size)
        try:
            #fov = math.radians(train_set_info[i]['fov'])
            fov = train_set_info[i]['fov']
            cam = add_camera((0, 0, 0), fov, 'camera')
            K_blender = get_calibration_matrix_K_from_blender(cam.data)
            K = np.array(K_blender)
            clear_scene()
            clear_mv()
        except Exception:
            # skip samples for which the intrinsics cannot be computed
            continue
        cam_path = os.path.join(camera_dir, model_ID + '.pkl')
        write_pickle_file(cam_path, {'K': K})
        print('[ {} / {} ] Saved to {}'.format(i, total, cam_path))
    print('Finished!')