def new_render(pc=False, sparse=False, down=16):
    # Relies on globals set up by the data-loading code (args, near, far,
    # poses, H, W, focal).
    _, render_kwargs_test, start, grad_vars, models = run_nerf.create_nerf(args)
    bds_dict = {
        'near': tf.cast(near, tf.float32),
        'far': tf.cast(far, tf.float32),
    }
    render_kwargs_test.update(bds_dict)
    print('Render kwargs:')
    pprint.pprint(render_kwargs_test)

    render_kwargs_fast = {k: render_kwargs_test[k] for k in render_kwargs_test}
    render_kwargs_fast['N_importance'] = 128

    # c2w = np.eye(4)[:3, :4].astype(np.float32)  # identity pose matrix
    if not sparse:
        test = run_nerf.render(H // down, W // down, focal / down, c2w=poses[0],
                               pc=pc, cloudsize=16, **render_kwargs_fast)
    else:
        test = run_nerf_fast.render(H // down, W // down, focal / down,
                                    c2w=poses[0], **render_kwargs_fast)

    img = np.clip(test[0], 0, 1)
    disp = test[1]
    disp = (disp - np.min(disp)) / (np.max(disp) - np.min(disp))
    return img, disp
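# Illustrative usage sketch (not part of the original script): a small wrapper
# around new_render that writes the RGB preview and the normalized disparity
# map to disk. It assumes the globals new_render relies on (args, near, far,
# poses, H, W, focal) are already populated, e.g. by the loading code below;
# the helper name and output file names are hypothetical.
def save_preview(out_dir, pc=False, down=16):
    os.makedirs(out_dir, exist_ok=True)
    img, disp = new_render(pc=pc, down=down)
    tag = 'pc' if pc else 'no_pc'
    plt.imsave(os.path.join(out_dir, f'preview_{tag}.png'), img)
    plt.imsave(os.path.join(out_dir, f'disp_{tag}.png'), disp, cmap='gray')
    return img, disp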
def load_nerf(args):
    _, render_kwargs_test, start, grad_vars, models = run_nerf.create_nerf(args)

    # The point cloud functionality should only be used for NDC image sets.
    near = 0.
    far = 1.
    bds_dict = {
        'near': tf.cast(near, tf.float32),
        'far': tf.cast(far, tf.float32),
    }
    render_kwargs_test.update(bds_dict)

    render_kwargs_fast = {k: render_kwargs_test[k] for k in render_kwargs_test}
    render_kwargs_fast['N_importance'] = 128
    return render_kwargs_fast
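# Sketch of how load_nerf might be used (illustrative, not from the original
# script): the kwargs it returns still need the pose and intrinsics from the
# LLFF loader, and the pc/cloudsize arguments mirror the run_nerf.render calls
# made elsewhere in this file. The helper name render_view is hypothetical.
def render_view(args, c2w, H, W, focal, pc=True, down=4):
    render_kwargs = load_nerf(args)
    ret = run_nerf.render(H // down, W // down, focal / down, c2w=c2w,
                          pc=pc, cloudsize=16, **render_kwargs)
    return np.clip(ret[0], 0, 1)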
def get_data():
    basedir = './logs'
    expname = 'fern_example'
    config = os.path.join(basedir, expname, 'config.txt')
    print('Args:')
    print(open(config, 'r').read())

    parser = run_nerf.config_parser()
    weights_name = 'model_200000.npy'
    args = parser.parse_args('--config {} --ft_path {}'.format(
        config, os.path.join(basedir, expname, weights_name)))
    print('loaded args')

    images, poses, bds, render_poses, i_test = load_llff_data(
        args.datadir, args.factor, recenter=True, bd_factor=.75,
        spherify=args.spherify)
    H, W, focal = poses[0, :3, -1].astype(np.float32)
    poses = poses[:, :3, :4]
    H = int(H)
    W = int(W)
    hwf = [H, W, focal]
    images = images.astype(np.float32)
    poses = poses.astype(np.float32)

    near = 0.
    far = 1.

    if not isinstance(i_test, list):
        i_test = [i_test]
    if args.llffhold > 0:
        print('Auto LLFF holdout,', args.llffhold)
        i_test = np.arange(images.shape[0])[::args.llffhold]

    _, render_kwargs, start, grad_vars, models = run_nerf.create_nerf(args)
    bds_dict = {
        'near': tf.cast(near, tf.float32),
        'far': tf.cast(far, tf.float32),
    }
    render_kwargs.update(bds_dict)
    print('Render kwargs:')
    pprint.pprint(render_kwargs)

    results = {}
    results['pc'] = {}
    results['no_pc'] = {}

    # NOTE: Where to output results!
    result_directory = "./fern_pc_results"
    img_dir = os.path.join(result_directory, "imgs")
    os.makedirs(img_dir, exist_ok=True)  # make sure the output directory exists
    down = 1

    plt.imsave(os.path.join(img_dir, f"GT{i_test[0]}.png"), images[i_test[0]])
    plt.imsave(os.path.join(img_dir, f"GT{i_test[1]}.png"), images[i_test[1]])

    for num_samps in [4, 8, 16, 32, 64]:
        print(f'Running {num_samps} sample test')
        for pc in [True, False]:
            print(f'{"not " if not pc else ""}using pc')
            results['pc' if pc else 'no_pc'][num_samps] = {}
            render_kwargs['N_samples'] = num_samps
            render_kwargs['N_importance'] = 2 * num_samps
            total_time = 0
            total_mse = 0
            total_psnr = 0
            for i in [i_test[0], i_test[1]]:
                gt = images[i]
                start_time = time.time()
                ret_vals = run_nerf.render(H // down, W // down, focal / down,
                                           c2w=poses[i], pc=pc, cloudsize=16,
                                           **render_kwargs)
                end_time = time.time()

                # add to cumulative time
                total_time += (end_time - start_time)

                # add to accuracy
                img = np.clip(ret_vals[0], 0, 1)
                # TODO: make sure this is commented out for real results (just used to test that it runs)
                # mse = run_nerf.img2mse(np.zeros((H//down, W//down, 3), dtype=np.float32), img)
                mse = run_nerf.img2mse(gt, img)
                psnr = run_nerf.mse2psnr(mse)
                total_mse += float(mse)
                total_psnr += float(psnr)

                plt.imsave(os.path.join(
                    img_dir, f'IMG{i}_{"pc" if pc else "no_pc"}_{num_samps}samples.png'), img)

            total_time /= 2.
            total_mse /= 2.
            total_psnr /= 2.
            results['pc' if pc else 'no_pc'][num_samps]['time'] = total_time
            results['pc' if pc else 'no_pc'][num_samps]['mse'] = total_mse
            results['pc' if pc else 'no_pc'][num_samps]['psnr'] = total_psnr

    with open(os.path.join(result_directory, 'results.txt'), 'w') as outfile:
        json.dump(results, outfile)
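# Optional follow-up sketch (assumption: the results.txt written by get_data()
# is on disk): reload the nested {pc/no_pc -> N_samples -> metrics} dictionary
# and plot PSNR against the number of coarse samples. Note that json turns the
# integer sample counts into string keys. Function and file names are
# hypothetical.
def plot_sample_sweep(result_directory="./fern_pc_results"):
    with open(os.path.join(result_directory, 'results.txt')) as f:
        results = json.load(f)
    fig, ax = plt.subplots(1, 1)
    fig.suptitle('PSNR vs N_samples')
    ax.set_xlabel('N_samples')
    ax.set_ylabel('PSNR')
    for key, label in [('pc', 'point cloud'), ('no_pc', 'baseline')]:
        samps = sorted(int(s) for s in results[key])
        ax.plot(samps, [results[key][str(s)]['psnr'] for s in samps], label=label)
    ax.legend()
    plt.savefig(os.path.join(result_directory, 'psnr_vs_samples.png'))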
def cloud_size_vs_performance():
    basedir = './logs'
    expname = 'fern_example'
    config = os.path.join(basedir, expname, 'config.txt')
    print('Args:')
    print(open(config, 'r').read())

    parser = run_nerf.config_parser()
    weights_name = 'model_200000.npy'
    args = parser.parse_args('--config {} --ft_path {}'.format(
        config, os.path.join(basedir, expname, weights_name)))
    print('loaded args')

    images, poses, bds, render_poses, i_test = load_llff_data(
        args.datadir, args.factor, recenter=True, bd_factor=.75,
        spherify=args.spherify)
    H, W, focal = poses[0, :3, -1].astype(np.float32)
    poses = poses[:, :3, :4]
    H = int(H)
    W = int(W)
    hwf = [H, W, focal]
    images = images.astype(np.float32)
    poses = poses.astype(np.float32)

    near = 0.
    far = 1.

    if not isinstance(i_test, list):
        i_test = [i_test]
    if args.llffhold > 0:
        print('Auto LLFF holdout,', args.llffhold)
        i_test = np.arange(images.shape[0])[::args.llffhold]

    _, render_kwargs, start, grad_vars, models = run_nerf.create_nerf(args)
    to_use = i_test[0]
    bds_dict = {
        'near': tf.cast(near, tf.float32),
        'far': tf.cast(far, tf.float32),
    }
    render_kwargs.update(bds_dict)
    print('Render kwargs:')
    pprint.pprint(render_kwargs)

    res_dir = "./cloud_size_test"
    os.makedirs(res_dir, exist_ok=True)  # make sure the output directory exists
    res = {}
    res['cloud_size'] = []
    res['mse'] = []
    res['psnr'] = []
    res['time'] = []

    for i in [1, 2, 4, 8, 16, 32]:
        print(f'Running with cloud downsampled {i}x')
        start_time = time.time()
        ret_vals = run_nerf.render(H, W, focal, c2w=poses[to_use], pc=True,
                                   cloudsize=i, **render_kwargs)
        end_time = time.time()

        img = np.clip(ret_vals[0], 0, 1)
        mse = run_nerf.img2mse(images[to_use], img)
        psnr = run_nerf.mse2psnr(mse)

        res['cloud_size'].append((17 * H * W) // (i * i))
        res['mse'].append(float(mse))
        res['psnr'].append(float(psnr))
        res['time'].append(end_time - start_time)

    # a = [1, 2, 4, 8, 16, 32]
    # b = [1/x for x in a]

    # make plots
    # cloud size vs PSNR
    fig, ax = plt.subplots(1, 1)
    fig.suptitle('PSNR vs Point Cloud Size')
    ax.set_xlabel('Cloud Size')
    ax.set_ylabel('PSNR')
    plt.xscale('log')
    ax.plot(res['cloud_size'], res['psnr'])
    plt.savefig(os.path.join(res_dir, 'cs_psnr.png'))

    # running time vs PSNR
    fig, ax = plt.subplots(1, 1)
    fig.suptitle('PSNR vs Running Time')
    ax.set_xlabel('Time (s)')
    ax.set_ylabel('PSNR')
    plt.xscale('log')
    ax.plot(res['time'], res['psnr'])
    plt.savefig(os.path.join(res_dir, 'time_psnr.png'))

    # cloud size vs running time
    fig, ax = plt.subplots(1, 1)
    fig.suptitle('Running Time vs Cloud Size')
    ax.set_xlabel('Cloud Size')
    ax.set_ylabel('Running Time (s)')
    plt.xscale('log')
    plt.yscale('log')
    ax.plot(res['cloud_size'], res['time'])
    plt.savefig(os.path.join(res_dir, 'cs_time.png'))

    with open(os.path.join(res_dir, 'results.txt'), 'w') as outfile:
        json.dump(res, outfile)
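# Quick-inspection sketch (assumption: the results.txt written by
# cloud_size_vs_performance() exists): print the sweep as a small table so the
# cloud-size / quality / runtime trade-off can be read off without opening the
# saved plots. The helper name is hypothetical.
def print_cloud_size_table(res_dir="./cloud_size_test"):
    with open(os.path.join(res_dir, 'results.txt')) as f:
        res = json.load(f)
    print(f'{"cloud size":>12} {"PSNR":>8} {"MSE":>10} {"time (s)":>10}')
    for cs, psnr, mse, t in zip(res['cloud_size'], res['psnr'], res['mse'], res['time']):
        print(f'{cs:>12d} {psnr:>8.2f} {mse:>10.5f} {t:>10.2f}')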
if __name__ == '__main__':
    args = parse_args()
    ckpt_path = os.path.join(args.basedir, args.expname)
    assert os.path.exists(ckpt_path)

    if args.random_seed is not None:
        print('Fixing random seed', args.random_seed)
        np.random.seed(args.random_seed)
        tf.compat.v1.set_random_seed(args.random_seed)
        random.seed(args.random_seed)  # TODO seed everything

    train_set, test_set = read_dataset(args.metadatadir)
    render_kwargs_train, render_kwargs_test, start, grad_vars, models = \
        create_nerf(args)
    # optimizer = tf.keras.optimizers.Adam(args.learning_rate, beta_1=0)
    test_writer = SummaryWriter(ckpt_path + '/test')

    loss_dict = meta_evaluate(models,
                              metalearning_iter=start,
                              test_scenes=test_set,
                              N_importance=args.N_importance,
                              half_res=args.half_res,
                              testskip=args.testskip,
                              white_bkgd=args.white_bkgd,
                              log_fn=print,
                              save_dir=ckpt_path + '/test',
                              N_rand=args.N_rand,
                              inner_iters=args.inner_iters)
images = images.astype(np.float32)
poses = poses.astype(np.float32)

if args.no_ndc:
    near = tf.reduce_min(bds) * .9
    far = tf.reduce_max(bds) * 1.
else:
    near = 0.
    far = 1.


# In[3]:

# Create nerf model
_, render_kwargs_test, start, grad_vars, models = run_nerf.create_nerf(args)

bds_dict = {
    'near': tf.cast(near, tf.float32),
    'far': tf.cast(far, tf.float32),
}
render_kwargs_test.update(bds_dict)

print('Render kwargs:')
pprint.pprint(render_kwargs_test)

down = 4
render_kwargs_fast = {k: render_kwargs_test[k] for k in render_kwargs_test}
render_kwargs_fast['N_importance'] = 0
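# Continuation sketch (assumption: this mirrors the fast-preview pattern used
# in new_render above, and H, W, focal from the LLFF loader are in scope):
# render one downsampled frame with the coarse network only (N_importance = 0)
# and display it.
ret = run_nerf.render(H // down, W // down, focal / down,
                      c2w=poses[0, :3, :4], **render_kwargs_fast)
img = np.clip(ret[0], 0, 1)
plt.imshow(img)
plt.show()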