def get_depths(hwf, poses, chunk, render_kwargs):
    """Render every pose and collect the rays, RGB images, disparity maps,
    and per-ray depth maps."""
    rays_l = []
    rgbs = []
    disps = []
    depths = []
    H, W, focal = hwf
    t = time.time()
    for i, c2w in enumerate(tqdm(poses)):
        print(i, time.time() - t)
        t = time.time()
        rays = get_rays(H, W, focal, c2w)
        rgb, disp, acc, all_ret = render(H, W, focal, chunk=chunk, c2w=c2w[:3, :4],
                                         rays=rays, **render_kwargs)
        rays_l.append(rays)
        rgbs.append(rgb.cpu().numpy())
        disps.append(disp.cpu().numpy())
        depths.append(all_ret["depth_map"])
    return rays_l, rgbs, disps, depths
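# Hedged usage sketch for get_depths: hwf, poses, i_test, and render_kwargs_test are
# assumed to exist in the surrounding notebook cells, and the chunk size is just the
# default used by the reference NeRF code.
rays_l, rgbs, disps, depths = get_depths(hwf, poses[i_test], chunk=1024 * 32,
                                         render_kwargs=render_kwargs_test)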
def new_render(pc=False, sparse=False, down=16):
    """Render the first pose at reduced resolution with either the point-cloud
    renderer (run_nerf) or the sparse renderer (run_nerf_fast) and return the
    clipped RGB image and normalized disparity.  args, near, far, H, W, focal,
    and poses are notebook globals."""
    _, render_kwargs_test, start, grad_vars, models = run_nerf.create_nerf(args)
    bds_dict = {
        'near': tf.cast(near, tf.float32),
        'far': tf.cast(far, tf.float32),
    }
    render_kwargs_test.update(bds_dict)
    print('Render kwargs:')
    pprint.pprint(render_kwargs_test)

    render_kwargs_fast = {k: render_kwargs_test[k] for k in render_kwargs_test}
    render_kwargs_fast['N_importance'] = 128

    # c2w = np.eye(4)[:3, :4].astype(np.float32)  # identity pose matrix
    if not sparse:
        test = run_nerf.render(H // down, W // down, focal / down, c2w=poses[0],
                               pc=pc, cloudsize=16, **render_kwargs_fast)
    else:
        test = run_nerf_fast.render(H // down, W // down, focal / down,
                                    c2w=poses[0], **render_kwargs_fast)

    img = np.clip(test[0], 0, 1)
    disp = test[1]
    disp = (disp - np.min(disp)) / (np.max(disp) - np.min(disp))
    return img, disp
def new_render(img_dir, fast=False, r2=128, d=3):
    """Render one pose with either the baseline NeRF or the sparse FastNeRF
    renderer, save the rendered image, and return (render time, MSE, PSNR)
    against the downsampled ground truth.  args, near, far, H, W, focal, poses,
    down, and gt_down come from the surrounding notebook cells."""
    _, render_kwargs_test, start, grad_vars, models = run_nerf_fast.create_nerf(args)
    bds_dict = {
        'near': tf.cast(near, tf.float32),
        'far': tf.cast(far, tf.float32),
    }
    render_kwargs_test.update(bds_dict)
    print('Render kwargs:')
    pprint.pprint(render_kwargs_test)

    render_kwargs_fast = {k: render_kwargs_test[k] for k in render_kwargs_test}
    render_kwargs_fast['N_importance'] = r2

    # c2w = np.eye(4)[:3, :4].astype(np.float32)  # identity pose matrix
    c2w = poses[0]

    start_time = time.time()
    if fast:
        test = run_nerf_fast.render(H // down, W // down, focal / down, c2w=c2w,
                                    d_factor=d, **render_kwargs_fast)
    else:
        test = run_nerf.render(H // down, W // down, focal / down, c2w=c2w,
                               pc=False, **render_kwargs_fast)
    end_time = time.time()
    net = end_time - start_time

    img = np.clip(test[0], 0, 1)
    # save the rendered image (not the ground truth) under a descriptive name
    if not fast:
        plt.imsave(os.path.join(img_dir, "NeRF_render.png"), img)
    else:
        plt.imsave(os.path.join(img_dir, f"FastNeRF_sparse_{d}x.png"), img)

    mse = run_nerf_helpers_fast.img2mse(tf.cast(gt_down, tf.float32), img)
    psnr = run_nerf_helpers_fast.mse2psnr(mse)
    mse = float(mse)
    psnr = float(psnr)
    return net, mse, psnr
def get_data():
    """Benchmark point-cloud-assisted rendering ('pc') against the baseline
    ('no_pc') on two held-out fern views, sweeping the number of coarse samples,
    and dump the timings, MSE, and PSNR to results.txt."""
    basedir = './logs'
    expname = 'fern_example'

    config = os.path.join(basedir, expname, 'config.txt')
    print('Args:')
    print(open(config, 'r').read())

    parser = run_nerf.config_parser()
    weights_name = 'model_200000.npy'
    args = parser.parse_args('--config {} --ft_path {}'.format(
        config, os.path.join(basedir, expname, weights_name)))
    print('loaded args')

    images, poses, bds, render_poses, i_test = load_llff_data(
        args.datadir, args.factor, recenter=True, bd_factor=.75,
        spherify=args.spherify)
    H, W, focal = poses[0, :3, -1].astype(np.float32)
    poses = poses[:, :3, :4]
    H = int(H)
    W = int(W)
    hwf = [H, W, focal]

    images = images.astype(np.float32)
    poses = poses.astype(np.float32)

    near = 0.
    far = 1.

    if not isinstance(i_test, list):
        i_test = [i_test]
    if args.llffhold > 0:
        print('Auto LLFF holdout,', args.llffhold)
        i_test = np.arange(images.shape[0])[::args.llffhold]

    _, render_kwargs, start, grad_vars, models = run_nerf.create_nerf(args)

    bds_dict = {
        'near': tf.cast(near, tf.float32),
        'far': tf.cast(far, tf.float32),
    }
    render_kwargs.update(bds_dict)
    print('Render kwargs:')
    pprint.pprint(render_kwargs)

    results = {}
    results['pc'] = {}
    results['no_pc'] = {}

    # NOTE: where to output results
    result_directory = "./fern_pc_results"
    img_dir = os.path.join(result_directory, "imgs")
    os.makedirs(img_dir, exist_ok=True)

    down = 1
    plt.imsave(os.path.join(img_dir, f"GT{i_test[0]}.png"), images[i_test[0]])
    plt.imsave(os.path.join(img_dir, f"GT{i_test[1]}.png"), images[i_test[1]])

    for num_samps in [4, 8, 16, 32, 64]:
        print(f'Running {num_samps} sample test')
        for pc in [True, False]:
            print(f'{"not " if not pc else ""}using pc')
            results['pc' if pc else 'no_pc'][num_samps] = {}
            render_kwargs['N_samples'] = num_samps
            render_kwargs['N_importance'] = 2 * num_samps

            total_time = 0
            total_mse = 0
            total_psnr = 0
            for i in [i_test[0], i_test[1]]:
                gt = images[i]
                start_time = time.time()
                ret_vals = run_nerf.render(H // down, W // down, focal / down,
                                           c2w=poses[i], pc=pc, cloudsize=16,
                                           **render_kwargs)
                end_time = time.time()

                # accumulate render time
                total_time += (end_time - start_time)

                # accumulate accuracy
                img = np.clip(ret_vals[0], 0, 1)
                # TODO: make sure this is commented out for real results
                #       (just used to test that it runs)
                # mse = run_nerf.img2mse(np.zeros((H//down, W//down, 3), dtype=np.float32), img)
                mse = run_nerf.img2mse(gt, img)
                psnr = run_nerf.mse2psnr(mse)
                total_mse += float(mse)
                total_psnr += float(psnr)
                plt.imsave(os.path.join(
                    img_dir,
                    f'IMG{i}_{"pc" if pc else "no_pc"}_{num_samps}samples.png'), img)

            # average over the two test views
            total_time /= 2.
            total_mse /= 2.
            total_psnr /= 2.
            results['pc' if pc else 'no_pc'][num_samps]['time'] = total_time
            results['pc' if pc else 'no_pc'][num_samps]['mse'] = total_mse
            results['pc' if pc else 'no_pc'][num_samps]['psnr'] = total_psnr

    with open(os.path.join(result_directory, 'results.txt'), 'w') as outfile:
        json.dump(results, outfile)
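# Optional hedged helper (not in the original code): reload the results.txt that
# get_data() writes and print a quick time/PSNR comparison per sample count.
def summarize_results(result_directory="./fern_pc_results"):
    with open(os.path.join(result_directory, "results.txt")) as f:
        results = json.load(f)
    for key in ("pc", "no_pc"):
        # JSON object keys come back as strings, so sort them numerically
        for num_samps, stats in sorted(results[key].items(), key=lambda kv: int(kv[0])):
            print(f'{key:5s} N_samples={num_samps:>3}: '
                  f'time={stats["time"]:.2f}s  psnr={stats["psnr"]:.2f}')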
def cloud_size_vs_performance():
    """Measure how point cloud size (set via the cloud downsampling factor)
    affects rendering time and accuracy on a single held-out fern view, then
    save plots and a results.txt summary."""
    basedir = './logs'
    expname = 'fern_example'

    config = os.path.join(basedir, expname, 'config.txt')
    print('Args:')
    print(open(config, 'r').read())

    parser = run_nerf.config_parser()
    weights_name = 'model_200000.npy'
    args = parser.parse_args('--config {} --ft_path {}'.format(
        config, os.path.join(basedir, expname, weights_name)))
    print('loaded args')

    images, poses, bds, render_poses, i_test = load_llff_data(
        args.datadir, args.factor, recenter=True, bd_factor=.75,
        spherify=args.spherify)
    H, W, focal = poses[0, :3, -1].astype(np.float32)
    poses = poses[:, :3, :4]
    H = int(H)
    W = int(W)
    hwf = [H, W, focal]

    images = images.astype(np.float32)
    poses = poses.astype(np.float32)

    near = 0.
    far = 1.

    if not isinstance(i_test, list):
        i_test = [i_test]
    if args.llffhold > 0:
        print('Auto LLFF holdout,', args.llffhold)
        i_test = np.arange(images.shape[0])[::args.llffhold]

    _, render_kwargs, start, grad_vars, models = run_nerf.create_nerf(args)
    to_use = i_test[0]

    bds_dict = {
        'near': tf.cast(near, tf.float32),
        'far': tf.cast(far, tf.float32),
    }
    render_kwargs.update(bds_dict)
    print('Render kwargs:')
    pprint.pprint(render_kwargs)

    res_dir = "./cloud_size_test"
    os.makedirs(res_dir, exist_ok=True)

    res = {}
    res['cloud_size'] = []
    res['mse'] = []
    res['psnr'] = []
    res['time'] = []

    for i in [1, 2, 4, 8, 16, 32]:
        print(f'Running with cloud downsampled {i}x')
        start_time = time.time()
        ret_vals = run_nerf.render(H, W, focal, c2w=poses[to_use], pc=True,
                                   cloudsize=i, **render_kwargs)
        end_time = time.time()

        img = np.clip(ret_vals[0], 0, 1)
        mse = run_nerf.img2mse(images[to_use], img)
        psnr = run_nerf.mse2psnr(mse)

        res['cloud_size'].append((17 * H * W) // (i * i))
        res['mse'].append(float(mse))
        res['psnr'].append(float(psnr))
        res['time'].append(end_time - start_time)

    # make plots
    # cloud size vs PSNR
    fig, ax = plt.subplots(1, 1)
    fig.suptitle('PSNR vs Point Cloud Size')
    ax.set_xlabel('Cloud Size')
    ax.set_ylabel('PSNR')
    plt.xscale('log')
    ax.plot(res['cloud_size'], res['psnr'])
    plt.savefig(os.path.join(res_dir, 'cs_psnr.png'))

    # running time vs PSNR
    fig, ax = plt.subplots(1, 1)
    fig.suptitle('PSNR vs Running Time')
    ax.set_xlabel('Time')
    ax.set_ylabel('PSNR')
    plt.xscale('log')
    ax.plot(res['time'], res['psnr'])
    plt.savefig(os.path.join(res_dir, 'time_psnr.png'))

    # cloud size vs running time
    fig, ax = plt.subplots(1, 1)
    fig.suptitle('Running Time vs Cloud Size')
    ax.set_xlabel('Cloud Size')
    ax.set_ylabel('Running Time')
    plt.xscale('log')
    plt.yscale('log')
    ax.plot(res['cloud_size'], res['time'])
    plt.savefig(os.path.join(res_dir, 'cs_time.png'))

    with open(os.path.join(res_dir, 'results.txt'), 'w') as outfile:
        json.dump(res, outfile)
bds_dict = {
    'near': tf.cast(near, tf.float32),
    'far': tf.cast(far, tf.float32),
}
render_kwargs_test.update(bds_dict)
print('Render kwargs:')
pprint.pprint(render_kwargs_test)

down = 4
render_kwargs_fast = {k: render_kwargs_test[k] for k in render_kwargs_test}
render_kwargs_fast['N_importance'] = 0

c2w = np.eye(4)[:3, :4].astype(np.float32)  # identity pose matrix
test = run_nerf.render(H // down, W // down, focal / down, c2w=c2w,
                       **render_kwargs_fast)
img = np.clip(test[0], 0, 1)
# plt.imshow(img)
# plt.show()


# In[9]:


# down started at 8 in the reference demo; down = 1 renders the video frames at
# full resolution, which is slower but avoids the resolution/aliasing loss
down = 1
frames = []
for i, c2w in enumerate(render_poses):
    if i % 8 == 0:
        print(i)
    test = run_nerf.render(H // down, W // down, focal / down, c2w=c2w[:3, :4],
                           **render_kwargs_fast)
    frames.append((255 * np.clip(test[0], 0, 1)).astype(np.uint8))
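# Hedged follow-up sketch: the frames collected above are intended for a turntable
# video.  Assuming imageio is available (as in the reference NeRF demo notebook),
# they can be written out as below; the output filename is illustrative only.
import imageio
imageio.mimwrite('render_spiral.mp4', frames, fps=30, quality=8)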
def make_point_cloud(hwf, poses, i_train, args, render_kwargs, down=32):
    '''Makes a 3D point cloud using estimated depth data from the training images.'''
    near = 1.
    H, W, focal = hwf

    # use only the training images
    all_points = []
    centers = []
    for i in range(len(i_train)):
        print(f'Working on image #{i + 1}')
        c2w = poses[i_train[i]]
        centers.append(c2w[np.newaxis, :3, -1])

        rays_o, rays_d = get_rays(H // down, W // down, focal / down, c2w)
        res = run_nerf.render(H // down, W // down, focal / down, c2w=c2w,
                              **render_kwargs)
        t_prime = res[3]['depth_map']

        # convert to numpy
        rays_o, rays_d, t_prime = rays_o.numpy(), rays_d.numpy(), t_prime.numpy()
        oz, dz = rays_o[..., 2], rays_d[..., 2]

        # account for the origin being shifted to the near plane before NDC conversion
        tn = -(near + oz) / dz
        on = rays_o + tn[..., np.newaxis] * rays_d
        oz = on[..., 2]

        # solve for t given t_prime using equation 15 from the paper:
        # t_prime is the NDC ray depth, while t is the real-world ray depth
        t = (-1. * t_prime) + 1.   # 1 - t'
        t = 1. / t                 # 1 / (1 - t')
        t = oz * t                 # oz / (1 - t')
        t = t - oz                 # oz / (1 - t') - oz
        t = t / dz                 # (oz / (1 - t') - oz) / dz

        # back-project each ray to a world-space point
        points_3d = on + t[..., np.newaxis] * rays_d
        points_3d = points_3d.reshape((-1, 3))
        all_points.append(points_3d)

    all_points = np.concatenate(all_points, axis=0)
    centers = np.concatenate(centers, axis=0)
    np.save(f"./clouds/pointcloud_down{down}.npy", all_points)
    np.save("./clouds/camcenters.npy", centers)
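# A minimal standalone sketch (not part of run_nerf) of the NDC depth inversion that
# make_point_cloud performs step by step above: it collapses equation 15 into a single
# expression and checks that it round-trips against the forward NDC depth mapping.
# The function name ndc_depth_to_world is illustrative only.
def ndc_depth_to_world(t_prime, o_nz, d_z):
    """Invert t' = 1 - o_nz / (o_nz + t * d_z) for the world-space ray depth t,
    where o_nz and d_z are the z-components of the near-plane-shifted ray origin
    and the (unnormalized) ray direction."""
    return (o_nz / (1. - t_prime) - o_nz) / d_z

# quick sanity check: forward-map a known depth, then invert it
_o_nz, _d_z, _t = -1.0, -0.5, 2.0
_t_prime = 1. - _o_nz / (_o_nz + _t * _d_z)
assert np.isclose(ndc_depth_to_world(_t_prime, _o_nz, _d_z), _t)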