def run(colmap_folder, sfs_results_path, gt_path, out_folder):
    #import pdb;pdb.set_trace();
    if not sfs_results_path.endswith('/'):
        sfs_results_path += '/'
    if not gt_path.endswith('/'):
        gt_path += '/'
    if not out_folder.endswith('/'):
        out_folder += '/'

    scene_manager = SceneManager(colmap_folder)
    scene_manager.load_cameras()
    camera = scene_manager.cameras[1] # assume single camera

    # initial values on unit sphere
    x, y = camera.get_image_grid()
    r_scale = np.sqrt(x * x + y * y + 1.)

    image = np.empty((camera.height, camera.width, 3))
    #image[:,:,0] = 1.

    if not os.path.exists(out_folder):
        os.mkdir(out_folder)

    # iterate through rendered BRDFs
    #for brdf_name in os.listdir(sfs_results_path):
    for brdf_name in ['phantom']:
        brdf_folder = sfs_results_path + brdf_name + '/'
        if not os.path.isdir(brdf_folder):
            continue

        with open(out_folder + brdf_name + '.csv', 'w') as f:
            print >> f, HEADER1 % brdf_name
            print >> f, HEADER2

            # iterate through reflectance models
            #for rm_name in os.listdir(brdf_folder):
            #for model_name in MODELS:
            model_folder = brdf_folder + '/' #model_name + '/'

            # percent of pixels within 1, 2, 5mm of the GT surface
            data = [list() for _ in THRESHOLDS]
            ###
            datasum = []
            ###

            # iterate through SFS surfaces
            for filename in os.listdir(model_folder):
                if not filename.endswith('_z.bin'):
                    continue

                name = filename[:-6]
                #import pdb;pdb.set_trace();
                util.save_sfs_ply(
                    '%s_%s.ply' % (name, 'est'),
                    np.dstack((x, y, np.ones_like(x))) * np.fromfile(
                        model_folder + filename, dtype=np.float32).reshape(
                            camera.height, camera.width, 1))
                #
                util.save_sfs_ply(
                    '%s_%s.ply' % (name, 'gt'),
                    np.dstack((x, y, np.ones_like(x))) / WORLD_SCALE *
                    np.fromfile(
                        gt_path + name + '.bin', dtype=np.float32).reshape(
                            camera.height, camera.width, 1))
                #break

                r_est = WORLD_SCALE * r_scale * np.fromfile(
                    model_folder + filename, dtype=np.float32).reshape(
                        camera.height, camera.width)
                r_gt = r_scale * np.fromfile(
                    gt_path + name + '.bin', dtype=np.float32).reshape(
                        camera.height, camera.width)

                rdiff = np.abs(r_est - r_gt)
                #rdiff = np.concatenate((
                #    rdiff[:,:20].ravel(), rdiff[:,-20:].ravel(),
                #    rdiff[20:-20,:20].ravel(), rdiff[20:-20,-20:].ravel()))
                #rdiff = rdiff[(r_gt > 5.0) & (r_gt < 20.0)]
                inv_size = 1. / float(rdiff.size)

                for i, t in enumerate(THRESHOLDS):
                    data[i].append(np.count_nonzero(rdiff < t) * inv_size)
                    #data[i].append(np.mean((rdiff / r_gt)[rdiff < t]))
                    #data[i].append(np.mean((r_est / r_gt)))

                ###
                datasum.append(1. - (r_est / r_gt).ravel())

            datasum = np.concatenate(datasum)
            print np.mean(datasum), np.std(datasum)
            ###

            data = np.array(data)
            data = np.mean(data, axis=1), np.std(data, axis=1)

            model_name = 'power'
            print >> f, model_name + ',' + ','.join(
                '%f,%f' % d for d in izip(*data))
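# A minimal, self-contained sketch (not part of the pipeline) of the per-threshold
# accuracy metric used in run() above: for each threshold t, the fraction of pixels
# whose absolute radial-depth error |r_est - r_gt| is below t. The toy arrays and
# the [1., 2., 5.] thresholds here are illustrative assumptions only; the real
# script reads r_est / r_gt from the *_z.bin and ground-truth .bin files and uses
# the module-level THRESHOLDS.
def _example_threshold_accuracy():
    import numpy as np  # local import so the sketch stands alone

    thresholds = [1., 2., 5.]                       # mm, mirroring THRESHOLDS
    r_gt = np.full((4, 4), 10.0, dtype=np.float32)  # fake ground-truth depths
    r_est = r_gt + np.linspace(-3., 3., 16).reshape(4, 4).astype(np.float32)

    rdiff = np.abs(r_est - r_gt)
    inv_size = 1. / float(rdiff.size)
    for t in thresholds:
        frac = np.count_nonzero(rdiff < t) * inv_size
        print 'fraction of pixels within %.0fmm: %.3f' % (t, frac)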
def run_iterative_sfs(out_file, model_type, fit_falloff, *extra_params):
    #if os.path.exists(out_file + '_z.bin'): # don't re-run existing results
    #    return

    if model_type == sfs.LAMBERTIAN_MODEL:
        model_name = 'LAMBERTIAN_MODEL'
    elif model_type == sfs.OREN_NAYAR_MODEL:
        model_name = 'OREN_NAYAR_MODEL'
    elif model_type == sfs.PHONG_MODEL:
        model_name = 'PHONG_MODEL'
    elif model_type == sfs.COOK_TORRANCE_MODEL:
        model_name = 'COOK_TORRANCE_MODEL'
    elif model_type == sfs.POWER_MODEL:
        model_name = 'POWER_MODEL'

    if model_type == sfs.POWER_MODEL:
        print '%s_%i' % (model_name, extra_params[0])
    else:
        print model_name

    S = S0.copy()
    z = S0[:,:,2]

    for iteration in xrange(MAX_NUM_ITER):
        print 'Iteration', iteration

        z_old = z

        if get_initial_warp:
            #z_gt = util.load_point_ply('C:\\Users\\user\\Documents\\UNC\\Research\\ColonProject\\code\\Rui\\SFS_CPU\\frame0859.jpg_gt.ply')
            # warp to 3D points
            if iteration > -1:
                S, r_ratios = nearest_neighbor_warp(
                    weights, nn_idxs, points2D_image, r_fixed,
                    util.generate_surface(camera, z))
                z_est = np.maximum(S[:,:,2], 1e-6)
                S = util.generate_surface(camera, z_est)
            else:
                z_est = z
                S = util.generate_surface(camera, z_est)
            #util.save_sfs_ply('warp' + '.ply', S, im_rgb)
            #util.save_xyz('test.xyz', points3D)
            #z = z_est
            #break
            #z_est = z
        else:
            #import pdb;pdb.set_trace()
            #z_est = extract_depth_map(camera, ref_surf_name, R, image)
            z_est = np.fromfile(
                'C:\\Users\\user\\Documents\\UNC\\Research\\ColonProject\\code'
                '\\SFS_Program_from_True\\endo_evaluation\\gt_surfaces'
                '\\frame0859.jpg.bin',
                dtype=np.float32).reshape(
                    camera.height, camera.width) / WORLD_SCALE
            z_est = z_est.astype(float)
            #S, r_ratios = nearest_neighbor_warp(weights, nn_idxs,
            #    points2D_image, r_fixed, util.generate_surface(camera, z_est))
            z_est = np.maximum(z_est[:,:], 1e-6)
            #Sworld = (S - image.tvec[np.newaxis,np.newaxis,:]).dot(R)
            S = util.generate_surface(camera, z_est)
            #util.save_sfs_ply('test' + '.ply', S, im_rgb)
            #util.save_sfs_ply(out_file + '_warp_%i.ply' % iteration, Sworld, im_rgb)
        #import pdb;pdb.set_trace()

        # if we need to, make corrections for non-positive depths
        #S = util.generate_surface(camera, z_est)
        mask = (z_est < INIT_Z)
        specular_mask = (L < 0.8)
        dark_mask = (L > 0.1)
        _mask = np.logical_and(specular_mask, mask)
        _mask = np.logical_and(_mask, dark_mask)

        # fit reflectance model
        r = np.linalg.norm(S, axis=-1)
        ndotl = util.calculate_ndotl(camera, S)
        falloff, model_params, residual = fit_reflectance_model(
            model_type, L[_mask], r.ravel(), r[_mask], ndotl.ravel(),
            ndotl[_mask], fit_falloff, camera.width, camera.height,
            *extra_params)
        #r = np.linalg.norm(S[specular_mask], axis=-1)
        #import pdb;pdb.set_trace()
        #model_params = np.array([26.15969874,-27.674055,-12.52426,7.579855,21.9768004,24.3911142,-21.7282996,-19.850894,-11.62229,-4.837014])
        #model_params = np.array([-19.4837,-490.4796,812.4527,-426.09107,139.2602,351.8061,-388.1591,875.5013,-302.4748,-414.4384])
        #falloff = 1.2
        #ndotl = util.calculate_ndotl(camera, S)[specular_mask]
        #falloff, model_params, residual = fit_reflectance_model(model_type,
        #    L[specular_mask], r, ndotl, fit_falloff, *extra_params)
        #r = np.linalg.norm(S[neighborhood_mask], axis=-1)
        #ndotl = util.calculate_ndotl(camera, S)[neighborhood_mask]
        #falloff, model_params, residual = fit_reflectance_model(model_type,
        #    L[neighborhood_mask], r, ndotl, fit_falloff, *extra_params)

        # lambda values reflect our confidence in the current surface: 0
        # corresponds to only using SFS at a pixel, 1 corresponds to equally
        # weighting SFS and the current estimate, and larger values
        # increasingly favor using only the current estimate
        rdiff = np.abs(r_fixed - get_estimated_r(S, points2D_image))
        w = np.log10(r_fixed) - np.log10(rdiff) - np.log10(2.)
        lambdas = (np.sum(weights * w[nn_idxs], axis=-1) /
                   np.sum(weights, axis=-1))
        lambdas = np.maximum(lambdas, 0.) # just in case
        #lambdas[~mask] = 0

        #if iteration == 0: # don't use current estimated surface on first pass
        #    lambdas = np.zeros_like(z)
        #else:
        #    r_ratios_postwarp = r_fixed / get_estimated_r(S, points2D_image)
        #    ratio_diff = np.abs(r_ratios_prewarp - r_ratios_postwarp)
        #    ratio_diff[ratio_diff == 0] = 1e-10 # arbitrarily high lambda
        #    feature_lambdas = 1. / ratio_diff
        #    lambdas = (np.sum(weights * feature_lambdas[nn_idxs], axis=-1) /
        #               np.sum(weights, axis=-1))

        # run SFS
        H_lut, dH_lut = compute_H_LUT(model_type, model_params, NUM_H_LUT_BINS)
        #import pdb;pdb.set_trace()
        #H_lut = np.ascontiguousarray(H_lut.astype(np.float32))
        #dH_lut = np.ascontiguousarray(dH_lut.astype(np.float32))
        z = run_sfs(H_lut, dH_lut, camera, L, lambdas, z_est, model_type,
                    model_params, falloff, vmask,
                    use_image_weighted_derivatives)

        # check for convergence
        #diff = np.sum(np.abs(z_old[specular_mask] - z[specular_mask]))
        #if diff < CONVERGENCE_THRESHOLD * camera.height * camera.width:
        #    break

        # save the surface
        #S = util.generate_surface(camera, z)
        #S = (S - image.tvec[np.newaxis,np.newaxis,:]).dot(R)
        #util.save_sfs_ply(out_file + '_%i.ply' % iteration, S, im_rgb)
    else:
        print 'DID NOT CONVERGE'

    #import pdb;pdb.set_trace()
    S = util.generate_surface(camera, z)
    #S = (S - image.tvec[np.newaxis,np.newaxis,:]).dot(R)
    util.save_sfs_ply(out_file + '.ply', S, im_rgb)
    z.astype(np.float32).tofile(out_file + '_z.bin')

    # save the surface
    #S = util.generate_surface(camera, z)
    #S = (S - image.tvec[np.newaxis,np.newaxis,:]).dot(R)
    #S, r_ratios = nearest_neighbor_warp(weights, nn_idxs,
    #    points2D_image, r_fixed, util.generate_surface(camera, z))
    #util.save_sfs_ply(out_file + '_warped.ply', S, im_rgb)
    #z = np.maximum(S[:,:,2], 1e-6)
    #z.astype(np.float32).tofile(out_file + '_warped_z.bin')
    #reflectance_models.save_reflectance_model(out_file + '_reflectance.txt',
    #    model_name, residual, model_params, falloff, *extra_params)

    print
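# A small illustrative sketch (not part of the pipeline) of the per-feature
# confidence weight computed in run_iterative_sfs() above:
#     w = log10(r_fixed) - log10(rdiff) - log10(2) = log10(r_fixed / (2 * rdiff))
# so w == 0 when the warp residual equals half the feature depth, increases by 1
# for every 10x reduction in residual, and goes negative (later clamped to 0) for
# larger residuals. The numbers below are made up for demonstration only.
def _example_confidence_weight():
    import numpy as np  # local import so the sketch stands alone

    r_fixed = np.array([10., 10., 10.])  # feature depths from the sparse points
    rdiff = np.array([5., 0.5, 20.])     # |r_fixed - estimated r| after warping

    w = np.log10(r_fixed) - np.log10(rdiff) - np.log10(2.)
    print w                   # approximately [0., 1., -0.602]
    print np.maximum(w, 0.)   # clamped at 0, as in the main loop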