import torch
from os.path import join as pjn  # 'pjn' is assumed to alias os.path.join
from torch.utils.data import DataLoader

import helpers
from model import AtlasNetReimpl
# ShapeNet and DataLoaderDevice come from this project's data utilities;
# their module paths are not shown in this excerpt.


def pcInference(path_conf: str, path_weight: str, path_save: str,
                numepoch: int = 1):
    ''' Reconstructs and saves point clouds, given the path to the
    configuration file, the pretrained weights and the path under which to
    save the inferred point clouds. This function has to be run from the
    training folder, i.e. where model.py and helpers.py are.

    Parameters
    ----------
    path_conf : str
        Path to the configuration file used for training.
    path_weight : str
        Path to the pretrained weights.
    path_save : str
        Directory in which to save the inferred point clouds.
    numepoch : int
        Number of passes (epochs) over the validation set.

    Returns
    -------
    None.
    '''
    # Load configuration and weights, check GPU availability.
    conf = helpers.load_conf(path_conf)
    trstate = torch.load(path_weight)
    gpu = torch.cuda.is_available()

    # Resume the pretrained model.
    model = AtlasNetReimpl(
        M=conf['M'], code=conf['code'],
        num_patches=conf['num_patches'],
        normalize_cw=conf['normalize_cw'],
        freeze_encoder=conf['enc_freeze'],
        enc_load_weights=conf['enc_weights'],
        dec_activ_fns=conf['dec_activ_fns'],
        dec_use_tanh=conf['dec_use_tanh'],
        dec_batch_norm=conf['dec_batch_norm'],
        loss_scaled_isometry=conf['loss_scaled_isometry'],
        alpha_scaled_isometry=conf['alpha_scaled_isometry'],
        alphas_sciso=conf['alphas_sciso'], gpu=gpu)
    model.load_state_dict(trstate['weights'])

    # Prepare the data set (shuffling is turned off).
    ds_va = ShapeNet(
        conf['path_root_imgs'], conf['path_root_pclouds'],
        conf['path_category_file'], class_choice=conf['va_classes'],
        train=False, npoints=conf['N'], load_area=True)
    dl_va = DataLoaderDevice(
        DataLoader(ds_va, batch_size=conf['batch_size'], shuffle=False,
                   num_workers=2, drop_last=True),
        gpu=gpu)

    # Point cloud inference. File names are indexed by the number of batches
    # per epoch (exact because drop_last=True) so that they stay unique
    # across epochs.
    batches_per_epoch = len(ds_va) // conf['batch_size']
    for e in range(numepoch):
        for bi, batch in enumerate(dl_va):
            model(batch['pcloud'])
            idx = bi + e * batches_per_epoch
            torch.save(model.pc_pred.detach().cpu(),
                       pjn(path_save, 'pc{}.pt'.format(idx)))
            torch.save(batch['pcloud'].cpu(),
                       pjn(path_save, 'gtpc{}.pt'.format(idx)))
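
if __name__ == '__main__':
    # Minimal usage sketch; the paths are hypothetical placeholders and must
    # point to an actual config file, checkpoint and (existing) output
    # directory. Each processed batch yields a pc<i>.pt / gtpc<i>.pt pair
    # that can be reloaded with torch.load() for visualization.
    pcInference('config.yaml', './weights/chkpt_plane.tar', './predictions')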
parser.add_argument('--conf',
                    help='Path to the main config file of the model.',
                    default='config.yaml')
parser.add_argument('--output',
                    help='Path to the output directory for storing '
                         'weights and tensorboard data.',
                    default='./data')
parser.add_argument('--resume',
                    help='Resume training from the given path.',
                    default=False)
args = parser.parse_args()
args.resume = False  # NOTE: resuming is force-disabled, overriding the CLI flag.

# Load the config file, prepare paths.
conf = helpers.load_conf(args.conf)

# Model type, color mode.
model_type = 'atlasnet_orig'

# Prepare TB writers.
writer_tr = SummaryWriter(helpers.jn(args.output, 'tr'))
writer_va = SummaryWriter(helpers.jn(args.output, 'va'))

# Build a model.
model = AtlasNetReimplEncImg(
    M=conf['M'], code=conf['code'],
    num_patches=conf['num_patches'],
    normalize_cw=conf['normalize_cw'],
    freeze_encoder=conf['enc_freeze'],
def compareOurs(path_conf: str, path_weight: str):
    ''' Computes the stitching error and the normal difference for the given
    model with the given configuration.

    Parameters
    ----------
    path_conf : str
        Path to the configuration file.
    path_weight : str
        Path to the pretrained model.

    Returns
    -------
    stitchCriterion : list
        Stitching loss.
    normalDifference : list
        Normal difference.
    '''
    # Load configuration and weights, check GPU availability.
    conf = helpers.load_conf(path_conf)
    trstate = torch.load(path_weight)
    gpu = torch.cuda.is_available()

    # Subfolder in which to save the predicted point clouds.
    folder2save = pjn('/'.join(path_weight.split('/')[:-1]), 'prediction')
    if not os.path.isdir(folder2save):
        os.mkdir(folder2save)

    #### ONLY FOR EVALUATION ####
    conf['loss_patch_area'] = True
    conf['show_overlap_criterion'] = True
    conf['overlap_threshold'] = 0.05
    conf['loss_smooth_surfaces'] = True
    conf['loss_patch_stitching'] = False
    conf['alpha_stitching'] = 0.001
    conf['show_analyticalNormalDiff'] = True
    conf['surface_normal'] = True
    conf['surface_varinace'] = True
    conf['knn_Global'] = 20
    conf['knn_Patch'] = 20
    conf['PredNormalforpatchwise'] = False
    #### ONLY FOR EVALUATION ####

    # Resume the pretrained model.
    model = AtlasNetReimplEncImg(
        M=conf['M'], code=conf['code'],
        num_patches=conf['num_patches'],
        normalize_cw=conf['normalize_cw'],
        freeze_encoder=conf['enc_freeze'],
        enc_load_weights=conf['enc_weights'],
        dec_activ_fns=conf['dec_activ_fns'],
        dec_use_tanh=conf['dec_use_tanh'],
        dec_batch_norm=conf['dec_batch_norm'],
        loss_scaled_isometry=conf['loss_scaled_isometry'],
        loss_patch_areas=conf['loss_patch_area'],
        loss_smooth_surfaces=conf['loss_smooth_surfaces'],
        loss_patch_stitching=conf['loss_patch_stitching'],
        numNeighborGlobal=conf['knn_Global'],
        numNeighborPatchwise=conf['knn_Patch'],
        alpha_scaled_isometry=conf['alpha_scaled_isometry'],
        alphas_sciso=conf['alphas_sciso'],
        alpha_scaled_surfProp=conf['alpha_surfProp'],
        alpha_stitching=conf['alpha_stitching'],
        useSurfaceNormal=conf['surface_normal'],
        useSurfaceVariance=conf['surface_varinace'],
        angleThreshold=conf['angle_threshold'] / 180 * np.pi,
        rejGlobalandPatch=conf['reject_GlobalandPatch'],
        predNormalasPatchwise=conf['PredNormalforpatchwise'],
        overlap_criterion=conf['show_overlap_criterion'],
        overlap_threshold=conf['overlap_threshold'],
        enableAnaNormalErr=conf['show_analyticalNormalDiff'],
        marginSize=conf['margin_size'],
        gpu=gpu)
    model.load_state_dict(trstate['weights'])

    # Use a regular grid sampler for evaluation.
    model.sampler = FNSamplerRegularGrid(
        (0., 1.), (0., 1.), model._num_patches * model._spp,
        model._num_patches, gpu=gpu)

    # Prepare the data set.
    K = np.loadtxt(conf['path_intrinsic_matrix'])
    ds_va = ImgAndPcloudFromDmapAndNormalsSyncedDataset(
        conf['path_root'], conf['obj_seqs_te'], K, conf['N'],
        compute_area=True)
    dl_va = DataLoaderDevice(
        DataLoader(ds_va, batch_size=conf['batch_size'], shuffle=False,
                   num_workers=2),
        gpu=gpu)

    # Point cloud inference.
    stitchCriterion = []
    normalDifference = []
    ConsistencyLoss = []
    overlapCriterion = []
    analyNormalError = []
    chamferDistance = []
    for bi, batch in enumerate(dl_va):
        it = bi
        model(batch['img'], it=it)
        losses = model.loss(batch['pc'], normals_gt=batch['normals'],
                            areas_gt=batch['area'])

        stitchCriterion.append(losses['Err_stitching'].to('cpu'))
        normalDifference.append(losses['normalDiff'].to('cpu'))
        ConsistencyLoss.append(losses['L_surfProp'].detach().to('cpu'))
        overlapCriterion.append(losses['overlapCriterion'].to('cpu'))
        analyNormalError.append(losses['analyticalNormalDiff'].to('cpu'))
        chamferDistance.append(losses['loss_chd'].detach().to('cpu'))

        torch.cuda.empty_cache()
        # torch.save(model.pc_pred.detach().cpu(),
        #            pjn(folder2save, 'regularSample{}.pt'.format(bi)))

    criterion = torch.cat(
        (torch.tensor(stitchCriterion)[:, None],
         torch.tensor(normalDifference)[:, None],
         torch.tensor(ConsistencyLoss)[:, None],
         torch.tensor(overlapCriterion)[:, None],
         torch.tensor(analyNormalError)[:, None],
         torch.tensor(chamferDistance)[:, None]), dim=1).numpy()

    # Save all per-batch errors for reference ('bi' is the index of the last
    # batch here, so it tags the files with the number of evaluated batches).
    header = ('stitching_error, normalAngulardiff, consistency_loss, '
              'overlapCriterion, analyticalNormalAngularDiff, CHD')
    with open(pjn(folder2save,
                  'regularSampleFull{}_errors.txt'.format(bi)), 'w') as f:
        np.savetxt(f, criterion, delimiter=',', header=header, comments='#')

    # Save the average errors; invalid overlap values (<= 0) are removed
    # before averaging.
    avgError = criterion.mean(axis=0)
    avgError[3] = criterion[criterion[:, 3] > 0, 3].mean()
    with open(pjn(folder2save,
                  'regularSampleFull{}_avgErrors.txt'.format(bi)), 'w') as f:
        np.savetxt(f, avgError, delimiter=',', header=header, comments='#')

    return stitchCriterion, normalDifference
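
if __name__ == '__main__':
    # Minimal usage sketch; both paths are hypothetical placeholders and must
    # point to an actual config file and checkpoint. The returned lists hold
    # one scalar tensor per evaluated batch; the same numbers are also
    # written as text files to <weight_dir>/prediction/.
    stitch_err, normal_diff = compareOurs('config.yaml',
                                          './weights/chkpt_plane.tar')
    print('mean stitching error:', torch.tensor(stitch_err).mean().item())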
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--config', help='Path to configuration file.')
group.add_argument(
    '--cont',
    help='Path to train run directory in which the training will be '
         'continued.')
parser.add_argument('--model_state', help='Path to model weights to load.')
parser.add_argument('--optim_state',
                    help='Path to optimizer config and params to load.')
args = parser.parse_args()

# Load the config file, prepare paths.
if args.cont:
    path_conf, path_mparams, path_oparams = \
        helpers.get_conf_model_optim(args.cont)
    conf = helpers.load_conf(path_conf)
    path_trrun = args.cont
    args.model_state = path_mparams
    args.optim_state = path_oparams
else:
    conf, path_trrun = \
        helpers.load_save_conf(args.config, fn=helpers.cerate_trrun_name)

use_nmap = conf['normals_stream']
use_dmap = conf['depth_stream']
use_pc = conf['mesh_stream']

# Set TF session.
if conf['flag_use_gpu_fraction']:
    gpu_options = tf.GPUOptions(
        per_process_gpu_memory_fraction=conf['gpu_fraction'])
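    # Sketch of the typical next step under TF 1.x (assumed from the
    # tf.GPUOptions call above; not part of the original excerpt):
    sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))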
from model import AtlasNetReimpl
import helpers
import torch

path_conf = 'config.yaml'
path_weights = '/cvlabdata2/home/zdeng/weights/pcae_shapenet/chkpt_plane.tar'

conf = helpers.load_conf(path_conf)
trstate = torch.load(path_weights)

model = AtlasNetReimpl(
    M=conf['M'], code=conf['code'],
    num_patches=conf['num_patches'],
    normalize_cw=conf['normalize_cw'],
    freeze_encoder=conf['enc_freeze'],
    enc_load_weights=conf['enc_weights'],
    dec_activ_fns=conf['dec_activ_fns'],
    dec_use_tanh=conf['dec_use_tanh'],
    dec_batch_norm=conf['dec_batch_norm'],
    loss_scaled_isometry=conf['loss_scaled_isometry'],
    alpha_scaled_isometry=conf['alpha_scaled_isometry'],
    alphas_sciso=conf['alphas_sciso'], gpu=True)
model.load_state_dict(trstate['weights'])
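
# Smoke-test sketch (assumptions: the model consumes a (B, N, 3) point-cloud
# batch, as pcInference above does with model(batch['pcloud']), and a CUDA
# device is available since the model was built with gpu=True):
pc = torch.rand(2, conf['N'], 3).cuda()
model(pc)
print(model.pc_pred.shape)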