import os
from os.path import join as pjn

import numpy as np
import torch
from torch.utils.data import DataLoader

import helpers                    # project-local (this script runs next to helpers.py)
from model import AtlasNetReimpl  # project-local (assumed to live in model.py)
# ShapeNet, DataLoaderDevice and the other project-local classes used below come
# from modules not shown in this excerpt.


def pcInference(path_conf: str, path_weight: str, path_save: str, numepoch: int = 1):
    '''
    Reconstructs point clouds with a pretrained model and saves them, given the
    paths to the configuration file, the pretrained weights and the output
    directory. See the usage sketch after this function.

    This function has to be run from the training folder, i.e. the folder that
    contains model.py and helpers.py.

    Parameters
    ----------
    path_conf : str
        path to the configuration file used for training.
    path_weight : str
        path to the pretrained weights.
    path_save : str
        directory where the inferred point clouds are saved.
    numepoch : int
        number of passes over the validation loader to run inference for.

    Returns
    -------
    None.

    '''
    # load the configuration and the pretrained weights; check gpu state
    conf = helpers.load_conf(path_conf)
    gpu = torch.cuda.is_available()
    trstate = torch.load(path_weight, map_location=None if gpu else 'cpu')

    # resume pretrained model
    model = AtlasNetReimpl(
        M=conf['M'], code=conf['code'], num_patches=conf['num_patches'],
        normalize_cw=conf['normalize_cw'],
        freeze_encoder=conf['enc_freeze'],
        enc_load_weights=conf['enc_weights'],
        dec_activ_fns=conf['dec_activ_fns'],
        dec_use_tanh=conf['dec_use_tanh'],
        dec_batch_norm=conf['dec_batch_norm'],
        loss_scaled_isometry=conf['loss_scaled_isometry'],
        alpha_scaled_isometry=conf['alpha_scaled_isometry'],
        alphas_sciso=conf['alphas_sciso'], gpu=gpu)
    model.load_state_dict(trstate['weights'])
    
    # prepare data set
    ds_va = ShapeNet(
        conf['path_root_imgs'], conf['path_root_pclouds'],
        conf['path_category_file'], class_choice=conf['va_classes'], train=False,
        npoints=conf['N'], load_area=True)
    
    # DataLoaderDevice wraps the loader so batches land on the GPU when one is
    # available; shuffling is off so predictions align with the dataset order
    dl_va = DataLoaderDevice(DataLoader(
        ds_va, batch_size=conf['batch_size'], shuffle=False, num_workers=2,
        drop_last=True), gpu=gpu)
        
    # point cloud inference (eval mode, no gradients needed)
    model.eval()
    nb = len(ds_va) // conf['batch_size']   # batches per pass (drop_last=True)
    with torch.no_grad():
        for e in range(numepoch):
            for bi, batch in enumerate(dl_va):
                model(batch['pcloud'])
                # index by the global batch count so passes do not overwrite each other
                torch.save(model.pc_pred.cpu(), pjn(path_save, 'pc{}.pt'.format(bi + e*nb)))
                torch.save(batch['pcloud'].cpu(), pjn(path_save, 'gtpc{}.pt'.format(bi + e*nb)))
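
A minimal usage sketch for pcInference; the paths below are hypothetical
placeholders, and path_save must be an existing directory:

pcInference(path_conf='conf.yaml',     # hypothetical config path
            path_weight='chkpt.tar',   # hypothetical checkpoint path
            path_save='predictions',
            numepoch=1)
pc0 = torch.load('predictions/pc0.pt')   # reload the first saved prediction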
Example 2
# The head of this example was truncated; ds_tr below is reconstructed to
# mirror ds_va (train=True; the 'tr_classes' config key is assumed).
ds_tr = ShapeNet(
    conf['path_root_imgs'],
    conf['path_root_pclouds'],
    conf['path_category_file'],
    class_choice=conf['tr_classes'],  # assumed key, parallel to 'va_classes'
    train=True,
    test=False,
    SVR=True,
    npoints=conf['N'],
    load_area=True)
ds_va = ShapeNet(
    conf['path_root_imgs'],
    conf['path_root_pclouds'],
    conf['path_category_file'],
    class_choice=conf['va_classes'],
    train=False,  # train=False, test=False => validation split (test=True => test split)
    test=False,
    SVR=True,
    npoints=conf['N'],
    load_area=True)
dl_tr = DataLoaderDevice(DataLoader(ds_tr,
                                    batch_size=conf['batch_size'],
                                    shuffle=True,
                                    num_workers=4,
                                    drop_last=True),
                         gpu=gpu)
dl_va = DataLoaderDevice(DataLoader(ds_va,
                                    batch_size=conf['batch_size'],
                                    shuffle=True,
                                    num_workers=2,
                                    drop_last=True),
                         gpu=gpu)

print('Train ds: {} samples'.format(len(ds_tr)))
print('Valid ds: {} samples'.format(len(ds_va)))

# Prepare training.
opt = torch.optim.Adam(model.parameters(), lr=conf['lr'])
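
The excerpt ends right after the optimizer is built. Below is a hedged sketch
of the training loop that could follow; the 'epochs' config key, the batch keys
and the 'loss_tot' entry are assumptions, with model.loss patterned after
Example 3:

for epoch in range(conf['epochs']):      # 'epochs' key is an assumption
    model.train()
    for bi, batch in enumerate(dl_tr):
        model(batch['pcloud'])           # forward pass, as in Example 1
        losses = model.loss(batch['pcloud'], areas_gt=batch['area'])
        opt.zero_grad()
        losses['loss_tot'].backward()    # total-loss key is an assumption
        opt.step()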
Example 3
def compareOurs(path_conf: str, path_weight: str):
    '''
    Computes the stitching error and the normal difference for the given model
    under the given configuration. See the usage sketch after this function.

    Parameters
    ----------
    path_conf : str
        path to the configuration file.
    path_weight : str
        path to the pretrained model weights.

    Returns
    -------
    stitchCriterion : list
        per-batch stitching loss.
    normalDifference : list
        per-batch normal difference.

    '''
    # load the configuration and the pretrained weights; check gpu state
    conf = helpers.load_conf(path_conf)
    gpu = torch.cuda.is_available()
    trstate = torch.load(path_weight, map_location=None if gpu else 'cpu')

    # subfolder to save predicted point clouds
    folder2save = pjn(os.path.dirname(path_weight), 'prediction')
    os.makedirs(folder2save, exist_ok=True)
    
    #### ONLY FOR EVALUATION ####    
    conf['loss_patch_area']        = True
    conf['show_overlap_criterion'] = True
    conf['overlap_threshold']    = 0.05
    conf['loss_smooth_surfaces'] = True
    conf['loss_patch_stitching'] = False
    conf['alpha_stitching']      = 0.001
    conf['show_analyticalNormalDiff'] = True
    conf['surface_normal']       = True
    conf['surface_variance']     = True
    conf['knn_Global']           = 20
    conf['knn_Patch']            = 20
    conf['PredNormalforpatchwise'] = False
    #### ONLY FOR EVALUATION ####

    # resume pretrained model
    model = AtlasNetReimplEncImg(
        M=conf['M'], code= conf['code'], num_patches=conf['num_patches'],
        normalize_cw     = conf['normalize_cw'],
        freeze_encoder   = conf['enc_freeze'],
        enc_load_weights = conf['enc_weights'],
        dec_activ_fns    = conf['dec_activ_fns'],
        dec_use_tanh     = conf['dec_use_tanh'],
        dec_batch_norm   = conf['dec_batch_norm'],
        loss_scaled_isometry  = conf['loss_scaled_isometry'],
        loss_patch_areas      = conf['loss_patch_area'],           # zhantao 
        loss_smooth_surfaces  = conf['loss_smooth_surfaces'],      # zhantao
        loss_patch_stitching  = conf['loss_patch_stitching'],      # zhantao
        numNeighborGlobal     = conf['knn_Global'],                # zhantao
        numNeighborPatchwise  = conf['knn_Patch'],                 # zhantao
        alpha_scaled_isometry = conf['alpha_scaled_isometry'],
        alphas_sciso     = conf['alphas_sciso'], 
        alpha_scaled_surfProp = conf['alpha_surfProp'],            # zhantao
        alpha_stitching  = conf['alpha_stitching'],                # zhantao
        useSurfaceNormal   = conf['surface_normal'],               # zhantao
        useSurfaceVariance = conf['surface_variance'],             # zhantao
        angleThreshold     = conf['angle_threshold']/180*np.pi,    # zhantao
        rejGlobalandPatch  = conf["reject_GlobalandPatch"],        # zhantao
        predNormalasPatchwise = conf['PredNormalforpatchwise'],    # zhantao
        overlap_criterion  = conf['show_overlap_criterion'],       # zhantao 
        overlap_threshold  = conf['overlap_threshold'],            # zhantao 
        enableAnaNormalErr = conf['show_analyticalNormalDiff'],    # zhantao
        marginSize       = conf['margin_size'],                    # zhantao
        gpu=gpu)

    model.load_state_dict(trstate['weights'])
    
    # using regular grid for evaluation
    model.sampler = FNSamplerRegularGrid( (0., 1.), (0., 1.), 
                                         model._num_patches * model._spp, 
                                         model._num_patches, gpu=gpu)
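    # (Assumption: the regular UV grid yields deterministic, evenly spaced
    # samples on every patch, so the evaluation metrics are repeatable across
    # runs, unlike the random per-patch sampling typically used during training.)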
    
    # prepare data set
    K = np.loadtxt(conf['path_intrinsic_matrix'])   
    ds_va = ImgAndPcloudFromDmapAndNormalsSyncedDataset(
        conf['path_root'], conf['obj_seqs_te'], K, conf['N'], compute_area=True)
    dl_va = DataLoaderDevice(DataLoader(
        ds_va, batch_size=conf['batch_size'], shuffle=False, num_workers=2), gpu=gpu)
        
    # point cloud inference (eval mode, no gradients needed for evaluation)
    stitchCriterion  = []
    normalDifference = []
    ConsistencyLoss  = []
    overlapCriterion = []
    analyNormalError = []
    chamferDistance  = []

    model.eval()
    with torch.no_grad():
        for bi, batch in enumerate(dl_va):
            model(batch['img'], it=bi)
            losses = model.loss(batch['pc'], normals_gt=batch['normals'],
                                areas_gt=batch['area'])

            stitchCriterion.append (losses['Err_stitching'].cpu())
            normalDifference.append(losses['normalDiff'].cpu())
            ConsistencyLoss.append (losses['L_surfProp'].cpu())
            overlapCriterion.append(losses['overlapCriterion'].cpu())
            analyNormalError.append(losses['analyticalNormalDiff'].cpu())
            chamferDistance.append (losses['loss_chd'].cpu())

            torch.cuda.empty_cache()

            # torch.save(model.pc_pred.cpu(),
            #            pjn(folder2save, 'regularSample{}.pt'.format(bi)))
    
    # one row per batch, one column per metric: shape (num_batches, 6)
    criterion = torch.stack([torch.stack(stitchCriterion),
                             torch.stack(normalDifference),
                             torch.stack(ConsistencyLoss),
                             torch.stack(overlapCriterion),
                             torch.stack(analyNormalError),
                             torch.stack(chamferDistance)], dim=1).numpy()
    
    # print(criterion)
    
    # save all per-batch results for reference
    header = ('stitching_error, normalAngularDiff, consistency_loss, '
              'overlapCriterion, analyticalNormalAngularDiff, CHD')
    np.savetxt(pjn(folder2save, 'regularSampleFull{}_errors.txt'.format(bi)),
               criterion, delimiter=',', header=header, comments="#")

    # save the average error; invalid overlap entries (<= 0) are removed
    # before averaging that column
    avgError    = criterion.mean(axis=0)
    avgError[3] = criterion[criterion[:, 3] > 0, 3].mean()
    np.savetxt(pjn(folder2save, 'regularSampleFull{}_avgErrors.txt'.format(bi)),
               avgError, delimiter=',', header=header, comments="#")
        
    return stitchCriterion, normalDifference
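
A hedged usage sketch for compareOurs; the paths are hypothetical placeholders:

stitch_err, normal_diff = compareOurs('conf.yaml', 'trained/chkpt.tar')
print('mean stitching error  : {:.6f}'.format(torch.stack(stitch_err).mean().item()))
print('mean normal difference: {:.6f}'.format(torch.stack(normal_diff).mean().item()))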