Code example #1
def main(cfg):
    for t in ['train_data', 'test_data']:
        the_dir = pjn(cfg.dataset.dataset_rootdir, f"part_{cfg.dataset.part}",
                      t)
        imgs_dir = pjn(the_dir, "images")
        annot_dir = pjn(the_dir, "ground-truth")
        check_consist_imgs_annots(imgs_dir, annot_dir)

        bn2points_dict = get_headpoints_dict(annot_dir)

        print(
            f"  Calling generate_density_maps_paral() for "
            f"part_{cfg.dataset.part} {t[:-5]}... ",
            end='',
            flush=True)

        if cfg.resources.num_proc == 1:
            # single-process mode for debugging
            dmaps_dict = {}
            generate_density_maps(bn2points_dict, dmaps_dict, imgs_dir, cfg)
        else:
            dmaps_dict = generate_density_maps_paral(bn2points_dict, imgs_dir,
                                                     cfg)

        print(f"Done")

        npz_name = f"density_maps_part_{cfg.dataset.part}_{t[:-5]}.npz"
        print(f"  Saving the file {npz_name}")
        np.savez(npz_name, **dmaps_dict)

        if t == 'test_data':
            xhp_dmaps_dict = xhp_density_maps(cfg.dataset.xhp_gt_dmaps_dir)
            compare_to_xhp_dmaps(dmaps_dict, xhp_dmaps_dict, cfg)
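
For reference, the archive saved above can be read back with numpy; a minimal sketch (the file name is assumed to be the one written for part A, train split):

import numpy as np

# keys of the .npz archive are the image basenames
dmaps = np.load("density_maps_part_A_train.npz")
for basename in dmaps.files:
    dmap = dmaps[basename]
    # the integral of a density map approximates the head count
    print(basename, dmap.shape, float(dmap.sum()))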
Code example #2
def inferenceAll(conf_path: str, weightFolder: str):
    '''
    Given a folder where different weights are stored in different subfolders,
    this function loads these weights and conducts point cloud inference one
    by one.

    Parameters
    ----------
    conf_path : str
        path to the configuration file; if None, the 'config.yaml' found in
        each weight subfolder is used instead.
    weightFolder : str
        path to the folder whose subfolders store the different weights.

    Returns
    -------
    None.

    '''
    for folder in sorted(glob.glob(pjn(weightFolder, '*'))):

        if len(glob.glob(pjn(folder, '*.tar'))) == 0:
            continue

        weightpath = glob.glob(pjn(folder, '*.tar'))[0]

        if conf_path is None:
            temp_path = glob.glob(pjn(folder, 'config.yaml'))[0]
            print(temp_path)
            compareOurs(temp_path, weightpath)
        else:
            compareOurs(conf_path, weightpath)
Code example #3
def inferenceAll(conf_path: str, weightFolder: str, save_path: str = None,
                 numepoch: int = 4):
    '''
    Given a folder where different weights are stored in different subfolders,
    this function loads these weights and conducts point cloud inference one
    by one.

    Parameters
    ----------
    conf_path : str
        path to the configuration file.
    weightFolder : str
        path to the folder whose subfolders store the different weights.
    save_path : str, optional
        where to save the inferenced point clouds. The default is None,
        meaning that each point cloud is stored in the same folder as its
        weight.
    numepoch : int, optional
        how many epochs to use, i.e. how many point clouds to infer.
        The default is 4.

    Returns
    -------
    None.

    '''
    for folder in sorted(glob.glob(pjn(weightFolder, '*'))):
        weightpath = glob.glob(pjn(folder, '*.tar'))[0]

        if save_path is None:
            # default: save next to the weight file
            savePath = os.path.dirname(weightpath)
        else:
            savePath = save_path

        pcInference(conf_path, weightpath, savePath, numepoch)
Code example #4
def calc_rgb_mean_train(cfg):
    """
    Calculate the mean pixel value for each color channel
    for all images in the train set.
    The mean values for ShanghaiTech are typically in range 
    90--115 (approximately).

    Args:
        cfg: the global configuration (hydra).

    Returns:
        ndarray of shape (3,) containing the per-channel mean pixel values.
    """
    common_dir = pjn(
        hydra.utils.get_original_cwd(),
        cfg.dataset.dataset_rootdir,
        f'part_{cfg.dataset.part}',
        'train_data',
    )
    imgs_dir = pjn(common_dir, 'images')
    assert os.path.isdir(imgs_dir)
    jpg_files = sorted(glob.glob(pjn(imgs_dir, "*.jpg")))

    rgb_sum_vals = np.zeros((3, ))
    num_pixels = 0.0

    for img_fpath in jpg_files:
        img_pil = Image.open(img_fpath)
        img_np = np.array(img_pil)
        h, w = img_np.shape[0:2]
        assert (w, h) == img_pil.size
        rgb_sum_vals += np.sum(img_np, axis=(0, 1))
        num_pixels += h * w

    return rgb_sum_vals / num_pixels
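
A typical use of the returned means is channel-wise centering before feeding images to a network; a minimal sketch, assuming an RGB training image and the hydra cfg used above:

import numpy as np
from PIL import Image

rgb_means = calc_rgb_mean_train(cfg)  # ndarray of shape (3,)

img = np.array(Image.open("IMG_1.jpg"), dtype=np.float32)
img_centered = img - rgb_means  # broadcasts over (H, W, 3)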
Code example #5
def pcInference(path_conf: str, path_weight: str, path_save: str, numepoch: int = 1):
    '''
    Reconstructs and saves point clouds, given the paths to the configuration
    file and the pretrained weights, and the path where the inferred point
    clouds are saved.

    This function has to be run from the training folder, i.e. where
    model.py and helpers.py are located.

    Parameters
    ----------
    path_conf : str
        path to the configuration file for training.
    path_weight : str
        path to the pretrained weight.
    path_save : str
        where to save the inferenced point cloud.
    numepoch : int
        the number of passes over the data set to run point cloud inference.

    Returns
    -------
    None.

    '''
    # load configuration and weights; check GPU availability
    conf = helpers.load_conf(path_conf)
    trstate = torch.load(path_weight)
    gpu = torch.cuda.is_available()

    # resume pretrained model
    model = AtlasNetReimpl(
        M=conf['M'], code=conf['code'], num_patches=conf['num_patches'],
        normalize_cw=conf['normalize_cw'],
        freeze_encoder=conf['enc_freeze'],
        enc_load_weights=conf['enc_weights'],
        dec_activ_fns=conf['dec_activ_fns'],
        dec_use_tanh=conf['dec_use_tanh'],
        dec_batch_norm=conf['dec_batch_norm'],
        loss_scaled_isometry=conf['loss_scaled_isometry'],
        alpha_scaled_isometry=conf['alpha_scaled_isometry'],
        alphas_sciso=conf['alphas_sciso'], gpu=gpu)
    model.load_state_dict(trstate['weights'])
    
    # prepare data set
    ds_va = ShapeNet(
        conf['path_root_imgs'], conf['path_root_pclouds'],
        conf['path_category_file'], class_choice=conf['va_classes'], train=False,
        npoints=conf['N'], load_area=True)
    
    dl_va = DataLoaderDevice(DataLoader(
        ds_va, batch_size=conf['batch_size'], shuffle=False,  # no shuffling for inference
        num_workers=2, drop_last=True), gpu=gpu)
        
    # point cloud inference (no gradient tracking needed at inference time)
    model.eval()
    with torch.no_grad():
        for e in range(numepoch):
            for bi, batch in enumerate(dl_va):
                model(batch['pcloud'])
                fid = bi + e * conf['batch_size']
                torch.save(model.pc_pred.detach().cpu(),
                           pjn(path_save, 'pc{}.pt'.format(fid)))
                torch.save(batch['pcloud'].cpu(),
                           pjn(path_save, 'gtpc{}.pt'.format(fid)))
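
The tensors written by pcInference() can be inspected later with torch.load; a minimal sketch (file names assumed to follow the pattern used above):

import torch

# 'pc0.pt' / 'gtpc0.pt' are the first predicted / ground-truth point clouds
pred = torch.load('pc0.pt')
gt = torch.load('gtpc0.pt')
print(pred.shape, gt.shape)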
Code example #6
File: MGN_augmentation.py Project: GentleDell/DEBOR
def save_offsets(offsets_hres, offsets_std, savePath):
    """save the given offsets to the given path."""
    check_folder(savePath)

    with open(pjn(savePath, 'offsets_hres.npy'), 'wb') as f:
        np.save(f, offsets_hres)
    with open(pjn(savePath, 'offsets_std.npy'), 'wb') as f:
        np.save(f, offsets_std)
Code example #7
def io_fix():
    return dict(irs_res_f=pjn(data_dir, 'io', 'irs26173_graphite_res.nxs'),
                irs_red_f=pjn(data_dir, 'io', 'irs26176_graphite002_red.nxs'),
                q_values=(0.525312757876, 0.7291668809127, 0.9233951329944,
                          1.105593679447, 1.273206832528, 1.42416584459,
                          1.556455009584, 1.668282739099, 1.758225254224,
                          1.825094271503),
                dave=pjn(data_dir, 'io', 'dave_file.grp'))
Code example #8
def func(my_dmaps_dict_keys, xhp_dmaps_dict_keys, my_dmaps_dict,
         xhp_dmaps_dict, abs_path):
    for my_k, xhp_k in zip(my_dmaps_dict_keys, xhp_dmaps_dict_keys):
        dm1 = my_dmaps_dict[my_k]
        dm2 = xhp_dmaps_dict[xhp_k]
        skimage.io.imsave(pjn(abs_path, f"{xhp_k}_my.png"),
                          (dm1 / np.max(dm1) * 255).astype(np.uint8))
        skimage.io.imsave(pjn(abs_path, f"{xhp_k}_xhp.png"),
                          (dm2 / np.max(dm2) * 255).astype(np.uint8))
Code example #9
File: evaluate.py Project: wakaba130/S-DCNet
def main(cfg):
    orig_cwd = hydra.utils.get_original_cwd()
    print(f"  Evaluating the checkpoint "
          f"'{cfg.test.trained_ckpt_for_inference}'")
    loaded_struct = torch.load(
        pjn(orig_cwd, cfg.test.trained_ckpt_for_inference))

    cfg.train.train_val_split = 1.0
    # ^ associate all of the train data with the train_loader below
    #   (do not split the train data into train + validation)
    train_loader, _, test_loader = train.get_dataloaders(cfg, (1, 0, 1))

    interval_bounds, label2count_list = make_label2count_list(cfg)
    model = SDCNet(label2count_list,
                   cfg.model.supervised,
                   load_pretr_weights_vgg=False)
    model.load_state_dict(loaded_struct['model_state_dict'], strict=True)

    additional_cfg = {'device': None}
    if not cfg.resources.disable_cuda and torch.cuda.is_available():
        additional_cfg['device'] = torch.device('cuda')
        model = model.cuda()
    else:
        additional_cfg['device'] = torch.device('cpu')

    optimizer = None

    trainer = train.TrainManager(
        model,
        optimizer,
        cfg,
        additional_cfg,
        train_loader=None,
        val_loader=None,
    )

    print()
    datadir = pjn(cfg.dataset.dataset_rootdir, f"part_{cfg.dataset.part}")
    print(f"  Evaluating on the (whole) train data and on the test data "
          f"(in '{datadir}')")

    mae_train, mse_train = trainer.validate(train_loader)
    print(f"  Metrics on the (whole) train data: "
          f"MAE: {mae_train:.2f}, MSE: {mse_train:.2f}")

    mae_test, mse_test = trainer.validate(test_loader)
    print(f"  Metrics on the test data:          "
          f"MAE: {mae_test:.2f}, MSE: {mse_test:.2f}")

    if cfg.test.visualize:
        vis_dir_name = f"visualized_part_{cfg.dataset.part}_test_set_predictions"
        vis_dir_print = pjn(os.path.relpath(os.getcwd(), orig_cwd),
                            vis_dir_name)
        print(
            f"  Visualized predictions are being saved to '{vis_dir_print}':")
        visualize_predictions(cfg, model, test_loader, vis_dir_name)
Code example #10
def get_headpoints_dict(annot_dir):
    """
    Load the '*.mat' files from the annotation directory
    and convert their contents (coordinates of the head points)
    to the {basename: numpy ndarray} dictionary (OrderedDict()).
    """
    mat_files = sorted(glob.glob(pjn(annot_dir, "*.mat")))
    mat_basenames = [
        os.path.splitext(os.path.split(f)[1])[0] for f in mat_files
    ]

    basename2headpoints_dict = OrderedDict()

    for f, bn in zip(mat_files, mat_basenames):
        mat = scipy.io.loadmat(f)
        numpy_void_obj = mat['image_info'][0][0][0][0]
        headpoints = numpy_void_obj[0]
        num_headpoints = numpy_void_obj[1][0][0]
        assert headpoints.shape[0] == num_headpoints, \
            "number of headpoints entries != specified " \
            "total number of headpoints"
        assert headpoints.shape[1] == 2, \
            "<2 or >2 coordinate values for one headpoint entry"
        basename2headpoints_dict[bn] = headpoints

    return basename2headpoints_dict
Code example #11
    def test_reference_data(self):
        """ Test output values in comparison with reference data
        (file in 'reference data' folder) """
        # load reference data
        ref_data = numpy.loadtxt(
            pjn(data_dir, "equivalent_sites_circle_ref_data.dat"))

        # generate data from current model
        # for info: the parameters' values used for the reference data are
        # specified in the README file in the 'reference data' folder
        w = numpy.arange(-2, 2.01, 0.01)
        q = 0.7
        actual_data = numpy.column_stack([
            w,
            QENSmodels.sqwEquivalentSitesCircle(w,
                                                q,
                                                scale=.01,
                                                center=0.5,
                                                Nsites=3,
                                                radius=100.0,
                                                resTime=10.)
        ])
        numpy.testing.assert_array_almost_equal(ref_data,
                                                actual_data,
                                                decimal=12)
Code example #12
    def test_reference_data(self):
        """ Test output values in comparison with reference data
        (file in 'reference data' folder)
        """

        # load reference data
        ref_data = numpy.loadtxt(
            pjn(data_dir, "gaussian_model_3d_ref_data.dat"))

        # generate data from current model
        # for info: the parameters' values used for the reference data are
        # specified in the README file in the 'reference data' folder
        w = numpy.arange(-2, 2.01, 0.01)
        q = 0.7
        actual_data = numpy.column_stack([
            w,
            QENSmodels.sqwGaussianModel3D(w,
                                          q,
                                          scale=5.,
                                          center=0.5,
                                          D=1.,
                                          variance_ux=1.)
        ])

        numpy.testing.assert_array_almost_equal(ref_data,
                                                actual_data,
                                                decimal=10)
Code example #13
    def test_reference_data(self):
        """ Test output values in comparison with reference data
        (file in 'reference data' folder)
        """
        # load reference data
        ref_data = numpy.loadtxt(
            pjn(data_dir, "jump_sites_log_norm_dist_ref_data.dat"))

        # generate data from current model
        # for info: the parameters' values used for the reference data are
        # specified in the README file in the 'reference data' folder
        w = numpy.arange(-2, 2.01, 0.01)
        q = 0.7
        actual_data = numpy.column_stack([
            w,
            QENSmodels.sqwJumpSitesLogNormDist(w,
                                               q,
                                               scale=2,
                                               center=0.8,
                                               Nsites=7,
                                               radius=5,
                                               resTime=2,
                                               sigma=0.6)
        ])

        numpy.testing.assert_array_almost_equal(ref_data,
                                                actual_data,
                                                decimal=12)
Code example #14
    def test_reference_data(self):
        """ Test output values in comparison with reference data
        (file in 'reference data' folder)
        """

        # load reference data
        ref_data = numpy.loadtxt(
            pjn(data_dir, "isotropic_rotational_diffusion_ref_data.dat"))

        # generate data from current model
        # for info: the parameters' values used for the reference data are
        # specified in the README file in the 'reference data' folder
        w = numpy.arange(-2, 2.01, 0.01)
        q = 0.7
        actual_data = numpy.column_stack([
            w,
            QENSmodels.sqwIsotropicRotationalDiffusion(w,
                                                       q,
                                                       scale=1.0,
                                                       center=0.0,
                                                       radius=2.0,
                                                       DR=0.05)
        ])

        numpy.testing.assert_array_almost_equal(ref_data,
                                                actual_data,
                                                decimal=11)
Code example #15
    def test_reference_data(self):
        """ Test output values in comparison with reference data
        (file in 'reference data' folder) """

        # load reference data
        ref_data = numpy.loadtxt(
            pjn(data_dir, "jump_translational_diffusion_ref_data.dat"))

        # generate data from current model
        # for info: the parameters' values used for the reference data are
        # specified in the README file in the 'reference data' folder
        w = numpy.arange(-2, 2.01, 0.01)
        q = 0.7
        actual_data = numpy.column_stack([
            w,
            QENSmodels.sqwJumpTranslationalDiffusion(w,
                                                     q,
                                                     scale=1,
                                                     center=0,
                                                     D=0.23,
                                                     resTime=1.25)
        ])
        # compare the 2 arrays
        numpy.testing.assert_array_almost_equal(ref_data,
                                                actual_data,
                                                decimal=12)
Code example #16
    def test_reference_data(self):
        """ Test output values in comparison with reference data
        (file in 'reference data' folder)
        """

        # load reference data
        ref_data = numpy.loadtxt(
            pjn(data_dir, "delta_two_lorentz_ref_data.dat"))

        # generate data from current model
        # for info: the parameters' values used for the reference data are
        # specified in the README file in the 'reference data' folder
        w = numpy.arange(-2, 2.01, 0.01)
        q = 0.7
        output = numpy.column_stack([
            w,
            QENSmodels.sqwDeltaTwoLorentz(w,
                                          q,
                                          scale=1.,
                                          center=0,
                                          A0=0.01,
                                          A1=0.4,
                                          hwhm1=0.25,
                                          hwhm2=0.75)
        ])

        # compare the 2 arrays
        numpy.testing.assert_array_almost_equal(ref_data, output, decimal=12)
Code example #17
def meshToOBJ(pathToPc: str, numPatch: int, folderToSave: str):
    '''
    It converts the point cloud in the given path, which is generated from
    regularly sampled uv points, to an OBJ file. This file can be opened
    and rendered by software such as Blender.

    Parameters
    ----------
    pathToPc : str
        The path to the point cloud.
    numPatch : int
        The number of patches in each point cloud.
    folderToSave : str
        Where to save the generated OBJ file.

    Returns
    -------
    None.

    '''
    vertex = torch.load(pathToPc)
    facets = genTrifacet(vertex.shape[1], numPatch).int()
    batches = vertex.shape[0]

    prefix = pathToPc.split('/')[-1][:-3]

    for ct in range(batches):

        OBJ = mesh2obj(vertex[ct], facets, numPatch)

        with open(pjn(folderToSave, prefix + '_object{:d}.obj'.format(ct)),
                  'w') as f:
            f.writelines(OBJ)
Code example #18
    def test_reference_data(self):
        """ Test output values in comparison with reference data
        (file in 'reference data' folder) """

        # load reference data
        ref_data = \
            numpy.loadtxt(pjn(data_dir, "water_teixeira_ref_data.dat"))

        # generate data from current model
        # for info: the parameters' values used for the reference data are
        # specified in the README file in the 'reference data' folder
        w = numpy.arange(-2, 2.01, 0.01)
        q = 0.7
        actual_data = numpy.column_stack([
            w,
            QENSmodels.sqwWaterTeixeira(w,
                                        q,
                                        scale=1,
                                        center=0,
                                        D=1,
                                        resTime=1,
                                        radius=1,
                                        DR=1)
        ])
        # compare the 2 arrays
        numpy.testing.assert_array_almost_equal(ref_data,
                                                actual_data,
                                                decimal=13)
Code example #19
File: vis_util.py Project: GentleDell/DEBOR
def vis_subjectFromPath(path_object: str, path_SMPLmodel: str, 
                        path_bodyMesh: str, is_hres = False):
    '''
    This function visualizes the object file in the given path.

    Parameters
    ----------
    path_object : str
        Path to the folder containing the object.
    path_SMPLmodel : str
        Path to the SMPL model .pkl file.
    path_bodyMesh : str
        Path to the reference body mesh.
    is_hres : bool, optional
        Whether the reference mesh is high-resolution. The default is False.

    Returns
    -------
    None.

    '''
    assert isfile(path_SMPLmodel), 'SMPL model not found.'
    assert isdir(path_object), 'the path to object folder is invalid.'
    assert isfile(path_bodyMesh) , 'reference mesh not found.'
    assert ('smpl_registered' in path_bodyMesh) == is_hres, \
        'reference mesh has different resolution from that specified by is_hres.'
    
    dp = SmplPaths()
    SMPL = Smpl( dp.get_hres_smpl_model_data() if is_hres else dp.get_smpl_file() )
    
    # read pose
    regfile = pjn(path_object, 'registration.pkl')    # smpl model params
    smplpara = pickle.load(open(regfile, 'rb'),  encoding='iso-8859-1')
    
    # read offsets
    offsets_t = np.load(pjn(path_object,
                            'gt_offsets/offsets_%s.npy' % ['std', 'hres'][is_hres]))
    
    # prepare texture
    tex_path = pjn(path_object, 'registered_tex.jpg')
    
    vis_subjectFromData_psbody(
        SMPL, 
        offsets_t, 
        smplpara['pose'],
        smplpara['betas'], 
        smplpara['trans'],
        path_bodyMesh, 
        tex_path, 
        is_hres)
Code example #20
File: train_structure.py Project: GentleDell/DEBOR
    def save_sample(self, data, prediction, ind=0, saveTo='test'):
        """Saving a sample for visualization and comparison"""
        
        assert saveTo in ('test', 'train'), 'save to train or test folder'
        
        folder = pjn(self.options.summary_dir, '%s/'%(saveTo))
        _input = pjn(folder, 'input_image.png')
        # batchs = self.options.batch_size
        
        # save the input image, if not saved
        if not isfile(_input):
            plt.imsave(_input, data['img'][ind].cpu().permute(1,2,0).clamp(0,1).numpy())
            
        # overlay the prediction on the real image; as the mesh .obj has
        # different ft/uv coords in the MPI lib and MGN, we flip the
        # predicted texture.
        save_renderer = simple_renderer(batch_size = 1)
        predTrans = torch.stack(
            [prediction['theta'][ind, 1],
             prediction['theta'][ind, 2],
             2 * 1000. / (224. * prediction['theta'][ind, 0] + 1e-9)], dim=-1)
        tex = prediction['tex_image'][ind].flip(dims=(0,))[None]
        pred_img = save_renderer(
            verts = prediction['verts'][ind][None],
            faces = self.faces[ind][None],
            verts_uvs = self.smpl_verts_uvs[None],
            faces_uvs = self.smpl_tri_ind[None],
            tex_image = tex,
            R = torch.eye(3)[None].to('cuda'),
            T = predTrans,
            f = torch.ones([1,1]).to('cuda')*1000,
            C = torch.ones([1,2]).to('cuda')*112,
            imgres = 224)
        
        overlayimg = 0.9*pred_img[0,:,:,:3] + 0.1*data['img'][ind].permute(1,2,0)

        # save the overlay, rendered texture, unwarped texture and predicted
        # texture for this sample
        img_base = data['imgname'][ind].split('/')[-1][:-4]
        for tag, img in (('overlay', overlayimg),
                         ('tex', pred_img[0, :, :, :3]),
                         ('unwarptex', prediction['unwarp_tex'][ind]),
                         ('predtex', prediction['tex_image'][ind])):
            save_path = pjn(folder, '%s_%s_iters%d.png'
                            % (tag, img_base, self.step_count))
            plt.imsave(save_path,
                       (img.clamp(0, 1)*255).cpu().numpy().astype('uint8'))


        # create the predicted posed undressed body vertices
        offPred_t  = (prediction['verts_disp'][ind]*self.dispPara[1]+self.dispPara[0]).cpu()[None,:] 
        predDressbody = create_smplD_psbody(
            self.smplD, offPred_t, 
            prediction['theta'][ind][3:75][None].cpu(), 
            prediction['theta'][ind][75:][None].cpu(), 
            0, 
            rtnMesh=True)[1]
                            
        # Create meshes and save 
        savepath = pjn(folder, '%s_iters%d.obj' % (img_base, self.step_count))
        predDressbody.write_obj(savepath)
Code example #21
def check_consist_imgs_annots(imgs_dir, annot_dir):
    """
    Check the correspondence between the images and annotations.
    `imgs_dir` must contain '*.jpg' files, `annot_dir` must contain 
    the same number of '*.mat' files, the basenames of the files must
    differ only by the leading 'GT_' substring
    (in the '*.mat' file basenames).
    """
    if not os.path.isdir(imgs_dir):
        raise FileNotFoundError(f"images directory '{imgs_dir}' is not found")

    jpg_files = sorted(glob.glob(pjn(imgs_dir, "*.jpg")))
    if not jpg_files:
        raise FileNotFoundError(
            f"directory '{imgs_dir}' contains no '*.jpg' files")

    jpg_basenames = [
        os.path.splitext(os.path.split(f)[1])[0] for f in jpg_files
    ]

    if not os.path.isdir(annot_dir):
        raise FileNotFoundError(
            f"annotations directory '{annot_dir}' is not found")

    mat_files = sorted(glob.glob(pjn(annot_dir, "*.mat")))
    if not mat_files:
        raise FileNotFoundError(
            f"directory '{annot_dir}' contains no '*.mat' files")

    mat_basenames = [
        os.path.splitext(os.path.split(f)[1])[0] for f in mat_files
    ]

    assert len(jpg_basenames) == len(mat_basenames), \
        "different number of image files and annotation files"

    corresp_basenames = [
        (bn_mat == "GT_" + bn_jpg)
        for bn_jpg, bn_mat in zip(jpg_basenames, mat_basenames)
    ]
    assert all(corresp_basenames), \
        "image and ground truth file basenames are not consistent"
Code example #22
def main(cfg):
    orig_cwd = hydra.utils.get_original_cwd()
    print(f"  Exporting the checkpoint "
          f"'{cfg.test.trained_ckpt_for_inference}'")
    ckpt_path = pjn(orig_cwd, cfg.test.trained_ckpt_for_inference)
    loaded_struct = torch.load(ckpt_path)

    interval_bounds, label2count_list = make_label2count_list(cfg)
    model = SDCNet(label2count_list,
                   cfg.model.supervised,
                   load_pretr_weights_vgg=False)
    model.load_state_dict(loaded_struct['model_state_dict'], strict=True)

    batch_size = 1
    x = torch.randn(batch_size, 3, 64 * 1, 64 * 1, requires_grad=False)

    if not cfg.resources.disable_cuda and torch.cuda.is_available():
        model = model.cuda()
        x = x.cuda()

    p1, ext = os.path.splitext(ckpt_path)
    dir_, bname = os.path.split(p1)

    try:
        torch.onnx.export(model, x, p1 + ".onnx", opset_version=13)
    except Exception:
        print("  (!) Failed to export the checkpoint to ONNX format")
    else:
        print("  Successfully exported the checkpoint to ONNX format")

    try:
        traced_script_module = torch.jit.trace(model, x)
        traced_script_module.save(bname + "_jit_trace.pt")
    except Exception:
        print(
            "  (!) Failed to export the checkpoint to 'torch jit trace' format"
        )
    else:
        print(
            "  Successfully exported the checkpoint to 'torch jit trace' format"
        )

    try:
        script_module = torch.jit.script(model)
        script_module.save(bname + "_jit_script.pt")
    except Exception:
        print(
            "  (!) Failed to export the checkpoint to 'torch jit script' format"
        )
    else:
        print(
            "  Successfully exported the checkpoint to 'torch jit script' format"
        )
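
A quick sanity check for the exported ONNX file is to run it with onnxruntime (a separate package, assumed installed) on the same dummy input:

import onnxruntime as ort

sess = ort.InferenceSession(p1 + ".onnx", providers=["CPUExecutionProvider"])
input_name = sess.get_inputs()[0].name
onnx_out = sess.run(None, {input_name: x.cpu().numpy()})
# onnx_out is a list of numpy arrays, one per model output
print([o.shape for o in onnx_out])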
Code example #23
File: inference.py Project: dmburd/S-DCNet
def main(cfg):
    orig_cwd = hydra.utils.get_original_cwd()
    print(f"  Running inference using checkpoint "
          f"'{cfg.test.trained_ckpt_for_inference}'")
    ckpt_path = pjn(orig_cwd, cfg.test.trained_ckpt_for_inference)
    loaded_struct = torch.load(ckpt_path)

    interval_bounds, label2count_list = make_label2count_list(cfg)
    model = SDCNet(label2count_list,
                   cfg.model.supervised,
                   load_pretr_weights_vgg=False)
    model.load_state_dict(loaded_struct['model_state_dict'], strict=True)

    if not cfg.resources.disable_cuda and torch.cuda.is_available():
        model = model.cuda()

    if cfg.test.visualize:
        vis_dir_print = pjn(os.path.relpath(os.getcwd(), orig_cwd),
                            'visualized_predictions')
        print(f"  Visualized predictions are being saved to '{vis_dir_print}'")

    get_predictions(cfg, model)
Code example #24
File: render_util.py Project: GentleDell/DEBOR
def getMaterialPath(bodyRoot: str):
    '''
    This function collects the paths to the materials of the MGN dataset as
    a list of dicts.

    Parameters
    ----------
    bodyRoot : str
        the root path of the MGN dataset, containing one subfolder per object.

    Returns
    -------
    list
        one dict per object, with the keys "sampleRootPath",
        "smpl_registered_path" and "smpl_registered_texturePath".
    '''
    
    assert os.path.isdir(bodyRoot), "The given folder does not exist."
    
    output = []    
    
    for sample in sorted(glob( pjn(bodyRoot, '*') )):
        sample = os.path.abspath(sample)
        sampleDict = {"sampleRootPath": sample,
                      "smpl_registered_path": pjn(sample, 'smpl_registered.obj'),
                      "smpl_registered_texturePath": pjn(sample, 'registered_tex.jpg')
                      }
        output.append(sampleDict)
        
    return output
Code example #25
def prepareData(cfgs: dict):
    '''
    This function prepares and stores paths, cameras and lights for rendering.

    Parameters
    ----------
    cfgs : dict
        The configuration for rendering.

    Returns
    -------
    None.

    '''
    # get material paths
    MGN_data = getMaterialPath(cfgs['datarootMGN'])

    # create camera list
    cameraList = easyCameras(numCircCameras=cfgs['numCircCameras'],
                             heights=cfgs['camera_heights'],
                             resolution=cfgs['camera_resolution'],
                             horiz_distance=cfgs['camera_horiz_distance'])

    # create light list
    lightList = easyLights(numLight=cfgs['numLight'],
                           power=cfgs['light_power'],
                           heights=cfgs['light_heights'],
                           initAngle=cfgs['light_initAngle'],
                           horiz_distance=cfgs['light_horiz_distance'])

    # save data to pickle
    with open(pjn(cfgs['cachePath'], 'MGN_data.pickle'), 'wb') as handle:
        pickle.dump(MGN_data, handle)
    with open(pjn(cfgs['cachePath'], 'cameras.pickle'), 'wb') as handle:
        pickle.dump(cameraList, handle)
    with open(pjn(cfgs['cachePath'], 'lights.pickle'), 'wb') as handle:
        pickle.dump(lightList, handle)
Code example #26
File: helper_dataset.py Project: GentleDell/DEBOR
def compute_offset_tPose(smpl,
                         garPath,
                         thresholds,
                         num_separations,
                         enableVis=False):
    """compute the per-vertex offsets in t-pose using the normal guided method."""

    ## Get the original body mesh of the garment
    garFolder = '/'.join(garPath.split('/')[:-1])
    orig_body = pkl.load(open(pjn(garFolder, 'registration.pkl'), 'rb'),
                         encoding='iso-8859-1')

    smpl = smplFromParas(smpl, np.zeros_like(smpl.r), 0, orig_body['betas'], 0)
    garment_org_body_unposed = Mesh(smpl.r, smpl.f)

    ## Get the original garment
    garment_unposed = Mesh(filename=garPath)

    ## remove interpenetration
    garment_unposed_interp = remove_interpenetration_fast(
        garment_unposed, garment_org_body_unposed)

    ## compute normal guided displacement
    body_normal = Tensor(garment_org_body_unposed.estimate_vertex_normals())

    offsets_tPose = computeNormalGuidedDiff(
        Tensor(garment_org_body_unposed.v),
        body_normal,
        Tensor(garment_unposed_interp.v),
        Tensor(garment_unposed_interp.f.copy().astype('int32')).long(),
        num_separation=num_separations,
        max_displacement=thresholds['max_offsets'])

    offsets_tPose = meshDisplacementFilter(
        Tensor(offsets_tPose),
        Tensor(garment_org_body_unposed.f.copy().astype('int32')).long(),
        filterThres=thresholds['offsets_check_threshold'],
        diffThreshold=thresholds['max_diffOffsets'])

    if enableVis:
        print('show mesh in compute_offset_tPose().')
        v = garment_org_body_unposed.v + offsets_tPose
        body = Mesh(v, garment_org_body_unposed.f)

        mvs = MeshViewers((1, 1))
        mvs[0][0].set_static_meshes([body])

    return offsets_tPose
Code example #27
    def test_reference_data(self):
        """ Test output values in comparison with reference data
        (file in 'reference data' folder) """

        # load reference data
        ref_data = numpy.loadtxt(pjn(data_dir, "delta_ref_data.dat"))

        # generate data from current model
        # for info: the parameters' values used for the reference data are
        # specified in the README file in the 'reference data' folder
        w = numpy.arange(-2, 2.01, 0.01)
        actual_data = numpy.column_stack(
            [w, QENSmodels.delta(w, scale=3.3, center=0)])

        # compare the 2 arrays
        numpy.testing.assert_array_almost_equal(ref_data,
                                                actual_data,
                                                decimal=10)
Code example #28
File: train.py Project: dmburd/S-DCNet
def main(cfg):
    """
    Create data loaders, the model instance, optimizer and TrainManager()
    object. Run the training process.
    """
    orig_cwd = hydra.utils.get_original_cwd()
    train_loader, val_loader, test_loader = get_dataloaders(cfg, (1, 1, 0))
    interval_bounds, label2count_list = make_label2count_list(cfg)
    
    model = SDCNet(
        label2count_list,
        cfg.model.supervised,
        load_pretr_weights_vgg=True)

    if cfg.train.pretrained_ckpt:
        print(f"  Using pretrained model and its checkpoint "
              f"'{cfg.train.pretrained_ckpt}'")
        loaded_struct = torch.load(pjn(orig_cwd, cfg.train.pretrained_ckpt))
        model.load_state_dict(loaded_struct['model_state_dict'], strict=True)
    
    additional_cfg = {'device': None}
    if not cfg.resources.disable_cuda and torch.cuda.is_available():
        additional_cfg['device'] = torch.device('cuda')
        model = model.cuda()
    else:
        additional_cfg['device'] = torch.device('cpu')

    optimizer = torch.optim.SGD(
        model.parameters(),
        lr=cfg.train.lr_schedule.lr_init,
        momentum=cfg.train.optimizer.momentum,
        weight_decay=cfg.train.optimizer.weight_decay)

    additional_cfg['tbx_wrtr_dir'] = os.getcwd()
    with SummaryWriter(additional_cfg['tbx_wrtr_dir']) as tbx_wrtr:
        additional_cfg['tbx_wrtr'] = tbx_wrtr
        trainer = TrainManager(
            model,
            optimizer,
            cfg,
            additional_cfg,
            train_loader=train_loader,
            val_loader=val_loader)
        trainer.train()
Code example #29
def xhp_density_maps(xhp_dir):
    """
    Load the density maps provided in the official repository
    https://github.com/xhp-hust-2018-2011/S-DCNet

    Args:
        xhp_dir: Directory containing '*.mat' files.

    Returns:
        bname2dmap_dict: Dictionary containing the mapping between the 
        basenames and density maps from the official repository.
    """
    if not xhp_dir:
        print("--xhp-dir was not specified at the command line")
        return

    if not os.path.isdir(xhp_dir):
        print(f"  Directory '{xhp_dir}' (that is expected to contain "
              f"xhp density maps) not found")
        return

    xhp_mat_files = sorted(glob.glob(pjn(xhp_dir, "*.mat")))
    if not xhp_mat_files:
        print(f"  Directory '{xhp_dir}' (that is expected to contain "
              f"xhp density maps) contains no '*.mat' files")
        return

    xhp_mat_bnames = [
        os.path.splitext(os.path.split(f)[1])[0] for f in xhp_mat_files
    ]
    bname2dmap_dict = OrderedDict()

    for fpath, bn in zip(xhp_mat_files, xhp_mat_bnames):
        mat = scipy.io.loadmat(fpath)
        bname2dmap_dict[bn] = mat['density_map']

    return bname2dmap_dict
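
The returned dictionary can be cross-checked against the annotations, since the integral of a density map approximates the number of annotated heads; a minimal sketch (dictionary name as returned above):

import numpy as np

for bn, dmap in bname2dmap_dict.items():
    # sum over the whole map ~= annotated head count for that image
    print(bn, float(np.sum(dmap)))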
Code example #30
import os
import sys
import unittest
import numpy
from os.path import join as pjn

import QENSmodels

# resolve path to reference_data
this_module_path = sys.modules[__name__].__file__
data_dir = pjn(os.path.dirname(this_module_path), 'reference_data')


class TestJumpsSitesLogNorm(unittest.TestCase):
    """ Tests QENSmodels.jump_sites_log_norm_dist function"""
    def test_size_hwhm_jump_sites_log_norm(self):
        """ Test size of output of hwhmJumpSitesLogNormDist
        The output should contain 3 elements
        """
        self.assertEqual(len(QENSmodels.hwhmJumpSitesLogNormDist(1.)), 3)

        self.assertEqual(len(QENSmodels.hwhmJumpSitesLogNormDist([1., 2.])), 3)

    def test_type_size_hwhm_jump_sites_log_norm_q_nb(self):
        """ Tests type and size of outputs if input q is a float """
        hwhm, eisf, qisf = QENSmodels.hwhmJumpSitesLogNormDist(1.)
        self.assertIsInstance(hwhm, numpy.ndarray)
        self.assertIsInstance(eisf, numpy.ndarray)
        self.assertIsInstance(qisf, numpy.ndarray)

        self.assertEqual(hwhm.shape, (1, 3, 21))