Example #1
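This example assumes a handful of module-level imports and constants that the listing omits. A minimal sketch of the likely setup; the paths, radius, and cache-file name are placeholders, not values from the source:

import os

import numpy as np
import torch as pt

from bps import bps

# load_modelnet40 ships with the demo code around the bps library;
# the exact import path is an assumption
from modelnet40 import load_modelnet40

DATA_PATH = './data'    # placeholder dataset root
N_BPS_POINTS = 32 ** 3  # the reshape below requires exactly 32768 basis points
BPS_RADIUS = 1.5        # placeholder sampling radius
BPS_CACHE_FILE = os.path.join(DATA_PATH, 'bps_data.npz')  # placeholder cache name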
def prepare_data_loaders():

    if not os.path.exists(BPS_CACHE_FILE):
        # load modelnet point clouds
        xtr, ytr, xte, yte = load_modelnet40(root_data_dir=DATA_PATH)

        # normalise the point clouds to fit the unit ball (pass return_scalers=True
        # if you also need the scaler parameters for the inverse operation)
        xtr_normalized = bps.normalize(xtr)
        xte_normalized = bps.normalize(xte)

        # encode the normalised point clouds with a regular grid basis,
        # each BPS cell containing the 3D delta vector to its closest point
        print("converting data to BPS representation..")
        print("number of basis points: %d" % N_BPS_POINTS)
        print("BPS sampling radius: %f" % BPS_RADIUS)

        print("converting train..")
        xtr_bps = bps.encode(xtr_normalized,
                             bps_arrangement='grid',
                             n_bps_points=N_BPS_POINTS,
                             radius=BPS_RADIUS,
                             bps_cell_type='deltas')
        # reshape flat BPS features into a 32x32x32 grid with 3 delta channels
        # (requires N_BPS_POINTS == 32 ** 3 == 32768)
        xtr_bps = xtr_bps.reshape([-1, 32, 32, 32, 3])

        print("converting test..")
        xte_bps = bps.encode(xte_normalized,
                             bps_arrangement='grid',
                             n_bps_points=N_BPS_POINTS,
                             radius=BPS_RADIUS,
                             bps_cell_type='deltas')

        xte_bps = xte_bps.reshape([-1, 32, 32, 32, 3])

        print("saving cache file for future runs..")
        np.savez(BPS_CACHE_FILE, xtr=xtr_bps, ytr=ytr, xte=xte_bps, yte=yte)

    else:
        print("loading converted data from cache..")
        data = np.load(BPS_CACHE_FILE)
        xtr_bps = data['xtr']
        ytr = data['ytr']
        xte_bps = data['xte']
        yte = data['yte']

    # move the channel axis first: [N, 32, 32, 32, 3] -> [N, 3, 32, 32, 32] (NCDHW)
    xtr_bps = xtr_bps.transpose(0, 4, 2, 3, 1)
    dataset_tr = pt.utils.data.TensorDataset(pt.Tensor(xtr_bps),
                                             pt.Tensor(ytr[:, 0]).long())
    tr_loader = pt.utils.data.DataLoader(dataset_tr,
                                         batch_size=64,
                                         shuffle=True)

    xte_bps = xte_bps.transpose(0, 4, 2, 3, 1)
    dataset_te = pt.utils.data.TensorDataset(pt.Tensor(xte_bps),
                                             pt.Tensor(yte[:, 0]).long())
    # note: shuffling the test set is unnecessary for evaluation, though harmless
    te_loader = pt.utils.data.DataLoader(dataset_te,
                                         batch_size=64,
                                         shuffle=True)

    return tr_loader, te_loader
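A minimal usage sketch under the setup above; each batch is a channels-first voxel-style tensor suitable for a 3D CNN classifier:

tr_loader, te_loader = prepare_data_loaders()

# one training batch: BPS delta features as a [64, 3, 32, 32, 32] float tensor
# plus a [64] tensor of integer class labels
x_batch, y_batch = next(iter(tr_loader))
print(x_batch.shape, y_batch.shape)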
Example #2
def get_alignment(x_scan, ckpt_path):
    """
    Predict alignment with a pre-trained model given pre-processed scan

    Parameters
    ----------
    x_scan: numpy array [n_sample_points, 3]
        preprocessed scan (downsampled and denoised)
    ckpt_path: str
        path to model checkpoint

    Returns
    -------
    x_align: numpy array [6890, 3]
        predicted SMPL mesh vertices

    """
    x_norm, x_mean, x_max = bps.normalize(x_scan.reshape([1, -1, 3]),
                                          max_rescale=False,
                                          return_scalers=True,
                                          verbose=False)
    x_bps = bps.encode(x_norm,
                       radius=BPS_RADIUS,
                       n_bps_points=N_BPS_POINTS,
                       bps_cell_type='dists',
                       verbose=False)

    model = MeshRegressorMLP(n_features=N_BPS_POINTS)
    model.load_state_dict(torch.load(ckpt_path, map_location='cpu'))
    model.eval()

    # predict mesh vertices, then undo the normalisation: rescale and
    # shift the mesh back into the original scan's coordinate frame
    x_align = model(torch.Tensor(x_bps)).detach().numpy()
    x_align /= MESH_SCALER
    x_align += x_mean

    return x_align[0]
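A minimal call sketch; the input file and checkpoint path are hypothetical stand-ins:

import numpy as np

# x_scan: [n_sample_points, 3] array, already downsampled and denoised
x_scan = np.load('scan_preprocessed.npy')               # hypothetical input file
x_align = get_alignment(x_scan, 'mesh_regressor.ckpt')  # hypothetical checkpoint
print(x_align.shape)  # (6890, 3): predicted SMPL mesh vertices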
Example #3
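Example #3 swaps the fixed BPS encoding of Example #1 for the adaptive, density-aware encoding from the aptbps package and prepares flat features for an MLP instead of a 3D CNN. Besides the setup sketched under Example #1, it needs only the encoder module itself:

import aptbps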
def prepare_data_loaders(n_parts=2):

    if n_parts == 1:
        APTBPS_CACHE_FILE = os.path.join(DATA_PATH, 'aptbps_mlp_data.npz')
    else:
        APTBPS_CACHE_FILE = os.path.join(DATA_PATH,
                                         '%daptbps_mlp_data.npz' % n_parts)

    if not os.path.exists(APTBPS_CACHE_FILE):

        # load modelnet point clouds
        xtr, ytr, xte, yte = load_modelnet40(root_data_dir=DATA_PATH)

        # normalise the point clouds to fit the unit ball (pass return_scalers=True
        # if you also need the scaler parameters for the inverse operation)
        xtr_normalized = bps.normalize(xtr)
        xte_normalized = bps.normalize(xte)

        # encode the normalised point clouds with the adaptive (density-aware)
        # BPS encoder, each cell containing the l2-distance to the closest point
        print("converting data to BPS representation..")
        print("number of basis points: %d" % N_BPS_POINTS)
        print("BPS sampling radius: %f" % BPS_RADIUS)
        print("converting train..")
        xtr_bps = aptbps.adaptive_encode(xtr_normalized,
                                         n_parts=n_parts,
                                         n_bps_points=N_BPS_POINTS,
                                         bps_cell_type='dists',
                                         radius=BPS_RADIUS)
        print("converting test..")
        xte_bps = aptbps.adaptive_encode(xte_normalized,
                                         n_parts=n_parts,
                                         n_bps_points=N_BPS_POINTS,
                                         bps_cell_type='dists',
                                         radius=BPS_RADIUS)
        print("conversion finished. ")
        print("saving cache file for future runs..")

        np.savez(APTBPS_CACHE_FILE, xtr=xtr_bps, ytr=ytr, xte=xte_bps, yte=yte)

    else:
        print("loading converted data from cache..")
        data = np.load(APTBPS_CACHE_FILE)
        xtr_bps = data['xtr']
        ytr = data['ytr']
        xte_bps = data['xte']
        yte = data['yte']

    dataset_tr = pt.utils.data.TensorDataset(pt.Tensor(xtr_bps),
                                             pt.Tensor(ytr[:, 0]).long())
    print("----")
    print(dataset_tr)
    train_loader = pt.utils.data.DataLoader(dataset_tr,
                                            batch_size=512,
                                            shuffle=True)
    print("---")
    print(train_loader)

    dataset_te = pt.utils.data.TensorDataset(pt.Tensor(xte_bps),
                                             pt.Tensor(yte[:, 0]).long())
    test_loader = pt.utils.data.DataLoader(dataset_te,
                                           batch_size=512,
                                           shuffle=True)

    return train_loader, test_loader
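A minimal usage sketch; n_parts controls how many density partitions the adaptive encoder uses (the function's default is 2):

train_loader, test_loader = prepare_data_loaders(n_parts=4)

# one training batch: flat BPS distance features plus integer class labels
x_batch, y_batch = next(iter(train_loader))
print(x_batch.shape, y_batch.shape)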