Example #1
def test_suite():
    from dataset.datasets import FullPartDatasetMenu
    from dataset.transforms import RandomGaussianNoise, RandomTranslate  # import path inferred from Example #3
    from util.mesh.plots import plot_mesh, plot_mesh_montage  # plot_mesh_montage assumed to live beside plot_mesh
    ds = FullPartDatasetMenu.get('FaustPyProj')
    single_ldr = ds.loaders(
        s_nums=1000,
        s_shuffle=True,
        s_transform=[RandomGaussianNoise((0, 0.05), okeys=('gt_noise',))],
        n_channels=6,
        method='rand_f2f',
        batch_size=1,
        device='cpu-single')
    for dp in single_ldr:
        dp['gt'] = dp['gt'].squeeze()
        gt = dp['gt']
        # mask = dp['gt_mask'][0]
        # gt_part = gt[mask, :]
        trans = RandomTranslate(0.01, keys=['gt'])  # defined here - dp = trans(dp) below needs it
        # v = gt_part[:, :3]
        # n = gt_part[:, 3:6]
        # _, f = trunc_to_vertex_mask(gt[:, :3], ds.faces(), mask)
        plot_mesh_montage(vb=[gt[:, :3], dp['gt_noise']],
                          fb=ds.faces(),
                          strategy='spheres')
        dp = trans(dp)
        v = gt[:, :3]
        n = gt[:, 3:6]
        plot_mesh(v=v, n=n, f=ds.faces(), strategy='mesh')
        break
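A note on the noise transform used above: RandomGaussianNoise with okeys=('gt_noise',) evidently writes a noisy copy under 'gt_noise' rather than perturbing 'gt' in place, which is why the clean and noisy meshes can be plotted side by side. A minimal sketch of such a transform, assuming the real class takes a (mu, sigma) tuple and an okeys tuple:

import torch

class GaussianNoiseSketch:
    """Stand-in for dataset.transforms.RandomGaussianNoise - signature assumed."""
    def __init__(self, mu_sigma=(0, 0.05), okeys=('gt_noise',)):
        self.mu, self.sigma = mu_sigma
        self.okeys = okeys

    def __call__(self, dp):
        xyz = dp['gt'][..., :3]  # noise only makes sense on the coordinates
        noisy = xyz + self.mu + self.sigma * torch.randn_like(xyz)
        for k in self.okeys:
            dp[k] = noisy  # noisy copy under the output key; dp['gt'] stays clean
        return dp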
Example #2
def denoising_loader_set(hp):
    ds = FullPartDatasetMenu.get('DFaustPyProj')
    assert hp.in_channels == 3  # Normals are not valid after noise
    ldrs = ds.loaders(split=[0.8, 0.1, 0.1], s_nums=[10000, 1000, 1000], s_shuffle=[True] * 3,
                      s_transform=[Center(), RandomGaussianNoise((0, 0.05), okeys=('gt_noise',))],
                      batch_size=hp.batch_size, device=hp.dev,
                      n_channels=hp.in_channels, method='rand_f2f', s_dynamic=[True, False, False])
    ldrs[1], ldrs[2] = [ldrs[1]], [ldrs[2]]

    ds = FullPartDatasetMenu.get('AmassTrainPyProj')
    tv_ldrs = ds.loaders(split=[0.2, 0.8], s_nums=[1000, 1000], s_shuffle=[True] * 2,
                         s_transform=[Center(), RandomGaussianNoise((0, 0.05), okeys=('gt_noise',))],
                         batch_size=hp.batch_size, device=hp.dev,
                         n_channels=hp.in_channels, method='rand_f2f', s_dynamic=[False, False])
    ldrs[1].append(tv_ldrs[0]), ldrs[2].append(tv_ldrs[1])

    return ldrs
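The list-wrapping and appends above mean the function returns one train loader plus lists of validation/test loaders. A hypothetical consumption sketch, borrowing the LightningTrainer wiring from Example #4 (nn and hp are assumed to exist as they do there):

ldrs = denoising_loader_set(hp)
train_ldr, vald_ldrs, test_ldrs = ldrs  # loader, [DFaust, Amass] vald, [DFaust, Amass] test
trainer = LightningTrainer(nn, [train_ldr, vald_ldrs, test_ldrs])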
Example #3
def bring_in_test_data():
    import torch
    from dataset.datasets import FullPartDatasetMenu
    from dataset.transforms import Center
    ds = FullPartDatasetMenu.get('FaustPyProj')
    samp = ds.sample(num_samples=5, transforms=[Center()],
                     method='f2p')  # dim:
    vb = samp['gt'][:, :, :3]
    f = torch.from_numpy(ds.faces()).long()
    return vb, f
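A possible follow-up, feeding the returned batch to the montage plotter from Examples #1 and #6 (passing one face array for the whole batch mirrors the fb=ds.faces() usage in Example #1):

from util.mesh.plots import plot_mesh_montage
vb, f = bring_in_test_data()
plot_mesh_montage(vb=vb, fb=f, strategy='mesh')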
Example #4
def test_main():
    nn = F2PEncoderDecoder(parser())

    ds = FullPartDatasetMenu.get('DFaustPyProj')
    test_ldr = ds.loaders(s_nums=[1000, 1000], s_transform=[Center()], batch_size=nn.hp.batch_size,
                          device=nn.hp.dev, n_channels=nn.hp.in_channels, method='f2p')

    trainer = LightningTrainer(nn, [None, None, test_ldr])
    trainer.test()
    trainer.finalize()
Example #5
def mixamo_loader_set(hp):
    # TODO - remember to change the Mixamo path.
    ds_mixamo = FullPartDatasetMenu.get('MixamoPyProj', data_dir_override=r"Z:\ShapeCompletion\Mixamo")
    ldrs = ds_mixamo.loaders(split=[0.8, 0.1, 0.1], s_nums=[10000, 1000, 1000], s_shuffle=[True] * 3,
                             s_transform=[Center()] * 3, batch_size=hp.batch_size, device=hp.dev,
                             n_channels=hp.in_channels, method='rand_f2p', s_dynamic=[True, False, False])
    ldrs[1], ldrs[2] = [ldrs[1]], [ldrs[2]]

    ds = FullPartDatasetMenu.get('FaustPyProj')
    # Mixamo is composed of Faust subjects - do not use them in the test/validation splits due to contamination
    tv_ldrs = ds.loaders(split=[0.8, 0.1, 0.1], s_nums=[1000] * 3, s_transform=[Center()] * 3,
                         batch_size=hp.batch_size, device=hp.dev, n_channels=hp.in_channels,
                         method='f2p', s_shuffle=[True] * 3, s_dynamic=[False] * 3)
    ldrs[1].append(tv_ldrs[1]), ldrs[2].append(tv_ldrs[2])

    ds = FullPartDatasetMenu.get('DFaustPyProj')
    tv_ldrs = ds.loaders(split=[0.2, 0.8], s_nums=[1000, 1000], s_transform=[Center()] * 2,
                         batch_size=hp.batch_size, device=hp.dev, n_channels=hp.in_channels,
                         method='rand_f2p', s_shuffle=[True] * 2, s_dynamic=[False, False])
    ldrs[1].append(tv_ldrs[0]), ldrs[2].append(tv_ldrs[1])

    # ds = FullPartDatasetMenu.get('DFaustPyProjSeq',
    #                              data_dir_override=hp.PRIMARY_DATA_DIR / 'synthetic' / 'DFaustPyProj')
    # tv_ldrs = ds.loaders(split=[0.2, 0.8], s_nums=[1000, 1000], s_transform=[Center()] * 2,
    #                      batch_size=hp.batch_size, device=hp.dev, n_channels=hp.in_channels,
    #                      method='rand_f2p_seq', s_shuffle=[True] * 2, s_dynamic=[False, False])
    # ldrs[1].append(tv_ldrs[0]), ldrs[2].append(tv_ldrs[1])

    # ds = FullPartDatasetMenu.get('AmassValdPyProj')  # AmassTestPyProj sucks
    # tv_ldrs = ds.loaders(split=[0.2, 0.8], s_nums=[1000, 1000], s_transform=[Center()] * 2,
    #                      batch_size=hp.batch_size, device=hp.dev, n_channels=hp.in_channels,
    #                      method='rand_f2p', s_shuffle=[True] * 2, s_dynamic=[False, False])
    # ldrs[1].append(tv_ldrs[0]), ldrs[2].append(tv_ldrs[1])

    ds = FullPartDatasetMenu.get('AmassTrainPyProj')
    tv_ldrs = ds.loaders(split=[0.2, 0.8], s_nums=[1000, 1000], s_transform=[Center()] * 2,
                         batch_size=hp.batch_size, device=hp.dev, n_channels=hp.in_channels,
                         method='rand_f2p', s_shuffle=[True] * 2, s_dynamic=[False, False])
    ldrs[1].append(tv_ldrs[0]), ldrs[2].append(tv_ldrs[1])

    return ldrs
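A quick sanity sketch for the structure returned above - with the commented-out datasets left disabled, each of the validation and test slots should hold one loader per enabled dataset (Mixamo, Faust, DFaust, Amass):

ldrs = mixamo_loader_set(hp)
print(len(ldrs[1]), len(ldrs[2]))  # expect: 4 4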
Example #6
def visuals_tester():
    from dataset.datasets import FullPartDatasetMenu
    from dataset.transforms import Center
    from util.mesh.plots import plot_mesh_montage  # assumed to live beside plot_mesh (see Example #1)

    ds = FullPartDatasetMenu.get('MixamoPyProj',
                                 data_dir_override=r"Z:\ShapeCompletion\Mixamo")
    samp = ds.sample(1, transforms=[Center()], n_channels=3,
                     method='rand_f2p')  # dim:
    gt = samp['gt'][0]
    mask = samp['gt_mask'][0]
    tp = samp['tp'][0]
    gt_part = gt[mask, :]

    plot_mesh_montage([tp, gt, gt_part],
                      strategy='spheres',
                      clr='lightblue',
                      grid_on=True)
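gt_part is plotted as a point set ('spheres') because masking vertices invalidates the face array. To show the part as a mesh, one would need a truncated face set, e.g. via the trunc_to_vertex_mask helper referenced in Example #1 (its import location is not shown in these snippets):

_, f_part = trunc_to_vertex_mask(gt[:, :3], ds.faces(), mask)
plot_mesh_montage([gt_part], fb=[f_part], strategy='mesh')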
Example #7
def dataset_tutorial():
    # Use the menu to see which datasets are implemented
    print(FullPartDatasetMenu.which())
    ds = FullPartDatasetMenu.get('FaustPyProj')  # This will fail if you don't have the data on disk
    # ds.validate_dataset()  # Make sure all files are available - Only run this once, to make sure.

    # For simplicity's sake, we support the old random dataloader as well:
    ldr = ds.rand_loader(num_samples=1000,
                         transforms=[Center()],
                         batch_size=16,
                         n_channels=6,
                         device='cpu-single',
                         mode='f2p')
    for point in ldr:
        print(point)
        break
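    # Each yielded point is a dict; for mode='f2p' it presumably carries the same
    # keys as the f2p sample shown below (gt_hi, gt, gt_mask, tp), batched to
    # batch_size=16 - the exact key set is an inference from this tutorial's printouts.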

    banner('The HIT')
    ds.report_index_tree()  # Take a look at how the dataset is indexed - using the HIT [HierarchicalIndexTree]

    banner('Collateral Info')
    print(f'Dataset Name = {ds.name()}')
    print(f'Number of indexed files = {ds.num_indexed()}')
    print(f'Number of full shapes = {ds.num_full_shapes()}')
    print(f'Number of projections = {ds.num_projections()}')
    print(f'Required disk space in bytes = {ds.disk_space()}')
    # You can also request a summary printout with:
    ds.data_summary(with_tree=False)  # Don't print out the tree again

    # For models with a single set of faces (SMPL or SMLR for example) you can request the face set/number of vertices
    # directly:
    banner('Face Array')
    print(ds.faces())
    print(ds.num_faces())
    print(ds.num_verts())
    # You can also ask for the dataset's null-shape - the one with hi = [0, 0, ..., 0]
    print(ds.null_shape(n_channels=6))
    ds.plot_null_shape(strategy='spheres', with_vnormals=True)

    # Let's look at the various sampling methods available to us:
    print(ds.defined_methods())
    # We can ask for a sample of the data with this sampling method:
    banner('Data Sample')
    samp = ds.sample(num_samples=2,
                     transforms=[Center(keys=['gt'])],
                     n_channels=6,
                     method='full')
    print(samp)  # Dict with gt_hi & gt
    print(ds.num_datapoints_by_method('full'))  # 100

    samp = ds.sample(num_samples=2,
                     transforms=[Center(keys=['gt'])],
                     n_channels=6,
                     method='part')
    print(samp)  # Dict with gt_hi & gt & gt_mask
    print(ds.num_datapoints_by_method('part'))  # 1000

    samp = ds.sample(num_samples=2,
                     transforms=[Center(keys=['gt'])],
                     n_channels=6,
                     method='f2p')
    print(samp)  # Dict with gt_hi & gt & gt_mask & tp
    print(ds.num_datapoints_by_method('f2p'))  # 10000 tuples of (gt, tp) where the subjects are the same
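    # The counts line up: 100 full shapes ('full') x 10 projections each give the
    # 1000 parts ('part'), and pairing each part with its subject's full shapes
    # yields the 10000 (gt, tp) tuples - the per-subject breakdown is an inference
    # from the printed totals, not something queried from the API here.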

    # You can also ask for a simple loader, given by the ids you'd like to see.
    # Pass ids = None to index the entire dataset, from id 0 up to num_datapoints_by_method - 1
    banner('Loaders')
    single_ldr = ds.loaders(s_nums=1000,
                            s_shuffle=True,
                            s_transform=[Center()],
                            n_channels=6,
                            method='f2p',
                            batch_size=3,
                            device='cpu-single')
    for d in single_ldr:
        print(d)
        break

    print(single_ldr.num_verts())
    # There are also operations defined on the loaders themselves. See utils.torch_data for details

    # To receive train/validation splits or train/validation/test splits use:
    my_loaders = ds.loaders(split=[0.8, 0.1, 0.1],
                            s_nums=[2000, 1000, 1000],
                            s_shuffle=[True] * 3,
                            s_transform=[Center()] * 3,
                            global_shuffle=True,
                            method='p2p',
                            s_dynamic=[True, False, False])
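    # A 3-way split hands back three loaders, so unpacking is simply:
    train_ldr, vald_ldr, test_ldr = my_loaders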