Code example #1
    beta = 100

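    # Partial-input settings for the conditioning point cloud (completeness ratio and sparsity).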
    partial_input = True
    data_completeness = 0.7
    data_sparsity = 100

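    # Try to load a previously exported SDF volume for this split, checkpoint, and shape index.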
    try:
        volume = np.load(
            'sdf' + save_fold +
            '/sdf_{}_{}_{}.npy'.format(split, checkpoint, conditioned_ind))
    except FileNotFoundError:
        volume = None

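    # No cached volume: build the ShapeNet test set (synset 02958343, cars) instead.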
    if volume is None:
        DATA_PATH = 'data/ShapeNet'
        fields = {'inputs': dataset.PointCloudField('pointcloud.npz')}
        category = ['02958343']
        test_dataset = dataset.ShapenetDataset(
            dataset_folder=DATA_PATH,
            fields=fields,
            categories=category,
            split=split,
            partial_input=partial_input,
            data_completeness=data_completeness,
            data_sparsity=data_sparsity,
            evaluation=True)

        # conditioned_input = test_dataset.__getitem__(conditioned_ind)['points'].unsqueeze(0)
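        # KITTI-360 building point clouds (real-world scan data).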
        ds_kitti = dataset.KITTI360Dataset('data/KITTI-360/data_3d_pointcloud',
                                           'train',
                                           'building',
Code example #2
            for k, v in saved_model_state.items()
        })

    # set multi-gpu if available
    if torch.cuda.device_count() > 1:
        print("Let's use", torch.cuda.device_count(), "GPUs!")
        net = torch.nn.DataParallel(net)
    net.to(device)
    num_params = sum(p.numel() for p in net.parameters() if p.requires_grad)
    print('The number of parameters of model is', num_params)

    # create dataloader
    # ShapeNet
    DATA_PATH = cfg['data']['path']
    fields = {
        'inputs': dataset.PointCloudField(cfg['data']['pointcloud_file'])
    }
    category = cfg['data']['classes']
    shapenet_dataset = dataset.ShapenetDataset(
        dataset_folder=DATA_PATH,
        fields=fields,
        categories=category,
        split='train',
        with_normals=use_normal,
        points_batch=points_batch,
        partial_input=partial_input,
        data_completeness=data_completeness,
        data_sparsity=data_sparsity)
    shapenet_loader = torch.utils.data.DataLoader(
        shapenet_dataset,
        batch_size=batch_size_shapenet,