def __init__(self, dataset_opt):
        super().__init__(dataset_opt)

        number = dataset_opt.number
        if str(number) not in self.AVAILABLE_NUMBERS:
            raise ValueError("Only ModelNet10 and ModelNet40 are available")

        name = "ModelNet{}".format(number)
        self._data_path = osp.join(osp.dirname(osp.realpath(__file__)), "..",
                                   "data", name)

        self.train_dataset = ModelNet(
            self._data_path,
            name=str(number),
            train=True,
            transform=self.train_transform,
            pre_transform=self.pre_transform,
        )

        self.test_dataset = ModelNet(
            self._data_path,
            name=str(number),
            train=False,
            transform=self.test_transform,
            pre_transform=self.pre_transform,
        )
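Example #2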
    def __init__(self, dataset_opt, training_opt):
        super().__init__(dataset_opt, training_opt)

        number = dataset_opt.number
        if str(number) not in AVAILABLE_NUMBERS:
            raise ValueError("Only ModelNet10 and ModelNet40 are available")
        name = "ModelNet{}".format(number)
        self._data_path = osp.join(osp.dirname(osp.realpath(__file__)), "..",
                                   "data", name)
        pre_transform = T.Compose([T.NormalizeScale(), MeshToNormal()])
        transform = (T.SamplePoints(dataset_opt.num_points) if contains_key(
            dataset_opt, "num_points") else None)

        train_dataset = ModelNet(
            self._data_path,
            name=str(number),
            train=True,
            transform=transform,
            pre_transform=pre_transform,
        )

        test_dataset = ModelNet(
            self._data_path,
            name=str(number),
            train=False,
            transform=transform,
            pre_transform=pre_transform,
        )

        self._create_dataloaders(train_dataset, test_dataset, validation=None)
Example #3
def load_modelnet(version='10', point_flag=False):
    """
    :param point_flag: Sample points if point_flag true. Otherwise load mesh
    :return: train_dataset, test_dataset
    """
    assert version in ['10', '40']
    if point_flag:
        pre_transform, transform = T.NormalizeScale(), T.SamplePoints(1024)
    else:
        pre_transform, transform = FaceToEdge(), None

    # path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data/ModelNet' + version)
    path = '/home/cai.507/Documents/DeepLearning/ModelNet' + version

    train_dataset = ModelNet(path,
                             version,
                             True,
                             transform=transform,
                             pre_transform=pre_transform)
    test_dataset = ModelNet(path,
                            version,
                            False,
                            transform=transform,
                            pre_transform=pre_transform)
    return train_dataset, test_dataset
Example #4
def get_dataset(num_points):
    name = 'ModelNet10'
    path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', name)
    pre_transform = T.NormalizeScale()
    transform = T.SamplePoints(num_points)

    train_dataset = ModelNet(path, name='10', train=True, transform=transform,
                             pre_transform=pre_transform)
    test_dataset = ModelNet(path, name='10', train=False, transform=transform,
                            pre_transform=pre_transform)

    return train_dataset, test_dataset
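Example #5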
def load_dataset(path,
                 transform=None,
                 pre_transform=None,
                 pre_filter=None,
                 category=None,
                 name='10',
                 test_area=6):
    if path.name == 'ShapeNet':
        train_dataset = ShapeNet(path,
                                 category,
                                 split='trainval',
                                 transform=transform,
                                 pre_transform=pre_transform,
                                 pre_filter=pre_filter)
        test_dataset = ShapeNet(path,
                                category,
                                split='test',
                                transform=transform,
                                pre_transform=pre_transform,
                                pre_filter=pre_filter)
    elif path.name == 'ModelNet':
        train_dataset = ModelNet(path,
                                 name=name,
                                 train=True,
                                 transform=transform,
                                 pre_transform=pre_transform,
                                 pre_filter=pre_filter)
        test_dataset = ModelNet(path,
                                name=name,
                                train=False,
                                transform=transform,
                                pre_transform=pre_transform,
                                pre_filter=pre_filter)
    elif path.name == 'S3DIS':
        train_dataset = S3DIS(path,
                              test_area=test_area,
                              train=True,
                              transform=transform,
                              pre_transform=pre_transform,
                              pre_filter=pre_filter)
        test_dataset = S3DIS(path,
                             test_area=test_area,
                             train=False,
                             transform=transform,
                             pre_transform=pre_transform,
                             pre_filter=pre_filter)

    else:
        # Fail fast instead of hitting a NameError on the return below.
        raise ValueError('Unknown dataset: {}'.format(path.name))

    return train_dataset, test_dataset
Example #6
def __init__(self, train):
    dataset = "ModelNet40"
    pre_transform, transform = T.NormalizeScale(), T.SamplePoints(1024)
    path = osp.join(osp.dirname(osp.realpath(__file__)), "../..", "data", dataset)
    if not osp.exists(path):
        # Trigger the one-time download/processing; keyword arguments keep the
        # transforms out of the positional `train` slot.
        ModelNet(path, "40", transform=transform, pre_transform=pre_transform)
    super(ModelNet40, self).__init__(path, name="40", train=train, transform=transform, pre_transform=pre_transform)
Example #7
def get_modelnet_data(cfg, train=True, name='40'):
    if train:
        print('-' * 20 + "Loading ModelNet Training Data" + '-' * 20)
        model_net_data = ModelNet(root=os.path.join(cfg["root_path"],
                                                    cfg["data_path"],
                                                    "modelnet_train"),
                                  name=name,
                                  train=True)
    else:
        print('-' * 20 + "Loading ModelNet Testing Data" + '-' * 20)
        model_net_data = ModelNet(root=os.path.join(cfg["root_path"],
                                                    cfg["data_path"],
                                                    "modelnet_test"),
                                  name=name,
                                  train=False)

    return model_net_data
Example #8
def load_dataset(args):

    # load ShapeNet dataset
    if args.dataset == 'shapenet':
        pre_transform, transform = augment_transforms(args)

        categories = args.categories.split(',')
        train_dataset = ShapeNet('../data_root/ShapeNet_normal', categories, split='trainval', include_normals=False,
                                 pre_transform=pre_transform, transform=transform)
        test_dataset = ShapeNet('../data_root/ShapeNet_normal', categories, split='test', include_normals=False,
                                pre_transform=pre_transform, transform=T.FixedPoints(args.num_pts))
        train_dataloader = DataLoader(train_dataset, batch_size=args.bsize, shuffle=True,
                                      num_workers=6, drop_last=True)
        test_dataloader = DataLoader(test_dataset, batch_size=args.bsize, shuffle=True,
                                     num_workers=6, drop_last=True)

    # load ModelNet dataset
    if args.dataset == 'modelnet':
        pre_transform, transform = augment_transforms(args)

        train_dataset = ModelNet('../data_root/ModelNet40', name='40', train=True,
                                 pre_transform=pre_transform, transform=transform)
        test_dataset = ModelNet('../data_root/ModelNet40', name='40', train=False,
                                 pre_transform=pre_transform, transform=T.SamplePoints(args.num_pts))
        train_dataloader = DataLoader(train_dataset, batch_size=args.bsize, shuffle=True,
                                      num_workers=6, drop_last=True)
        test_dataloader = DataLoader(test_dataset, batch_size=args.bsize, shuffle=True,
                                     num_workers=6, drop_last=True)

    # load completion3D dataset
    if args.dataset == 'completion3D':
        pre_transform, transform = augment_transforms(args)

        categories = args.categories.split(',')
        train_dataset = completion3D_class('../data_root/completion3D', categories, split='train',
                            include_normals=False, pre_transform=pre_transform, transform=transform)
        test_dataset = completion3D_class('../data_root/completion3D', categories, split='val',
                            include_normals=False, pre_transform=pre_transform, transform=transform)
        train_dataloader = DataLoader(train_dataset, batch_size=args.bsize, shuffle=True,
                                      num_workers=8, drop_last=True)
        test_dataloader = DataLoader(test_dataset, batch_size=args.bsize, shuffle=False,
                                     num_workers=8, drop_last=True)

    return train_dataloader, test_dataloader
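Example #9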
def get_dataloader(num_points, b_size, name='10'):
    path = 'ModelNet' + name
    pre_transform = T.NormalizeScale()
    transform = T.SamplePoints(num_points)

    train_dataset = ModelNet('dataset/' + path,
                             name=name,
                             train=True,
                             transform=transform,
                             pre_transform=pre_transform)
    test_dataset = ModelNet('dataset/' + path,
                            name=name,
                            train=False,
                            transform=transform,
                            pre_transform=pre_transform)

    train_loader = DataLoader(train_dataset, batch_size=b_size, shuffle=True)
    test_loader = DataLoader(test_dataset, batch_size=b_size, shuffle=False)

    return train_loader, test_loader
Example #10
def __init__(self, path: str):
    pyg_dataset = ModelNet(
        os.path.join(path, '_pyg'),
        '40',
        False,
        pre_transform=torch_geometric.transforms.FaceToEdge())
    if hasattr(pyg_dataset, "__data_list__"):
        delattr(pyg_dataset, "__data_list__")
    if hasattr(pyg_dataset, "_data_list"):
        delattr(pyg_dataset, "_data_list")
    super(ModelNet40TestDataset, self).__init__([
        GeneralStaticGraphGenerator.create_homogeneous_static_graph(
            {'pos': pyg_data.pos},
            pyg_data.edge_index,
            graph_data={'y': pyg_data.y}) for pyg_data in pyg_dataset
    ])
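Example #11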
    def __init__(self,
                 data_dir,
                 batch_size,
                 shuffle,
                 validation_split,
                 num_workers,
                 num_points,
                 training=True):
        # trsfm = transforms.Compose([
        #     transforms.ToTensor(),
        #     transforms.Normalize((0.1307,), (0.3081,))
        # ])

        self.data_dir = data_dir
        path = osp.join(self.data_dir, 'ModelNet10')
        pre_transform, transform = T.NormalizeScale(), T.SamplePoints(
            num_points)

        train_dataset = ModelNet(path, '10', training, transform,
                                 pre_transform)

        super(MyModelNetDataLoader, self).__init__(train_dataset,
                                                   batch_size=batch_size,
                                                   shuffle=shuffle)
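Example #12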
def LoadDataset(dataset_train,
                dataset_test,
                pre_transform_train=None,
                transform_train=None,
                pre_transform_test=None,
                transform_test=None,
                category=None):
    path_train = osp.join(DATASET_PATH, dataset_train)
    path_test = osp.join(DATASET_PATH, dataset_test)

    if not osp.exists(path_train):
        os.makedirs(path_train)

    if not osp.exists(path_test):
        os.makedirs(path_test)

    _train_dataset = None
    if (dataset_train == 'ModelNet10'):
        _train_dataset = ModelNet(path_train, '10', True, transform_train,
                                  pre_transform_train)

    elif (dataset_train == 'ModelNet40'):
        _train_dataset = ModelNet(path_train, '40', True, transform_train,
                                  pre_transform_train)

    elif (dataset_train == 'PoissonModelNet40'):
        _train_dataset = PoissonModelNet(path_train, '40', True)

    elif (dataset_train == 'PoissonModelNet10'):
        _train_dataset = PoissonModelNet(path_train, '10', True)

    elif (dataset_train == 'ShapeNet'):
        _train_dataset = ShapeNet(path_train,
                                  category,
                                  train=True,
                                  transform=transform_train,
                                  pre_transform=pre_transform_train)

    _test_dataset = None
    if (dataset_test == 'ModelNet10'):
        _test_dataset = ModelNet(path_test, '10', False, transform_test,
                                 pre_transform_test)

    elif (dataset_test == 'ModelNet40'):
        _test_dataset = ModelNet(path_test, '40', False, transform_test,
                                 pre_transform_test)

    elif (dataset_test == 'PoissonModelNet40'):
        _test_dataset = PoissonModelNet(path_test, '40', False)

    elif (dataset_test == 'PoissonModelNet10'):
        _test_dataset = PoissonModelNet(path_test, '10', False)

    elif (dataset_test == 'ShapeNet'):
        _test_dataset = ShapeNet(path_test,
                                 category,
                                 train=False,
                                 pre_transform=pre_transform_test)

    if _train_dataset is None or _test_dataset is None:
        print("Invalid dataset requested!")
        return (None, None)
    else:
        print('Dataset Train: {}'.format(dataset_train))
        logging.info('Dataset Train: {}'.format(dataset_train))

        print('Dataset Test: {}'.format(dataset_test))
        logging.info('Dataset Test: {}'.format(dataset_test))

        return (_train_dataset, _test_dataset)
Example #13
def get_test_split(self):
    return ModelNet(self.root,
                    self.name,
                    train=False,
                    transform=self.transform,
                    pre_transform=self.pre_transform)
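Example #14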
        data_path = os.path.join("/data", "pkurei", pl_path)
    elif dataset_type == "MPEG":
        # pl_path = 'pku'
        dataset_name = "data-5.0"
        data_path = os.path.join(dataset_name)
    print(colorama.Fore.RED + "Testing on dataset %s at %s" % (dataset_type, data_path))

    for path in (data_path,):
        check_dir(path, color=colorama.Fore.CYAN)

    # dataset and dataloader
    if dataset_type == "MN40":
        samplePoints = 1024
        train_dataset = ModelNet(
            root=data_path,
            name="40",
            train=True,
            pre_transform=transform(samplePoints=samplePoints),
        )
        test_dataset = ModelNet(
            root=data_path,
            name="40",
            train=False,
            pre_transform=transform(samplePoints=samplePoints),
        )
        if parallel:
            train_loader = DataListLoader(
                train_dataset,
                batch_size=batch_size,
                shuffle=False,
                drop_last=False,
                num_workers=16,
Example #15
# https://github.com/rusty1s/pytorch_geometric
import os.path as osp

import torch
import torch.nn.functional as F
import torch_geometric.transforms as T
from torch import nn, optim
from torch_geometric.data import DataLoader
from torch_geometric.datasets import ModelNet
from torch_geometric.nn import DynamicEdgeConv, global_max_pool

from pointnet2_classification import MLP

path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data/ModelNet10')
pre_transform, transform = T.NormalizeScale(), T.SamplePoints(1024)
train_dataset = ModelNet(path, '10', True, transform, pre_transform)
test_dataset = ModelNet(path, '10', False, transform, pre_transform)
train_loader = DataLoader(train_dataset,
                          batch_size=32,
                          shuffle=True,
                          num_workers=6)
test_loader = DataLoader(test_dataset,
                         batch_size=32,
                         shuffle=False,
                         num_workers=6)


class Net(torch.nn.Module):
    def __init__(self, out_channels, k=20, aggr='max'):
        super().__init__()
Example #16
    return graphs, labels


if __name__ == '__main__':
    version = '10'  # 10 or 40
    idx = 1

    # path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data/ModelNet' + version)
    path = '/home/cai.507/Documents/DeepLearning/ModelNet' + version

    pre_transform, transform = FaceToEdge(), None

    # train_dataset = ModelNet(path, version, True, transform=transform, pre_transform=pre_transform)
    test_dataset = ModelNet(path,
                            version,
                            False,
                            transform=transform,
                            pre_transform=pre_transform)
    data = test_dataset  # train_dataset + test_dataset

    test_data = data[idx]

    data = FaceToEdge()(test_data)
    from torch_geometric.utils import to_networkx
    print(data)
    print(f'num of nodes is {data.pos.shape[0]}')
    g = to_networkx(data, num_nodes=data.pos.shape[0]).to_undirected()
    # gs, _ = torch_geometric_2nx([data])
    # g = gs[0]
    print(nx.info(g))
Example #17
    assert dataset_type in ['10', '40']
    if dataset_type == '10':
        pl_path = 'modelnet-10-pointcloud'
        model_path = 'modelnet10-pointnet'
        fout = 10
    elif dataset_type == '40':
        pl_path = 'modelnet40-2500'
        model_path = 'modelnet40-pointnet'
        fout = 40
    assert pl_path and model_path and fout

    model = DGCNNClassifier(in_channels=3, classes=40).to(device)
    train_dataset = ModelNet(
        root=os.path.join('data', pl_path),
        name='40',
        train=True,
        # pre_transform=tg.transforms.SamplePoints(samplePoints),
        # transform=tg.transforms.KNNGraph(k=10))
        pre_transform=tg.transforms.SamplePoints(2500))
    test_dataset = ModelNet(
        root=os.path.join('data', pl_path),
        name='40',
        train=False,
        # pre_transform=tg.transforms.SamplePoints(samplePoints),
        # transform=tg.transforms.KNNGraph(k=10))
        pre_transform=tg.transforms.SamplePoints(2500))

    loader = DataLoader(train_dataset, batch_size=1, shuffle=True)
    for b in loader:
        x, batch = b.pos, b.batch
        # if summary is not None:
Example #18
    # model_geo = GCNClassifierSparse(in_channels=3, classes=10).to(device)
    # modelnet = ModelNet(root='data/modelnet10', name='10', train=True,
    #                     transform=transform)
    # loader = DataLoader(modelnet, batch_size=24, shuffle=True)
    # for i, batch in enumerate(loader, 0):
    #     out = model_geo(batch.to(device))
    #     print(out, out.shape, sep='\n')
    #     break
    # del model_geo
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    model = GCNClassifierSparse(in_channels=3,
                                classes=40).to(device)  # train on ModelNet-40
    train_dataset = ModelNet(
        root='data/modelnet40',
        name='40',
        train=True,
        pre_transform=tg.transforms.SamplePoints(samplePoints),
        transform=tg.transforms.KNNGraph(k=10))
    test_dataset = ModelNet(
        root='data/modelnet40',
        name='40',
        train=False,
        pre_transform=tg.transforms.SamplePoints(samplePoints),
        transform=tg.transforms.KNNGraph(k=10))
    loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=False)
    test_loader = DataLoader(test_dataset,
                             batch_size=batch_size,
                             shuffle=False)

# %%
Example #19
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-d', '--data', default='10', choices=['10', '40'])
    parser.add_argument('--moex_prob', default=0., type=float)
    parser.add_argument('--moex_lambda', default=1., type=float)
    parser.add_argument('--moex_norm', default='pono', type=str)
    parser.add_argument('--moex_epsilon', default=1e-5, type=float)
    parser.add_argument('--seed', default=1, type=int)
    args = parser.parse_args()

    torch.manual_seed(args.seed)

    path = osp.join(osp.dirname(osp.realpath(__file__)), '..',
                    f'data/ModelNet{args.data}')
    pre_transform, transform = T.NormalizeScale(), T.SamplePoints(1024)
    train_dataset = ModelNet(path, args.data, True, transform, pre_transform)
    test_dataset = ModelNet(path, args.data, False, transform, pre_transform)
    train_loader = DataLoader(train_dataset,
                              batch_size=32,
                              shuffle=True,
                              num_workers=6)
    test_loader = DataLoader(test_dataset,
                             batch_size=32,
                             shuffle=False,
                             num_workers=6)

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = Net(int(args.data)).to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)

    for epoch in range(1, 201):
Example #20
    correct = []
    for i_batch, batch in enumerate(dataloader):
        x = batch.pos.reshape((-1, 1024, 3)).to(DEVICE)
        pred = model(x)
        pred = torch.argmax(pred, dim=-1)
        pred, y = pred.cpu().detach().numpy(), batch.y.detach().numpy()
        correct.append(np.mean(pred == y))
    return np.mean(correct)


DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

pre_transform, transform = T.NormalizeScale(), T.SamplePoints(1024)
dataset = ModelNet('./data/model-net', '10', True, transform, pre_transform)
dataloader = DataLoader(dataset, num_workers=2, batch_size=16, shuffle=True)

loss_dict = {}
for lr in (5e-3, ):
    model = PointNet(3, [64, 64], [128, 128, 128], 10).to(DEVICE)
    optimizer = optim.Adam(model.parameters(), lr=lr)
    criterion = nn.CrossEntropyLoss()

    key = lr
    loss_dict[key] = []

    for epoch in range(5):
        loss = train_epoch()
        loss_dict[key].append(loss)
        acc = test()
Example #21
from torch_geometric.datasets import ModelNet
import torch_geometric.transforms as T
import time
from tqdm import tqdm_notebook

pre_transform = T.NormalizeScale()
transform = T.Compose([
    T.SamplePoints(1024),
    T.RandomRotate(30),
    T.RandomScale((0.5, 2)),
])
name = '40'

train_ds = ModelNet(root='./',
                    train=True,
                    name=name,
                    pre_transform=pre_transform,
                    transform=transform)

test_ds = ModelNet(root='./',
                   train=False,
                   name=name,
                   pre_transform=pre_transform,
                   transform=T.SamplePoints(1024 * 4))

# In[3]:

device = torch.device('cuda:1' if torch.cuda.is_available() else 'cpu')
device

# Now we have to define our dataloaders; they manage the worker queue that feeds batches to the GPU
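# A minimal sketch of that setup, assuming the train_ds/test_ds defined above
# and torch_geometric's DataLoader; batch size and worker count here are
# placeholder values to tune for your hardware.
from torch_geometric.data import DataLoader

train_loader = DataLoader(train_ds, batch_size=32, shuffle=True, num_workers=6)
test_loader = DataLoader(test_ds, batch_size=32, shuffle=False, num_workers=6)
Example #22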
            print("NaN!")
        # print(res.max(), res.min(), torch.isnan(res).sum())
        return res

def scale_normalize(data):
    data = tg.transforms.SamplePoints(1024)(data)
    data = tg.transforms.NormalizeScale()(data)
    return data

if __name__ == "__main__":
    # load ModelNet-10 dataset
    # modelnet = ModelNet(root='data/modelnet10-ori', name='10', train=True, transform=tg.transforms.SamplePoints(samplePoints))
    # test_modelnet = ModelNet(root='data/modelnet10-ori', name='10', train=False, transform=tg.transforms.SamplePoints(samplePoints))

    # load ModelNet-40 dataset
    modelnet = ModelNet(root='F:\\PointNet\\data/modelnet40-%d-normal' % (samplePoints), name='40', train=True,
                        pre_transform=scale_normalize)
    test_modelnet = ModelNet(root='F:\\PointNet\\data/modelnet40-%d-normal' % (samplePoints), name='40',
                             train=False, pre_transform=scale_normalize)

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    print(modelnet, test_modelnet)
    # => ModelNet40(9843) ModelNet40(2468)

    # save point cloud formed dataset
    # train = modelnet[0].to(device)
    print('Processing dataset into tensor')
    train_len, test_len = len(modelnet), len(test_modelnet) # 3991 / 9843 || ... / 2468
    train_tensor, test_tensor = torch.zeros([train_len, samplePoints, 3]), torch.zeros([test_len, samplePoints, 3])
    train_y, test_y = torch.zeros([train_len], dtype=torch.int32), torch.zeros([test_len], dtype=torch.int32)
    for i, data in enumerate(modelnet, 0):